/*
 * Copyright (C) 2009 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;

/*
 * delayed back reference update tracking. For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing. This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1, int type)
{
	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1,
		      bool compare_seq)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	/* merging of sequenced refs is not allowed */
	if (compare_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1),
				      ref1->type);
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}
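
/*
 * Example (illustrative, not part of the original file): for two
 * hypothetical tree block refs a and b that share bytenr, type
 * (BTRFS_TREE_BLOCK_REF_KEY) and seq, the ordering falls through to
 * comp_tree_refs() and is decided by the root objectid:
 *
 *	a->root = 5;  b->root = 7;
 *	comp_entry(&b->node, &a->node, 1);	// returns -1, a sorts first
 *	comp_entry(&a->node, &b->node, 1);	// returns 1, b sorts after a
 */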

/*
 * insert a new ref into the rbtree. This returns any existing refs
 * for the same (bytenr,parent) tuple, or NULL if the new node was properly
 * inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins, 1);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}
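
/*
 * Usage sketch (mirrors add_delayed_tree_ref() below; assumes the
 * caller holds head_ref->lock and that ref was freshly initialised):
 *
 *	existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
 *	if (existing)
 *		update_existing_ref(trans, delayed_refs, head_ref,
 *				    existing, ref);
 *	else
 *		atomic_inc(&delayed_refs->num_entries);
 */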

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->node.bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->node.bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->node.bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->node.bytenr)
			n = n->rb_left;
		else if (bytenr > entry->node.bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->node.bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}
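
/*
 * Example (illustrative): with heads queued at bytenrs 4096 and 8192,
 * find_ref_head(root, 6000, 0) returns NULL because there is no exact
 * match, while find_ref_head(root, 6000, 1) returns the 8192 head.
 * Past the last head the lookup wraps around to rb_first(), which is
 * what lets btrfs_select_ref_head() below walk the whole tree.
 */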

int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}
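
/*
 * Caller-side sketch (illustrative): -EAGAIN means the head went away
 * while we slept on the mutex, so the caller picks a new head and
 * retries, still under delayed_refs->lock:
 *
 *	ret = btrfs_delayed_ref_lock(trans, head);
 *	if (ret == -EAGAIN)
 *		goto again;
 */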

static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	if (btrfs_delayed_ref_is_head(ref)) {
		head = btrfs_delayed_node_to_head(ref);
		rb_erase(&head->href_node, &delayed_refs->href_root);
	} else {
		assert_spin_locked(&head->lock);
		rb_erase(&ref->rb_node, &head->ref_root);
	}
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

static int merge_ref(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_root *delayed_refs,
		     struct btrfs_delayed_ref_head *head,
		     struct btrfs_delayed_ref_node *ref, u64 seq)
{
	struct rb_node *node;
	int mod = 0;
	int done = 0;

	node = rb_next(&ref->rb_node);
	while (!done && node) {
		struct btrfs_delayed_ref_node *next;

		next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_entry(ref, next, 0))
			continue;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				struct btrfs_delayed_ref_node *tmp;

				tmp = ref;
				ref = next;
				next = tmp;
				done = 1;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = 1;
		} else {
			/*
			 * You can't have multiples of the same ref on a tree
			 * block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}
	return done;
}
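
/*
 * Worked example (illustrative): an ADD ref with ref_mod 1 followed by
 * an otherwise identical DROP ref with ref_mod 1 cancel out. The
 * actions differ and neither ref_mod is smaller, so mod = -1, the DROP
 * ref is dropped, ref->ref_mod falls to 0 and the ADD ref is dropped
 * too, leaving no work for the extent allocation tree.
 */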

void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	u64 seq = 0;

	assert_spin_locked(&head->lock);
	/*
	 * We don't have too many refs to merge in the case of delayed data
	 * refs.
	 */
	if (head->is_data)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	node = rb_first(&head->ref_root);
	while (node) {
		struct btrfs_delayed_ref_node *ref;

		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		/* We can't merge refs that are outside of our seq count */
		if (seq && ref->seq >= seq)
			break;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			node = rb_first(&head->ref_root);
		else
			node = rb_next(&ref->rb_node);
	}
}

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
				 (u32)(seq >> 32), (u32)seq,
				 (u32)(elem->seq >> 32), (u32)elem->seq,
				 delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->node.bytenr +
		head->node.num_bytes;
	return head;
}
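
/*
 * Typical caller pattern (sketch, simplified): selection and locking
 * both happen under delayed_refs->lock:
 *
 *	spin_lock(&delayed_refs->lock);
 *	head = btrfs_select_ref_head(trans);
 *	if (head)
 *		ret = btrfs_delayed_ref_lock(trans, head);
 *	spin_unlock(&delayed_refs->lock);
 */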

/*
 * helper function to update an extent delayed ref in the
 * rbtree. existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_head *head,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop. We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0)
			drop_delayed_ref(trans, delayed_refs, head, existing);
		else
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	spin_lock(&existing_ref->lock);
	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation.
	 * we only need the lock for this case because we could be processing
	 * it currently; for refs we just added we know we're a-ok.
	 */
	existing->ref_mod += update->ref_mod;
	spin_unlock(&existing_ref->lock);
}
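
/*
 * Worked example (illustrative): a head queued by BTRFS_ADD_DELAYED_REF
 * carries ref_mod 1; if a BTRFS_DROP_DELAYED_REF head for the same
 * bytenr arrives before it is processed, the sum above leaves ref_mod
 * at 0, i.e. the two operations cancel out.
 */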

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, int action, int is_data)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->ref_root = RB_ROOT;
	head_ref->processing = 0;

	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	trace_add_delayed_ref_head(ref, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		update_existing_head_ref(&existing->node, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	return head_ref;
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(ref, full_ref, action);

	spin_lock(&head_ref->lock);
	existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
	if (existing) {
		update_existing_ref(trans, delayed_refs, head_ref, existing,
				    ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
	} else {
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	spin_unlock(&head_ref->lock);
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(ref, full_ref, action);

	spin_lock(&head_ref->lock);
	existing = tree_insert(&head_ref->ref_root, &ref->rb_node);
	if (existing) {
		update_existing_ref(trans, delayed_refs, head_ref, existing,
				    ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
	} else {
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	spin_unlock(&head_ref->lock);
}

/*
 * add a delayed tree ref. This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
					bytenr, num_bytes, action, 0);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action,
			     for_cow);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}
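
/*
 * Illustrative call (values hypothetical): queue one more backref on a
 * level 0 tree block owned by root 5, with no shared parent:
 *
 *	ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
 *					 num_bytes, 0, 5, 0,
 *					 BTRFS_ADD_DELAYED_REF,
 *					 NULL, 0);
 */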

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
					bytenr, num_bytes, action, 1);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action, for_cow);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}
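
/*
 * Illustrative call (values hypothetical): drop a data backref for the
 * file extent at (inode 257, offset 0) in root 5, with no shared
 * parent:
 *
 *	ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
 *					 num_bytes, 0, 5, 257, 0,
 *					 BTRFS_DROP_DELAYED_REF,
 *					 NULL, 0);
 */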

int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	spin_unlock(&delayed_refs->lock);
	return 0;
}
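
/*
 * Sketch (allocation and error handling elided): record a flag update
 * for a tree block without touching its reference count:
 *
 *	extent_op->flags_to_set = flags;
 *	extent_op->update_flags = 1;
 *	extent_op->update_key = 0;
 *	extent_op->is_data = 0;
 *	ret = btrfs_add_delayed_extent_op(fs_info, trans, bytenr,
 *					  num_bytes, extent_op);
 */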

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}

void btrfs_delayed_ref_exit(void)
{
	if (btrfs_delayed_ref_head_cachep)
		kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	if (btrfs_delayed_tree_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	if (btrfs_delayed_data_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	if (btrfs_delayed_extent_op_cachep)
		kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}
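
/*
 * Expected init/exit pairing (sketch): the caches are created once at
 * module load, and a partial failure unwinds cleanly because
 * btrfs_delayed_ref_exit() checks each pointer before destroying it:
 *
 *	if (btrfs_delayed_ref_init())
 *		return -ENOMEM;
 *	...
 *	btrfs_delayed_ref_exit();
 */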
/*
 * Copyright (C) 2009 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

/*
 * delayed back reference update tracking. For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing. This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	/* merging of sequenced refs is not allowed */
	if (ref1->seq < ref2->seq)
		return -1;
	if (ref1->seq > ref2->seq)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1));
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}

/*
 * insert a new ref into the rbtree. This returns any existing refs
 * for the same (bytenr,parent) tuple, or NULL if the new node was properly
 * inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
				    u64 bytenr,
				    struct btrfs_delayed_ref_node **last,
				    int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_node *entry;
	int cmp = 0;

again:
	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);
		if (last)
			*last = entry;

		if (bytenr < entry->bytenr)
			cmp = -1;
		else if (bytenr > entry->bytenr)
			cmp = 1;
		else if (!btrfs_delayed_ref_is_head(entry))
			cmp = 1;
		else
			cmp = 0;

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (cmp > 0) {
			n = rb_next(&entry->rb_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_node,
					 rb_node);
			bytenr = entry->bytenr;
			return_bigger = 0;
			goto again;
		}
		return entry;
	}
	return NULL;
}
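
/*
 * Example (illustrative): among nodes with equal bytenr the head sorts
 * last, so when the walk meets a non-head ref at the target bytenr it
 * keeps going right (cmp = 1) until it reaches the head itself.
 */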

int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;

	assert_spin_locked(&delayed_refs->lock);
	if (list_empty(&delayed_refs->seq_head))
		return 0;

	elem = list_first_entry(&delayed_refs->seq_head, struct seq_list, list);
	if (seq >= elem->seq) {
		pr_debug("holding back delayed_ref %llu, lowest is %llu (%p)\n",
			 seq, elem->seq, delayed_refs);
		return 1;
	}
	return 0;
}
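
/*
 * Example (illustrative): if the oldest registered blocker has
 * elem->seq == 100, a delayed ref with seq 100 or above is held back
 * (returns 1), while one with seq 99 may be processed (returns 0).
 */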

int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 start)
{
	int count = 0;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;

	delayed_refs = &trans->transaction->delayed_refs;
	if (start == 0) {
		node = rb_first(&delayed_refs->root);
	} else {
		ref = NULL;
		find_ref_head(&delayed_refs->root, start + 1, &ref, 1);
		if (ref) {
			node = &ref->rb_node;
		} else
			node = rb_first(&delayed_refs->root);
	}
again:
	while (node && count < 32) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (btrfs_delayed_ref_is_head(ref)) {
			head = btrfs_delayed_node_to_head(ref);
			if (list_empty(&head->cluster)) {
				list_add_tail(&head->cluster, cluster);
				delayed_refs->run_delayed_start =
					head->node.bytenr;
				count++;

				WARN_ON(delayed_refs->num_heads_ready == 0);
				delayed_refs->num_heads_ready--;
			} else if (count) {
				/* the goal of the clustering is to find extents
				 * that are likely to end up in the same extent
				 * leaf on disk. So, we don't want them spread
				 * all over the tree. Stop now if we've hit
				 * a head that was already in use
				 */
				break;
			}
		}
		node = rb_next(node);
	}
	if (count) {
		return 0;
	} else if (start) {
		/*
		 * we've gone to the end of the rbtree without finding any
		 * clusters. start from the beginning and try again
		 */
		start = 0;
		node = rb_first(&delayed_refs->root);
		goto again;
	}
	return 1;
}
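
/*
 * Consumer-side sketch (simplified): gather up to 32 adjacent heads
 * into a local list; a return of 1 means no unclustered heads were
 * left anywhere in the tree:
 *
 *	LIST_HEAD(cluster);
 *	ret = btrfs_find_ref_cluster(trans, &cluster,
 *				     delayed_refs->run_delayed_start);
 */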

/*
 * helper function to update an extent delayed ref in the
 * rbtree. existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop. We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0) {
			rb_erase(&existing->rb_node,
				 &delayed_refs->root);
			existing->in_tree = 0;
			btrfs_put_delayed_ref(existing);
			delayed_refs->num_entries--;
			if (trans->delayed_ref_updates)
				trans->delayed_ref_updates--;
		} else {
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			kfree(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation
	 */
	existing->ref_mod += update->ref_mod;
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
					  struct btrfs_trans_handle *trans,
					  struct btrfs_delayed_ref_node *ref,
					  u64 bytenr, u64 num_bytes,
					  int action, int is_data)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;

	INIT_LIST_HEAD(&head_ref->cluster);
	mutex_init(&head_ref->mutex);

	trace_btrfs_delayed_ref_head(ref, head_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_head_ref(existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(head_ref);
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
					  struct btrfs_trans_handle *trans,
					  struct btrfs_delayed_ref_node *ref,
					  u64 bytenr, u64 num_bytes, u64 parent,
					  u64 ref_root, int level, int action,
					  int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (is_fstree(ref_root))
		seq = inc_delayed_seq(delayed_refs);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_btrfs_delayed_tree_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
					  struct btrfs_trans_handle *trans,
					  struct btrfs_delayed_ref_node *ref,
					  u64 bytenr, u64 num_bytes, u64 parent,
					  u64 ref_root, u64 owner, u64 offset,
					  int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (is_fstree(ref_root))
		seq = inc_delayed_seq(delayed_refs);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_btrfs_delayed_data_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}

/*
 * add a delayed tree ref. This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, action, 0);

	add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action,
			     for_cow);
	if (!is_fstree(ref_root) &&
	    waitqueue_active(&delayed_refs->seq_wait))
		wake_up(&delayed_refs->seq_wait);
	spin_unlock(&delayed_refs->lock);

	return 0;
}

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, action, 1);

	add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action, for_cow);
	if (!is_fstree(ref_root) &&
	    waitqueue_active(&delayed_refs->seq_wait))
		wake_up(&delayed_refs->seq_wait);
	spin_unlock(&delayed_refs->lock);

	return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	if (waitqueue_active(&delayed_refs->seq_wait))
		wake_up(&delayed_refs->seq_wait);
	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	ref = find_ref_head(&delayed_refs->root, bytenr, NULL, 0);
	if (ref)
		return btrfs_delayed_node_to_head(ref);
	return NULL;
}
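
/*
 * Lookup sketch (illustrative): per the comment above, the search runs
 * under the delayed ref spinlock:
 *
 *	spin_lock(&delayed_refs->lock);
 *	head = btrfs_find_delayed_ref_head(trans, bytenr);
 *	spin_unlock(&delayed_refs->lock);
 */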