// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009 Oracle. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "messages.h"
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
#include "qgroup.h"
#include "space-info.h"
#include "tree-mod-log.h"
#include "fs.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking. For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing. This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
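/*
 * Rough sketch of the intended flow (hedged; the real callers live in
 * extent-tree.c and transaction.c, not in this file):
 *
 *	struct btrfs_ref ref = { 0 };	// filled via the helpers in delayed-ref.h
 *
 *	btrfs_add_delayed_tree_ref(trans, &ref, NULL);	// queue the modification
 *	...
 *	btrfs_run_delayed_refs(trans, 0);	// flushed at commit time
 *
 * The functions below only maintain the rbtrees of heads and refs and the
 * block reservation that backs this queue.
 */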

bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
{
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
	bool ret = false;
	u64 reserved;

	spin_lock(&global_rsv->lock);
	reserved = global_rsv->reserved;
	spin_unlock(&global_rsv->lock);

	/*
	 * Since the global reserve is just kind of magic we don't really want
	 * to rely on it to save our bacon, so if our size is more than the
	 * delayed_refs_rsv and the global rsv then it's time to think about
	 * bailing.
	 */
	spin_lock(&delayed_refs_rsv->lock);
	reserved += delayed_refs_rsv->reserved;
	if (delayed_refs_rsv->size >= reserved)
		ret = true;
	spin_unlock(&delayed_refs_rsv->lock);
	return ret;
}
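/*
 * Hedged usage note: the return value is a heuristic, not a guarantee. It
 * only says the rsv's size has outgrown what is reserved in it plus the
 * global reserve; callers typically respond by throttling themselves or
 * kicking off a delayed-ref flush rather than treating it as ENOSPC.
 */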

int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
{
	u64 num_entries =
		atomic_read(&trans->transaction->delayed_refs.num_entries);
	u64 avg_runtime;
	u64 val;

	smp_mb();
	avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
	val = num_entries * avg_runtime;
	if (val >= NSEC_PER_SEC)
		return 1;
	if (val >= NSEC_PER_SEC / 2)
		return 2;

	return btrfs_check_space_for_delayed_refs(trans->fs_info);
}
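/*
 * Worked example (hedged, numbers invented): with an average runtime of
 * 100us (100000ns) per entry, 10000 queued entries give
 * val = 10000 * 100000 = 1e9 ns = NSEC_PER_SEC, so we return 1 (an
 * estimated full second of pending work); 5000 entries land in the
 * [0.5s, 1s) band and return 2.
 */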

/*
 * Release a ref head's reservation.
 *
 * @fs_info: the filesystem
 * @nr:      number of items to drop
 *
 * Drops the delayed ref head's count from the delayed refs rsv and frees any
 * excess reservation we had.
 */
void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
	u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr);
	u64 released = 0;

	/*
	 * We have to check the mount option here because we could be enabling
	 * the free space tree for the first time and don't have the compat_ro
	 * option set yet.
	 *
	 * We need extra reservations if we have the free space tree because
	 * we'll have to modify that tree as well.
	 */
	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
		num_bytes *= 2;

	released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
	if (released)
		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
					      0, released, 0);
}
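/*
 * Hedged sizing note: btrfs_calc_insert_metadata_size() reserves enough for
 * a full CoW of one tree path per item, roughly
 * nodesize * BTRFS_MAX_LEVEL * 2 * nr bytes; with 16K nodes that is about
 * 256K per item, doubled again when the free space tree is enabled.  The
 * exact formula lives in fs.h and may differ by kernel version.
 */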

/*
 * Adjust the size of the delayed refs rsv.
 *
 * This is to be called any time we may have adjusted
 * trans->delayed_ref_updates; it will calculate the additional size and add
 * it to the delayed_refs_rsv.
 */
void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
	u64 num_bytes;

	if (!trans->delayed_ref_updates)
		return;

	num_bytes = btrfs_calc_insert_metadata_size(fs_info,
						    trans->delayed_ref_updates);
	/*
	 * We have to check the mount option here because we could be enabling
	 * the free space tree for the first time and don't have the compat_ro
	 * option set yet.
	 *
	 * We need extra reservations if we have the free space tree because
	 * we'll have to modify that tree as well.
	 */
	if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
		num_bytes *= 2;

	spin_lock(&delayed_rsv->lock);
	delayed_rsv->size += num_bytes;
	delayed_rsv->full = false;
	spin_unlock(&delayed_rsv->lock);
	trans->delayed_ref_updates = 0;
}
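/*
 * Caller pattern (matches the ref-adding helpers later in this file): bump
 * trans->delayed_ref_updates while holding delayed_refs->lock, then call
 * this once the lock is dropped:
 *
 *	spin_unlock(&delayed_refs->lock);
 *	btrfs_update_delayed_refs_rsv(trans);
 */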

/*
 * Transfer bytes to our delayed refs rsv.
 *
 * @fs_info:   the filesystem
 * @src:       source block rsv to transfer from
 * @num_bytes: number of bytes to transfer
 *
 * This transfers up to the num_bytes amount from the src rsv to the
 * delayed_refs_rsv.  Any extra bytes are returned to the space info.
 */
void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *src,
				       u64 num_bytes)
{
	struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
	u64 to_free = 0;

	spin_lock(&src->lock);
	src->reserved -= num_bytes;
	src->size -= num_bytes;
	spin_unlock(&src->lock);

	spin_lock(&delayed_refs_rsv->lock);
	if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
		u64 delta = delayed_refs_rsv->size -
			delayed_refs_rsv->reserved;
		if (num_bytes > delta) {
			to_free = num_bytes - delta;
			num_bytes = delta;
		}
	} else {
		to_free = num_bytes;
		num_bytes = 0;
	}

	if (num_bytes)
		delayed_refs_rsv->reserved += num_bytes;
	if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
		delayed_refs_rsv->full = true;
	spin_unlock(&delayed_refs_rsv->lock);

	if (num_bytes)
		trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
					      0, num_bytes, 1);
	if (to_free)
		btrfs_space_info_free_bytes_may_use(fs_info,
				delayed_refs_rsv->space_info, to_free);
}
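/*
 * Worked example (hedged, numbers invented): if the rsv is short by 1M
 * (size - reserved == 1M) and the caller migrates 1.5M, 1M is kept
 * (num_bytes) and 0.5M (to_free) flows back to the space_info; if the rsv
 * was already full, the whole 1.5M is returned.
 */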

/*
 * Refill based on our delayed refs usage.
 *
 * @fs_info: the filesystem
 * @flush:   control how we can flush for this reservation.
 *
 * This will refill the delayed block_rsv up to 1 item's worth of space and
 * will return -ENOSPC if we can't make the reservation.
 */
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
				  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
	u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1);
	u64 num_bytes = 0;
	int ret = -ENOSPC;

	spin_lock(&block_rsv->lock);
	if (block_rsv->reserved < block_rsv->size) {
		num_bytes = block_rsv->size - block_rsv->reserved;
		num_bytes = min(num_bytes, limit);
	}
	spin_unlock(&block_rsv->lock);

	if (!num_bytes)
		return 0;

	ret = btrfs_reserve_metadata_bytes(fs_info, block_rsv, num_bytes, flush);
	if (ret)
		return ret;
	btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0);
	trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
				      0, num_bytes, 1);
	return 0;
}
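/*
 * Hedged usage sketch: callers top the rsv up one item at a time while
 * running delayed refs, choosing the flush mode by context, e.g.:
 *
 *	ret = btrfs_delayed_refs_rsv_refill(fs_info, BTRFS_RESERVE_FLUSH_LIMIT);
 *	if (ret == -ENOSPC)
 *		;	// caller decides whether to commit or error out
 */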

/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
			  struct btrfs_delayed_tree_ref *ref2)
{
	if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
			  struct btrfs_delayed_data_ref *ref2)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

static int comp_refs(struct btrfs_delayed_ref_node *ref1,
		     struct btrfs_delayed_ref_node *ref2,
		     bool check_seq)
{
	int ret = 0;

	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
		ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
				     btrfs_delayed_node_to_tree_ref(ref2));
	else
		ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
				     btrfs_delayed_node_to_data_ref(ref2));
	if (ret)
		return ret;
	if (check_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	return 0;
}
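/*
 * Summary of the total order comp_refs() defines (hedged paraphrase):
 * (type, root-or-parent[, objectid, offset][, seq]).  Two refs that differ
 * only in seq compare equal when check_seq is false, which is what lets
 * merge_ref() below collapse them into a single node.
 */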

/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;
	bool leftmost = true;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->bytenr) {
			p = &(*p)->rb_left;
		} else if (bytenr > entry->bytenr) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}

static struct btrfs_delayed_ref_node *tree_insert(struct rb_root_cached *root,
		struct btrfs_delayed_ref_node *ins)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *node = &ins->ref_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	bool leftmost = true;

	while (*p) {
		int comp;

		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 ref_node);
		comp = comp_refs(ins, entry, true);
		if (comp < 0) {
			p = &(*p)->rb_left;
		} else if (comp > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			return entry;
		}
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color_cached(node, root, leftmost);
	return NULL;
}
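/*
 * Note on the contract shared by htree_insert() and tree_insert(): both
 * return NULL on a successful insert and return the already-present node
 * on a key collision, leaving the tree untouched, so callers merge into
 * the returned entry instead of retrying the insert.
 */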

static struct btrfs_delayed_ref_head *find_first_ref_head(
		struct btrfs_delayed_ref_root *dr)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = rb_first_cached(&dr->href_root);
	if (!n)
		return NULL;

	entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

	return entry;
}

/*
 * Find a head entry based on bytenr. This returns the delayed ref head if it
 * was able to find one, or NULL if nothing was in that spot.  If return_bigger
 * is given, the next bigger entry is returned if no exact match is found.
 */
static struct btrfs_delayed_ref_head *find_ref_head(
		struct btrfs_delayed_ref_root *dr, u64 bytenr,
		bool return_bigger)
{
	struct rb_root *root = &dr->href_root.rb_root;
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->bytenr)
			n = n->rb_left;
		else if (bytenr > entry->bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				return NULL;
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
		}
		return entry;
	}
	return NULL;
}
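/*
 * Hedged example: with heads at bytenrs {4096, 16384}, a lookup for 8192
 * with return_bigger == true lands on the 16384 head (the search bottoms
 * out and, if it stopped on the smaller entry, steps forward via
 * rb_next()); with return_bigger == false it returns NULL.
 */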

int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	refcount_inc(&head->refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (RB_EMPTY_NODE(&head->href_node)) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref_head(head);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref_head(head);
	return 0;
}
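/*
 * Locking note: to sleep on head->mutex we must drop delayed_refs->lock,
 * so the head is pinned with an extra refcount first; if the head was
 * removed from the rbtree while we slept (RB_EMPTY_NODE()), -EAGAIN tells
 * the caller to look the head up again.
 */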

static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	lockdep_assert_held(&head->lock);
	rb_erase_cached(&ref->ref_node, &head->ref_tree);
	RB_CLEAR_NODE(&ref->ref_node);
	if (!list_empty(&ref->add_list))
		list_del(&ref->add_list);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
}

static bool merge_ref(struct btrfs_trans_handle *trans,
		      struct btrfs_delayed_ref_root *delayed_refs,
		      struct btrfs_delayed_ref_head *head,
		      struct btrfs_delayed_ref_node *ref,
		      u64 seq)
{
	struct btrfs_delayed_ref_node *next;
	struct rb_node *node = rb_next(&ref->ref_node);
	bool done = false;

	while (!done && node) {
		int mod;

		next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_refs(ref, next, false))
			break;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				swap(ref, next);
				done = true;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = true;
		} else {
			/*
			 * Can't have multiples of the same ref on a tree block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}

	return done;
}

void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_node *ref;
	struct rb_node *node;
	u64 seq = 0;

	lockdep_assert_held(&head->lock);

	if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
		return;

	/* We don't have too many refs to merge for data. */
	if (head->is_data)
		return;

	seq = btrfs_tree_mod_log_lowest_seq(fs_info);
again:
	for (node = rb_first_cached(&head->ref_tree); node;
	     node = rb_next(node)) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		if (seq && ref->seq >= seq)
			continue;
		if (merge_ref(trans, delayed_refs, head, ref, seq))
			goto again;
	}
}
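/*
 * Worked example (hedged): an ADD with ref_mod +1 followed by a DROP with
 * ref_mod -1 for the same (type, root/parent) cancel out; merge_ref()
 * folds the pair to ref_mod == 0 and drops both nodes, so nothing is run
 * for them at commit time.  Refs at or past the lowest tree-mod-log seq
 * are skipped because a concurrent backref walk may still need them.
 */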

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
{
	int ret = 0;
	u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);

	if (min_seq != 0 && seq >= min_seq) {
		btrfs_debug(fs_info,
			    "holding back delayed_ref %llu, lowest is %llu",
			    seq, min_seq);
		ret = 1;
	}

	return ret;
}

struct btrfs_delayed_ref_head *btrfs_select_ref_head(
		struct btrfs_delayed_ref_root *delayed_refs)
{
	struct btrfs_delayed_ref_head *head;

again:
	head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
			     true);
	if (!head && delayed_refs->run_delayed_start != 0) {
		delayed_refs->run_delayed_start = 0;
		head = find_first_ref_head(delayed_refs);
	}
	if (!head)
		return NULL;

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (delayed_refs->run_delayed_start == 0)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->bytenr +
		head->num_bytes;
	return head;
}
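/*
 * Hedged note: run_delayed_start acts as a cursor so concurrent workers
 * spread out across the bytenr space instead of all fighting over the
 * first head; it wraps to 0 at most once per scan, and a head already
 * marked processing is skipped rather than handed out twice.
 */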

void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
			   struct btrfs_delayed_ref_head *head)
{
	lockdep_assert_held(&delayed_refs->lock);
	lockdep_assert_held(&head->lock);

	rb_erase_cached(&head->href_node, &delayed_refs->href_root);
	RB_CLEAR_NODE(&head->href_node);
	atomic_dec(&delayed_refs->num_entries);
	delayed_refs->num_heads--;
	if (head->processing == 0)
		delayed_refs->num_heads_ready--;
}

/*
 * Helper to insert the ref_node into the ref rbtree of the head, or merge
 * it with an existing node for the same key.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int insert_delayed_ref(struct btrfs_trans_handle *trans,
			      struct btrfs_delayed_ref_root *root,
			      struct btrfs_delayed_ref_head *href,
			      struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	exist = tree_insert(&href->ref_tree, ref);
	if (!exist)
		goto inserted;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
			if (ref->action == BTRFS_ADD_DELAYED_REF)
				list_add_tail(&exist->add_list,
					      &href->ref_add_list);
			else if (ref->action == BTRFS_DROP_DELAYED_REF) {
				ASSERT(!list_empty(&exist->add_list));
				list_del(&exist->add_list);
			} else {
				ASSERT(0);
			}
		} else {
			mod = -ref->ref_mod;
		}
	}
	exist->ref_mod += mod;

	/* remove the existing ref if its ref_mod drops to zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;
inserted:
	if (ref->action == BTRFS_ADD_DELAYED_REF)
		list_add_tail(&ref->add_list, &href->ref_add_list);
	atomic_inc(&root->num_entries);
	spin_unlock(&href->lock);
	return ret;
}
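/*
 * Hedged example: queueing two BTRFS_ADD_DELAYED_REF nodes with the same
 * (bytenr, type, root, seq) leaves one node with ref_mod == 2; an ADD
 * followed by a matching DROP leaves ref_mod == 0 and the node is removed
 * on the spot, so the head never sees it.
 */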

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
					      struct btrfs_delayed_ref_head *existing,
					      struct btrfs_delayed_ref_head *update)
{
	struct btrfs_delayed_ref_root *delayed_refs =
		&trans->transaction->delayed_refs;
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int old_ref_mod;

	BUG_ON(existing->is_data != update->is_data);

	spin_lock(&existing->lock);
	if (update->must_insert_reserved) {
		/*
		 * If the extent was freed and then reallocated before the
		 * delayed ref entries were processed, we can end up with an
		 * existing head ref without the must_insert_reserved flag
		 * set.  Set it again here.
		 */
		existing->must_insert_reserved = update->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (update->extent_op) {
		if (!existing->extent_op) {
			existing->extent_op = update->extent_op;
		} else {
			if (update->extent_op->update_key) {
				memcpy(&existing->extent_op->key,
				       &update->extent_op->key,
				       sizeof(update->extent_op->key));
				existing->extent_op->update_key = true;
			}
			if (update->extent_op->update_flags) {
				existing->extent_op->flags_to_set |=
					update->extent_op->flags_to_set;
				existing->extent_op->update_flags = true;
			}
			btrfs_free_delayed_extent_op(update->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case because we could be processing it
	 * currently, for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing->total_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing->is_data) {
		u64 csum_leaves =
			btrfs_csum_bytes_to_leaves(fs_info,
						   existing->num_bytes);

		if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
			delayed_refs->pending_csums -= existing->num_bytes;
			btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
		}
		if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
			delayed_refs->pending_csums += existing->num_bytes;
			trans->delayed_ref_updates += csum_leaves;
		}
	}

	spin_unlock(&existing->lock);
}

static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
				  struct btrfs_qgroup_extent_record *qrecord,
				  u64 bytenr, u64 num_bytes, u64 ref_root,
				  u64 reserved, int action, bool is_data,
				  bool is_system)
{
	int count_mod = 1;
	int must_insert_reserved = 0;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(!is_data && reserved);

	/*
	 * The head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
	 * accounting when the extent is finally added, or if a later
	 * modification deletes the delayed ref without ever inserting the
	 * extent into the extent allocation tree.  ref->must_insert_reserved
	 * is the flag used to record that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = bytenr;
	head_ref->num_bytes = num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	head_ref->is_system = is_system;
	head_ref->ref_tree = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	RB_CLEAR_NODE(&head_ref->href_node);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	if (qrecord) {
		if (ref_root && reserved) {
			qrecord->data_rsv = reserved;
			qrecord->data_rsv_refroot = ref_root;
		}
		qrecord->bytenr = bytenr;
		qrecord->num_bytes = num_bytes;
		qrecord->old_roots = NULL;
	}
}

/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     int action, int *qrecord_inserted_ret)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	int qrecord_inserted = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* Record qgroup extent info if provided */
	if (qrecord) {
		if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
						     delayed_refs, qrecord))
			kfree(qrecord);
		else
			qrecord_inserted = 1;
	}

	trace_add_delayed_ref_head(trans->fs_info, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		update_existing_head_ref(trans, existing, head_ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (head_ref->is_data && head_ref->ref_mod < 0) {
			delayed_refs->pending_csums += head_ref->num_bytes;
			trans->delayed_ref_updates +=
				btrfs_csum_bytes_to_leaves(trans->fs_info,
							   head_ref->num_bytes);
		}
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;

	return head_ref;
}

/*
 * init_delayed_ref_common - Initialize the structure which represents a
 *			     modification to an extent.
 *
 * @fs_info:   Internal to the mounted filesystem mount structure.
 *
 * @ref:       The structure which is going to be initialized.
 *
 * @bytenr:    The logical address of the extent for which a modification is
 *	       going to be recorded.
 *
 * @num_bytes: Size of the extent whose modification is being recorded.
 *
 * @ref_root:  The id of the root where this modification has originated, this
 *	       can be either one of the well-known metadata trees or the
 *	       subvolume id which references this extent.
 *
 * @action:    Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
 *	       BTRFS_ADD_DELAYED_EXTENT
 *
 * @ref_type:  Holds the type of the extent which is being recorded, can be
 *	       one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
 *	       when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
 *	       BTRFS_EXTENT_DATA_REF_KEY when recording a data extent
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
				    struct btrfs_delayed_ref_node *ref,
				    u64 bytenr, u64 num_bytes, u64 ref_root,
				    int action, u8 ref_type)
{
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	refcount_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->seq = seq;
	ref->type = ref_type;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);
}
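/*
 * Hedged end-to-end sketch of queueing a metadata drop (what a caller in
 * extent-tree.c conceptually does; helper names are from delayed-ref.h and
 * their exact signatures may differ by kernel version):
 *
 *	struct btrfs_ref ref = { 0 };
 *
 *	btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, eb->start,
 *			       eb->len, 0);
 *	btrfs_init_tree_ref(&ref, btrfs_header_level(eb),
 *			    root->root_key.objectid,
 *			    root->root_key.objectid, false);
 *	btrfs_add_delayed_tree_ref(trans, &ref, NULL);
 */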

/*
 * add a delayed tree ref. This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	bool is_system;
	int action = generic_ref->action;
	int level = generic_ref->tree_ref.level;
	int ret;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u8 ref_type;

	is_system = (generic_ref->tree_ref.owning_root == BTRFS_CHUNK_TREE_OBJECTID);

	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    !generic_ref->skip_qgroup) {
		record = kzalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
			return -ENOMEM;
		}
	}

	if (parent)
		ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref_type = BTRFS_TREE_BLOCK_REF_KEY;

	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				generic_ref->tree_ref.owning_root, action,
				ref_type);
	ref->root = generic_ref->tree_ref.owning_root;
	ref->parent = parent;
	ref->level = level;

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
			      generic_ref->tree_ref.owning_root, 0, action,
			      false, is_system);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);

	trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);

	if (qrecord_inserted)
		btrfs_qgroup_trace_extent_post(trans, record);

	return 0;
}

/*
 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       u64 reserved)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	int qrecord_inserted;
	int action = generic_ref->action;
	int ret;
	u64 bytenr = generic_ref->bytenr;
	u64 num_bytes = generic_ref->len;
	u64 parent = generic_ref->parent;
	u64 ref_root = generic_ref->data_ref.owning_root;
	u64 owner = generic_ref->data_ref.ino;
	u64 offset = generic_ref->data_ref.offset;
	u8 ref_type;

	ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	if (parent)
		ref_type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref_type = BTRFS_EXTENT_DATA_REF_KEY;
	init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
				ref_root, action, ref_type);
	ref->root = ref_root;
	ref->parent = parent;
	ref->objectid = owner;
	ref->offset = offset;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
	    !generic_ref->skip_qgroup) {
		record = kzalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
			kmem_cache_free(btrfs_delayed_ref_head_cachep,
					head_ref);
			return -ENOMEM;
		}
	}

	init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
			      reserved, action, true, false);
	head_ref->extent_op = NULL;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(trans, head_ref, record,
					action, &qrecord_inserted);

	ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);

	trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
				   action == BTRFS_ADD_DELAYED_EXTENT ?
				   BTRFS_ADD_DELAYED_REF : action);
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(trans, record);
	return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
			      BTRFS_UPDATE_DELAYED_HEAD, false, false);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
			     NULL);

	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);
	return 0;
}

/*
 * This does a simple search for the head node for a given extent. Returns the
 * head node if found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
{
	lockdep_assert_held(&delayed_refs->lock);

	return find_ref_head(delayed_refs, bytenr, false);
}

void __cold btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int __init btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}