1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2009 Oracle. All rights reserved.
4 */
5
6#include <linux/sched.h>
7#include <linux/slab.h>
8#include <linux/sort.h>
9#include "ctree.h"
10#include "delayed-ref.h"
11#include "transaction.h"
12#include "qgroup.h"
13#include "space-info.h"
14
15struct kmem_cache *btrfs_delayed_ref_head_cachep;
16struct kmem_cache *btrfs_delayed_tree_ref_cachep;
17struct kmem_cache *btrfs_delayed_data_ref_cachep;
18struct kmem_cache *btrfs_delayed_extent_op_cachep;
19/*
20 * delayed back reference update tracking. For subvolume trees
21 * we queue up extent allocations and backref maintenance for
22 * delayed processing. This avoids deep call chains where we
23 * add extents in the middle of btrfs_search_slot, and it allows
24 * us to buffer up frequently modified backrefs in an rb tree instead
25 * of hammering updates on the extent allocation tree.
26 */
27
28bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
29{
30 struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
31 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
32 bool ret = false;
33 u64 reserved;
34
35 spin_lock(&global_rsv->lock);
36 reserved = global_rsv->reserved;
37 spin_unlock(&global_rsv->lock);
38
39 /*
40 * Since the global reserve is just kind of magic we don't really want
41 * to rely on it to save our bacon, so if our size is more than the
42 * delayed_refs_rsv and the global rsv then it's time to think about
43 * bailing.
44 */
45 spin_lock(&delayed_refs_rsv->lock);
46 reserved += delayed_refs_rsv->reserved;
47 if (delayed_refs_rsv->size >= reserved)
48 ret = true;
49 spin_unlock(&delayed_refs_rsv->lock);
50 return ret;
51}
52
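/*
 * Estimate whether the caller should throttle and help flush delayed refs:
 * multiply the number of queued delayed refs by the average time it has taken
 * to run one, and report pressure once that estimate reaches half a second of
 * work, or when the delayed refs reservation itself is low on space.
 */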
53int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
54{
55 u64 num_entries =
56 atomic_read(&trans->transaction->delayed_refs.num_entries);
57 u64 avg_runtime;
58 u64 val;
59
60 smp_mb();
61 avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
62 val = num_entries * avg_runtime;
63 if (val >= NSEC_PER_SEC)
64 return 1;
65 if (val >= NSEC_PER_SEC / 2)
66 return 2;
67
68 return btrfs_check_space_for_delayed_refs(trans->fs_info);
69}
70
71/**
72 * btrfs_delayed_refs_rsv_release - release a ref head's reservation.
73 * @fs_info: the fs_info for our fs.
74 * @nr: the number of items to drop.
75 *
76 * This drops the delayed ref head's count from the delayed refs rsv and frees
77 * any excess reservation we had.
78 */
79void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
80{
81 struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
82 u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr);
83 u64 released = 0;
84
85 released = __btrfs_block_rsv_release(fs_info, block_rsv, num_bytes,
86 NULL);
87 if (released)
88 trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
89 0, released, 0);
90}
91
92/*
93 * btrfs_update_delayed_refs_rsv - adjust the size of the delayed refs rsv
94 * @trans: the trans that may have generated delayed refs
95 *
96 * This is to be called any time we may have adjusted trans->delayed_ref_updates;
97 * it calculates the additional size and adds it to the delayed_refs_rsv.
98 */
99void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
100{
101 struct btrfs_fs_info *fs_info = trans->fs_info;
102 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
103 u64 num_bytes;
104
105 if (!trans->delayed_ref_updates)
106 return;
107
108 num_bytes = btrfs_calc_insert_metadata_size(fs_info,
109 trans->delayed_ref_updates);
110 spin_lock(&delayed_rsv->lock);
111 delayed_rsv->size += num_bytes;
112 delayed_rsv->full = 0;
113 spin_unlock(&delayed_rsv->lock);
114 trans->delayed_ref_updates = 0;
115}
116
117/**
118 * btrfs_migrate_to_delayed_refs_rsv - transfer bytes to our delayed refs rsv.
119 * @fs_info: the fs info for our fs.
120 * @src: the source block rsv to transfer from.
121 * @num_bytes: the number of bytes to transfer.
122 *
123 * This transfers up to the num_bytes amount from the src rsv to the
124 * delayed_refs_rsv. Any extra bytes are returned to the space info.
125 */
126void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
127 struct btrfs_block_rsv *src,
128 u64 num_bytes)
129{
130 struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
131 u64 to_free = 0;
132
133 spin_lock(&src->lock);
134 src->reserved -= num_bytes;
135 src->size -= num_bytes;
136 spin_unlock(&src->lock);
137
138 spin_lock(&delayed_refs_rsv->lock);
139 if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
140 u64 delta = delayed_refs_rsv->size -
141 delayed_refs_rsv->reserved;
142 if (num_bytes > delta) {
143 to_free = num_bytes - delta;
144 num_bytes = delta;
145 }
146 } else {
147 to_free = num_bytes;
148 num_bytes = 0;
149 }
150
151 if (num_bytes)
152 delayed_refs_rsv->reserved += num_bytes;
153 if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
154 delayed_refs_rsv->full = 1;
155 spin_unlock(&delayed_refs_rsv->lock);
156
157 if (num_bytes)
158 trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
159 0, num_bytes, 1);
160 if (to_free)
161 btrfs_space_info_free_bytes_may_use(fs_info,
162 delayed_refs_rsv->space_info, to_free);
163}
164
165/**
166 * btrfs_delayed_refs_rsv_refill - refill based on our delayed refs usage.
167 * @fs_info: the fs_info for our fs.
168 * @flush: control how we can flush for this reservation.
169 *
170 * This will refill the delayed block_rsv with up to one item's worth of space
171 * and will return -ENOSPC if we can't make the reservation.
172 */
173int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
174 enum btrfs_reserve_flush_enum flush)
175{
176 struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
177 u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1);
178 u64 num_bytes = 0;
179 int ret = -ENOSPC;
180
181 spin_lock(&block_rsv->lock);
182 if (block_rsv->reserved < block_rsv->size) {
183 num_bytes = block_rsv->size - block_rsv->reserved;
184 num_bytes = min(num_bytes, limit);
185 }
186 spin_unlock(&block_rsv->lock);
187
188 if (!num_bytes)
189 return 0;
190
191 ret = btrfs_reserve_metadata_bytes(fs_info->extent_root, block_rsv,
192 num_bytes, flush);
193 if (ret)
194 return ret;
195 btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0);
196 trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
197 0, num_bytes, 1);
198 return 0;
199}
200
201/*
202 * compare two delayed tree backrefs with same bytenr and type
203 */
204static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
205 struct btrfs_delayed_tree_ref *ref2)
206{
207 if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
208 if (ref1->root < ref2->root)
209 return -1;
210 if (ref1->root > ref2->root)
211 return 1;
212 } else {
213 if (ref1->parent < ref2->parent)
214 return -1;
215 if (ref1->parent > ref2->parent)
216 return 1;
217 }
218 return 0;
219}
220
221/*
222 * compare two delayed data backrefs with same bytenr and type
223 */
224static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
225 struct btrfs_delayed_data_ref *ref2)
226{
227 if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
228 if (ref1->root < ref2->root)
229 return -1;
230 if (ref1->root > ref2->root)
231 return 1;
232 if (ref1->objectid < ref2->objectid)
233 return -1;
234 if (ref1->objectid > ref2->objectid)
235 return 1;
236 if (ref1->offset < ref2->offset)
237 return -1;
238 if (ref1->offset > ref2->offset)
239 return 1;
240 } else {
241 if (ref1->parent < ref2->parent)
242 return -1;
243 if (ref1->parent > ref2->parent)
244 return 1;
245 }
246 return 0;
247}
248
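/*
 * Full ordering used by a head's ref_tree: refs are compared first by type,
 * then by the type specific fields (root or parent, plus objectid/offset for
 * data refs) and finally, when @check_seq is set, by sequence number so that
 * otherwise identical refs stay distinct in the tree.
 */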
249static int comp_refs(struct btrfs_delayed_ref_node *ref1,
250 struct btrfs_delayed_ref_node *ref2,
251 bool check_seq)
252{
253 int ret = 0;
254
255 if (ref1->type < ref2->type)
256 return -1;
257 if (ref1->type > ref2->type)
258 return 1;
259 if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
260 ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
261 ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
262 btrfs_delayed_node_to_tree_ref(ref2));
263 else
264 ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
265 btrfs_delayed_node_to_data_ref(ref2));
266 if (ret)
267 return ret;
268 if (check_seq) {
269 if (ref1->seq < ref2->seq)
270 return -1;
271 if (ref1->seq > ref2->seq)
272 return 1;
273 }
274 return 0;
275}
276
277/* Insert a new ref head into the rbtree; return the existing head for this bytenr, or NULL on success. */
278static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
279 struct rb_node *node)
280{
281 struct rb_node **p = &root->rb_root.rb_node;
282 struct rb_node *parent_node = NULL;
283 struct btrfs_delayed_ref_head *entry;
284 struct btrfs_delayed_ref_head *ins;
285 u64 bytenr;
286 bool leftmost = true;
287
288 ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
289 bytenr = ins->bytenr;
290 while (*p) {
291 parent_node = *p;
292 entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
293 href_node);
294
295 if (bytenr < entry->bytenr) {
296 p = &(*p)->rb_left;
297 } else if (bytenr > entry->bytenr) {
298 p = &(*p)->rb_right;
299 leftmost = false;
300 } else {
301 return entry;
302 }
303 }
304
305 rb_link_node(node, parent_node, p);
306 rb_insert_color_cached(node, root, leftmost);
307 return NULL;
308}
309
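/*
 * Insert a delayed ref node into a head's ref_tree, ordered by comp_refs().
 * Returns the existing node if an equal ref is already queued so the caller
 * can merge into it, or NULL once the new node has been linked in.
 */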
310static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
311 struct btrfs_delayed_ref_node *ins)
312{
313 struct rb_node **p = &root->rb_root.rb_node;
314 struct rb_node *node = &ins->ref_node;
315 struct rb_node *parent_node = NULL;
316 struct btrfs_delayed_ref_node *entry;
317 bool leftmost = true;
318
319 while (*p) {
320 int comp;
321
322 parent_node = *p;
323 entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
324 ref_node);
325 comp = comp_refs(ins, entry, true);
326 if (comp < 0) {
327 p = &(*p)->rb_left;
328 } else if (comp > 0) {
329 p = &(*p)->rb_right;
330 leftmost = false;
331 } else {
332 return entry;
333 }
334 }
335
336 rb_link_node(node, parent_node, p);
337 rb_insert_color_cached(node, root, leftmost);
338 return NULL;
339}
340
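/*
 * Return the ref head with the lowest bytenr in the rbtree, or NULL if no
 * heads are queued.
 */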
341static struct btrfs_delayed_ref_head *find_first_ref_head(
342 struct btrfs_delayed_ref_root *dr)
343{
344 struct rb_node *n;
345 struct btrfs_delayed_ref_head *entry;
346
347 n = rb_first_cached(&dr->href_root);
348 if (!n)
349 return NULL;
350
351 entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
352
353 return entry;
354}
355
356/*
357 * Find a head entry based on bytenr. This returns the delayed ref head if it
358 * was able to find one, or NULL if nothing was in that spot. If return_bigger
359 * is given, the next bigger entry is returned if no exact match is found.
360 */
361static struct btrfs_delayed_ref_head *find_ref_head(
362 struct btrfs_delayed_ref_root *dr, u64 bytenr,
363 bool return_bigger)
364{
365 struct rb_root *root = &dr->href_root.rb_root;
366 struct rb_node *n;
367 struct btrfs_delayed_ref_head *entry;
368
369 n = root->rb_node;
370 entry = NULL;
371 while (n) {
372 entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
373
374 if (bytenr < entry->bytenr)
375 n = n->rb_left;
376 else if (bytenr > entry->bytenr)
377 n = n->rb_right;
378 else
379 return entry;
380 }
381 if (entry && return_bigger) {
382 if (bytenr > entry->bytenr) {
383 n = rb_next(&entry->href_node);
384 if (!n)
385 return NULL;
386 entry = rb_entry(n, struct btrfs_delayed_ref_head,
387 href_node);
388 }
389 return entry;
390 }
391 return NULL;
392}
393
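/*
 * Take the head's mutex while holding delayed_refs->lock.  If the mutex is
 * contended we drop the spinlock, sleep on the mutex and retake the spinlock;
 * if the head was removed from the rbtree in the meantime, -EAGAIN is
 * returned so the caller knows to start over.
 */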
394int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
395 struct btrfs_delayed_ref_head *head)
396{
397 lockdep_assert_held(&delayed_refs->lock);
398 if (mutex_trylock(&head->mutex))
399 return 0;
400
401 refcount_inc(&head->refs);
402 spin_unlock(&delayed_refs->lock);
403
404 mutex_lock(&head->mutex);
405 spin_lock(&delayed_refs->lock);
406 if (RB_EMPTY_NODE(&head->href_node)) {
407 mutex_unlock(&head->mutex);
408 btrfs_put_delayed_ref_head(head);
409 return -EAGAIN;
410 }
411 btrfs_put_delayed_ref_head(head);
412 return 0;
413}
414
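/*
 * Remove a single delayed ref from its head: unlink it from the ref_tree and
 * the add_list, drop its reference count and update the global entry count.
 */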
415static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
416 struct btrfs_delayed_ref_root *delayed_refs,
417 struct btrfs_delayed_ref_head *head,
418 struct btrfs_delayed_ref_node *ref)
419{
420 lockdep_assert_held(&head->lock);
421 rb_erase_cached(&ref->ref_node, &head->ref_tree);
422 RB_CLEAR_NODE(&ref->ref_node);
423 if (!list_empty(&ref->add_list))
424 list_del(&ref->add_list);
425 ref->in_tree = 0;
426 btrfs_put_delayed_ref(ref);
427 atomic_dec(&delayed_refs->num_entries);
428}
429
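/*
 * Try to merge @ref with the refs that follow it in the head's ref_tree.
 * Merging stops at the first ref that no longer compares equal or whose seq
 * is at or past @seq (those must stay visible to tree mod log users).  Equal
 * actions accumulate ref_mod, opposing ADD/DROP actions cancel out, and a ref
 * whose ref_mod reaches zero is dropped.  Returns true if @ref itself was
 * freed during the merge, in which case the caller restarts its scan.
 */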
430static bool merge_ref(struct btrfs_trans_handle *trans,
431 struct btrfs_delayed_ref_root *delayed_refs,
432 struct btrfs_delayed_ref_head *head,
433 struct btrfs_delayed_ref_node *ref,
434 u64 seq)
435{
436 struct btrfs_delayed_ref_node *next;
437 struct rb_node *node = rb_next(&ref->ref_node);
438 bool done = false;
439
440 while (!done && node) {
441 int mod;
442
443 next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
444 node = rb_next(node);
445 if (seq && next->seq >= seq)
446 break;
447 if (comp_refs(ref, next, false))
448 break;
449
450 if (ref->action == next->action) {
451 mod = next->ref_mod;
452 } else {
453 if (ref->ref_mod < next->ref_mod) {
454 swap(ref, next);
455 done = true;
456 }
457 mod = -next->ref_mod;
458 }
459
460 drop_delayed_ref(trans, delayed_refs, head, next);
461 ref->ref_mod += mod;
462 if (ref->ref_mod == 0) {
463 drop_delayed_ref(trans, delayed_refs, head, ref);
464 done = true;
465 } else {
466 /*
467 * Can't have multiples of the same ref on a tree block.
468 */
469 WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
470 ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
471 }
472 }
473
474 return done;
475}
476
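/*
 * Merge all mergeable refs queued on a head before it is run.  Only metadata
 * heads are worth the effort, and refs newer than the oldest active tree mod
 * log sequence number are left untouched.
 */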
477void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
478 struct btrfs_delayed_ref_root *delayed_refs,
479 struct btrfs_delayed_ref_head *head)
480{
481 struct btrfs_fs_info *fs_info = trans->fs_info;
482 struct btrfs_delayed_ref_node *ref;
483 struct rb_node *node;
484 u64 seq = 0;
485
486 lockdep_assert_held(&head->lock);
487
488 if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
489 return;
490
491 /* We don't have too many refs to merge for data. */
492 if (head->is_data)
493 return;
494
495 spin_lock(&fs_info->tree_mod_seq_lock);
496 if (!list_empty(&fs_info->tree_mod_seq_list)) {
497 struct seq_list *elem;
498
499 elem = list_first_entry(&fs_info->tree_mod_seq_list,
500 struct seq_list, list);
501 seq = elem->seq;
502 }
503 spin_unlock(&fs_info->tree_mod_seq_lock);
504
505again:
506 for (node = rb_first_cached(&head->ref_tree); node;
507 node = rb_next(node)) {
508 ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
509 if (seq && ref->seq >= seq)
510 continue;
511 if (merge_ref(trans, delayed_refs, head, ref, seq))
512 goto again;
513 }
514}
515
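/*
 * Return 1 if a delayed ref with the given sequence number has to be held
 * back because a tree mod log user still needs to see the older state,
 * 0 otherwise.
 */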
516int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
517{
518 struct seq_list *elem;
519 int ret = 0;
520
521 spin_lock(&fs_info->tree_mod_seq_lock);
522 if (!list_empty(&fs_info->tree_mod_seq_list)) {
523 elem = list_first_entry(&fs_info->tree_mod_seq_list,
524 struct seq_list, list);
525 if (seq >= elem->seq) {
526 btrfs_debug(fs_info,
527 "holding back delayed_ref %#x.%x, lowest is %#x.%x",
528 (u32)(seq >> 32), (u32)seq,
529 (u32)(elem->seq >> 32), (u32)elem->seq);
530 ret = 1;
531 }
532 }
533
534 spin_unlock(&fs_info->tree_mod_seq_lock);
535 return ret;
536}
537
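/*
 * Pick the next ref head to run: start at run_delayed_start, skip heads that
 * are already being processed and wrap around to the beginning of the rbtree
 * once the end is reached.  The chosen head is marked as processing and
 * run_delayed_start is advanced past it.
 */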
538struct btrfs_delayed_ref_head *btrfs_select_ref_head(
539 struct btrfs_delayed_ref_root *delayed_refs)
540{
541 struct btrfs_delayed_ref_head *head;
542
543again:
544 head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
545 true);
546 if (!head && delayed_refs->run_delayed_start != 0) {
547 delayed_refs->run_delayed_start = 0;
548 head = find_first_ref_head(delayed_refs);
549 }
550 if (!head)
551 return NULL;
552
553 while (head->processing) {
554 struct rb_node *node;
555
556 node = rb_next(&head->href_node);
557 if (!node) {
558 if (delayed_refs->run_delayed_start == 0)
559 return NULL;
560 delayed_refs->run_delayed_start = 0;
561 goto again;
562 }
563 head = rb_entry(node, struct btrfs_delayed_ref_head,
564 href_node);
565 }
566
567 head->processing = 1;
568 WARN_ON(delayed_refs->num_heads_ready == 0);
569 delayed_refs->num_heads_ready--;
570 delayed_refs->run_delayed_start = head->bytenr +
571 head->num_bytes;
572 return head;
573}
574
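/*
 * Unlink a ref head from the delayed refs rbtree and update the head and
 * entry counters.  Callers must hold both the delayed_refs lock and the
 * head's lock.
 */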
575void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
576 struct btrfs_delayed_ref_head *head)
577{
578 lockdep_assert_held(&delayed_refs->lock);
579 lockdep_assert_held(&head->lock);
580
581 rb_erase_cached(&head->href_node, &delayed_refs->href_root);
582 RB_CLEAR_NODE(&head->href_node);
583 atomic_dec(&delayed_refs->num_entries);
584 delayed_refs->num_heads--;
585 if (head->processing == 0)
586 delayed_refs->num_heads_ready--;
587}
588
589/*
590 * Helper to insert the ref_node into the head's ref tree or merge it with an equal existing ref.
591 *
592 * Return 0 for insert.
593 * Return >0 for merge.
594 */
595static int insert_delayed_ref(struct btrfs_trans_handle *trans,
596 struct btrfs_delayed_ref_root *root,
597 struct btrfs_delayed_ref_head *href,
598 struct btrfs_delayed_ref_node *ref)
599{
600 struct btrfs_delayed_ref_node *exist;
601 int mod;
602 int ret = 0;
603
604 spin_lock(&href->lock);
605 exist = tree_insert(&href->ref_tree, ref);
606 if (!exist)
607 goto inserted;
608
609 /* Now we are sure we can merge */
610 ret = 1;
611 if (exist->action == ref->action) {
612 mod = ref->ref_mod;
613 } else {
614 /* Need to change action */
615 if (exist->ref_mod < ref->ref_mod) {
616 exist->action = ref->action;
617 mod = -exist->ref_mod;
618 exist->ref_mod = ref->ref_mod;
619 if (ref->action == BTRFS_ADD_DELAYED_REF)
620 list_add_tail(&exist->add_list,
621 &href->ref_add_list);
622 else if (ref->action == BTRFS_DROP_DELAYED_REF) {
623 ASSERT(!list_empty(&exist->add_list));
624 list_del(&exist->add_list);
625 } else {
626 ASSERT(0);
627 }
628 } else
629 mod = -ref->ref_mod;
630 }
631 exist->ref_mod += mod;
632
633 /* remove existing tail if its ref_mod is zero */
634 if (exist->ref_mod == 0)
635 drop_delayed_ref(trans, root, href, exist);
636 spin_unlock(&href->lock);
637 return ret;
638inserted:
639 if (ref->action == BTRFS_ADD_DELAYED_REF)
640 list_add_tail(&ref->add_list, &href->ref_add_list);
641 atomic_inc(&root->num_entries);
642 spin_unlock(&href->lock);
643 return ret;
644}
645
646/*
647 * helper function to update the accounting in the head ref
648 * existing and update must have the same bytenr
649 */
650static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
651 struct btrfs_delayed_ref_head *existing,
652 struct btrfs_delayed_ref_head *update,
653 int *old_ref_mod_ret)
654{
655 struct btrfs_delayed_ref_root *delayed_refs =
656 &trans->transaction->delayed_refs;
657 struct btrfs_fs_info *fs_info = trans->fs_info;
658 int old_ref_mod;
659
660 BUG_ON(existing->is_data != update->is_data);
661
662 spin_lock(&existing->lock);
663 if (update->must_insert_reserved) {
664 /* if the extent was freed and then
665 * reallocated before the delayed ref
666 * entries were processed, we can end up
667 * with an existing head ref without
668 * the must_insert_reserved flag set.
669 * Set it again here
670 */
671 existing->must_insert_reserved = update->must_insert_reserved;
672
673 /*
674 * update the num_bytes so we make sure the accounting
675 * is done correctly
676 */
677 existing->num_bytes = update->num_bytes;
678
679 }
680
681 if (update->extent_op) {
682 if (!existing->extent_op) {
683 existing->extent_op = update->extent_op;
684 } else {
685 if (update->extent_op->update_key) {
686 memcpy(&existing->extent_op->key,
687 &update->extent_op->key,
688 sizeof(update->extent_op->key));
689 existing->extent_op->update_key = true;
690 }
691 if (update->extent_op->update_flags) {
692 existing->extent_op->flags_to_set |=
693 update->extent_op->flags_to_set;
694 existing->extent_op->update_flags = true;
695 }
696 btrfs_free_delayed_extent_op(update->extent_op);
697 }
698 }
699 /*
700 * Update the reference mod on the head to reflect this new operation.  We
701 * only need the lock in this case because the head could be processed
702 * concurrently; for refs we just added we know we're OK.
703 */
704 old_ref_mod = existing->total_ref_mod;
705 if (old_ref_mod_ret)
706 *old_ref_mod_ret = old_ref_mod;
707 existing->ref_mod += update->ref_mod;
708 existing->total_ref_mod += update->ref_mod;
709
710 /*
711 * If we are going from a positive ref mod to a negative one, or vice
712 * versa, we need to make sure to adjust pending_csums accordingly.
713 */
714 if (existing->is_data) {
715 u64 csum_leaves =
716 btrfs_csum_bytes_to_leaves(fs_info,
717 existing->num_bytes);
718
719 if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
720 delayed_refs->pending_csums -= existing->num_bytes;
721 btrfs_delayed_refs_rsv_release(fs_info, csum_leaves);
722 }
723 if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
724 delayed_refs->pending_csums += existing->num_bytes;
725 trans->delayed_ref_updates += csum_leaves;
726 }
727 }
728 spin_unlock(&existing->lock);
729}
730
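/*
 * Fill in a freshly allocated ref head (and, if given, the qgroup extent
 * record traced alongside it) from the caller's parameters.  The initial
 * ref_mod/total_ref_mod follows the action: +1 for an add, -1 for a drop and
 * 0 for a head-only update.
 */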
731static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
732 struct btrfs_qgroup_extent_record *qrecord,
733 u64 bytenr, u64 num_bytes, u64 ref_root,
734 u64 reserved, int action, bool is_data,
735 bool is_system)
736{
737 int count_mod = 1;
738 int must_insert_reserved = 0;
739
740 /* If reserved is provided, it must be a data extent. */
741 BUG_ON(!is_data && reserved);
742
743 /*
744 * The head node stores the sum of all the mods, so dropping a ref
745 * should drop the sum in the head node by one.
746 */
747 if (action == BTRFS_UPDATE_DELAYED_HEAD)
748 count_mod = 0;
749 else if (action == BTRFS_DROP_DELAYED_REF)
750 count_mod = -1;
751
752 /*
753 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
754 * accounting when the extent is finally added, or if a later
755 * modification deletes the delayed ref without ever inserting the
756 * extent into the extent allocation tree. ref->must_insert_reserved
757 * is the flag used to record that accounting mods are required.
758 *
759 * Once we record must_insert_reserved, switch the action to
760 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
761 */
762 if (action == BTRFS_ADD_DELAYED_EXTENT)
763 must_insert_reserved = 1;
764 else
765 must_insert_reserved = 0;
766
767 refcount_set(&head_ref->refs, 1);
768 head_ref->bytenr = bytenr;
769 head_ref->num_bytes = num_bytes;
770 head_ref->ref_mod = count_mod;
771 head_ref->must_insert_reserved = must_insert_reserved;
772 head_ref->is_data = is_data;
773 head_ref->is_system = is_system;
774 head_ref->ref_tree = RB_ROOT_CACHED;
775 INIT_LIST_HEAD(&head_ref->ref_add_list);
776 RB_CLEAR_NODE(&head_ref->href_node);
777 head_ref->processing = 0;
778 head_ref->total_ref_mod = count_mod;
779 spin_lock_init(&head_ref->lock);
780 mutex_init(&head_ref->mutex);
781
782 if (qrecord) {
783 if (ref_root && reserved) {
784 qrecord->data_rsv = reserved;
785 qrecord->data_rsv_refroot = ref_root;
786 }
787 qrecord->bytenr = bytenr;
788 qrecord->num_bytes = num_bytes;
789 qrecord->old_roots = NULL;
790 }
791}
792
793/*
794 * helper function to actually insert a head node into the rbtree.
795 * this does all the dirty work in terms of maintaining the correct
796 * overall modification count.
797 */
798static noinline struct btrfs_delayed_ref_head *
799add_delayed_ref_head(struct btrfs_trans_handle *trans,
800 struct btrfs_delayed_ref_head *head_ref,
801 struct btrfs_qgroup_extent_record *qrecord,
802 int action, int *qrecord_inserted_ret,
803 int *old_ref_mod, int *new_ref_mod)
804{
805 struct btrfs_delayed_ref_head *existing;
806 struct btrfs_delayed_ref_root *delayed_refs;
807 int qrecord_inserted = 0;
808
809 delayed_refs = &trans->transaction->delayed_refs;
810
811 /* Record qgroup extent info if provided */
812 if (qrecord) {
813 if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
814 delayed_refs, qrecord))
815 kfree(qrecord);
816 else
817 qrecord_inserted = 1;
818 }
819
820 trace_add_delayed_ref_head(trans->fs_info, head_ref, action);
821
822 existing = htree_insert(&delayed_refs->href_root,
823 &head_ref->href_node);
824 if (existing) {
825 update_existing_head_ref(trans, existing, head_ref,
826 old_ref_mod);
827 /*
828 * we've updated the existing ref, free the newly
829 * allocated ref
830 */
831 kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
832 head_ref = existing;
833 } else {
834 if (old_ref_mod)
835 *old_ref_mod = 0;
836 if (head_ref->is_data && head_ref->ref_mod < 0) {
837 delayed_refs->pending_csums += head_ref->num_bytes;
838 trans->delayed_ref_updates +=
839 btrfs_csum_bytes_to_leaves(trans->fs_info,
840 head_ref->num_bytes);
841 }
842 delayed_refs->num_heads++;
843 delayed_refs->num_heads_ready++;
844 atomic_inc(&delayed_refs->num_entries);
845 trans->delayed_ref_updates++;
846 }
847 if (qrecord_inserted_ret)
848 *qrecord_inserted_ret = qrecord_inserted;
849 if (new_ref_mod)
850 *new_ref_mod = head_ref->total_ref_mod;
851
852 return head_ref;
853}
854
855/*
856 * init_delayed_ref_common - Initialize the structure which represents a
857 * modification to an extent.
858 *
859 * @fs_info: Internal to the mounted filesystem mount structure.
860 *
861 * @ref: The structure which is going to be initialized.
862 *
863 * @bytenr: The logical address of the extent for which a modification is
864 * going to be recorded.
865 *
866 * @num_bytes: Size of the extent whose modification is being recorded.
867 *
868 * @ref_root: The id of the root where this modification has originated, this
869 * can be either one of the well-known metadata trees or the
870 * subvolume id which references this extent.
871 *
872 * @action: Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
873 * BTRFS_ADD_DELAYED_EXTENT
874 *
875 * @ref_type: Holds the type of the extent which is being recorded, can be
876 * one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
877 * when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
878 * BTRFS_EXTENT_DATA_REF_KEY when recording data extent
879 */
880static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
881 struct btrfs_delayed_ref_node *ref,
882 u64 bytenr, u64 num_bytes, u64 ref_root,
883 int action, u8 ref_type)
884{
885 u64 seq = 0;
886
887 if (action == BTRFS_ADD_DELAYED_EXTENT)
888 action = BTRFS_ADD_DELAYED_REF;
889
890 if (is_fstree(ref_root))
891 seq = atomic64_read(&fs_info->tree_mod_seq);
892
893 refcount_set(&ref->refs, 1);
894 ref->bytenr = bytenr;
895 ref->num_bytes = num_bytes;
896 ref->ref_mod = 1;
897 ref->action = action;
898 ref->is_head = 0;
899 ref->in_tree = 1;
900 ref->seq = seq;
901 ref->type = ref_type;
902 RB_CLEAR_NODE(&ref->ref_node);
903 INIT_LIST_HEAD(&ref->add_list);
904}
905
906/*
907 * add a delayed tree ref. This does all of the accounting required
908 * to make sure the delayed ref is eventually processed before this
909 * transaction commits.
910 */
911int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
912 struct btrfs_ref *generic_ref,
913 struct btrfs_delayed_extent_op *extent_op,
914 int *old_ref_mod, int *new_ref_mod)
915{
916 struct btrfs_fs_info *fs_info = trans->fs_info;
917 struct btrfs_delayed_tree_ref *ref;
918 struct btrfs_delayed_ref_head *head_ref;
919 struct btrfs_delayed_ref_root *delayed_refs;
920 struct btrfs_qgroup_extent_record *record = NULL;
921 int qrecord_inserted;
922 bool is_system;
923 int action = generic_ref->action;
924 int level = generic_ref->tree_ref.level;
925 int ret;
926 u64 bytenr = generic_ref->bytenr;
927 u64 num_bytes = generic_ref->len;
928 u64 parent = generic_ref->parent;
929 u8 ref_type;
930
931 is_system = (generic_ref->real_root == BTRFS_CHUNK_TREE_OBJECTID);
932
933 ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
934 BUG_ON(extent_op && extent_op->is_data);
935 ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
936 if (!ref)
937 return -ENOMEM;
938
939 head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
940 if (!head_ref) {
941 kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
942 return -ENOMEM;
943 }
944
945 if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
946 is_fstree(generic_ref->real_root) &&
947 is_fstree(generic_ref->tree_ref.root) &&
948 !generic_ref->skip_qgroup) {
949 record = kzalloc(sizeof(*record), GFP_NOFS);
950 if (!record) {
951 kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
952 kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
953 return -ENOMEM;
954 }
955 }
956
957 if (parent)
958 ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
959 else
960 ref_type = BTRFS_TREE_BLOCK_REF_KEY;
961
962 init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
963 generic_ref->tree_ref.root, action, ref_type);
964 ref->root = generic_ref->tree_ref.root;
965 ref->parent = parent;
966 ref->level = level;
967
968 init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
969 generic_ref->tree_ref.root, 0, action, false,
970 is_system);
971 head_ref->extent_op = extent_op;
972
973 delayed_refs = &trans->transaction->delayed_refs;
974 spin_lock(&delayed_refs->lock);
975
976 /*
977 * insert both the head node and the new ref without dropping
978 * the spin lock
979 */
980 head_ref = add_delayed_ref_head(trans, head_ref, record,
981 action, &qrecord_inserted,
982 old_ref_mod, new_ref_mod);
983
984 ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
985 spin_unlock(&delayed_refs->lock);
986
987 /*
988 * Need to update the delayed_refs_rsv with any changes we may have
989 * made.
990 */
991 btrfs_update_delayed_refs_rsv(trans);
992
993 trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
994 action == BTRFS_ADD_DELAYED_EXTENT ?
995 BTRFS_ADD_DELAYED_REF : action);
996 if (ret > 0)
997 kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
998
999 if (qrecord_inserted)
1000 btrfs_qgroup_trace_extent_post(fs_info, record);
1001
1002 return 0;
1003}
1004
1005/*
1006 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
1007 */
1008int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
1009 struct btrfs_ref *generic_ref,
1010 u64 reserved, int *old_ref_mod,
1011 int *new_ref_mod)
1012{
1013 struct btrfs_fs_info *fs_info = trans->fs_info;
1014 struct btrfs_delayed_data_ref *ref;
1015 struct btrfs_delayed_ref_head *head_ref;
1016 struct btrfs_delayed_ref_root *delayed_refs;
1017 struct btrfs_qgroup_extent_record *record = NULL;
1018 int qrecord_inserted;
1019 int action = generic_ref->action;
1020 int ret;
1021 u64 bytenr = generic_ref->bytenr;
1022 u64 num_bytes = generic_ref->len;
1023 u64 parent = generic_ref->parent;
1024 u64 ref_root = generic_ref->data_ref.ref_root;
1025 u64 owner = generic_ref->data_ref.ino;
1026 u64 offset = generic_ref->data_ref.offset;
1027 u8 ref_type;
1028
1029 ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
1030 ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
1031 if (!ref)
1032 return -ENOMEM;
1033
1034 if (parent)
1035 ref_type = BTRFS_SHARED_DATA_REF_KEY;
1036 else
1037 ref_type = BTRFS_EXTENT_DATA_REF_KEY;
1038 init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
1039 ref_root, action, ref_type);
1040 ref->root = ref_root;
1041 ref->parent = parent;
1042 ref->objectid = owner;
1043 ref->offset = offset;
1044
1045
1046 head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
1047 if (!head_ref) {
1048 kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
1049 return -ENOMEM;
1050 }
1051
1052 if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
1053 is_fstree(ref_root) &&
1054 is_fstree(generic_ref->real_root) &&
1055 !generic_ref->skip_qgroup) {
1056 record = kzalloc(sizeof(*record), GFP_NOFS);
1057 if (!record) {
1058 kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
1059 kmem_cache_free(btrfs_delayed_ref_head_cachep,
1060 head_ref);
1061 return -ENOMEM;
1062 }
1063 }
1064
1065 init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
1066 reserved, action, true, false);
1067 head_ref->extent_op = NULL;
1068
1069 delayed_refs = &trans->transaction->delayed_refs;
1070 spin_lock(&delayed_refs->lock);
1071
1072 /*
1073 * insert both the head node and the new ref without dropping
1074 * the spin lock
1075 */
1076 head_ref = add_delayed_ref_head(trans, head_ref, record,
1077 action, &qrecord_inserted,
1078 old_ref_mod, new_ref_mod);
1079
1080 ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
1081 spin_unlock(&delayed_refs->lock);
1082
1083 /*
1084 * Need to update the delayed_refs_rsv with any changes we may have
1085 * made.
1086 */
1087 btrfs_update_delayed_refs_rsv(trans);
1088
1089 trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
1090 action == BTRFS_ADD_DELAYED_EXTENT ?
1091 BTRFS_ADD_DELAYED_REF : action);
1092 if (ret > 0)
1093 kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
1094
1095
1096 if (qrecord_inserted)
1097 return btrfs_qgroup_trace_extent_post(fs_info, record);
1098 return 0;
1099}
1100
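/*
 * Queue a head-only update (BTRFS_UPDATE_DELAYED_HEAD) carrying an extent_op,
 * used to adjust the flags or key of an existing extent item without changing
 * its reference count.
 */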
1101int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
1102 u64 bytenr, u64 num_bytes,
1103 struct btrfs_delayed_extent_op *extent_op)
1104{
1105 struct btrfs_delayed_ref_head *head_ref;
1106 struct btrfs_delayed_ref_root *delayed_refs;
1107
1108 head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
1109 if (!head_ref)
1110 return -ENOMEM;
1111
1112 init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
1113 BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
1114 false);
1115 head_ref->extent_op = extent_op;
1116
1117 delayed_refs = &trans->transaction->delayed_refs;
1118 spin_lock(&delayed_refs->lock);
1119
1120 add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
1121 NULL, NULL, NULL);
1122
1123 spin_unlock(&delayed_refs->lock);
1124
1125 /*
1126 * Need to update the delayed_refs_rsv with any changes we may have
1127 * made.
1128 */
1129 btrfs_update_delayed_refs_rsv(trans);
1130 return 0;
1131}
1132
1133/*
1134 * This does a simple search for the head node for a given extent. Returns the
1135 * head node if found, or NULL if not.
1136 */
1137struct btrfs_delayed_ref_head *
1138btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
1139{
1140 lockdep_assert_held(&delayed_refs->lock);
1141
1142 return find_ref_head(delayed_refs, bytenr, false);
1143}
1144
1145void __cold btrfs_delayed_ref_exit(void)
1146{
1147 kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
1148 kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
1149 kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
1150 kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
1151}
1152
1153int __init btrfs_delayed_ref_init(void)
1154{
1155 btrfs_delayed_ref_head_cachep = kmem_cache_create(
1156 "btrfs_delayed_ref_head",
1157 sizeof(struct btrfs_delayed_ref_head), 0,
1158 SLAB_MEM_SPREAD, NULL);
1159 if (!btrfs_delayed_ref_head_cachep)
1160 goto fail;
1161
1162 btrfs_delayed_tree_ref_cachep = kmem_cache_create(
1163 "btrfs_delayed_tree_ref",
1164 sizeof(struct btrfs_delayed_tree_ref), 0,
1165 SLAB_MEM_SPREAD, NULL);
1166 if (!btrfs_delayed_tree_ref_cachep)
1167 goto fail;
1168
1169 btrfs_delayed_data_ref_cachep = kmem_cache_create(
1170 "btrfs_delayed_data_ref",
1171 sizeof(struct btrfs_delayed_data_ref), 0,
1172 SLAB_MEM_SPREAD, NULL);
1173 if (!btrfs_delayed_data_ref_cachep)
1174 goto fail;
1175
1176 btrfs_delayed_extent_op_cachep = kmem_cache_create(
1177 "btrfs_delayed_extent_op",
1178 sizeof(struct btrfs_delayed_extent_op), 0,
1179 SLAB_MEM_SPREAD, NULL);
1180 if (!btrfs_delayed_extent_op_cachep)
1181 goto fail;
1182
1183 return 0;
1184fail:
1185 btrfs_delayed_ref_exit();
1186 return -ENOMEM;
1187}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2009 Oracle. All rights reserved.
4 */
5
6#include <linux/sched.h>
7#include <linux/slab.h>
8#include <linux/sort.h>
9#include "messages.h"
10#include "ctree.h"
11#include "delayed-ref.h"
12#include "extent-tree.h"
13#include "transaction.h"
14#include "qgroup.h"
15#include "space-info.h"
16#include "tree-mod-log.h"
17#include "fs.h"
18
19struct kmem_cache *btrfs_delayed_ref_head_cachep;
20struct kmem_cache *btrfs_delayed_ref_node_cachep;
21struct kmem_cache *btrfs_delayed_extent_op_cachep;
22/*
23 * delayed back reference update tracking. For subvolume trees
24 * we queue up extent allocations and backref maintenance for
25 * delayed processing. This avoids deep call chains where we
26 * add extents in the middle of btrfs_search_slot, and it allows
27 * us to buffer up frequently modified backrefs in an rb tree instead
28 * of hammering updates on the extent allocation tree.
29 */
30
31bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
32{
33 struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
34 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
35 bool ret = false;
36 u64 reserved;
37
38 spin_lock(&global_rsv->lock);
39 reserved = global_rsv->reserved;
40 spin_unlock(&global_rsv->lock);
41
42 /*
43 * Since the global reserve is just kind of magic we don't really want
44 * to rely on it to save our bacon, so if our size is more than the
45 * delayed_refs_rsv and the global rsv then it's time to think about
46 * bailing.
47 */
48 spin_lock(&delayed_refs_rsv->lock);
49 reserved += delayed_refs_rsv->reserved;
50 if (delayed_refs_rsv->size >= reserved)
51 ret = true;
52 spin_unlock(&delayed_refs_rsv->lock);
53 return ret;
54}
55
56/*
57 * Release a ref head's reservation.
58 *
59 * @fs_info: the filesystem
60 * @nr_refs: number of delayed refs to drop
61 * @nr_csums: number of csum items to drop
62 *
63 * Drops the delayed ref head's count from the delayed refs rsv and frees any
64 * excess reservation we had.
65 */
66void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr_refs, int nr_csums)
67{
68 struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
69 u64 num_bytes;
70 u64 released;
71
72 num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, nr_refs);
73 num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info, nr_csums);
74
75 released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
76 if (released)
77 trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
78 0, released, 0);
79}
80
81/*
82 * Adjust the size of the delayed refs rsv.
83 *
84 * This is to be called any time we may have adjusted trans->delayed_ref_updates
85 * or trans->delayed_ref_csum_deletions; it calculates the additional size and
86 * adds it to the delayed_refs_rsv.
87 */
88void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
89{
90 struct btrfs_fs_info *fs_info = trans->fs_info;
91 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
92 struct btrfs_block_rsv *local_rsv = &trans->delayed_rsv;
93 u64 num_bytes;
94 u64 reserved_bytes;
95
96 num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, trans->delayed_ref_updates);
97 num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info,
98 trans->delayed_ref_csum_deletions);
99
100 if (num_bytes == 0)
101 return;
102
103 /*
104 * Try to take num_bytes from the transaction's local delayed reserve.
105 * If not possible, try to take as much as is available. If the local
106 * reserve doesn't have enough reserved space, the delayed refs reserve
107 * will be refilled next time btrfs_delayed_refs_rsv_refill() is called
108 * by someone or if a transaction commit is triggered before that, the
109 * global block reserve will be used. We want to minimize using the
110 * global block reserve for cases we can account for in advance, to
111 * avoid exhausting it and reach -ENOSPC during a transaction commit.
112 */
113 spin_lock(&local_rsv->lock);
114 reserved_bytes = min(num_bytes, local_rsv->reserved);
115 local_rsv->reserved -= reserved_bytes;
116 local_rsv->full = (local_rsv->reserved >= local_rsv->size);
117 spin_unlock(&local_rsv->lock);
118
119 spin_lock(&delayed_rsv->lock);
120 delayed_rsv->size += num_bytes;
121 delayed_rsv->reserved += reserved_bytes;
122 delayed_rsv->full = (delayed_rsv->reserved >= delayed_rsv->size);
123 spin_unlock(&delayed_rsv->lock);
124 trans->delayed_ref_updates = 0;
125 trans->delayed_ref_csum_deletions = 0;
126}
127
128/*
129 * Adjust the size of the delayed refs block reserve for 1 block group item
130 * insertion, used after allocating a block group.
131 */
132void btrfs_inc_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info)
133{
134 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
135
136 spin_lock(&delayed_rsv->lock);
137 /*
138 * Inserting a block group item does not require changing the free space
139 * tree, only the extent tree or the block group tree, so this is all we
140 * need.
141 */
142 delayed_rsv->size += btrfs_calc_insert_metadata_size(fs_info, 1);
143 delayed_rsv->full = false;
144 spin_unlock(&delayed_rsv->lock);
145}
146
147/*
148 * Adjust the size of the delayed refs block reserve to release space for 1
149 * block group item insertion.
150 */
151void btrfs_dec_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info)
152{
153 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
154 const u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
155 u64 released;
156
157 released = btrfs_block_rsv_release(fs_info, delayed_rsv, num_bytes, NULL);
158 if (released > 0)
159 trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
160 0, released, 0);
161}
162
163/*
164 * Adjust the size of the delayed refs block reserve for 1 block group item
165 * update.
166 */
167void btrfs_inc_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
168{
169 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
170
171 spin_lock(&delayed_rsv->lock);
172 /*
173 * Updating a block group item does not result in new nodes/leaves and
174 * does not require changing the free space tree, only the extent tree
175 * or the block group tree, so this is all we need.
176 */
177 delayed_rsv->size += btrfs_calc_metadata_size(fs_info, 1);
178 delayed_rsv->full = false;
179 spin_unlock(&delayed_rsv->lock);
180}
181
182/*
183 * Adjust the size of the delayed refs block reserve to release space for 1
184 * block group item update.
185 */
186void btrfs_dec_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
187{
188 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
189 const u64 num_bytes = btrfs_calc_metadata_size(fs_info, 1);
190 u64 released;
191
192 released = btrfs_block_rsv_release(fs_info, delayed_rsv, num_bytes, NULL);
193 if (released > 0)
194 trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
195 0, released, 0);
196}
197
198/*
199 * Refill based on our delayed refs usage.
200 *
201 * @fs_info: the filesystem
202 * @flush: control how we can flush for this reservation.
203 *
204 * This will refill the delayed block_rsv with up to one item's worth of space
205 * and will return -ENOSPC if we can't make the reservation.
206 */
207int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
208 enum btrfs_reserve_flush_enum flush)
209{
210 struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
211 struct btrfs_space_info *space_info = block_rsv->space_info;
212 u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1);
213 u64 num_bytes = 0;
214 u64 refilled_bytes;
215 u64 to_free;
216 int ret = -ENOSPC;
217
218 spin_lock(&block_rsv->lock);
219 if (block_rsv->reserved < block_rsv->size) {
220 num_bytes = block_rsv->size - block_rsv->reserved;
221 num_bytes = min(num_bytes, limit);
222 }
223 spin_unlock(&block_rsv->lock);
224
225 if (!num_bytes)
226 return 0;
227
228 ret = btrfs_reserve_metadata_bytes(fs_info, space_info, num_bytes, flush);
229 if (ret)
230 return ret;
231
232 /*
233 * We may have raced with someone else, so check again whether the block
234 * reserve is still not full, and release any excess space.
235 */
236 spin_lock(&block_rsv->lock);
237 if (block_rsv->reserved < block_rsv->size) {
238 u64 needed = block_rsv->size - block_rsv->reserved;
239
240 if (num_bytes >= needed) {
241 block_rsv->reserved += needed;
242 block_rsv->full = true;
243 to_free = num_bytes - needed;
244 refilled_bytes = needed;
245 } else {
246 block_rsv->reserved += num_bytes;
247 to_free = 0;
248 refilled_bytes = num_bytes;
249 }
250 } else {
251 to_free = num_bytes;
252 refilled_bytes = 0;
253 }
254 spin_unlock(&block_rsv->lock);
255
256 if (to_free > 0)
257 btrfs_space_info_free_bytes_may_use(fs_info, space_info, to_free);
258
259 if (refilled_bytes > 0)
260 trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", 0,
261 refilled_bytes, 1);
262 return 0;
263}
264
265/*
266 * compare two delayed data backrefs with same bytenr and type
267 */
268static int comp_data_refs(struct btrfs_delayed_ref_node *ref1,
269 struct btrfs_delayed_ref_node *ref2)
270{
271 if (ref1->data_ref.objectid < ref2->data_ref.objectid)
272 return -1;
273 if (ref1->data_ref.objectid > ref2->data_ref.objectid)
274 return 1;
275 if (ref1->data_ref.offset < ref2->data_ref.offset)
276 return -1;
277 if (ref1->data_ref.offset > ref2->data_ref.offset)
278 return 1;
279 return 0;
280}
281
282static int comp_refs(struct btrfs_delayed_ref_node *ref1,
283 struct btrfs_delayed_ref_node *ref2,
284 bool check_seq)
285{
286 int ret = 0;
287
288 if (ref1->type < ref2->type)
289 return -1;
290 if (ref1->type > ref2->type)
291 return 1;
292 if (ref1->type == BTRFS_SHARED_BLOCK_REF_KEY ||
293 ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
294 if (ref1->parent < ref2->parent)
295 return -1;
296 if (ref1->parent > ref2->parent)
297 return 1;
298 } else {
299 if (ref1->ref_root < ref2->ref_root)
300 return -1;
301 if (ref1->ref_root > ref2->ref_root)
302 return 1;
303 if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY)
304 ret = comp_data_refs(ref1, ref2);
305 }
306 if (ret)
307 return ret;
308 if (check_seq) {
309 if (ref1->seq < ref2->seq)
310 return -1;
311 if (ref1->seq > ref2->seq)
312 return 1;
313 }
314 return 0;
315}
316
317static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
318 struct btrfs_delayed_ref_node *ins)
319{
320 struct rb_node **p = &root->rb_root.rb_node;
321 struct rb_node *node = &ins->ref_node;
322 struct rb_node *parent_node = NULL;
323 struct btrfs_delayed_ref_node *entry;
324 bool leftmost = true;
325
326 while (*p) {
327 int comp;
328
329 parent_node = *p;
330 entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
331 ref_node);
332 comp = comp_refs(ins, entry, true);
333 if (comp < 0) {
334 p = &(*p)->rb_left;
335 } else if (comp > 0) {
336 p = &(*p)->rb_right;
337 leftmost = false;
338 } else {
339 return entry;
340 }
341 }
342
343 rb_link_node(node, parent_node, p);
344 rb_insert_color_cached(node, root, leftmost);
345 return NULL;
346}
347
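/*
 * Return the tracked ref head with the lowest index (bytenr shifted by
 * sectorsize_bits) in the xarray, or NULL if no heads are queued.
 */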
348static struct btrfs_delayed_ref_head *find_first_ref_head(
349 struct btrfs_delayed_ref_root *dr)
350{
351 unsigned long from = 0;
352
353 lockdep_assert_held(&dr->lock);
354
355 return xa_find(&dr->head_refs, &from, ULONG_MAX, XA_PRESENT);
356}
357
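/*
 * Take the head's mutex while holding delayed_refs->lock, dropping and
 * retaking the spinlock if we have to sleep on the mutex.  Returns false if
 * the head stopped being tracked while we waited, true once it is safely
 * locked.
 */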
358static bool btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
359 struct btrfs_delayed_ref_head *head)
360{
361 lockdep_assert_held(&delayed_refs->lock);
362 if (mutex_trylock(&head->mutex))
363 return true;
364
365 refcount_inc(&head->refs);
366 spin_unlock(&delayed_refs->lock);
367
368 mutex_lock(&head->mutex);
369 spin_lock(&delayed_refs->lock);
370 if (!head->tracked) {
371 mutex_unlock(&head->mutex);
372 btrfs_put_delayed_ref_head(head);
373 return false;
374 }
375 btrfs_put_delayed_ref_head(head);
376 return true;
377}
378
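/*
 * Remove a single delayed ref from its head: unlink it from the ref_tree and
 * the add_list, drop its reference and release the space it held in the
 * delayed refs reservation.
 */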
379static inline void drop_delayed_ref(struct btrfs_fs_info *fs_info,
380 struct btrfs_delayed_ref_root *delayed_refs,
381 struct btrfs_delayed_ref_head *head,
382 struct btrfs_delayed_ref_node *ref)
383{
384 lockdep_assert_held(&head->lock);
385 rb_erase_cached(&ref->ref_node, &head->ref_tree);
386 RB_CLEAR_NODE(&ref->ref_node);
387 if (!list_empty(&ref->add_list))
388 list_del(&ref->add_list);
389 btrfs_put_delayed_ref(ref);
390 btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
391}
392
393static bool merge_ref(struct btrfs_fs_info *fs_info,
394 struct btrfs_delayed_ref_root *delayed_refs,
395 struct btrfs_delayed_ref_head *head,
396 struct btrfs_delayed_ref_node *ref,
397 u64 seq)
398{
399 struct btrfs_delayed_ref_node *next;
400 struct rb_node *node = rb_next(&ref->ref_node);
401 bool done = false;
402
403 while (!done && node) {
404 int mod;
405
406 next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
407 node = rb_next(node);
408 if (seq && next->seq >= seq)
409 break;
410 if (comp_refs(ref, next, false))
411 break;
412
413 if (ref->action == next->action) {
414 mod = next->ref_mod;
415 } else {
416 if (ref->ref_mod < next->ref_mod) {
417 swap(ref, next);
418 done = true;
419 }
420 mod = -next->ref_mod;
421 }
422
423 drop_delayed_ref(fs_info, delayed_refs, head, next);
424 ref->ref_mod += mod;
425 if (ref->ref_mod == 0) {
426 drop_delayed_ref(fs_info, delayed_refs, head, ref);
427 done = true;
428 } else {
429 /*
430 * Can't have multiples of the same ref on a tree block.
431 */
432 WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
433 ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
434 }
435 }
436
437 return done;
438}
439
440void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
441 struct btrfs_delayed_ref_root *delayed_refs,
442 struct btrfs_delayed_ref_head *head)
443{
444 struct btrfs_delayed_ref_node *ref;
445 struct rb_node *node;
446 u64 seq = 0;
447
448 lockdep_assert_held(&head->lock);
449
450 if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
451 return;
452
453 /* We don't have too many refs to merge for data. */
454 if (head->is_data)
455 return;
456
457 seq = btrfs_tree_mod_log_lowest_seq(fs_info);
458again:
459 for (node = rb_first_cached(&head->ref_tree); node;
460 node = rb_next(node)) {
461 ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
462 if (seq && ref->seq >= seq)
463 continue;
464 if (merge_ref(fs_info, delayed_refs, head, ref, seq))
465 goto again;
466 }
467}
468
469int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
470{
471 int ret = 0;
472 u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);
473
474 if (min_seq != 0 && seq >= min_seq) {
475 btrfs_debug(fs_info,
476 "holding back delayed_ref %llu, lowest is %llu",
477 seq, min_seq);
478 ret = 1;
479 }
480
481 return ret;
482}
483
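/*
 * Pick and lock the next ref head to run: scan the xarray starting at
 * run_delayed_start, skip heads that are already being processed and wrap
 * around once when the end is reached.  Returns the locked head, NULL when
 * there is nothing left to do, or ERR_PTR(-EAGAIN) if the chosen head went
 * away while we waited for its mutex.
 */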
484struct btrfs_delayed_ref_head *btrfs_select_ref_head(
485 const struct btrfs_fs_info *fs_info,
486 struct btrfs_delayed_ref_root *delayed_refs)
487{
488 struct btrfs_delayed_ref_head *head;
489 unsigned long start_index;
490 unsigned long found_index;
491 bool found_head = false;
492 bool locked;
493
494 spin_lock(&delayed_refs->lock);
495again:
496 start_index = (delayed_refs->run_delayed_start >> fs_info->sectorsize_bits);
497 xa_for_each_start(&delayed_refs->head_refs, found_index, head, start_index) {
498 if (!head->processing) {
499 found_head = true;
500 break;
501 }
502 }
503 if (!found_head) {
504 if (delayed_refs->run_delayed_start == 0) {
505 spin_unlock(&delayed_refs->lock);
506 return NULL;
507 }
508 delayed_refs->run_delayed_start = 0;
509 goto again;
510 }
511
512 head->processing = true;
513 WARN_ON(delayed_refs->num_heads_ready == 0);
514 delayed_refs->num_heads_ready--;
515 delayed_refs->run_delayed_start = head->bytenr +
516 head->num_bytes;
517
518 locked = btrfs_delayed_ref_lock(delayed_refs, head);
519 spin_unlock(&delayed_refs->lock);
520
521 /*
522 * We may have dropped the spin lock to get the head mutex lock, and
523 * that might have given someone else time to free the head. If that's
524 * true, it has been removed from our list and we can move on.
525 */
526 if (!locked)
527 return ERR_PTR(-EAGAIN);
528
529 return head;
530}
531
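/*
 * Undo btrfs_select_ref_head(): mark the head as no longer being processed,
 * account it as ready again and drop its mutex.
 */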
532void btrfs_unselect_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
533 struct btrfs_delayed_ref_head *head)
534{
535 spin_lock(&delayed_refs->lock);
536 head->processing = false;
537 delayed_refs->num_heads_ready++;
538 spin_unlock(&delayed_refs->lock);
539 btrfs_delayed_ref_unlock(head);
540}
541
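/*
 * Unlink a ref head from the xarray and update the head counters.  Callers
 * must hold both the delayed_refs lock and the head's lock.
 */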
542void btrfs_delete_ref_head(const struct btrfs_fs_info *fs_info,
543 struct btrfs_delayed_ref_root *delayed_refs,
544 struct btrfs_delayed_ref_head *head)
545{
546 const unsigned long index = (head->bytenr >> fs_info->sectorsize_bits);
547
548 lockdep_assert_held(&delayed_refs->lock);
549 lockdep_assert_held(&head->lock);
550
551 xa_erase(&delayed_refs->head_refs, index);
552 head->tracked = false;
553 delayed_refs->num_heads--;
554 if (!head->processing)
555 delayed_refs->num_heads_ready--;
556}
557
558/*
559 * Helper to insert the ref_node into the head's ref tree or merge it with an equal existing ref.
560 *
561 * Return false if the ref was inserted.
562 * Return true if the ref was merged into an existing one (and therefore can be
563 * freed by the caller).
564 */
565static bool insert_delayed_ref(struct btrfs_trans_handle *trans,
566 struct btrfs_delayed_ref_head *href,
567 struct btrfs_delayed_ref_node *ref)
568{
569 struct btrfs_delayed_ref_root *root = &trans->transaction->delayed_refs;
570 struct btrfs_delayed_ref_node *exist;
571 int mod;
572
573 spin_lock(&href->lock);
574 exist = tree_insert(&href->ref_tree, ref);
575 if (!exist) {
576 if (ref->action == BTRFS_ADD_DELAYED_REF)
577 list_add_tail(&ref->add_list, &href->ref_add_list);
578 spin_unlock(&href->lock);
579 trans->delayed_ref_updates++;
580 return false;
581 }
582
583 /* Now we are sure we can merge */
584 if (exist->action == ref->action) {
585 mod = ref->ref_mod;
586 } else {
587 /* Need to change action */
588 if (exist->ref_mod < ref->ref_mod) {
589 exist->action = ref->action;
590 mod = -exist->ref_mod;
591 exist->ref_mod = ref->ref_mod;
592 if (ref->action == BTRFS_ADD_DELAYED_REF)
593 list_add_tail(&exist->add_list,
594 &href->ref_add_list);
595 else if (ref->action == BTRFS_DROP_DELAYED_REF) {
596 ASSERT(!list_empty(&exist->add_list));
597 list_del_init(&exist->add_list);
598 } else {
599 ASSERT(0);
600 }
601 } else
602 mod = -ref->ref_mod;
603 }
604 exist->ref_mod += mod;
605
606 /* remove existing tail if its ref_mod is zero */
607 if (exist->ref_mod == 0)
608 drop_delayed_ref(trans->fs_info, root, href, exist);
609 spin_unlock(&href->lock);
610 return true;
611}
612
613/*
614 * helper function to update the accounting in the head ref
615 * existing and update must have the same bytenr
616 */
617static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
618 struct btrfs_delayed_ref_head *existing,
619 struct btrfs_delayed_ref_head *update)
620{
621 struct btrfs_delayed_ref_root *delayed_refs =
622 &trans->transaction->delayed_refs;
623 struct btrfs_fs_info *fs_info = trans->fs_info;
624 int old_ref_mod;
625
626 BUG_ON(existing->is_data != update->is_data);
627
628 spin_lock(&existing->lock);
629
630 /*
631 * When freeing an extent, we may not know the owning root when we
632 * first create the head_ref. However, some deref before the last deref
633 * will know it, so we just need to update the head_ref accordingly.
634 */
635 if (!existing->owning_root)
636 existing->owning_root = update->owning_root;
637
638 if (update->must_insert_reserved) {
639 /* if the extent was freed and then
640 * reallocated before the delayed ref
641 * entries were processed, we can end up
642 * with an existing head ref without
643 * the must_insert_reserved flag set.
644 * Set it again here
645 */
646 existing->must_insert_reserved = update->must_insert_reserved;
647 existing->owning_root = update->owning_root;
648
649 /*
650 * update the num_bytes so we make sure the accounting
651 * is done correctly
652 */
653 existing->num_bytes = update->num_bytes;
654
655 }
656
657 if (update->extent_op) {
658 if (!existing->extent_op) {
659 existing->extent_op = update->extent_op;
660 } else {
661 if (update->extent_op->update_key) {
662 memcpy(&existing->extent_op->key,
663 &update->extent_op->key,
664 sizeof(update->extent_op->key));
665 existing->extent_op->update_key = true;
666 }
667 if (update->extent_op->update_flags) {
668 existing->extent_op->flags_to_set |=
669 update->extent_op->flags_to_set;
670 existing->extent_op->update_flags = true;
671 }
672 btrfs_free_delayed_extent_op(update->extent_op);
673 }
674 }
675 /*
676 * update the reference mod on the head to reflect this new operation,
677 * only need the lock for this case cause we could be processing it
678 * currently, for refs we just added we know we're a-ok.
679 */
680 old_ref_mod = existing->total_ref_mod;
681 existing->ref_mod += update->ref_mod;
682 existing->total_ref_mod += update->ref_mod;
683
684 /*
685 * If we are going from a positive ref mod to a negative one, or vice
686 * versa, we need to make sure to adjust pending_csums accordingly.
687 * We reserve bytes for csum deletion when adding or updating a ref head,
688 * see add_delayed_ref_head() for more details.
689 */
690 if (existing->is_data) {
691 u64 csum_leaves =
692 btrfs_csum_bytes_to_leaves(fs_info,
693 existing->num_bytes);
694
695 if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
696 delayed_refs->pending_csums -= existing->num_bytes;
697 btrfs_delayed_refs_rsv_release(fs_info, 0, csum_leaves);
698 }
699 if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
700 delayed_refs->pending_csums += existing->num_bytes;
701 trans->delayed_ref_csum_deletions += csum_leaves;
702 }
703 }
704
705 spin_unlock(&existing->lock);
706}
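
/*
 * Illustrative sketch, not kernel code (guarded by the hypothetical
 * DELAYED_REF_DOC_EXAMPLES symbol): models when the pending_csums accounting
 * above must change. It changes only when total_ref_mod crosses zero, i.e.
 * when the head flips between a net add and a net drop of the data extent.
 */
#ifdef DELAYED_REF_DOC_EXAMPLES
/* -1: release the csum reservation, +1: account for csum deletion, 0: no change. */
static int csum_adjust_example(int old_total_ref_mod, int update_ref_mod)
{
	int new_total = old_total_ref_mod + update_ref_mod;

	if (new_total >= 0 && old_total_ref_mod < 0)
		return -1;
	if (new_total < 0 && old_total_ref_mod >= 0)
		return 1;
	return 0;
}
#endif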

static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
				  struct btrfs_ref *generic_ref,
				  struct btrfs_qgroup_extent_record *qrecord,
				  u64 reserved)
{
	int count_mod = 1;
	bool must_insert_reserved = false;

	/* If reserved is provided, it must be a data extent. */
	BUG_ON(generic_ref->type != BTRFS_REF_DATA && reserved);

	switch (generic_ref->action) {
	case BTRFS_ADD_DELAYED_REF:
		/* count_mod is already set to 1. */
		break;
	case BTRFS_UPDATE_DELAYED_HEAD:
		count_mod = 0;
		break;
	case BTRFS_DROP_DELAYED_REF:
		/*
		 * The head node stores the sum of all the mods, so dropping a
		 * ref should drop the sum in the head node by one.
		 */
		count_mod = -1;
		break;
	case BTRFS_ADD_DELAYED_EXTENT:
		/*
		 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the
		 * reserved accounting when the extent is finally added, or if
		 * a later modification deletes the delayed ref without ever
		 * inserting the extent into the extent allocation tree.
		 * ref->must_insert_reserved is the flag used to record that
		 * accounting mods are required.
		 *
		 * Once we record must_insert_reserved, switch the action to
		 * BTRFS_ADD_DELAYED_REF because other special casing is not
		 * required.
		 */
		must_insert_reserved = true;
		break;
	}

	refcount_set(&head_ref->refs, 1);
	head_ref->bytenr = generic_ref->bytenr;
	head_ref->num_bytes = generic_ref->num_bytes;
	head_ref->ref_mod = count_mod;
	head_ref->reserved_bytes = reserved;
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->owning_root = generic_ref->owning_root;
	head_ref->is_data = (generic_ref->type == BTRFS_REF_DATA);
	head_ref->is_system = (generic_ref->ref_root == BTRFS_CHUNK_TREE_OBJECTID);
	head_ref->ref_tree = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&head_ref->ref_add_list);
	head_ref->tracked = false;
	head_ref->processing = false;
	head_ref->total_ref_mod = count_mod;
	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	/* If not metadata, set an impossible level to help debugging. */
	if (generic_ref->type == BTRFS_REF_METADATA)
		head_ref->level = generic_ref->tree_ref.level;
	else
		head_ref->level = U8_MAX;

	if (qrecord) {
		if (generic_ref->ref_root && reserved) {
			qrecord->data_rsv = reserved;
			qrecord->data_rsv_refroot = generic_ref->ref_root;
		}
		qrecord->num_bytes = generic_ref->num_bytes;
		qrecord->old_roots = NULL;
	}
}
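
/*
 * Illustrative sketch, compiled out behind the hypothetical
 * DELAYED_REF_DOC_EXAMPLES symbol: summarizes the count_mod values chosen in
 * the switch statement above for each action.
 */
#ifdef DELAYED_REF_DOC_EXAMPLES
static int count_mod_example(int action)
{
	switch (action) {
	case BTRFS_UPDATE_DELAYED_HEAD:
		return 0;	/* Metadata-only update, ref count unchanged. */
	case BTRFS_DROP_DELAYED_REF:
		return -1;	/* One fewer reference on the extent. */
	default:
		return 1;	/* BTRFS_ADD_DELAYED_REF and BTRFS_ADD_DELAYED_EXTENT. */
	}
}
#endif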

/*
 * Helper function to actually insert a head node into the xarray. This does
 * all the dirty work in terms of maintaining the correct overall modification
 * count.
 *
 * Returns an error pointer in case of an error.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_qgroup_extent_record *qrecord,
		     int action, bool *qrecord_inserted_ret)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_root *delayed_refs;
	const unsigned long index = (head_ref->bytenr >> fs_info->sectorsize_bits);
	bool qrecord_inserted = false;

	delayed_refs = &trans->transaction->delayed_refs;
	lockdep_assert_held(&delayed_refs->lock);

#if BITS_PER_LONG == 32
	if (head_ref->bytenr >= MAX_LFS_FILESIZE) {
		if (qrecord)
			xa_release(&delayed_refs->dirty_extents, index);
		btrfs_err_rl(fs_info,
"delayed ref head %llu is beyond 32bit page cache and xarray index limit",
			     head_ref->bytenr);
		btrfs_err_32bit_limit(fs_info);
		return ERR_PTR(-EOVERFLOW);
	}
#endif

	/* Record qgroup extent info if provided. */
	if (qrecord) {
		int ret;

		ret = btrfs_qgroup_trace_extent_nolock(fs_info, delayed_refs, qrecord,
						       head_ref->bytenr);
		if (ret) {
			/* Clean up if insertion fails or item exists. */
			xa_release(&delayed_refs->dirty_extents, index);
			/* Caller responsible for freeing qrecord on error. */
			if (ret < 0)
				return ERR_PTR(ret);
			kfree(qrecord);
		} else {
			qrecord_inserted = true;
		}
	}

	trace_add_delayed_ref_head(fs_info, head_ref, action);

	existing = xa_load(&delayed_refs->head_refs, index);
	if (existing) {
		update_existing_head_ref(trans, existing, head_ref);
		/*
		 * We've updated the existing ref, free the newly allocated
		 * ref.
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		existing = xa_store(&delayed_refs->head_refs, index, head_ref, GFP_ATOMIC);
		if (xa_is_err(existing)) {
			/* Memory was preallocated by the caller. */
			ASSERT(xa_err(existing) != -ENOMEM);
			return ERR_PTR(xa_err(existing));
		} else if (WARN_ON(existing)) {
			/*
			 * Shouldn't happen, we just did a lookup above while
			 * holding delayed_refs->lock.
			 */
			return ERR_PTR(-EEXIST);
		}
		head_ref->tracked = true;
		/*
		 * We reserve the amount of bytes needed to delete csums when
		 * adding the ref head and not when adding individual drop refs
		 * since the csum items are deleted only after running the last
		 * delayed drop ref (the data extent's ref count drops to 0).
		 */
		if (head_ref->is_data && head_ref->ref_mod < 0) {
			delayed_refs->pending_csums += head_ref->num_bytes;
			trans->delayed_ref_csum_deletions +=
				btrfs_csum_bytes_to_leaves(fs_info, head_ref->num_bytes);
		}
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
	}
	if (qrecord_inserted_ret)
		*qrecord_inserted_ret = qrecord_inserted;

	return head_ref;
}
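
/*
 * Illustrative sketch, compiled out behind the hypothetical
 * DELAYED_REF_DOC_EXAMPLES symbol: head refs are keyed in the xarray by
 * bytenr >> sectorsize_bits, which is unique per extent since extent start
 * offsets are sector aligned. With a 4KiB sector size (sectorsize_bits == 12)
 * a head at bytenr 1048576 lands at index 256.
 */
#ifdef DELAYED_REF_DOC_EXAMPLES
static unsigned long head_ref_index_example(u64 bytenr, u32 sectorsize_bits)
{
	return (unsigned long)(bytenr >> sectorsize_bits);
}
#endif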

/*
 * Initialize the structure which represents a modification to an extent.
 *
 * @fs_info:     Internal to the mounted filesystem mount structure.
 *
 * @ref:         The structure which is going to be initialized.
 *
 * @generic_ref: The generic reference this modification is derived from. It
 *               carries the logical address and size of the extent, the id of
 *               the root where the modification originated (either one of the
 *               well-known metadata trees or the subvolume id which references
 *               this extent), the action (one of BTRFS_ADD_DELAYED_REF,
 *               BTRFS_DROP_DELAYED_REF or BTRFS_ADD_DELAYED_EXTENT) and the
 *               type of the extent being recorded (BTRFS_SHARED_BLOCK_REF_KEY/
 *               BTRFS_TREE_BLOCK_REF_KEY for a metadata extent or
 *               BTRFS_SHARED_DATA_REF_KEY/BTRFS_EXTENT_DATA_REF_KEY for a
 *               data extent).
 */
static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
				    struct btrfs_delayed_ref_node *ref,
				    struct btrfs_ref *generic_ref)
{
	int action = generic_ref->action;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(generic_ref->ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	refcount_set(&ref->refs, 1);
	ref->bytenr = generic_ref->bytenr;
	ref->num_bytes = generic_ref->num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->seq = seq;
	ref->type = btrfs_ref_type(generic_ref);
	ref->ref_root = generic_ref->ref_root;
	ref->parent = generic_ref->parent;
	RB_CLEAR_NODE(&ref->ref_node);
	INIT_LIST_HEAD(&ref->add_list);

	if (generic_ref->type == BTRFS_REF_DATA)
		ref->data_ref = generic_ref->data_ref;
	else
		ref->tree_ref = generic_ref->tree_ref;
}

void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level, u64 mod_root,
			 bool skip_qgroup)
{
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	/* If @mod_root is not set, use @ref_root as the fallback. */
	generic_ref->real_root = mod_root ?: generic_ref->ref_root;
#endif
	generic_ref->tree_ref.level = level;
	generic_ref->type = BTRFS_REF_METADATA;
	if (skip_qgroup || !(is_fstree(generic_ref->ref_root) &&
			     (!mod_root || is_fstree(mod_root))))
		generic_ref->skip_qgroup = true;
	else
		generic_ref->skip_qgroup = false;
}

void btrfs_init_data_ref(struct btrfs_ref *generic_ref, u64 ino, u64 offset,
			 u64 mod_root, bool skip_qgroup)
{
#ifdef CONFIG_BTRFS_FS_REF_VERIFY
	/* If @mod_root is not set, use @ref_root as the fallback. */
	generic_ref->real_root = mod_root ?: generic_ref->ref_root;
#endif
	generic_ref->data_ref.objectid = ino;
	generic_ref->data_ref.offset = offset;
	generic_ref->type = BTRFS_REF_DATA;
	if (skip_qgroup || !(is_fstree(generic_ref->ref_root) &&
			     (!mod_root || is_fstree(mod_root))))
		generic_ref->skip_qgroup = true;
	else
		generic_ref->skip_qgroup = false;
}
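
/*
 * Illustrative sketch, compiled out behind the hypothetical
 * DELAYED_REF_DOC_EXAMPLES symbol: the skip_qgroup logic in the two helpers
 * above reduces to this predicate. Qgroup accounting is kept only when the
 * owning root is a subvolume tree and the modifying root, if known, is too.
 */
#ifdef DELAYED_REF_DOC_EXAMPLES
static bool needs_qgroup_accounting_example(bool caller_skip, u64 ref_root,
					    u64 mod_root)
{
	if (caller_skip || !is_fstree(ref_root))
		return false;
	return !mod_root || is_fstree(mod_root);
}
#endif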

static int add_delayed_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_ref *generic_ref,
			   struct btrfs_delayed_extent_op *extent_op,
			   u64 reserved)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_delayed_ref_node *node;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_head *new_head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_qgroup_extent_record *record = NULL;
	const unsigned long index = (generic_ref->bytenr >> fs_info->sectorsize_bits);
	bool qrecord_reserved = false;
	bool qrecord_inserted;
	int action = generic_ref->action;
	bool merged;
	int ret;

	node = kmem_cache_alloc(btrfs_delayed_ref_node_cachep, GFP_NOFS);
	if (!node)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		ret = -ENOMEM;
		goto free_node;
	}

	delayed_refs = &trans->transaction->delayed_refs;

	if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
		record = kzalloc(sizeof(*record), GFP_NOFS);
		if (!record) {
			ret = -ENOMEM;
			goto free_head_ref;
		}
		if (xa_reserve(&delayed_refs->dirty_extents, index, GFP_NOFS)) {
			ret = -ENOMEM;
			goto free_record;
		}
		qrecord_reserved = true;
	}

	ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);
	if (ret) {
		if (qrecord_reserved)
			xa_release(&delayed_refs->dirty_extents, index);
		goto free_record;
	}

	init_delayed_ref_common(fs_info, node, generic_ref);
	init_delayed_ref_head(head_ref, generic_ref, record, reserved);
	head_ref->extent_op = extent_op;

	spin_lock(&delayed_refs->lock);

	/*
	 * Insert both the head node and the new ref without dropping the
	 * spin lock.
	 */
	new_head_ref = add_delayed_ref_head(trans, head_ref, record,
					    action, &qrecord_inserted);
	if (IS_ERR(new_head_ref)) {
		xa_release(&delayed_refs->head_refs, index);
		spin_unlock(&delayed_refs->lock);
		ret = PTR_ERR(new_head_ref);
		goto free_record;
	}
	head_ref = new_head_ref;

	merged = insert_delayed_ref(trans, head_ref, node);
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);

	if (generic_ref->type == BTRFS_REF_DATA)
		trace_add_delayed_data_ref(trans->fs_info, node);
	else
		trace_add_delayed_tree_ref(trans->fs_info, node);
	if (merged)
		kmem_cache_free(btrfs_delayed_ref_node_cachep, node);

	if (qrecord_inserted)
		return btrfs_qgroup_trace_extent_post(trans, record, generic_ref->bytenr);
	return 0;

free_record:
	kfree(record);
free_head_ref:
	kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
free_node:
	kmem_cache_free(btrfs_delayed_ref_node_cachep, node);
	return ret;
}

/*
 * Add a delayed tree ref. This does all of the accounting required to make
 * sure the delayed ref is eventually processed before this transaction
 * commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       struct btrfs_delayed_extent_op *extent_op)
{
	ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
	return add_delayed_ref(trans, generic_ref, extent_op, 0);
}

/*
 * Add a delayed data ref. This is similar to btrfs_add_delayed_tree_ref().
 */
int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
			       struct btrfs_ref *generic_ref,
			       u64 reserved)
{
	ASSERT(generic_ref->type == BTRFS_REF_DATA && generic_ref->action);
	return add_delayed_ref(trans, generic_ref, NULL, reserved);
}

int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes, u8 level,
				struct btrfs_delayed_extent_op *extent_op)
{
	const unsigned long index = (bytenr >> trans->fs_info->sectorsize_bits);
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_head *head_ref_ret;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_ref generic_ref = {
		.type = BTRFS_REF_METADATA,
		.action = BTRFS_UPDATE_DELAYED_HEAD,
		.bytenr = bytenr,
		.num_bytes = num_bytes,
		.tree_ref.level = level,
	};
	int ret;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	init_delayed_ref_head(head_ref, &generic_ref, NULL, 0);
	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;

	ret = xa_reserve(&delayed_refs->head_refs, index, GFP_NOFS);
	if (ret) {
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		return ret;
	}

	spin_lock(&delayed_refs->lock);
	head_ref_ret = add_delayed_ref_head(trans, head_ref, NULL,
					    BTRFS_UPDATE_DELAYED_HEAD, NULL);
	if (IS_ERR(head_ref_ret)) {
		xa_release(&delayed_refs->head_refs, index);
		spin_unlock(&delayed_refs->lock);
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		return PTR_ERR(head_ref_ret);
	}
	spin_unlock(&delayed_refs->lock);

	/*
	 * Need to update the delayed_refs_rsv with any changes we may have
	 * made.
	 */
	btrfs_update_delayed_refs_rsv(trans);
	return 0;
}

void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
	if (refcount_dec_and_test(&ref->refs)) {
		WARN_ON(!RB_EMPTY_NODE(&ref->ref_node));
		kmem_cache_free(btrfs_delayed_ref_node_cachep, ref);
	}
}

/*
 * This does a simple search for the head node for a given extent. Returns the
 * head node if found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 bytenr)
{
	const unsigned long index = (bytenr >> fs_info->sectorsize_bits);

	lockdep_assert_held(&delayed_refs->lock);

	return xa_load(&delayed_refs->head_refs, index);
}

static int find_comp(struct btrfs_delayed_ref_node *entry, u64 root, u64 parent)
{
	int type = parent ? BTRFS_SHARED_BLOCK_REF_KEY : BTRFS_TREE_BLOCK_REF_KEY;

	if (type < entry->type)
		return -1;
	if (type > entry->type)
		return 1;

	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (root < entry->ref_root)
			return -1;
		if (root > entry->ref_root)
			return 1;
	} else {
		if (parent < entry->parent)
			return -1;
		if (parent > entry->parent)
			return 1;
	}
	return 0;
}
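
/*
 * Illustrative sketch, compiled out behind the hypothetical
 * DELAYED_REF_DOC_EXAMPLES symbol: shows how find_comp() orders refs, first
 * by type (keyed vs. shared backref) and then by root objectid or parent
 * bytenr, which is the ordering used by the ref_tree walk below.
 */
#ifdef DELAYED_REF_DOC_EXAMPLES
static void find_comp_example(void)
{
	struct btrfs_delayed_ref_node entry = {
		.type = BTRFS_TREE_BLOCK_REF_KEY,
		.ref_root = 5,
	};

	/* Same type and same root: a match. */
	ASSERT(find_comp(&entry, 5, 0) == 0);
	/* A smaller root sorts before the entry: the walk descends left. */
	ASSERT(find_comp(&entry, 3, 0) < 0);
	/* A parent means a shared backref, which has a different type. */
	ASSERT(find_comp(&entry, 0, 1024) > 0);
}
#endif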

/*
 * Check to see if a given root/parent reference is attached to the head. This
 * only checks for BTRFS_ADD_DELAYED_REF references that match, as that
 * indicates the reference exists for the given root or parent. This is for
 * tree blocks only.
 *
 * @head: the head of the bytenr we're searching.
 * @root: the root objectid of the reference if it is a normal reference.
 * @parent: the parent if this is a shared backref.
 */
bool btrfs_find_delayed_tree_ref(struct btrfs_delayed_ref_head *head,
				 u64 root, u64 parent)
{
	struct rb_node *node;
	bool found = false;

	lockdep_assert_held(&head->mutex);

	spin_lock(&head->lock);
	node = head->ref_tree.rb_root.rb_node;
	while (node) {
		struct btrfs_delayed_ref_node *entry;
		int ret;

		entry = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
		ret = find_comp(entry, root, parent);
		if (ret < 0) {
			node = node->rb_left;
		} else if (ret > 0) {
			node = node->rb_right;
		} else {
			/*
			 * We only want to count ADD actions, as drops mean the
			 * ref doesn't exist.
			 */
			if (entry->action == BTRFS_ADD_DELAYED_REF)
				found = true;
			break;
		}
	}
	spin_unlock(&head->lock);
	return found;
}

void btrfs_destroy_delayed_refs(struct btrfs_transaction *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs = &trans->delayed_refs;
	struct btrfs_fs_info *fs_info = trans->fs_info;

	spin_lock(&delayed_refs->lock);
	while (true) {
		struct btrfs_delayed_ref_head *head;
		struct rb_node *n;
		bool pin_bytes = false;

		head = find_first_ref_head(delayed_refs);
		if (!head)
			break;

		if (!btrfs_delayed_ref_lock(delayed_refs, head))
			continue;

		spin_lock(&head->lock);
		while ((n = rb_first_cached(&head->ref_tree)) != NULL) {
			struct btrfs_delayed_ref_node *ref;

			ref = rb_entry(n, struct btrfs_delayed_ref_node, ref_node);
			drop_delayed_ref(fs_info, delayed_refs, head, ref);
		}
		if (head->must_insert_reserved)
			pin_bytes = true;
		btrfs_free_delayed_extent_op(head->extent_op);
		btrfs_delete_ref_head(fs_info, delayed_refs, head);
		spin_unlock(&head->lock);
		spin_unlock(&delayed_refs->lock);
		mutex_unlock(&head->mutex);

		if (pin_bytes) {
			struct btrfs_block_group *bg;

			bg = btrfs_lookup_block_group(fs_info, head->bytenr);
			if (WARN_ON_ONCE(bg == NULL)) {
				/*
				 * Unexpected, and there's nothing we can do
				 * here because we are in a transaction abort
				 * path, so any errors can only be ignored or
				 * reported while attempting to clean up all
				 * resources.
				 */
				btrfs_err(fs_info,
"block group for delayed ref at %llu was not found while destroying ref head",
					  head->bytenr);
			} else {
				spin_lock(&bg->space_info->lock);
				spin_lock(&bg->lock);
				bg->pinned += head->num_bytes;
				btrfs_space_info_update_bytes_pinned(fs_info,
								     bg->space_info,
								     head->num_bytes);
				bg->reserved -= head->num_bytes;
				bg->space_info->bytes_reserved -= head->num_bytes;
				spin_unlock(&bg->lock);
				spin_unlock(&bg->space_info->lock);

				btrfs_put_block_group(bg);
			}

			btrfs_error_unpin_extent_range(fs_info, head->bytenr,
					head->bytenr + head->num_bytes - 1);
		}
		btrfs_cleanup_ref_head_accounting(fs_info, delayed_refs, head);
		btrfs_put_delayed_ref_head(head);
		cond_resched();
		spin_lock(&delayed_refs->lock);
	}
	btrfs_qgroup_destroy_extent_records(trans);

	spin_unlock(&delayed_refs->lock);
}

void __cold btrfs_delayed_ref_exit(void)
{
	kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	kmem_cache_destroy(btrfs_delayed_ref_node_cachep);
	kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int __init btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = KMEM_CACHE(btrfs_delayed_ref_head, 0);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_ref_node_cachep = KMEM_CACHE(btrfs_delayed_ref_node, 0);
	if (!btrfs_delayed_ref_node_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = KMEM_CACHE(btrfs_delayed_extent_op, 0);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}