1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2009 Oracle. All rights reserved.
4 */
5
6#include <linux/sched.h>
7#include <linux/slab.h>
8#include <linux/sort.h>
9#include "messages.h"
10#include "ctree.h"
11#include "delayed-ref.h"
12#include "transaction.h"
13#include "qgroup.h"
14#include "space-info.h"
15#include "tree-mod-log.h"
16#include "fs.h"
17
18struct kmem_cache *btrfs_delayed_ref_head_cachep;
19struct kmem_cache *btrfs_delayed_tree_ref_cachep;
20struct kmem_cache *btrfs_delayed_data_ref_cachep;
21struct kmem_cache *btrfs_delayed_extent_op_cachep;
22/*
23 * delayed back reference update tracking. For subvolume trees
24 * we queue up extent allocations and backref maintenance for
25 * delayed processing. This avoids deep call chains where we
26 * add extents in the middle of btrfs_search_slot, and it allows
27 * us to buffer up frequently modified backrefs in an rb tree instead
28 * of hammering updates on the extent allocation tree.
29 */
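
/*
 * Illustrative sketch (not part of the original file): roughly how a caller
 * queues a delayed backref for a newly allocated tree block instead of
 * updating the extent tree inline. The root/level/parent values below are
 * made up; the helpers used are the ones defined later in this file.
 */
static int __maybe_unused example_queue_tree_ref(struct btrfs_trans_handle *trans,
						 u64 bytenr, u64 num_bytes)
{
	struct btrfs_ref ref = { 0 };

	/* Describe the extent being modified and the root that will own it. */
	btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_EXTENT, bytenr,
			       num_bytes, 0 /* parent */, BTRFS_FS_TREE_OBJECTID);
	/* It is a metadata (tree block) reference, here at level 0. */
	btrfs_init_tree_ref(&ref, 0, BTRFS_FS_TREE_OBJECTID, 0, false);

	/* Queue it; the extent tree is only touched when delayed refs run. */
	return btrfs_add_delayed_tree_ref(trans, &ref, NULL);
}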
30
31bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
32{
33 struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
34 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
35 bool ret = false;
36 u64 reserved;
37
38 spin_lock(&global_rsv->lock);
39 reserved = global_rsv->reserved;
40 spin_unlock(&global_rsv->lock);
41
42 /*
43 * Since the global reserve is just kind of magic we don't really want
44 * to rely on it to save our bacon, so if our size is more than the
45 * delayed_refs_rsv and the global rsv then it's time to think about
46 * bailing.
47 */
48 spin_lock(&delayed_refs_rsv->lock);
49 reserved += delayed_refs_rsv->reserved;
50 if (delayed_refs_rsv->size >= reserved)
51 ret = true;
52 spin_unlock(&delayed_refs_rsv->lock);
53 return ret;
54}
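
/*
 * Worked example (illustrative numbers only): if the global rsv has 8MiB
 * reserved and the delayed refs rsv has 2MiB reserved but a size of 16MiB,
 * then size (16MiB) >= reserved (10MiB) and this returns true, i.e. it is
 * time to think about flushing delayed refs rather than leaning on the
 * global reserve.
 */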
55
56/*
57 * Release a ref head's reservation.
58 *
59 * @fs_info: the filesystem
60 * @nr_refs: number of delayed refs to drop
61 * @nr_csums: number of csum items to drop
62 *
63 * Drops the delayed ref head's count from the delayed refs rsv and frees any
64 * excess reservation we had.
65 */
66void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr_refs, int nr_csums)
67{
68 struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
69 u64 num_bytes;
70 u64 released;
71
72 num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, nr_refs);
73 num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info, nr_csums);
74
75 released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
76 if (released)
77 trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
78 0, released, 0);
79}
80
81/*
82 * Adjust the size of the delayed refs rsv.
83 *
84 * This is to be called anytime we may have adjusted trans->delayed_ref_updates
85 * This is to be called any time we may have adjusted trans->delayed_ref_updates
86 * or trans->delayed_ref_csum_deletions; it will calculate the additional size and
87 */
88void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
89{
90 struct btrfs_fs_info *fs_info = trans->fs_info;
91 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
92 struct btrfs_block_rsv *local_rsv = &trans->delayed_rsv;
93 u64 num_bytes;
94 u64 reserved_bytes;
95
96 num_bytes = btrfs_calc_delayed_ref_bytes(fs_info, trans->delayed_ref_updates);
97 num_bytes += btrfs_calc_delayed_ref_csum_bytes(fs_info,
98 trans->delayed_ref_csum_deletions);
99
100 if (num_bytes == 0)
101 return;
102
103 /*
104 * Try to take num_bytes from the transaction's local delayed reserve.
105 * If not possible, take as much as is available. If the local
106 * reserve doesn't have enough reserved space, the delayed refs reserve
107 * will be refilled next time btrfs_delayed_refs_rsv_refill() is called
108 * by someone or if a transaction commit is triggered before that, the
109 * global block reserve will be used. We want to minimize using the
110 * global block reserve for cases we can account for in advance, to
111 * avoid exhausting it and reaching -ENOSPC during a transaction commit.
112 */
113 spin_lock(&local_rsv->lock);
114 reserved_bytes = min(num_bytes, local_rsv->reserved);
115 local_rsv->reserved -= reserved_bytes;
116 local_rsv->full = (local_rsv->reserved >= local_rsv->size);
117 spin_unlock(&local_rsv->lock);
118
119 spin_lock(&delayed_rsv->lock);
120 delayed_rsv->size += num_bytes;
121 delayed_rsv->reserved += reserved_bytes;
122 delayed_rsv->full = (delayed_rsv->reserved >= delayed_rsv->size);
123 spin_unlock(&delayed_rsv->lock);
124 trans->delayed_ref_updates = 0;
125 trans->delayed_ref_csum_deletions = 0;
126}
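
/*
 * Worked example (illustrative numbers only): suppose the code above computes
 * num_bytes worth 3 metadata units but the transaction's local reserve only
 * has 2 units still reserved. Then 2 units move to delayed_rsv->reserved,
 * delayed_rsv->size still grows by the full 3 units, and the 1-unit shortfall
 * is covered later by btrfs_delayed_refs_rsv_refill() or, at commit time, by
 * the global block reserve.
 */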
127
128/*
129 * Adjust the size of the delayed refs block reserve for 1 block group item
130 * insertion, used after allocating a block group.
131 */
132void btrfs_inc_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info)
133{
134 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
135
136 spin_lock(&delayed_rsv->lock);
137 /*
138 * Inserting a block group item does not require changing the free space
139 * tree, only the extent tree or the block group tree, so this is all we
140 * need.
141 */
142 delayed_rsv->size += btrfs_calc_insert_metadata_size(fs_info, 1);
143 delayed_rsv->full = false;
144 spin_unlock(&delayed_rsv->lock);
145}
146
147/*
148 * Adjust the size of the delayed refs block reserve to release space for 1
149 * block group item insertion.
150 */
151void btrfs_dec_delayed_refs_rsv_bg_inserts(struct btrfs_fs_info *fs_info)
152{
153 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
154 const u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, 1);
155 u64 released;
156
157 released = btrfs_block_rsv_release(fs_info, delayed_rsv, num_bytes, NULL);
158 if (released > 0)
159 trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
160 0, released, 0);
161}
162
163/*
164 * Adjust the size of the delayed refs block reserve for 1 block group item
165 * update.
166 */
167void btrfs_inc_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
168{
169 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
170
171 spin_lock(&delayed_rsv->lock);
172 /*
173 * Updating a block group item does not result in new nodes/leaves and
174 * does not require changing the free space tree, only the extent tree
175 * or the block group tree, so this is all we need.
176 */
177 delayed_rsv->size += btrfs_calc_metadata_size(fs_info, 1);
178 delayed_rsv->full = false;
179 spin_unlock(&delayed_rsv->lock);
180}
181
182/*
183 * Adjust the size of the delayed refs block reserve to release space for 1
184 * block group item update.
185 */
186void btrfs_dec_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info)
187{
188 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
189 const u64 num_bytes = btrfs_calc_metadata_size(fs_info, 1);
190 u64 released;
191
192 released = btrfs_block_rsv_release(fs_info, delayed_rsv, num_bytes, NULL);
193 if (released > 0)
194 trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
195 0, released, 0);
196}
197
198/*
199 * Transfer bytes to our delayed refs rsv.
200 *
201 * @fs_info: the filesystem
202 * @num_bytes: number of bytes to transfer
203 *
204 * This transfers up to the num_bytes amount, previously reserved, to the
205 * delayed_refs_rsv. Any extra bytes are returned to the space info.
206 */
207void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
208 u64 num_bytes)
209{
210 struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
211 u64 to_free = 0;
212
213 spin_lock(&delayed_refs_rsv->lock);
214 if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
215 u64 delta = delayed_refs_rsv->size -
216 delayed_refs_rsv->reserved;
217 if (num_bytes > delta) {
218 to_free = num_bytes - delta;
219 num_bytes = delta;
220 }
221 } else {
222 to_free = num_bytes;
223 num_bytes = 0;
224 }
225
226 if (num_bytes)
227 delayed_refs_rsv->reserved += num_bytes;
228 if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
229 delayed_refs_rsv->full = true;
230 spin_unlock(&delayed_refs_rsv->lock);
231
232 if (num_bytes)
233 trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
234 0, num_bytes, 1);
235 if (to_free)
236 btrfs_space_info_free_bytes_may_use(fs_info,
237 delayed_refs_rsv->space_info, to_free);
238}
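
/*
 * Worked example (illustrative numbers only): with delayed_refs_rsv->size at
 * 8MiB and ->reserved at 5MiB, the shortfall (delta) is 3MiB. Migrating
 * num_bytes = 4MiB keeps 3MiB for the reserve (making it full) and returns
 * the remaining 1MiB to the space info via
 * btrfs_space_info_free_bytes_may_use().
 */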
239
240/*
241 * Refill based on our delayed refs usage.
242 *
243 * @fs_info: the filesystem
244 * @flush: control how we can flush for this reservation.
245 *
246 * This will refill the delayed block_rsv with up to 1 item's worth of space and
247 * will return -ENOSPC if we can't make the reservation.
248 */
249int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
250 enum btrfs_reserve_flush_enum flush)
251{
252 struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
253 struct btrfs_space_info *space_info = block_rsv->space_info;
254 u64 limit = btrfs_calc_delayed_ref_bytes(fs_info, 1);
255 u64 num_bytes = 0;
256 u64 refilled_bytes;
257 u64 to_free;
258 int ret = -ENOSPC;
259
260 spin_lock(&block_rsv->lock);
261 if (block_rsv->reserved < block_rsv->size) {
262 num_bytes = block_rsv->size - block_rsv->reserved;
263 num_bytes = min(num_bytes, limit);
264 }
265 spin_unlock(&block_rsv->lock);
266
267 if (!num_bytes)
268 return 0;
269
270 ret = btrfs_reserve_metadata_bytes(fs_info, space_info, num_bytes, flush);
271 if (ret)
272 return ret;
273
274 /*
275 * We may have raced with someone else, so check again whether the block
276 * reserve is still not full and release any excess space.
277 */
278 spin_lock(&block_rsv->lock);
279 if (block_rsv->reserved < block_rsv->size) {
280 u64 needed = block_rsv->size - block_rsv->reserved;
281
282 if (num_bytes >= needed) {
283 block_rsv->reserved += needed;
284 block_rsv->full = true;
285 to_free = num_bytes - needed;
286 refilled_bytes = needed;
287 } else {
288 block_rsv->reserved += num_bytes;
289 to_free = 0;
290 refilled_bytes = num_bytes;
291 }
292 } else {
293 to_free = num_bytes;
294 refilled_bytes = 0;
295 }
296 spin_unlock(&block_rsv->lock);
297
298 if (to_free > 0)
299 btrfs_space_info_free_bytes_may_use(fs_info, space_info, to_free);
300
301 if (refilled_bytes > 0)
302 trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv", 0,
303 refilled_bytes, 1);
304 return 0;
305}
306
307/*
308 * compare two delayed tree backrefs with same bytenr and type
309 */
310static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
311 struct btrfs_delayed_tree_ref *ref2)
312{
313 if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
314 if (ref1->root < ref2->root)
315 return -1;
316 if (ref1->root > ref2->root)
317 return 1;
318 } else {
319 if (ref1->parent < ref2->parent)
320 return -1;
321 if (ref1->parent > ref2->parent)
322 return 1;
323 }
324 return 0;
325}
326
327/*
328 * compare two delayed data backrefs with same bytenr and type
329 */
330static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
331 struct btrfs_delayed_data_ref *ref2)
332{
333 if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
334 if (ref1->root < ref2->root)
335 return -1;
336 if (ref1->root > ref2->root)
337 return 1;
338 if (ref1->objectid < ref2->objectid)
339 return -1;
340 if (ref1->objectid > ref2->objectid)
341 return 1;
342 if (ref1->offset < ref2->offset)
343 return -1;
344 if (ref1->offset > ref2->offset)
345 return 1;
346 } else {
347 if (ref1->parent < ref2->parent)
348 return -1;
349 if (ref1->parent > ref2->parent)
350 return 1;
351 }
352 return 0;
353}
354
355static int comp_refs(struct btrfs_delayed_ref_node *ref1,
356 struct btrfs_delayed_ref_node *ref2,
357 bool check_seq)
358{
359 int ret = 0;
360
361 if (ref1->type < ref2->type)
362 return -1;
363 if (ref1->type > ref2->type)
364 return 1;
365 if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
366 ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
367 ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
368 btrfs_delayed_node_to_tree_ref(ref2));
369 else
370 ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
371 btrfs_delayed_node_to_data_ref(ref2));
372 if (ret)
373 return ret;
374 if (check_seq) {
375 if (ref1->seq < ref2->seq)
376 return -1;
377 if (ref1->seq > ref2->seq)
378 return 1;
379 }
380 return 0;
381}
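
/*
 * Example of the resulting ordering (illustrative): refs under one head are
 * sorted first by type (e.g. BTRFS_TREE_BLOCK_REF_KEY sorts before
 * BTRFS_SHARED_BLOCK_REF_KEY), then by root (or by parent for shared refs),
 * then by objectid/offset for data refs, and finally by seq when check_seq
 * is set, so otherwise identical refs with different sequence numbers keep
 * separate nodes in the tree.
 */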
382
383/* insert a new delayed ref head into the head ref rbtree */
384static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
385 struct rb_node *node)
386{
387 struct rb_node **p = &root->rb_root.rb_node;
388 struct rb_node *parent_node = NULL;
389 struct btrfs_delayed_ref_head *entry;
390 struct btrfs_delayed_ref_head *ins;
391 u64 bytenr;
392 bool leftmost = true;
393
394 ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
395 bytenr = ins->bytenr;
396 while (*p) {
397 parent_node = *p;
398 entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
399 href_node);
400
401 if (bytenr < entry->bytenr) {
402 p = &(*p)->rb_left;
403 } else if (bytenr > entry->bytenr) {
404 p = &(*p)->rb_right;
405 leftmost = false;
406 } else {
407 return entry;
408 }
409 }
410
411 rb_link_node(node, parent_node, p);
412 rb_insert_color_cached(node, root, leftmost);
413 return NULL;
414}
415
416static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
417 struct btrfs_delayed_ref_node *ins)
418{
419 struct rb_node **p = &root->rb_root.rb_node;
420 struct rb_node *node = &ins->ref_node;
421 struct rb_node *parent_node = NULL;
422 struct btrfs_delayed_ref_node *entry;
423 bool leftmost = true;
424
425 while (*p) {
426 int comp;
427
428 parent_node = *p;
429 entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
430 ref_node);
431 comp = comp_refs(ins, entry, true);
432 if (comp < 0) {
433 p = &(*p)->rb_left;
434 } else if (comp > 0) {
435 p = &(*p)->rb_right;
436 leftmost = false;
437 } else {
438 return entry;
439 }
440 }
441
442 rb_link_node(node, parent_node, p);
443 rb_insert_color_cached(node, root, leftmost);
444 return NULL;
445}
446
447static struct btrfs_delayed_ref_head *find_first_ref_head(
448 struct btrfs_delayed_ref_root *dr)
449{
450 struct rb_node *n;
451 struct btrfs_delayed_ref_head *entry;
452
453 n = rb_first_cached(&dr->href_root);
454 if (!n)
455 return NULL;
456
457 entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
458
459 return entry;
460}
461
462/*
463 * Find a head entry based on bytenr. This returns the delayed ref head if it
464 * was able to find one, or NULL if nothing was in that spot. If return_bigger
465 * is given, the next bigger entry is returned if no exact match is found.
466 */
467static struct btrfs_delayed_ref_head *find_ref_head(
468 struct btrfs_delayed_ref_root *dr, u64 bytenr,
469 bool return_bigger)
470{
471 struct rb_root *root = &dr->href_root.rb_root;
472 struct rb_node *n;
473 struct btrfs_delayed_ref_head *entry;
474
475 n = root->rb_node;
476 entry = NULL;
477 while (n) {
478 entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
479
480 if (bytenr < entry->bytenr)
481 n = n->rb_left;
482 else if (bytenr > entry->bytenr)
483 n = n->rb_right;
484 else
485 return entry;
486 }
487 if (entry && return_bigger) {
488 if (bytenr > entry->bytenr) {
489 n = rb_next(&entry->href_node);
490 if (!n)
491 return NULL;
492 entry = rb_entry(n, struct btrfs_delayed_ref_head,
493 href_node);
494 }
495 return entry;
496 }
497 return NULL;
498}
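
/*
 * Example (illustrative bytenrs): with heads at bytenr 4096 and 16384,
 * find_ref_head(dr, 8192, false) returns NULL, while
 * find_ref_head(dr, 8192, true) returns the head at 16384, the next bigger
 * entry after the missing one.
 */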
499
500int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
501 struct btrfs_delayed_ref_head *head)
502{
503 lockdep_assert_held(&delayed_refs->lock);
504 if (mutex_trylock(&head->mutex))
505 return 0;
506
507 refcount_inc(&head->refs);
508 spin_unlock(&delayed_refs->lock);
509
510 mutex_lock(&head->mutex);
511 spin_lock(&delayed_refs->lock);
512 if (RB_EMPTY_NODE(&head->href_node)) {
513 mutex_unlock(&head->mutex);
514 btrfs_put_delayed_ref_head(head);
515 return -EAGAIN;
516 }
517 btrfs_put_delayed_ref_head(head);
518 return 0;
519}
520
521static inline void drop_delayed_ref(struct btrfs_fs_info *fs_info,
522 struct btrfs_delayed_ref_root *delayed_refs,
523 struct btrfs_delayed_ref_head *head,
524 struct btrfs_delayed_ref_node *ref)
525{
526 lockdep_assert_held(&head->lock);
527 rb_erase_cached(&ref->ref_node, &head->ref_tree);
528 RB_CLEAR_NODE(&ref->ref_node);
529 if (!list_empty(&ref->add_list))
530 list_del(&ref->add_list);
531 btrfs_put_delayed_ref(ref);
532 atomic_dec(&delayed_refs->num_entries);
533 btrfs_delayed_refs_rsv_release(fs_info, 1, 0);
534}
535
536static bool merge_ref(struct btrfs_fs_info *fs_info,
537 struct btrfs_delayed_ref_root *delayed_refs,
538 struct btrfs_delayed_ref_head *head,
539 struct btrfs_delayed_ref_node *ref,
540 u64 seq)
541{
542 struct btrfs_delayed_ref_node *next;
543 struct rb_node *node = rb_next(&ref->ref_node);
544 bool done = false;
545
546 while (!done && node) {
547 int mod;
548
549 next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
550 node = rb_next(node);
551 if (seq && next->seq >= seq)
552 break;
553 if (comp_refs(ref, next, false))
554 break;
555
556 if (ref->action == next->action) {
557 mod = next->ref_mod;
558 } else {
559 if (ref->ref_mod < next->ref_mod) {
560 swap(ref, next);
561 done = true;
562 }
563 mod = -next->ref_mod;
564 }
565
566 drop_delayed_ref(fs_info, delayed_refs, head, next);
567 ref->ref_mod += mod;
568 if (ref->ref_mod == 0) {
569 drop_delayed_ref(fs_info, delayed_refs, head, ref);
570 done = true;
571 } else {
572 /*
573 * Can't have multiples of the same ref on a tree block.
574 */
575 WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
576 ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
577 }
578 }
579
580 return done;
581}
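
/*
 * Worked example (illustrative): a BTRFS_ADD_DELAYED_REF with ref_mod 1
 * followed by a BTRFS_DROP_DELAYED_REF with ref_mod 1 for the same ref key
 * have different actions and equal ref_mod, so mod becomes -1, the drop ref
 * is removed, and the add ref's ref_mod reaches 0 and it is dropped as well:
 * the two operations cancel without ever touching the extent tree.
 */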
582
583void btrfs_merge_delayed_refs(struct btrfs_fs_info *fs_info,
584 struct btrfs_delayed_ref_root *delayed_refs,
585 struct btrfs_delayed_ref_head *head)
586{
587 struct btrfs_delayed_ref_node *ref;
588 struct rb_node *node;
589 u64 seq = 0;
590
591 lockdep_assert_held(&head->lock);
592
593 if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
594 return;
595
596 /* We don't have too many refs to merge for data. */
597 if (head->is_data)
598 return;
599
600 seq = btrfs_tree_mod_log_lowest_seq(fs_info);
601again:
602 for (node = rb_first_cached(&head->ref_tree); node;
603 node = rb_next(node)) {
604 ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
605 if (seq && ref->seq >= seq)
606 continue;
607 if (merge_ref(fs_info, delayed_refs, head, ref, seq))
608 goto again;
609 }
610}
611
612int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
613{
614 int ret = 0;
615 u64 min_seq = btrfs_tree_mod_log_lowest_seq(fs_info);
616
617 if (min_seq != 0 && seq >= min_seq) {
618 btrfs_debug(fs_info,
619 "holding back delayed_ref %llu, lowest is %llu",
620 seq, min_seq);
621 ret = 1;
622 }
623
624 return ret;
625}
626
627struct btrfs_delayed_ref_head *btrfs_select_ref_head(
628 struct btrfs_delayed_ref_root *delayed_refs)
629{
630 struct btrfs_delayed_ref_head *head;
631
632 lockdep_assert_held(&delayed_refs->lock);
633again:
634 head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
635 true);
636 if (!head && delayed_refs->run_delayed_start != 0) {
637 delayed_refs->run_delayed_start = 0;
638 head = find_first_ref_head(delayed_refs);
639 }
640 if (!head)
641 return NULL;
642
643 while (head->processing) {
644 struct rb_node *node;
645
646 node = rb_next(&head->href_node);
647 if (!node) {
648 if (delayed_refs->run_delayed_start == 0)
649 return NULL;
650 delayed_refs->run_delayed_start = 0;
651 goto again;
652 }
653 head = rb_entry(node, struct btrfs_delayed_ref_head,
654 href_node);
655 }
656
657 head->processing = true;
658 WARN_ON(delayed_refs->num_heads_ready == 0);
659 delayed_refs->num_heads_ready--;
660 delayed_refs->run_delayed_start = head->bytenr +
661 head->num_bytes;
662 return head;
663}
664
665void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
666 struct btrfs_delayed_ref_head *head)
667{
668 lockdep_assert_held(&delayed_refs->lock);
669 lockdep_assert_held(&head->lock);
670
671 rb_erase_cached(&head->href_node, &delayed_refs->href_root);
672 RB_CLEAR_NODE(&head->href_node);
673 atomic_dec(&delayed_refs->num_entries);
674 delayed_refs->num_heads--;
675 if (!head->processing)
676 delayed_refs->num_heads_ready--;
677}
678
679/*
680 * Helper to insert the ref_node to the tail or merge with tail.
681 *
682 * Return false if the ref was inserted.
683 * Return true if the ref was merged into an existing one (and therefore can be
684 * freed by the caller).
685 */
686static bool insert_delayed_ref(struct btrfs_trans_handle *trans,
687 struct btrfs_delayed_ref_head *href,
688 struct btrfs_delayed_ref_node *ref)
689{
690 struct btrfs_delayed_ref_root *root = &trans->transaction->delayed_refs;
691 struct btrfs_delayed_ref_node *exist;
692 int mod;
693
694 spin_lock(&href->lock);
695 exist = tree_insert(&href->ref_tree, ref);
696 if (!exist) {
697 if (ref->action == BTRFS_ADD_DELAYED_REF)
698 list_add_tail(&ref->add_list, &href->ref_add_list);
699 atomic_inc(&root->num_entries);
700 spin_unlock(&href->lock);
701 trans->delayed_ref_updates++;
702 return false;
703 }
704
705 /* Now we are sure we can merge */
706 if (exist->action == ref->action) {
707 mod = ref->ref_mod;
708 } else {
709 /* Need to change action */
710 if (exist->ref_mod < ref->ref_mod) {
711 exist->action = ref->action;
712 mod = -exist->ref_mod;
713 exist->ref_mod = ref->ref_mod;
714 if (ref->action == BTRFS_ADD_DELAYED_REF)
715 list_add_tail(&exist->add_list,
716 &href->ref_add_list);
717 else if (ref->action == BTRFS_DROP_DELAYED_REF) {
718 ASSERT(!list_empty(&exist->add_list));
719 list_del(&exist->add_list);
720 } else {
721 ASSERT(0);
722 }
723 } else
724 mod = -ref->ref_mod;
725 }
726 exist->ref_mod += mod;
727
728 /* remove existing tail if its ref_mod is zero */
729 if (exist->ref_mod == 0)
730 drop_delayed_ref(trans->fs_info, root, href, exist);
731 spin_unlock(&href->lock);
732 return true;
733}
734
735/*
736 * helper function to update the accounting in the head ref
737 * existing and update must have the same bytenr
738 */
739static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
740 struct btrfs_delayed_ref_head *existing,
741 struct btrfs_delayed_ref_head *update)
742{
743 struct btrfs_delayed_ref_root *delayed_refs =
744 &trans->transaction->delayed_refs;
745 struct btrfs_fs_info *fs_info = trans->fs_info;
746 int old_ref_mod;
747
748 BUG_ON(existing->is_data != update->is_data);
749
750 spin_lock(&existing->lock);
751
752 /*
753 * When freeing an extent, we may not know the owning root when we
754 * first create the head_ref. However, some deref before the last deref
755 * will know it, so we just need to update the head_ref accordingly.
756 */
757 if (!existing->owning_root)
758 existing->owning_root = update->owning_root;
759
760 if (update->must_insert_reserved) {
761 /* if the extent was freed and then
762 * reallocated before the delayed ref
763 * entries were processed, we can end up
764 * with an existing head ref without
765 * the must_insert_reserved flag set.
766 * Set it again here
767 */
768 existing->must_insert_reserved = update->must_insert_reserved;
769 existing->owning_root = update->owning_root;
770
771 /*
772 * update the num_bytes so we make sure the accounting
773 * is done correctly
774 */
775 existing->num_bytes = update->num_bytes;
776
777 }
778
779 if (update->extent_op) {
780 if (!existing->extent_op) {
781 existing->extent_op = update->extent_op;
782 } else {
783 if (update->extent_op->update_key) {
784 memcpy(&existing->extent_op->key,
785 &update->extent_op->key,
786 sizeof(update->extent_op->key));
787 existing->extent_op->update_key = true;
788 }
789 if (update->extent_op->update_flags) {
790 existing->extent_op->flags_to_set |=
791 update->extent_op->flags_to_set;
792 existing->extent_op->update_flags = true;
793 }
794 btrfs_free_delayed_extent_op(update->extent_op);
795 }
796 }
797 /*
798 * Update the reference mod on the head to reflect this new operation.
799 * We only need the lock for this case because we could be processing it
800 * currently; for refs we just added we know we're a-ok.
801 */
802 old_ref_mod = existing->total_ref_mod;
803 existing->ref_mod += update->ref_mod;
804 existing->total_ref_mod += update->ref_mod;
805
806 /*
807 * If we are going from a positive ref mod to a negative or vice
808 * versa we need to make sure to adjust pending_csums accordingly.
809 * We reserve bytes for csum deletion when adding or updating a ref head,
810 * see add_delayed_ref_head() for more details.
811 */
812 if (existing->is_data) {
813 u64 csum_leaves =
814 btrfs_csum_bytes_to_leaves(fs_info,
815 existing->num_bytes);
816
817 if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
818 delayed_refs->pending_csums -= existing->num_bytes;
819 btrfs_delayed_refs_rsv_release(fs_info, 0, csum_leaves);
820 }
821 if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
822 delayed_refs->pending_csums += existing->num_bytes;
823 trans->delayed_ref_csum_deletions += csum_leaves;
824 }
825 }
826
827 spin_unlock(&existing->lock);
828}
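
/*
 * Worked example (illustrative): a data extent's head with total_ref_mod -1
 * (a pending free) had its byte count added to pending_csums and csum
 * deletion leaves reserved. If a later update brings total_ref_mod back to
 * 0 or above, the extent will not be freed after all, so pending_csums is
 * reduced again and the csum reservation is released via
 * btrfs_delayed_refs_rsv_release().
 */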
829
830static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
831 struct btrfs_qgroup_extent_record *qrecord,
832 u64 bytenr, u64 num_bytes, u64 ref_root,
833 u64 reserved, int action, bool is_data,
834 bool is_system, u64 owning_root)
835{
836 int count_mod = 1;
837 bool must_insert_reserved = false;
838
839 /* If reserved is provided, it must be a data extent. */
840 BUG_ON(!is_data && reserved);
841
842 switch (action) {
843 case BTRFS_UPDATE_DELAYED_HEAD:
844 count_mod = 0;
845 break;
846 case BTRFS_DROP_DELAYED_REF:
847 /*
848 * The head node stores the sum of all the mods, so dropping a ref
849 * should drop the sum in the head node by one.
850 */
851 count_mod = -1;
852 break;
853 case BTRFS_ADD_DELAYED_EXTENT:
854 /*
855 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the
856 * reserved accounting when the extent is finally added, or if a
857 * later modification deletes the delayed ref without ever
858 * inserting the extent into the extent allocation tree.
859 * ref->must_insert_reserved is the flag used to record that
860 * accounting mods are required.
861 *
862 * Once we record must_insert_reserved, switch the action to
863 * BTRFS_ADD_DELAYED_REF because other special casing is not
864 * required.
865 */
866 must_insert_reserved = true;
867 break;
868 }
869
870 refcount_set(&head_ref->refs, 1);
871 head_ref->bytenr = bytenr;
872 head_ref->num_bytes = num_bytes;
873 head_ref->ref_mod = count_mod;
874 head_ref->reserved_bytes = reserved;
875 head_ref->must_insert_reserved = must_insert_reserved;
876 head_ref->owning_root = owning_root;
877 head_ref->is_data = is_data;
878 head_ref->is_system = is_system;
879 head_ref->ref_tree = RB_ROOT_CACHED;
880 INIT_LIST_HEAD(&head_ref->ref_add_list);
881 RB_CLEAR_NODE(&head_ref->href_node);
882 head_ref->processing = false;
883 head_ref->total_ref_mod = count_mod;
884 spin_lock_init(&head_ref->lock);
885 mutex_init(&head_ref->mutex);
886
887 if (qrecord) {
888 if (ref_root && reserved) {
889 qrecord->data_rsv = reserved;
890 qrecord->data_rsv_refroot = ref_root;
891 }
892 qrecord->bytenr = bytenr;
893 qrecord->num_bytes = num_bytes;
894 qrecord->old_roots = NULL;
895 }
896}
897
898/*
899 * helper function to actually insert a head node into the rbtree.
900 * this does all the dirty work in terms of maintaining the correct
901 * overall modification count.
902 */
903static noinline struct btrfs_delayed_ref_head *
904add_delayed_ref_head(struct btrfs_trans_handle *trans,
905 struct btrfs_delayed_ref_head *head_ref,
906 struct btrfs_qgroup_extent_record *qrecord,
907 int action, bool *qrecord_inserted_ret)
908{
909 struct btrfs_delayed_ref_head *existing;
910 struct btrfs_delayed_ref_root *delayed_refs;
911 bool qrecord_inserted = false;
912
913 delayed_refs = &trans->transaction->delayed_refs;
914
915 /* Record qgroup extent info if provided */
916 if (qrecord) {
917 if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
918 delayed_refs, qrecord))
919 kfree(qrecord);
920 else
921 qrecord_inserted = true;
922 }
923
924 trace_add_delayed_ref_head(trans->fs_info, head_ref, action);
925
926 existing = htree_insert(&delayed_refs->href_root,
927 &head_ref->href_node);
928 if (existing) {
929 update_existing_head_ref(trans, existing, head_ref);
930 /*
931 * we've updated the existing ref, free the newly
932 * allocated ref
933 */
934 kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
935 head_ref = existing;
936 } else {
937 /*
938 * We reserve the amount of bytes needed to delete csums when
939 * adding the ref head and not when adding individual drop refs
940 * since the csum items are deleted only after running the last
941 * delayed drop ref (the data extent's ref count drops to 0).
942 */
943 if (head_ref->is_data && head_ref->ref_mod < 0) {
944 delayed_refs->pending_csums += head_ref->num_bytes;
945 trans->delayed_ref_csum_deletions +=
946 btrfs_csum_bytes_to_leaves(trans->fs_info,
947 head_ref->num_bytes);
948 }
949 delayed_refs->num_heads++;
950 delayed_refs->num_heads_ready++;
951 atomic_inc(&delayed_refs->num_entries);
952 }
953 if (qrecord_inserted_ret)
954 *qrecord_inserted_ret = qrecord_inserted;
955
956 return head_ref;
957}
958
959/*
960 * Initialize the structure which represents a modification to an extent.
961 *
962 * @fs_info: The fs_info structure of the mounted filesystem.
963 *
964 * @ref: The structure which is going to be initialized.
965 *
966 * @bytenr: The logical address of the extent for which a modification is
967 * going to be recorded.
968 *
969 * @num_bytes: Size of the extent whose modification is being recorded.
970 *
971 * @ref_root: The id of the root where this modification has originated, this
972 * can be either one of the well-known metadata trees or the
973 * subvolume id which references this extent.
974 *
975 * @action: Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
976 * BTRFS_ADD_DELAYED_EXTENT
977 *
978 * @ref_type: Holds the type of the extent which is being recorded, can be
979 * one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
980 * when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
981 * BTRFS_EXTENT_DATA_REF_KEY when recording data extent
982 */
983static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
984 struct btrfs_delayed_ref_node *ref,
985 u64 bytenr, u64 num_bytes, u64 ref_root,
986 int action, u8 ref_type)
987{
988 u64 seq = 0;
989
990 if (action == BTRFS_ADD_DELAYED_EXTENT)
991 action = BTRFS_ADD_DELAYED_REF;
992
993 if (is_fstree(ref_root))
994 seq = atomic64_read(&fs_info->tree_mod_seq);
995
996 refcount_set(&ref->refs, 1);
997 ref->bytenr = bytenr;
998 ref->num_bytes = num_bytes;
999 ref->ref_mod = 1;
1000 ref->action = action;
1001 ref->seq = seq;
1002 ref->type = ref_type;
1003 RB_CLEAR_NODE(&ref->ref_node);
1004 INIT_LIST_HEAD(&ref->add_list);
1005}
1006
1007void btrfs_init_generic_ref(struct btrfs_ref *generic_ref, int action, u64 bytenr,
1008 u64 len, u64 parent, u64 owning_root)
1009{
1010 generic_ref->action = action;
1011 generic_ref->bytenr = bytenr;
1012 generic_ref->len = len;
1013 generic_ref->parent = parent;
1014 generic_ref->owning_root = owning_root;
1015}
1016
1017void btrfs_init_tree_ref(struct btrfs_ref *generic_ref, int level, u64 root,
1018 u64 mod_root, bool skip_qgroup)
1019{
1020#ifdef CONFIG_BTRFS_FS_REF_VERIFY
1021 /* If @real_root not set, use @root as fallback */
1022 generic_ref->real_root = mod_root ?: root;
1023#endif
1024 generic_ref->tree_ref.level = level;
1025 generic_ref->tree_ref.ref_root = root;
1026 generic_ref->type = BTRFS_REF_METADATA;
1027 if (skip_qgroup || !(is_fstree(root) &&
1028 (!mod_root || is_fstree(mod_root))))
1029 generic_ref->skip_qgroup = true;
1030 else
1031 generic_ref->skip_qgroup = false;
1032
1033}
1034
1035void btrfs_init_data_ref(struct btrfs_ref *generic_ref, u64 ref_root, u64 ino,
1036 u64 offset, u64 mod_root, bool skip_qgroup)
1037{
1038#ifdef CONFIG_BTRFS_FS_REF_VERIFY
1039 /* If @real_root not set, use @root as fallback */
1040 generic_ref->real_root = mod_root ?: ref_root;
1041#endif
1042 generic_ref->data_ref.ref_root = ref_root;
1043 generic_ref->data_ref.ino = ino;
1044 generic_ref->data_ref.offset = offset;
1045 generic_ref->type = BTRFS_REF_DATA;
1046 if (skip_qgroup || !(is_fstree(ref_root) &&
1047 (!mod_root || is_fstree(mod_root))))
1048 generic_ref->skip_qgroup = true;
1049 else
1050 generic_ref->skip_qgroup = false;
1051}
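
/*
 * Example (illustrative root ids): qgroup tracing is only kept for extents
 * owned by subvolume trees. With ref_root = 257 (an fs tree) and
 * mod_root = 0, skip_qgroup stays false; with ref_root =
 * BTRFS_EXTENT_TREE_OBJECTID (not an fs tree) the record is skipped, as is
 * any ref where the caller explicitly passed skip_qgroup = true.
 */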
1052
1053/*
1054 * add a delayed tree ref. This does all of the accounting required
1055 * to make sure the delayed ref is eventually processed before this
1056 * transaction commits.
1057 */
1058int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
1059 struct btrfs_ref *generic_ref,
1060 struct btrfs_delayed_extent_op *extent_op)
1061{
1062 struct btrfs_fs_info *fs_info = trans->fs_info;
1063 struct btrfs_delayed_tree_ref *ref;
1064 struct btrfs_delayed_ref_head *head_ref;
1065 struct btrfs_delayed_ref_root *delayed_refs;
1066 struct btrfs_qgroup_extent_record *record = NULL;
1067 bool qrecord_inserted;
1068 bool is_system;
1069 bool merged;
1070 int action = generic_ref->action;
1071 int level = generic_ref->tree_ref.level;
1072 u64 bytenr = generic_ref->bytenr;
1073 u64 num_bytes = generic_ref->len;
1074 u64 parent = generic_ref->parent;
1075 u8 ref_type;
1076
1077 is_system = (generic_ref->tree_ref.ref_root == BTRFS_CHUNK_TREE_OBJECTID);
1078
1079 ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
1080 ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
1081 if (!ref)
1082 return -ENOMEM;
1083
1084 head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
1085 if (!head_ref) {
1086 kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
1087 return -ENOMEM;
1088 }
1089
1090 if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
1091 record = kzalloc(sizeof(*record), GFP_NOFS);
1092 if (!record) {
1093 kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
1094 kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
1095 return -ENOMEM;
1096 }
1097 }
1098
1099 if (parent)
1100 ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
1101 else
1102 ref_type = BTRFS_TREE_BLOCK_REF_KEY;
1103
1104 init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
1105 generic_ref->tree_ref.ref_root, action,
1106 ref_type);
1107 ref->root = generic_ref->tree_ref.ref_root;
1108 ref->parent = parent;
1109 ref->level = level;
1110
1111 init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
1112 generic_ref->tree_ref.ref_root, 0, action,
1113 false, is_system, generic_ref->owning_root);
1114 head_ref->extent_op = extent_op;
1115
1116 delayed_refs = &trans->transaction->delayed_refs;
1117 spin_lock(&delayed_refs->lock);
1118
1119 /*
1120 * insert both the head node and the new ref without dropping
1121 * the spin lock
1122 */
1123 head_ref = add_delayed_ref_head(trans, head_ref, record,
1124 action, &qrecord_inserted);
1125
1126 merged = insert_delayed_ref(trans, head_ref, &ref->node);
1127 spin_unlock(&delayed_refs->lock);
1128
1129 /*
1130 * Need to update the delayed_refs_rsv with any changes we may have
1131 * made.
1132 */
1133 btrfs_update_delayed_refs_rsv(trans);
1134
1135 trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
1136 action == BTRFS_ADD_DELAYED_EXTENT ?
1137 BTRFS_ADD_DELAYED_REF : action);
1138 if (merged)
1139 kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
1140
1141 if (qrecord_inserted)
1142 btrfs_qgroup_trace_extent_post(trans, record);
1143
1144 return 0;
1145}
1146
1147/*
1148 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
1149 */
1150int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
1151 struct btrfs_ref *generic_ref,
1152 u64 reserved)
1153{
1154 struct btrfs_fs_info *fs_info = trans->fs_info;
1155 struct btrfs_delayed_data_ref *ref;
1156 struct btrfs_delayed_ref_head *head_ref;
1157 struct btrfs_delayed_ref_root *delayed_refs;
1158 struct btrfs_qgroup_extent_record *record = NULL;
1159 bool qrecord_inserted;
1160 int action = generic_ref->action;
1161 bool merged;
1162 u64 bytenr = generic_ref->bytenr;
1163 u64 num_bytes = generic_ref->len;
1164 u64 parent = generic_ref->parent;
1165 u64 ref_root = generic_ref->data_ref.ref_root;
1166 u64 owner = generic_ref->data_ref.ino;
1167 u64 offset = generic_ref->data_ref.offset;
1168 u8 ref_type;
1169
1170 ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
1171 ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
1172 if (!ref)
1173 return -ENOMEM;
1174
1175 if (parent)
1176 ref_type = BTRFS_SHARED_DATA_REF_KEY;
1177 else
1178 ref_type = BTRFS_EXTENT_DATA_REF_KEY;
1179 init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
1180 ref_root, action, ref_type);
1181 ref->root = ref_root;
1182 ref->parent = parent;
1183 ref->objectid = owner;
1184 ref->offset = offset;
1185
1186
1187 head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
1188 if (!head_ref) {
1189 kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
1190 return -ENOMEM;
1191 }
1192
1193 if (btrfs_qgroup_full_accounting(fs_info) && !generic_ref->skip_qgroup) {
1194 record = kzalloc(sizeof(*record), GFP_NOFS);
1195 if (!record) {
1196 kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
1197 kmem_cache_free(btrfs_delayed_ref_head_cachep,
1198 head_ref);
1199 return -ENOMEM;
1200 }
1201 }
1202
1203 init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
1204 reserved, action, true, false, generic_ref->owning_root);
1205 head_ref->extent_op = NULL;
1206
1207 delayed_refs = &trans->transaction->delayed_refs;
1208 spin_lock(&delayed_refs->lock);
1209
1210 /*
1211 * insert both the head node and the new ref without dropping
1212 * the spin lock
1213 */
1214 head_ref = add_delayed_ref_head(trans, head_ref, record,
1215 action, &qrecord_inserted);
1216
1217 merged = insert_delayed_ref(trans, head_ref, &ref->node);
1218 spin_unlock(&delayed_refs->lock);
1219
1220 /*
1221 * Need to update the delayed_refs_rsv with any changes we may have
1222 * made.
1223 */
1224 btrfs_update_delayed_refs_rsv(trans);
1225
1226 trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
1227 action == BTRFS_ADD_DELAYED_EXTENT ?
1228 BTRFS_ADD_DELAYED_REF : action);
1229 if (merged)
1230 kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
1231
1232
1233 if (qrecord_inserted)
1234 return btrfs_qgroup_trace_extent_post(trans, record);
1235 return 0;
1236}
1237
1238int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
1239 u64 bytenr, u64 num_bytes,
1240 struct btrfs_delayed_extent_op *extent_op)
1241{
1242 struct btrfs_delayed_ref_head *head_ref;
1243 struct btrfs_delayed_ref_root *delayed_refs;
1244
1245 head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
1246 if (!head_ref)
1247 return -ENOMEM;
1248
1249 init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
1250 BTRFS_UPDATE_DELAYED_HEAD, false, false, 0);
1251 head_ref->extent_op = extent_op;
1252
1253 delayed_refs = &trans->transaction->delayed_refs;
1254 spin_lock(&delayed_refs->lock);
1255
1256 add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
1257 NULL);
1258
1259 spin_unlock(&delayed_refs->lock);
1260
1261 /*
1262 * Need to update the delayed_refs_rsv with any changes we may have
1263 * made.
1264 */
1265 btrfs_update_delayed_refs_rsv(trans);
1266 return 0;
1267}
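
/*
 * Illustrative sketch (not part of the original file): queueing a flags-only
 * update for an existing tree block, e.g. to set
 * BTRFS_BLOCK_FLAG_FULL_BACKREF. This assumes the
 * btrfs_alloc_delayed_extent_op() helper from delayed-ref.h; the level value
 * is made up and would normally come from the extent buffer.
 */
static int __maybe_unused example_queue_extent_op(struct btrfs_trans_handle *trans,
						  u64 bytenr, u64 num_bytes)
{
	struct btrfs_delayed_extent_op *extent_op;
	int ret;

	extent_op = btrfs_alloc_delayed_extent_op();
	if (!extent_op)
		return -ENOMEM;

	extent_op->flags_to_set = BTRFS_BLOCK_FLAG_FULL_BACKREF;
	extent_op->update_flags = true;
	extent_op->update_key = false;
	extent_op->level = 0;

	/* The op is attached to the head ref and applied when the head runs. */
	ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
	if (ret)
		btrfs_free_delayed_extent_op(extent_op);
	return ret;
}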
1268
1269void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
1270{
1271 if (refcount_dec_and_test(&ref->refs)) {
1272 WARN_ON(!RB_EMPTY_NODE(&ref->ref_node));
1273 switch (ref->type) {
1274 case BTRFS_TREE_BLOCK_REF_KEY:
1275 case BTRFS_SHARED_BLOCK_REF_KEY:
1276 kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
1277 break;
1278 case BTRFS_EXTENT_DATA_REF_KEY:
1279 case BTRFS_SHARED_DATA_REF_KEY:
1280 kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
1281 break;
1282 default:
1283 BUG();
1284 }
1285 }
1286}
1287
1288/*
1289 * This does a simple search for the head node for a given extent. Returns the
1290 * head node if found, or NULL if not.
1291 */
1292struct btrfs_delayed_ref_head *
1293btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
1294{
1295 lockdep_assert_held(&delayed_refs->lock);
1296
1297 return find_ref_head(delayed_refs, bytenr, false);
1298}
1299
1300void __cold btrfs_delayed_ref_exit(void)
1301{
1302 kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
1303 kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
1304 kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
1305 kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
1306}
1307
1308int __init btrfs_delayed_ref_init(void)
1309{
1310 btrfs_delayed_ref_head_cachep = KMEM_CACHE(btrfs_delayed_ref_head, 0);
1311 if (!btrfs_delayed_ref_head_cachep)
1312 goto fail;
1313
1314 btrfs_delayed_tree_ref_cachep = KMEM_CACHE(btrfs_delayed_tree_ref, 0);
1315 if (!btrfs_delayed_tree_ref_cachep)
1316 goto fail;
1317
1318 btrfs_delayed_data_ref_cachep = KMEM_CACHE(btrfs_delayed_data_ref, 0);
1319 if (!btrfs_delayed_data_ref_cachep)
1320 goto fail;
1321
1322 btrfs_delayed_extent_op_cachep = KMEM_CACHE(btrfs_delayed_extent_op, 0);
1323 if (!btrfs_delayed_extent_op_cachep)
1324 goto fail;
1325
1326 return 0;
1327fail:
1328 btrfs_delayed_ref_exit();
1329 return -ENOMEM;
1330}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2009 Oracle. All rights reserved.
4 */
5
6#include <linux/sched.h>
7#include <linux/slab.h>
8#include <linux/sort.h>
9#include "ctree.h"
10#include "delayed-ref.h"
11#include "transaction.h"
12#include "qgroup.h"
13#include "space-info.h"
14
15struct kmem_cache *btrfs_delayed_ref_head_cachep;
16struct kmem_cache *btrfs_delayed_tree_ref_cachep;
17struct kmem_cache *btrfs_delayed_data_ref_cachep;
18struct kmem_cache *btrfs_delayed_extent_op_cachep;
19/*
20 * delayed back reference update tracking. For subvolume trees
21 * we queue up extent allocations and backref maintenance for
22 * delayed processing. This avoids deep call chains where we
23 * add extents in the middle of btrfs_search_slot, and it allows
24 * us to buffer up frequently modified backrefs in an rb tree instead
25 * of hammering updates on the extent allocation tree.
26 */
27
28bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info)
29{
30 struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
31 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
32 bool ret = false;
33 u64 reserved;
34
35 spin_lock(&global_rsv->lock);
36 reserved = global_rsv->reserved;
37 spin_unlock(&global_rsv->lock);
38
39 /*
40 * Since the global reserve is just kind of magic we don't really want
41 * to rely on it to save our bacon, so if our size is more than the
42 * delayed_refs_rsv and the global rsv then it's time to think about
43 * bailing.
44 */
45 spin_lock(&delayed_refs_rsv->lock);
46 reserved += delayed_refs_rsv->reserved;
47 if (delayed_refs_rsv->size >= reserved)
48 ret = true;
49 spin_unlock(&delayed_refs_rsv->lock);
50 return ret;
51}
52
53int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans)
54{
55 u64 num_entries =
56 atomic_read(&trans->transaction->delayed_refs.num_entries);
57 u64 avg_runtime;
58 u64 val;
59
60 smp_mb();
61 avg_runtime = trans->fs_info->avg_delayed_ref_runtime;
62 val = num_entries * avg_runtime;
63 if (val >= NSEC_PER_SEC)
64 return 1;
65 if (val >= NSEC_PER_SEC / 2)
66 return 2;
67
68 return btrfs_check_space_for_delayed_refs(trans->fs_info);
69}
70
71/**
72 * btrfs_delayed_refs_rsv_release - release a ref head's reservation.
73 * @fs_info - the fs_info for our fs.
74 * @nr - the number of items to drop.
75 *
76 * This drops the delayed ref head's count from the delayed refs rsv and frees
77 * any excess reservation we had.
78 */
79void btrfs_delayed_refs_rsv_release(struct btrfs_fs_info *fs_info, int nr)
80{
81 struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
82 u64 num_bytes = btrfs_calc_insert_metadata_size(fs_info, nr);
83 u64 released = 0;
84
85 released = btrfs_block_rsv_release(fs_info, block_rsv, num_bytes, NULL);
86 if (released)
87 trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
88 0, released, 0);
89}
90
91/*
92 * btrfs_update_delayed_refs_rsv - adjust the size of the delayed refs rsv
93 * @trans - the trans that may have generated delayed refs
94 *
95 * This is to be called anytime we may have adjusted trans->delayed_ref_updates,
96 * it'll calculate the additional size and add it to the delayed_refs_rsv.
97 */
98void btrfs_update_delayed_refs_rsv(struct btrfs_trans_handle *trans)
99{
100 struct btrfs_fs_info *fs_info = trans->fs_info;
101 struct btrfs_block_rsv *delayed_rsv = &fs_info->delayed_refs_rsv;
102 u64 num_bytes;
103
104 if (!trans->delayed_ref_updates)
105 return;
106
107 num_bytes = btrfs_calc_insert_metadata_size(fs_info,
108 trans->delayed_ref_updates);
109 spin_lock(&delayed_rsv->lock);
110 delayed_rsv->size += num_bytes;
111 delayed_rsv->full = 0;
112 spin_unlock(&delayed_rsv->lock);
113 trans->delayed_ref_updates = 0;
114}
115
116/**
117 * btrfs_migrate_to_delayed_refs_rsv - transfer bytes to our delayed refs rsv.
118 * @fs_info - the fs info for our fs.
119 * @src - the source block rsv to transfer from.
120 * @num_bytes - the number of bytes to transfer.
121 *
122 * This transfers up to the num_bytes amount from the src rsv to the
123 * delayed_refs_rsv. Any extra bytes are returned to the space info.
124 */
125void btrfs_migrate_to_delayed_refs_rsv(struct btrfs_fs_info *fs_info,
126 struct btrfs_block_rsv *src,
127 u64 num_bytes)
128{
129 struct btrfs_block_rsv *delayed_refs_rsv = &fs_info->delayed_refs_rsv;
130 u64 to_free = 0;
131
132 spin_lock(&src->lock);
133 src->reserved -= num_bytes;
134 src->size -= num_bytes;
135 spin_unlock(&src->lock);
136
137 spin_lock(&delayed_refs_rsv->lock);
138 if (delayed_refs_rsv->size > delayed_refs_rsv->reserved) {
139 u64 delta = delayed_refs_rsv->size -
140 delayed_refs_rsv->reserved;
141 if (num_bytes > delta) {
142 to_free = num_bytes - delta;
143 num_bytes = delta;
144 }
145 } else {
146 to_free = num_bytes;
147 num_bytes = 0;
148 }
149
150 if (num_bytes)
151 delayed_refs_rsv->reserved += num_bytes;
152 if (delayed_refs_rsv->reserved >= delayed_refs_rsv->size)
153 delayed_refs_rsv->full = 1;
154 spin_unlock(&delayed_refs_rsv->lock);
155
156 if (num_bytes)
157 trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
158 0, num_bytes, 1);
159 if (to_free)
160 btrfs_space_info_free_bytes_may_use(fs_info,
161 delayed_refs_rsv->space_info, to_free);
162}
163
164/**
165 * btrfs_delayed_refs_rsv_refill - refill based on our delayed refs usage.
166 * @fs_info - the fs_info for our fs.
167 * @flush - control how we can flush for this reservation.
168 *
169 * This will refill the delayed block_rsv up to 1 items size worth of space and
170 * will return -ENOSPC if we can't make the reservation.
171 */
172int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
173 enum btrfs_reserve_flush_enum flush)
174{
175 struct btrfs_block_rsv *block_rsv = &fs_info->delayed_refs_rsv;
176 u64 limit = btrfs_calc_insert_metadata_size(fs_info, 1);
177 u64 num_bytes = 0;
178 int ret = -ENOSPC;
179
180 spin_lock(&block_rsv->lock);
181 if (block_rsv->reserved < block_rsv->size) {
182 num_bytes = block_rsv->size - block_rsv->reserved;
183 num_bytes = min(num_bytes, limit);
184 }
185 spin_unlock(&block_rsv->lock);
186
187 if (!num_bytes)
188 return 0;
189
190 ret = btrfs_reserve_metadata_bytes(fs_info->extent_root, block_rsv,
191 num_bytes, flush);
192 if (ret)
193 return ret;
194 btrfs_block_rsv_add_bytes(block_rsv, num_bytes, 0);
195 trace_btrfs_space_reservation(fs_info, "delayed_refs_rsv",
196 0, num_bytes, 1);
197 return 0;
198}
199
200/*
201 * compare two delayed tree backrefs with same bytenr and type
202 */
203static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref1,
204 struct btrfs_delayed_tree_ref *ref2)
205{
206 if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) {
207 if (ref1->root < ref2->root)
208 return -1;
209 if (ref1->root > ref2->root)
210 return 1;
211 } else {
212 if (ref1->parent < ref2->parent)
213 return -1;
214 if (ref1->parent > ref2->parent)
215 return 1;
216 }
217 return 0;
218}
219
220/*
221 * compare two delayed data backrefs with same bytenr and type
222 */
223static int comp_data_refs(struct btrfs_delayed_data_ref *ref1,
224 struct btrfs_delayed_data_ref *ref2)
225{
226 if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
227 if (ref1->root < ref2->root)
228 return -1;
229 if (ref1->root > ref2->root)
230 return 1;
231 if (ref1->objectid < ref2->objectid)
232 return -1;
233 if (ref1->objectid > ref2->objectid)
234 return 1;
235 if (ref1->offset < ref2->offset)
236 return -1;
237 if (ref1->offset > ref2->offset)
238 return 1;
239 } else {
240 if (ref1->parent < ref2->parent)
241 return -1;
242 if (ref1->parent > ref2->parent)
243 return 1;
244 }
245 return 0;
246}
247
248static int comp_refs(struct btrfs_delayed_ref_node *ref1,
249 struct btrfs_delayed_ref_node *ref2,
250 bool check_seq)
251{
252 int ret = 0;
253
254 if (ref1->type < ref2->type)
255 return -1;
256 if (ref1->type > ref2->type)
257 return 1;
258 if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
259 ref1->type == BTRFS_SHARED_BLOCK_REF_KEY)
260 ret = comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref1),
261 btrfs_delayed_node_to_tree_ref(ref2));
262 else
263 ret = comp_data_refs(btrfs_delayed_node_to_data_ref(ref1),
264 btrfs_delayed_node_to_data_ref(ref2));
265 if (ret)
266 return ret;
267 if (check_seq) {
268 if (ref1->seq < ref2->seq)
269 return -1;
270 if (ref1->seq > ref2->seq)
271 return 1;
272 }
273 return 0;
274}
275
276/* insert a new ref to head ref rbtree */
277static struct btrfs_delayed_ref_head *htree_insert(struct rb_root_cached *root,
278 struct rb_node *node)
279{
280 struct rb_node **p = &root->rb_root.rb_node;
281 struct rb_node *parent_node = NULL;
282 struct btrfs_delayed_ref_head *entry;
283 struct btrfs_delayed_ref_head *ins;
284 u64 bytenr;
285 bool leftmost = true;
286
287 ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
288 bytenr = ins->bytenr;
289 while (*p) {
290 parent_node = *p;
291 entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
292 href_node);
293
294 if (bytenr < entry->bytenr) {
295 p = &(*p)->rb_left;
296 } else if (bytenr > entry->bytenr) {
297 p = &(*p)->rb_right;
298 leftmost = false;
299 } else {
300 return entry;
301 }
302 }
303
304 rb_link_node(node, parent_node, p);
305 rb_insert_color_cached(node, root, leftmost);
306 return NULL;
307}
308
309static struct btrfs_delayed_ref_node* tree_insert(struct rb_root_cached *root,
310 struct btrfs_delayed_ref_node *ins)
311{
312 struct rb_node **p = &root->rb_root.rb_node;
313 struct rb_node *node = &ins->ref_node;
314 struct rb_node *parent_node = NULL;
315 struct btrfs_delayed_ref_node *entry;
316 bool leftmost = true;
317
318 while (*p) {
319 int comp;
320
321 parent_node = *p;
322 entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
323 ref_node);
324 comp = comp_refs(ins, entry, true);
325 if (comp < 0) {
326 p = &(*p)->rb_left;
327 } else if (comp > 0) {
328 p = &(*p)->rb_right;
329 leftmost = false;
330 } else {
331 return entry;
332 }
333 }
334
335 rb_link_node(node, parent_node, p);
336 rb_insert_color_cached(node, root, leftmost);
337 return NULL;
338}
339
340static struct btrfs_delayed_ref_head *find_first_ref_head(
341 struct btrfs_delayed_ref_root *dr)
342{
343 struct rb_node *n;
344 struct btrfs_delayed_ref_head *entry;
345
346 n = rb_first_cached(&dr->href_root);
347 if (!n)
348 return NULL;
349
350 entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
351
352 return entry;
353}
354
355/*
356 * Find a head entry based on bytenr. This returns the delayed ref head if it
357 * was able to find one, or NULL if nothing was in that spot. If return_bigger
358 * is given, the next bigger entry is returned if no exact match is found.
359 */
360static struct btrfs_delayed_ref_head *find_ref_head(
361 struct btrfs_delayed_ref_root *dr, u64 bytenr,
362 bool return_bigger)
363{
364 struct rb_root *root = &dr->href_root.rb_root;
365 struct rb_node *n;
366 struct btrfs_delayed_ref_head *entry;
367
368 n = root->rb_node;
369 entry = NULL;
370 while (n) {
371 entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);
372
373 if (bytenr < entry->bytenr)
374 n = n->rb_left;
375 else if (bytenr > entry->bytenr)
376 n = n->rb_right;
377 else
378 return entry;
379 }
380 if (entry && return_bigger) {
381 if (bytenr > entry->bytenr) {
382 n = rb_next(&entry->href_node);
383 if (!n)
384 return NULL;
385 entry = rb_entry(n, struct btrfs_delayed_ref_head,
386 href_node);
387 }
388 return entry;
389 }
390 return NULL;
391}
392
393int btrfs_delayed_ref_lock(struct btrfs_delayed_ref_root *delayed_refs,
394 struct btrfs_delayed_ref_head *head)
395{
396 lockdep_assert_held(&delayed_refs->lock);
397 if (mutex_trylock(&head->mutex))
398 return 0;
399
400 refcount_inc(&head->refs);
401 spin_unlock(&delayed_refs->lock);
402
403 mutex_lock(&head->mutex);
404 spin_lock(&delayed_refs->lock);
405 if (RB_EMPTY_NODE(&head->href_node)) {
406 mutex_unlock(&head->mutex);
407 btrfs_put_delayed_ref_head(head);
408 return -EAGAIN;
409 }
410 btrfs_put_delayed_ref_head(head);
411 return 0;
412}
413
414static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
415 struct btrfs_delayed_ref_root *delayed_refs,
416 struct btrfs_delayed_ref_head *head,
417 struct btrfs_delayed_ref_node *ref)
418{
419 lockdep_assert_held(&head->lock);
420 rb_erase_cached(&ref->ref_node, &head->ref_tree);
421 RB_CLEAR_NODE(&ref->ref_node);
422 if (!list_empty(&ref->add_list))
423 list_del(&ref->add_list);
424 ref->in_tree = 0;
425 btrfs_put_delayed_ref(ref);
426 atomic_dec(&delayed_refs->num_entries);
427}
428
429static bool merge_ref(struct btrfs_trans_handle *trans,
430 struct btrfs_delayed_ref_root *delayed_refs,
431 struct btrfs_delayed_ref_head *head,
432 struct btrfs_delayed_ref_node *ref,
433 u64 seq)
434{
435 struct btrfs_delayed_ref_node *next;
436 struct rb_node *node = rb_next(&ref->ref_node);
437 bool done = false;
438
439 while (!done && node) {
440 int mod;
441
442 next = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
443 node = rb_next(node);
444 if (seq && next->seq >= seq)
445 break;
446 if (comp_refs(ref, next, false))
447 break;
448
449 if (ref->action == next->action) {
450 mod = next->ref_mod;
451 } else {
452 if (ref->ref_mod < next->ref_mod) {
453 swap(ref, next);
454 done = true;
455 }
456 mod = -next->ref_mod;
457 }
458
459 drop_delayed_ref(trans, delayed_refs, head, next);
460 ref->ref_mod += mod;
461 if (ref->ref_mod == 0) {
462 drop_delayed_ref(trans, delayed_refs, head, ref);
463 done = true;
464 } else {
465 /*
466 * Can't have multiples of the same ref on a tree block.
467 */
468 WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
469 ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
470 }
471 }
472
473 return done;
474}
475
476void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
477 struct btrfs_delayed_ref_root *delayed_refs,
478 struct btrfs_delayed_ref_head *head)
479{
480 struct btrfs_fs_info *fs_info = trans->fs_info;
481 struct btrfs_delayed_ref_node *ref;
482 struct rb_node *node;
483 u64 seq = 0;
484
485 lockdep_assert_held(&head->lock);
486
487 if (RB_EMPTY_ROOT(&head->ref_tree.rb_root))
488 return;
489
490 /* We don't have too many refs to merge for data. */
491 if (head->is_data)
492 return;
493
494 read_lock(&fs_info->tree_mod_log_lock);
495 if (!list_empty(&fs_info->tree_mod_seq_list)) {
496 struct seq_list *elem;
497
498 elem = list_first_entry(&fs_info->tree_mod_seq_list,
499 struct seq_list, list);
500 seq = elem->seq;
501 }
502 read_unlock(&fs_info->tree_mod_log_lock);
503
504again:
505 for (node = rb_first_cached(&head->ref_tree); node;
506 node = rb_next(node)) {
507 ref = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
508 if (seq && ref->seq >= seq)
509 continue;
510 if (merge_ref(trans, delayed_refs, head, ref, seq))
511 goto again;
512 }
513}
514
515int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, u64 seq)
516{
517 struct seq_list *elem;
518 int ret = 0;
519
520 read_lock(&fs_info->tree_mod_log_lock);
521 if (!list_empty(&fs_info->tree_mod_seq_list)) {
522 elem = list_first_entry(&fs_info->tree_mod_seq_list,
523 struct seq_list, list);
524 if (seq >= elem->seq) {
525 btrfs_debug(fs_info,
526 "holding back delayed_ref %#x.%x, lowest is %#x.%x",
527 (u32)(seq >> 32), (u32)seq,
528 (u32)(elem->seq >> 32), (u32)elem->seq);
529 ret = 1;
530 }
531 }
532
533 read_unlock(&fs_info->tree_mod_log_lock);
534 return ret;
535}
536
537struct btrfs_delayed_ref_head *btrfs_select_ref_head(
538 struct btrfs_delayed_ref_root *delayed_refs)
539{
540 struct btrfs_delayed_ref_head *head;
541
542again:
543 head = find_ref_head(delayed_refs, delayed_refs->run_delayed_start,
544 true);
545 if (!head && delayed_refs->run_delayed_start != 0) {
546 delayed_refs->run_delayed_start = 0;
547 head = find_first_ref_head(delayed_refs);
548 }
549 if (!head)
550 return NULL;
551
552 while (head->processing) {
553 struct rb_node *node;
554
555 node = rb_next(&head->href_node);
556 if (!node) {
557 if (delayed_refs->run_delayed_start == 0)
558 return NULL;
559 delayed_refs->run_delayed_start = 0;
560 goto again;
561 }
562 head = rb_entry(node, struct btrfs_delayed_ref_head,
563 href_node);
564 }
565
566 head->processing = 1;
567 WARN_ON(delayed_refs->num_heads_ready == 0);
568 delayed_refs->num_heads_ready--;
569 delayed_refs->run_delayed_start = head->bytenr +
570 head->num_bytes;
571 return head;
572}
573
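/*
 * Remove a ref head from the delayed ref root's rbtree and update the
 * bookkeeping counters.  The caller must hold both the delayed ref root's
 * lock and the head's lock.
 */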
574void btrfs_delete_ref_head(struct btrfs_delayed_ref_root *delayed_refs,
575 struct btrfs_delayed_ref_head *head)
576{
577 lockdep_assert_held(&delayed_refs->lock);
578 lockdep_assert_held(&head->lock);
579
580 rb_erase_cached(&head->href_node, &delayed_refs->href_root);
581 RB_CLEAR_NODE(&head->href_node);
582 atomic_dec(&delayed_refs->num_entries);
583 delayed_refs->num_heads--;
584 if (head->processing == 0)
585 delayed_refs->num_heads_ready--;
586}
587
588/*
589 * Helper to insert the ref_node at the tail or merge it with the tail.
590 *
591 * Return 0 for insert.
592 * Return >0 for merge.
593 */
594static int insert_delayed_ref(struct btrfs_trans_handle *trans,
595 struct btrfs_delayed_ref_root *root,
596 struct btrfs_delayed_ref_head *href,
597 struct btrfs_delayed_ref_node *ref)
598{
599 struct btrfs_delayed_ref_node *exist;
600 int mod;
601 int ret = 0;
602
603 spin_lock(&href->lock);
604 exist = tree_insert(&href->ref_tree, ref);
605 if (!exist)
606 goto inserted;
607
608 /* Now we are sure we can merge */
609 ret = 1;
610 if (exist->action == ref->action) {
611 mod = ref->ref_mod;
612 } else {
613 /* Need to change action */
614 if (exist->ref_mod < ref->ref_mod) {
615 exist->action = ref->action;
616 mod = -exist->ref_mod;
617 exist->ref_mod = ref->ref_mod;
618 if (ref->action == BTRFS_ADD_DELAYED_REF)
619 list_add_tail(&exist->add_list,
620 &href->ref_add_list);
621 else if (ref->action == BTRFS_DROP_DELAYED_REF) {
622 ASSERT(!list_empty(&exist->add_list));
623 list_del(&exist->add_list);
624 } else {
625 ASSERT(0);
626 }
627 } else
628 mod = -ref->ref_mod;
629 }
630 exist->ref_mod += mod;
631
632 /* remove existing tail if its ref_mod is zero */
633 if (exist->ref_mod == 0)
634 drop_delayed_ref(trans, root, href, exist);
635 spin_unlock(&href->lock);
636 return ret;
637inserted:
638 if (ref->action == BTRFS_ADD_DELAYED_REF)
639 list_add_tail(&ref->add_list, &href->ref_add_list);
640 atomic_inc(&root->num_entries);
641 spin_unlock(&href->lock);
642 return ret;
643}
644
645/*
646 * Helper function to update the accounting in the head ref.
647 * The existing head and the update must refer to the same bytenr.
648 */
649static noinline void update_existing_head_ref(struct btrfs_trans_handle *trans,
650 struct btrfs_delayed_ref_head *existing,
651 struct btrfs_delayed_ref_head *update,
652 int *old_ref_mod_ret)
653{
654 struct btrfs_delayed_ref_root *delayed_refs =
655 &trans->transaction->delayed_refs;
656 struct btrfs_fs_info *fs_info = trans->fs_info;
657 int old_ref_mod;
658
659 BUG_ON(existing->is_data != update->is_data);
660
661 spin_lock(&existing->lock);
662 if (update->must_insert_reserved) {
663		/*
664		 * If the extent was freed and then reallocated before the
665		 * delayed ref entries were processed, we can end up with an
666		 * existing head ref without the must_insert_reserved flag
667		 * set.
668		 * Set it again here.
669		 */
670 existing->must_insert_reserved = update->must_insert_reserved;
671
672 /*
673 * update the num_bytes so we make sure the accounting
674 * is done correctly
675 */
676 existing->num_bytes = update->num_bytes;
677
678 }
679
680 if (update->extent_op) {
681 if (!existing->extent_op) {
682 existing->extent_op = update->extent_op;
683 } else {
684 if (update->extent_op->update_key) {
685 memcpy(&existing->extent_op->key,
686 &update->extent_op->key,
687 sizeof(update->extent_op->key));
688 existing->extent_op->update_key = true;
689 }
690 if (update->extent_op->update_flags) {
691 existing->extent_op->flags_to_set |=
692 update->extent_op->flags_to_set;
693 existing->extent_op->update_flags = true;
694 }
695 btrfs_free_delayed_extent_op(update->extent_op);
696 }
697 }
698	/*
699	 * Update the reference mod on the head to reflect this new operation.
700	 * Only need the lock for this case because we could be processing it
701	 * currently; for refs we just added we know we're ok.
702	 */
703 old_ref_mod = existing->total_ref_mod;
704 if (old_ref_mod_ret)
705 *old_ref_mod_ret = old_ref_mod;
706 existing->ref_mod += update->ref_mod;
707 existing->total_ref_mod += update->ref_mod;
708
709	/*
710	 * If we are going from a positive ref mod to a negative or vice
711	 * versa we need to make sure to adjust pending_csums accordingly.
712	 */
713 if (existing->is_data) {
714 u64 csum_leaves =
715 btrfs_csum_bytes_to_leaves(fs_info,
716 existing->num_bytes);
717
718 if (existing->total_ref_mod >= 0 && old_ref_mod < 0) {
719 delayed_refs->pending_csums -= existing->num_bytes;
720			btrfs_delayed_refs_rsv_release(fs_info, 0, csum_leaves);
721 }
722 if (existing->total_ref_mod < 0 && old_ref_mod >= 0) {
723 delayed_refs->pending_csums += existing->num_bytes;
724 trans->delayed_ref_updates += csum_leaves;
725 }
726 }
727 spin_unlock(&existing->lock);
728}
729
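/*
 * Initialize a freshly allocated delayed ref head for the extent described
 * by @bytenr/@num_bytes and, if @qrecord is given, fill in the matching
 * qgroup extent record, including any reserved data space and the root it
 * was reserved for.
 */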
730static void init_delayed_ref_head(struct btrfs_delayed_ref_head *head_ref,
731 struct btrfs_qgroup_extent_record *qrecord,
732 u64 bytenr, u64 num_bytes, u64 ref_root,
733 u64 reserved, int action, bool is_data,
734 bool is_system)
735{
736 int count_mod = 1;
737 int must_insert_reserved = 0;
738
739 /* If reserved is provided, it must be a data extent. */
740 BUG_ON(!is_data && reserved);
741
742 /*
743 * The head node stores the sum of all the mods, so dropping a ref
744 * should drop the sum in the head node by one.
745 */
746 if (action == BTRFS_UPDATE_DELAYED_HEAD)
747 count_mod = 0;
748 else if (action == BTRFS_DROP_DELAYED_REF)
749 count_mod = -1;
750
751 /*
752 * BTRFS_ADD_DELAYED_EXTENT means that we need to update the reserved
753 * accounting when the extent is finally added, or if a later
754 * modification deletes the delayed ref without ever inserting the
755 * extent into the extent allocation tree. ref->must_insert_reserved
756 * is the flag used to record that accounting mods are required.
757 *
758 * Once we record must_insert_reserved, switch the action to
759 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
760 */
761 if (action == BTRFS_ADD_DELAYED_EXTENT)
762 must_insert_reserved = 1;
763 else
764 must_insert_reserved = 0;
765
766 refcount_set(&head_ref->refs, 1);
767 head_ref->bytenr = bytenr;
768 head_ref->num_bytes = num_bytes;
769 head_ref->ref_mod = count_mod;
770 head_ref->must_insert_reserved = must_insert_reserved;
771 head_ref->is_data = is_data;
772 head_ref->is_system = is_system;
773 head_ref->ref_tree = RB_ROOT_CACHED;
774 INIT_LIST_HEAD(&head_ref->ref_add_list);
775 RB_CLEAR_NODE(&head_ref->href_node);
776 head_ref->processing = 0;
777 head_ref->total_ref_mod = count_mod;
778 spin_lock_init(&head_ref->lock);
779 mutex_init(&head_ref->mutex);
780
781 if (qrecord) {
782 if (ref_root && reserved) {
783 qrecord->data_rsv = reserved;
784 qrecord->data_rsv_refroot = ref_root;
785 }
786 qrecord->bytenr = bytenr;
787 qrecord->num_bytes = num_bytes;
788 qrecord->old_roots = NULL;
789 }
790}
791
792/*
793 * helper function to actually insert a head node into the rbtree.
794 * this does all the dirty work in terms of maintaining the correct
795 * overall modification count.
796 */
797static noinline struct btrfs_delayed_ref_head *
798add_delayed_ref_head(struct btrfs_trans_handle *trans,
799 struct btrfs_delayed_ref_head *head_ref,
800 struct btrfs_qgroup_extent_record *qrecord,
801 int action, int *qrecord_inserted_ret,
802 int *old_ref_mod, int *new_ref_mod)
803{
804 struct btrfs_delayed_ref_head *existing;
805 struct btrfs_delayed_ref_root *delayed_refs;
806 int qrecord_inserted = 0;
807
808 delayed_refs = &trans->transaction->delayed_refs;
809
810 /* Record qgroup extent info if provided */
811 if (qrecord) {
812 if (btrfs_qgroup_trace_extent_nolock(trans->fs_info,
813 delayed_refs, qrecord))
814 kfree(qrecord);
815 else
816 qrecord_inserted = 1;
817 }
818
819 trace_add_delayed_ref_head(trans->fs_info, head_ref, action);
820
821 existing = htree_insert(&delayed_refs->href_root,
822 &head_ref->href_node);
823 if (existing) {
824 update_existing_head_ref(trans, existing, head_ref,
825 old_ref_mod);
826 /*
827 * we've updated the existing ref, free the newly
828 * allocated ref
829 */
830 kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
831 head_ref = existing;
832 } else {
833 if (old_ref_mod)
834 *old_ref_mod = 0;
835 if (head_ref->is_data && head_ref->ref_mod < 0) {
836 delayed_refs->pending_csums += head_ref->num_bytes;
837 trans->delayed_ref_updates +=
838 btrfs_csum_bytes_to_leaves(trans->fs_info,
839 head_ref->num_bytes);
840 }
841 delayed_refs->num_heads++;
842 delayed_refs->num_heads_ready++;
843 atomic_inc(&delayed_refs->num_entries);
844 trans->delayed_ref_updates++;
845 }
846 if (qrecord_inserted_ret)
847 *qrecord_inserted_ret = qrecord_inserted;
848 if (new_ref_mod)
849 *new_ref_mod = head_ref->total_ref_mod;
850
851 return head_ref;
852}
853
854/*
855 * init_delayed_ref_common - Initialize the structure which represents a
856 *			      modification to an extent.
857 *
858 * @fs_info:    The fs_info of the filesystem we are modifying.
859 *
860 * @ref: The structure which is going to be initialized.
861 *
862 * @bytenr: The logical address of the extent for which a modification is
863 * going to be recorded.
864 *
865 * @num_bytes: Size of the extent whose modification is being recorded.
866 *
867 * @ref_root: The id of the root where this modification has originated, this
868 * can be either one of the well-known metadata trees or the
869 * subvolume id which references this extent.
870 *
871 * @action: Can be one of BTRFS_ADD_DELAYED_REF/BTRFS_DROP_DELAYED_REF or
872 * BTRFS_ADD_DELAYED_EXTENT
873 *
874 * @ref_type:	Holds the type of the reference being recorded, can be
875 *		one of BTRFS_SHARED_BLOCK_REF_KEY/BTRFS_TREE_BLOCK_REF_KEY
876 *		when recording a metadata extent or BTRFS_SHARED_DATA_REF_KEY/
877 *		BTRFS_EXTENT_DATA_REF_KEY when recording a data extent.
878 */
879static void init_delayed_ref_common(struct btrfs_fs_info *fs_info,
880 struct btrfs_delayed_ref_node *ref,
881 u64 bytenr, u64 num_bytes, u64 ref_root,
882 int action, u8 ref_type)
883{
884 u64 seq = 0;
885
886 if (action == BTRFS_ADD_DELAYED_EXTENT)
887 action = BTRFS_ADD_DELAYED_REF;
888
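	/*
	 * Refs originating from subvolume (fs) trees are stamped with the
	 * current tree mod log sequence so they can be held back while an
	 * older sequence is still in use.
	 */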
889 if (is_fstree(ref_root))
890 seq = atomic64_read(&fs_info->tree_mod_seq);
891
892 refcount_set(&ref->refs, 1);
893 ref->bytenr = bytenr;
894 ref->num_bytes = num_bytes;
895 ref->ref_mod = 1;
896 ref->action = action;
897 ref->is_head = 0;
898 ref->in_tree = 1;
899 ref->seq = seq;
900 ref->type = ref_type;
901 RB_CLEAR_NODE(&ref->ref_node);
902 INIT_LIST_HEAD(&ref->add_list);
903}
904
905/*
906 * add a delayed tree ref. This does all of the accounting required
907 * to make sure the delayed ref is eventually processed before this
908 * transaction commits.
909 */
910int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
911 struct btrfs_ref *generic_ref,
912 struct btrfs_delayed_extent_op *extent_op,
913 int *old_ref_mod, int *new_ref_mod)
914{
915 struct btrfs_fs_info *fs_info = trans->fs_info;
916 struct btrfs_delayed_tree_ref *ref;
917 struct btrfs_delayed_ref_head *head_ref;
918 struct btrfs_delayed_ref_root *delayed_refs;
919 struct btrfs_qgroup_extent_record *record = NULL;
920 int qrecord_inserted;
921 bool is_system;
922 int action = generic_ref->action;
923 int level = generic_ref->tree_ref.level;
924 int ret;
925 u64 bytenr = generic_ref->bytenr;
926 u64 num_bytes = generic_ref->len;
927 u64 parent = generic_ref->parent;
928 u8 ref_type;
929
930 is_system = (generic_ref->real_root == BTRFS_CHUNK_TREE_OBJECTID);
931
932 ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
933 BUG_ON(extent_op && extent_op->is_data);
934 ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
935 if (!ref)
936 return -ENOMEM;
937
938 head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
939 if (!head_ref) {
940 kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
941 return -ENOMEM;
942 }
943
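	/*
	 * Allocate a qgroup extent record up front if quotas are enabled,
	 * both the originating and the owning root are subvolume trees, and
	 * qgroup accounting was not explicitly skipped.
	 */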
944 if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
945 is_fstree(generic_ref->real_root) &&
946 is_fstree(generic_ref->tree_ref.root) &&
947 !generic_ref->skip_qgroup) {
948 record = kzalloc(sizeof(*record), GFP_NOFS);
949 if (!record) {
950 kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
951 kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
952 return -ENOMEM;
953 }
954 }
955
956 if (parent)
957 ref_type = BTRFS_SHARED_BLOCK_REF_KEY;
958 else
959 ref_type = BTRFS_TREE_BLOCK_REF_KEY;
960
961 init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
962 generic_ref->tree_ref.root, action, ref_type);
963 ref->root = generic_ref->tree_ref.root;
964 ref->parent = parent;
965 ref->level = level;
966
967 init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
968 generic_ref->tree_ref.root, 0, action, false,
969 is_system);
970 head_ref->extent_op = extent_op;
971
972 delayed_refs = &trans->transaction->delayed_refs;
973 spin_lock(&delayed_refs->lock);
974
975 /*
976 * insert both the head node and the new ref without dropping
977 * the spin lock
978 */
979 head_ref = add_delayed_ref_head(trans, head_ref, record,
980 action, &qrecord_inserted,
981 old_ref_mod, new_ref_mod);
982
983 ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
984 spin_unlock(&delayed_refs->lock);
985
986 /*
987 * Need to update the delayed_refs_rsv with any changes we may have
988 * made.
989 */
990 btrfs_update_delayed_refs_rsv(trans);
991
992 trace_add_delayed_tree_ref(fs_info, &ref->node, ref,
993 action == BTRFS_ADD_DELAYED_EXTENT ?
994 BTRFS_ADD_DELAYED_REF : action);
995 if (ret > 0)
996 kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
997
998 if (qrecord_inserted)
999 btrfs_qgroup_trace_extent_post(fs_info, record);
1000
1001 return 0;
1002}
1003
1004/*
1005 * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
1006 */
1007int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
1008 struct btrfs_ref *generic_ref,
1009 u64 reserved, int *old_ref_mod,
1010 int *new_ref_mod)
1011{
1012 struct btrfs_fs_info *fs_info = trans->fs_info;
1013 struct btrfs_delayed_data_ref *ref;
1014 struct btrfs_delayed_ref_head *head_ref;
1015 struct btrfs_delayed_ref_root *delayed_refs;
1016 struct btrfs_qgroup_extent_record *record = NULL;
1017 int qrecord_inserted;
1018 int action = generic_ref->action;
1019 int ret;
1020 u64 bytenr = generic_ref->bytenr;
1021 u64 num_bytes = generic_ref->len;
1022 u64 parent = generic_ref->parent;
1023 u64 ref_root = generic_ref->data_ref.ref_root;
1024 u64 owner = generic_ref->data_ref.ino;
1025 u64 offset = generic_ref->data_ref.offset;
1026 u8 ref_type;
1027
1028 ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
1029 ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
1030 if (!ref)
1031 return -ENOMEM;
1032
1033 if (parent)
1034 ref_type = BTRFS_SHARED_DATA_REF_KEY;
1035 else
1036 ref_type = BTRFS_EXTENT_DATA_REF_KEY;
1037 init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
1038 ref_root, action, ref_type);
1039 ref->root = ref_root;
1040 ref->parent = parent;
1041 ref->objectid = owner;
1042 ref->offset = offset;
1043
1045 head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
1046 if (!head_ref) {
1047 kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
1048 return -ENOMEM;
1049 }
1050
1051 if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
1052 is_fstree(ref_root) &&
1053 is_fstree(generic_ref->real_root) &&
1054 !generic_ref->skip_qgroup) {
1055 record = kzalloc(sizeof(*record), GFP_NOFS);
1056 if (!record) {
1057 kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
1058 kmem_cache_free(btrfs_delayed_ref_head_cachep,
1059 head_ref);
1060 return -ENOMEM;
1061 }
1062 }
1063
1064 init_delayed_ref_head(head_ref, record, bytenr, num_bytes, ref_root,
1065 reserved, action, true, false);
1066 head_ref->extent_op = NULL;
1067
1068 delayed_refs = &trans->transaction->delayed_refs;
1069 spin_lock(&delayed_refs->lock);
1070
1071 /*
1072 * insert both the head node and the new ref without dropping
1073 * the spin lock
1074 */
1075 head_ref = add_delayed_ref_head(trans, head_ref, record,
1076 action, &qrecord_inserted,
1077 old_ref_mod, new_ref_mod);
1078
1079 ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
1080 spin_unlock(&delayed_refs->lock);
1081
1082 /*
1083 * Need to update the delayed_refs_rsv with any changes we may have
1084 * made.
1085 */
1086 btrfs_update_delayed_refs_rsv(trans);
1087
1088 trace_add_delayed_data_ref(trans->fs_info, &ref->node, ref,
1089 action == BTRFS_ADD_DELAYED_EXTENT ?
1090 BTRFS_ADD_DELAYED_REF : action);
1091 if (ret > 0)
1092 kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
1093
1095 if (qrecord_inserted)
1096 return btrfs_qgroup_trace_extent_post(fs_info, record);
1097 return 0;
1098}
1099
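/*
 * Queue a head-only delayed ref (BTRFS_UPDATE_DELAYED_HEAD) carrying an
 * extent_op, used to update the flags and/or key stored in the extent item
 * of an existing extent without changing its reference count.
 */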
1100int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
1101 u64 bytenr, u64 num_bytes,
1102 struct btrfs_delayed_extent_op *extent_op)
1103{
1104 struct btrfs_delayed_ref_head *head_ref;
1105 struct btrfs_delayed_ref_root *delayed_refs;
1106
1107 head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
1108 if (!head_ref)
1109 return -ENOMEM;
1110
1111 init_delayed_ref_head(head_ref, NULL, bytenr, num_bytes, 0, 0,
1112 BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data,
1113 false);
1114 head_ref->extent_op = extent_op;
1115
1116 delayed_refs = &trans->transaction->delayed_refs;
1117 spin_lock(&delayed_refs->lock);
1118
1119 add_delayed_ref_head(trans, head_ref, NULL, BTRFS_UPDATE_DELAYED_HEAD,
1120 NULL, NULL, NULL);
1121
1122 spin_unlock(&delayed_refs->lock);
1123
1124 /*
1125 * Need to update the delayed_refs_rsv with any changes we may have
1126 * made.
1127 */
1128 btrfs_update_delayed_refs_rsv(trans);
1129 return 0;
1130}
1131
1132/*
1133 * This does a simple search for the head node for a given extent. Returns the
1134 * head node if found, or NULL if not.
1135 */
1136struct btrfs_delayed_ref_head *
1137btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 bytenr)
1138{
1139 lockdep_assert_held(&delayed_refs->lock);
1140
1141 return find_ref_head(delayed_refs, bytenr, false);
1142}
1143
1144void __cold btrfs_delayed_ref_exit(void)
1145{
1146 kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
1147 kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
1148 kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
1149 kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
1150}
1151
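/*
 * Create the slab caches used for delayed ref tracking.  If any allocation
 * fails, everything created so far is torn down again.
 */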
1152int __init btrfs_delayed_ref_init(void)
1153{
1154 btrfs_delayed_ref_head_cachep = kmem_cache_create(
1155 "btrfs_delayed_ref_head",
1156 sizeof(struct btrfs_delayed_ref_head), 0,
1157 SLAB_MEM_SPREAD, NULL);
1158 if (!btrfs_delayed_ref_head_cachep)
1159 goto fail;
1160
1161 btrfs_delayed_tree_ref_cachep = kmem_cache_create(
1162 "btrfs_delayed_tree_ref",
1163 sizeof(struct btrfs_delayed_tree_ref), 0,
1164 SLAB_MEM_SPREAD, NULL);
1165 if (!btrfs_delayed_tree_ref_cachep)
1166 goto fail;
1167
1168 btrfs_delayed_data_ref_cachep = kmem_cache_create(
1169 "btrfs_delayed_data_ref",
1170 sizeof(struct btrfs_delayed_data_ref), 0,
1171 SLAB_MEM_SPREAD, NULL);
1172 if (!btrfs_delayed_data_ref_cachep)
1173 goto fail;
1174
1175 btrfs_delayed_extent_op_cachep = kmem_cache_create(
1176 "btrfs_delayed_extent_op",
1177 sizeof(struct btrfs_delayed_extent_op), 0,
1178 SLAB_MEM_SPREAD, NULL);
1179 if (!btrfs_delayed_extent_op_cachep)
1180 goto fail;
1181
1182 return 0;
1183fail:
1184 btrfs_delayed_ref_exit();
1185 return -ENOMEM;
1186}