// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
#include "misc.h"
#include "tree-mod-log.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "relocation.h"
#include "tree-checker.h"

/* Just arbitrary numbers so we can be sure one of these happened. */
#define BACKREF_FOUND_SHARED     6
#define BACKREF_FOUND_NOT_SHARED 7

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	u64 num_bytes;
	struct extent_inode_elem *next;
};
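
/*
 * Entries are chained through ->next into a singly linked list that hangs off
 * a ulist node's aux pointer, one list per referencing leaf. A purely
 * illustrative list for an extent referenced twice by inode 257:
 *
 *   {inum=257, offset=8192, num_bytes=4096} ->
 *   {inum=257, offset=0, num_bytes=4096} -> NULL
 *
 * check_extent_in_eb() pushes new entries at the head and
 * free_inode_elem_list() releases the whole chain.
 */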

static int check_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
			      const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      struct extent_inode_elem **eie)
{
	const u64 data_len = btrfs_file_extent_num_bytes(eb, fi);
	u64 offset = key->offset;
	struct extent_inode_elem *e;
	const u64 *root_ids;
	int root_count;
	bool cached;

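	/*
	 * Worked example for the filtering below (illustrative numbers): for a
	 * regular, uncompressed extent with data_offset == 4096 and
	 * data_len == 8192, only ctx->extent_item_pos values in [4096, 12288)
	 * pass, and extent_item_pos == 6144 with key->offset == 0 maps to the
	 * file offset 0 + 6144 - 4096 == 2048.
	 */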
	if (!ctx->ignore_extent_item_pos &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;

		data_offset = btrfs_file_extent_offset(eb, fi);

		if (ctx->extent_item_pos < data_offset ||
		    ctx->extent_item_pos >= data_offset + data_len)
			return 1;
		offset += ctx->extent_item_pos - data_offset;
	}

	if (!ctx->indirect_ref_iterator || !ctx->cache_lookup)
		goto add_inode_elem;

	cached = ctx->cache_lookup(eb->start, ctx->user_ctx, &root_ids,
				   &root_count);
	if (!cached)
		goto add_inode_elem;

	for (int i = 0; i < root_count; i++) {
		int ret;

		ret = ctx->indirect_ref_iterator(key->objectid, offset,
						 data_len, root_ids[i],
						 ctx->user_ctx);
		if (ret)
			return ret;
	}

add_inode_elem:
	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = offset;
	e->num_bytes = data_len;
	*eie = e;

	return 0;
}

static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

static int find_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
			     const struct extent_buffer *eb,
			     struct extent_inode_elem **eie)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * From the shared data ref, we only have the leaf but we need the key.
	 * Thus, we must look into all items and see whether we find one (or
	 * more) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != ctx->bytenr)
			continue;

		ret = check_extent_in_eb(ctx, &key, eb, fi, eie);
		if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
			return ret;
	}

	return 0;
}

struct preftree {
	struct rb_root_cached root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};
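
/*
 * Rough lifecycle of the three trees (see also the longer comment before
 * resolve_indirect_refs()): refs parsed from delayed refs and from the extent
 * tree land in one of the trees, add_missing_keys() drains
 * indirect_missing_keys into indirect, resolve_indirect_refs() drains
 * indirect into direct, and find_parent_nodes() finally walks the merged
 * result in the direct tree.
 */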

/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 * - incremented when a ref->count transitions to >0
 * - decremented when a ref->count transitions to <1
 */
struct share_check {
	struct btrfs_backref_share_check_ctx *ctx;
	struct btrfs_root *root;
	u64 inum;
	u64 data_bytenr;
	u64 data_extent_gen;
	/*
	 * Counts number of inodes that refer to an extent (different inodes in
	 * the same root or different roots) that we could find. The sharedness
	 * check typically stops once this counter gets greater than 1, so it
	 * may not reflect the total number of inodes.
	 */
	int share_count;
	/*
	 * The number of times we found our inode refers to the data extent we
	 * are determining the sharedness of. In other words, how many file
	 * extent items we could find for our inode that point to our target
	 * data extent. The value we get here after finishing the extent
	 * sharedness check may be smaller than reality, but if it ends up
	 * being greater than 1, then we know for sure the inode has multiple
	 * file extent items that point to our data extent, and we can safely
	 * assume it's useful to cache the sharedness check result.
	 */
	int self_ref_count;
	bool have_delayed_delete_refs;
};

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref), 0, 0, NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(const struct prelim_ref *ref1,
			      const struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}

static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount, const struct prelim_ref *newref)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;

	if (newref->root_id == btrfs_root_id(sc->root) &&
	    newref->wanted_disk_byte == sc->data_bytenr &&
	    newref->key_for_search.objectid == sc->inum)
		sc->self_ref_count += newref->count;
}
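
/*
 * Illustrative share_count transitions (hypothetical values): inserting a new
 * ref with count 1 goes 0 -> 1 and increments share_count; merging a delayed
 * DROP ref (count -1) into it goes 1 -> 0 and decrements share_count again;
 * a DROP ref inserted on its own (0 -> -1) changes nothing.
 */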

/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count, newref);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count, newref);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
}

/*
 * Release the entire tree. We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode) {
		free_inode_elem_list(ref->inode_list);
		free_pref(ref);
	}

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}

/*
 * The rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 *         information |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we have the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 *         information |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we have the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * Additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}

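/*
 * Check if the direct tree already holds a shared data backref for @bytenr.
 * The zero-initialized @target only differs from direct data refs in
 * ->parent: those refs are inserted with level == 0, root_id == 0 and a
 * zeroed key, so prelim_ref_compare() reduces to comparing the parent bytenr.
 * Direct metadata refs carry a non-zero level and can never match the
 * level 0 target.
 */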
static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {};
	int result;

	target.parent = bytenr;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

		if (result < 0)
			p = &(*p)->rb_left;
		else if (result > 0)
			p = &(*p)->rb_right;
		else
			return 1;
	}
	return 0;
}

static int add_all_parents(struct btrfs_backref_walk_ctx *ctx,
			   struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;
	u8 type;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * We normally enter this function with the path already pointing to
	 * the first item to check. But sometimes, we may enter it with:
	 *
	 * 1. slot == nritems;
	 * 2. we are searching for a normal backref but the bytenr of this
	 *    leaf matches a shared data backref;
	 * 3. the leaf owner is not equal to the root we are searching for.
	 *
	 * For these cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (ctx->time_seq == BTRFS_SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
	}

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for a normal backref but the bytenr of this
		 * leaf matches a shared data backref, OR the leaf owner is not
		 * equal to the root we are searching for.
		 */
		if (slot == 0 &&
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (ctx->time_seq == BTRFS_SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(eb, fi);
		if (type == BTRFS_FILE_EXTENT_INLINE)
			goto next;
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

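		/*
		 * Example for the offset check below (illustrative numbers): a
		 * file extent item at file offset 4096 (key.offset) that points
		 * into the target extent with a data offset of 4096 matches a
		 * backref whose key_for_search.offset is 0, since
		 * 4096 - 4096 == 0.
		 */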
		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (!ctx->skip_inode_ref_list) {
				ret = check_extent_in_eb(ctx, &key, eb, fi, &eie);
				if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
				    ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && !ctx->skip_inode_ref_list) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (ctx->time_seq == BTRFS_SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, ctx->time_seq);
	}

	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
		free_inode_elem_list(eie);
	else if (ret > 0)
		ret = 0;

	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_backref_walk_ctx *ctx,
				struct btrfs_path *path,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents)
{
	struct btrfs_root *root;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	struct btrfs_key search_key = ref->key_for_search;

	/*
	 * If we're search_commit_root we could possibly be holding locks on
	 * other tree nodes. This happens when the qgroups code does backref
	 * walks when adding new delayed refs. To deal with this we need to
	 * look in cache for the root, and if we don't find it then we need to
	 * search the tree_root's commit root, thus the
	 * btrfs_get_fs_root_commit_root usage here.
	 */
	if (path->search_commit_root)
		root = btrfs_get_fs_root_commit_root(ctx->fs_info, path, ref->root_id);
	else
		root = btrfs_get_fs_root(ctx->fs_info, ref->root_id, false);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}

	if (!path->search_commit_root &&
	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
		ret = -ENOENT;
		goto out;
	}

	if (btrfs_is_testing(ctx->fs_info)) {
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (ctx->time_seq == BTRFS_SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, ctx->time_seq);

	if (root_level + 1 == level)
		goto out;

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting a file's offset with the data offset of its
	 * corresponding extent data item. This can happen for example in the
	 * clone ioctl.
	 *
	 * So if we detect such case we set the search key's offset to zero to
	 * make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken from the backref is much larger than the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a lot
	 * of places, cause backwards incompatibility and would not fix the
	 * problem for extents cloned with older kernels.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (ctx->time_seq == BTRFS_SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, ctx->time_seq);

	btrfs_debug(ctx->fs_info,
		    "search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		    ref->root_id, level, ref->count, ret,
		    ref->key_for_search.objectid, ref->key_for_search.type,
		    ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(ctx, root, path, parents, preftrees, ref, level);
out:
	btrfs_put_root(root);
out_free:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}
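
/*
 * Note on the ulist helpers used throughout this file: ulist_add() and
 * ulist_add_merge_ptr() return 1 when the value was newly added and 0 when it
 * already existed, in which case the merge variant hands back the existing
 * aux pointer so that callers can append their extent_inode_elem list to it
 * (see add_all_parents() and find_parent_nodes()).
 */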

static void free_leaf_list(struct ulist *ulist)
{
	struct ulist_node *node;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(ulist, &uiter)))
		free_inode_elem_list(unode_aux_to_inode_list(node));

	ulist_free(ulist);
}

/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_backref_walk_ctx *ctx,
				 struct btrfs_path *path,
				 struct preftrees *preftrees,
				 struct share_check *sc)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done. In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && ref->root_id != btrfs_root_id(sc->root)) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(ctx, path, preftrees, ref, parents);
		/*
		 * We can only tolerate ENOENT; otherwise we should catch the
		 * error and return directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(ctx->fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	/*
	 * We may have inode lists attached to refs in the parents ulist, so we
	 * must free them before freeing the ulist and its refs.
	 */
	free_leaf_list(parents);
	return ret;
}

/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		struct btrfs_tree_parent_check check = { 0 };

		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		check.level = ref->level - 1;
		check.owner_root = ref->root_id;

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, &check);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		}
		if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}

		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}

/*
 * Add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the list.
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_key key;
	struct rb_node *n;
	int count;
	int ret = 0;

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG();
		}
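
		/*
		 * The signed count lets ADD and DROP refs for the same target
		 * cancel out when merged in prelim_ref_insert(). For example
		 * (hypothetical), an ADD with ref_mod == 1 followed by a DROP
		 * with ref_mod == 1 merges to a ref->count of 0, which the
		 * consumers of the preftrees then treat as "no reference".
		 */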
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_key *key_ptr = NULL;
			/* The owner of a tree block ref is the level. */
			int level = btrfs_delayed_ref_owner(node);

			if (head->extent_op && head->extent_op->update_key) {
				btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
				key_ptr = &key;
			}

			ret = add_indirect_ref(fs_info, preftrees, node->ref_root,
					       key_ptr, level + 1, node->bytenr,
					       count, sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/*
			 * SHARED DIRECT METADATA backref
			 *
			 * The owner of a tree block ref is the level.
			 */
			int level = btrfs_delayed_ref_owner(node);

			ret = add_direct_ref(fs_info, preftrees, level + 1,
					     node->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			key.objectid = btrfs_delayed_ref_owner(node);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_delayed_ref_offset(node);

			/*
			 * If we have a share check context and a reference for
			 * another inode, we can't exit immediately. This is
			 * because even if this is a BTRFS_ADD_DELAYED_REF
			 * reference we may find next a BTRFS_DROP_DELAYED_REF
			 * which cancels out this ADD reference.
			 *
			 * If this is a DROP reference and there was no previous
			 * ADD reference, then we need to signal that when we
			 * process references from the extent tree (through
			 * add_inline_refs() and add_keyed_refs()), we should
			 * not exit early if we find a reference for another
			 * inode, because one of the delayed DROP references
			 * may cancel that reference in the extent tree.
			 */
			if (sc && count < 0)
				sc->have_delayed_delete_refs = true;

			ret = add_indirect_ref(fs_info, preftrees, node->ref_root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			ret = add_direct_ref(fs_info, preftrees, 0, node->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);

	spin_unlock(&head->lock);
	return ret;
}

/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(struct btrfs_backref_walk_ctx *ctx,
			   struct btrfs_path *path,
			   int *info_level, struct preftrees *preftrees,
			   struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size(leaf, slot);
	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);

	if (ctx->check_extent_item) {
		ret = ctx->check_extent_item(ctx->bytenr, ei, leaf, ctx->user_ctx);
		if (ret)
			return ret;
	}

	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

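	/*
	 * Illustrative layout of the extent item walked below (tree_block_info
	 * is only present for non-skinny metadata items):
	 *
	 * [btrfs_extent_item][btrfs_tree_block_info?][inline ref 0][inline ref 1]...
	 *  ^ei                                        ^ptr                       ^end
	 *
	 * Each iteration advances ptr by btrfs_extent_inline_ref_size(type).
	 */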
	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(ctx->fs_info, preftrees,
					     *info_level + 1, offset,
					     ctx->bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(ctx->fs_info, preftrees, 0, offset,
					     ctx->bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(ctx->fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       ctx->bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			if (!ctx->skip_data_ref ||
			    !ctx->skip_data_ref(root, key.objectid, key.offset,
						ctx->user_ctx))
				ret = add_indirect_ref(ctx->fs_info, preftrees,
						       root, &key, 0, ctx->bytenr,
						       count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_EXTENT_OWNER_REF_KEY:
			ASSERT(btrfs_fs_incompat(ctx->fs_info, SIMPLE_QUOTA));
			break;
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_backref_walk_ctx *ctx,
			  struct btrfs_root *extent_root,
			  struct btrfs_path *path,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != ctx->bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     ctx->bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, ctx->bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, ctx->bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			if (!ctx->skip_data_ref ||
			    !ctx->skip_data_ref(root, key.objectid, key.offset,
						ctx->user_ctx))
				ret = add_indirect_ref(fs_info, preftrees, root,
						       &key, 0, ctx->bytenr,
						       count, sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;

	}

	return ret;
}

/*
 * The caller has joined a transaction or is holding a read lock on the
 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
 * snapshot field changing while updating or checking the cache.
 */
static bool lookup_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
					struct btrfs_root *root,
					u64 bytenr, int level, bool *is_shared)
{
	const struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_backref_shared_cache_entry *entry;

	if (!current->journal_info)
		lockdep_assert_held(&fs_info->commit_root_sem);

	if (!ctx->use_path_cache)
		return false;

	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
		return false;

	/*
	 * Level -1 is used for the data extent, which is not reliable to cache
	 * because its reference count can increase or decrease without us
	 * realizing. We cache results only for extent buffers that lead from
	 * the root node down to the leaf with the file extent item.
	 */
	ASSERT(level >= 0);

	entry = &ctx->path_cache_entries[level];

	/* Unused cache entry or being used for some other extent buffer. */
	if (entry->bytenr != bytenr)
		return false;

	/*
	 * We cached a false result, but the last snapshot generation of the
	 * root changed, so we now have a snapshot. Don't trust the result.
	 */
	if (!entry->is_shared &&
	    entry->gen != btrfs_root_last_snapshot(&root->root_item))
		return false;

	/*
	 * If we cached a true result and the last generation used for dropping
	 * a root changed, we can not trust the result, because the dropped root
	 * could be a snapshot sharing this extent buffer.
	 */
	if (entry->is_shared &&
	    entry->gen != btrfs_get_last_root_drop_gen(fs_info))
		return false;

	*is_shared = entry->is_shared;
	/*
	 * If the node at this level is shared, then all nodes below are also
	 * shared. Currently some of the nodes below may be marked as not shared
	 * because we have just switched from one leaf to another, and switched
	 * also other nodes above the leaf and below the current level, so mark
	 * them as shared.
	 */
	if (*is_shared) {
		for (int i = 0; i < level; i++) {
			ctx->path_cache_entries[i].is_shared = true;
			ctx->path_cache_entries[i].gen = entry->gen;
		}
	}

	return true;
}

/*
 * The caller has joined a transaction or is holding a read lock on the
 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
 * snapshot field changing while updating or checking the cache.
 */
static void store_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
				       struct btrfs_root *root,
				       u64 bytenr, int level, bool is_shared)
{
	const struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_backref_shared_cache_entry *entry;
	u64 gen;

	if (!current->journal_info)
		lockdep_assert_held(&fs_info->commit_root_sem);

	if (!ctx->use_path_cache)
		return;

	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
		return;

	/*
	 * Level -1 is used for the data extent, which is not reliable to cache
	 * because its reference count can increase or decrease without us
	 * realizing. We cache results only for extent buffers that lead from
	 * the root node down to the leaf with the file extent item.
	 */
	ASSERT(level >= 0);

	if (is_shared)
		gen = btrfs_get_last_root_drop_gen(fs_info);
	else
		gen = btrfs_root_last_snapshot(&root->root_item);

	entry = &ctx->path_cache_entries[level];
	entry->bytenr = bytenr;
	entry->is_shared = is_shared;
	entry->gen = gen;

	/*
	 * If we found an extent buffer is shared, set the cache result for all
	 * extent buffers below it to true. As nodes in the path are COWed,
	 * their sharedness is moved to their children, and if a leaf is COWed,
	 * then the sharedness of a data extent becomes direct: the refcount of
	 * the data extent is increased in the extent item in the extent tree.
	 */
	if (is_shared) {
		for (int i = 0; i < level; i++) {
			entry = &ctx->path_cache_entries[i];
			entry->is_shared = is_shared;
			entry->gen = gen;
		}
	}
}
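
/*
 * Illustrative cache state (hypothetical bytenr values) after the node at
 * level 2 of a path was found to be shared:
 *
 *   path_cache_entries[3] = { bytenr = X3, is_shared = false, gen = snap gen }
 *   path_cache_entries[2] = { bytenr = X2, is_shared = true, gen = drop gen }
 *   path_cache_entries[1] = { bytenr = X1, is_shared = true, gen = drop gen }
 *   path_cache_entries[0] = { bytenr = X0, is_shared = true, gen = drop gen }
 *
 * The loop above forces the levels below the shared one to shared as well,
 * while entries above it keep whatever was stored for them earlier.
 */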

/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * @ctx:     Backref walking context object, must be not NULL.
 * @sc:      If !NULL, then immediately return BACKREF_FOUND_SHARED when a
 *           shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_backref_walk_ctx *ctx,
			     struct share_check *sc)
{
	struct btrfs_root *root = btrfs_extent_root(ctx->fs_info, ctx->bytenr);
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT
	};

	/* Roots ulist is not needed when using a sharedness check context. */
	if (sc)
		ASSERT(ctx->roots == NULL);

	key.objectid = ctx->bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(ctx->fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!ctx->trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (ctx->time_seq == BTRFS_SEQ_LAST)
		path->skip_locking = 1;

again:
	head = NULL;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret == 0) {
		/*
		 * Key with offset -1 found, there would have to exist an extent
		 * item with such offset, but this is out of the valid range.
		 */
		ret = -EUCLEAN;
		goto out;
	}

	if (ctx->trans && likely(ctx->trans->type != __TRANS_DUMMY) &&
	    ctx->time_seq != BTRFS_SEQ_LAST) {
		/*
		 * We have a specific time_seq we care about and trans which
		 * means we have the path lock, we need to grab the ref head and
		 * lock it so we have a consistent view of the refs at the given
		 * time.
		 */
		delayed_refs = &ctx->trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(ctx->fs_info, delayed_refs,
						   ctx->bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref_head(head);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(ctx->fs_info, head, ctx->time_seq,
					       &preftrees, sc);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == ctx->bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(ctx, path, &info_level,
					      &preftrees, sc);
			if (ret)
				goto out;
			ret = add_keyed_refs(ctx, root, path, info_level,
					     &preftrees, sc);
			if (ret)
				goto out;
		}
	}

	/*
	 * If we have a share context and we reached here, it means the extent
	 * is not directly shared (no multiple reference items for it),
	 * otherwise we would have exited earlier with a return value of
	 * BACKREF_FOUND_SHARED after processing delayed references or while
	 * processing inline or keyed references from the extent tree.
	 * The extent may however be indirectly shared through shared subtrees
	 * as a result from creating snapshots, so we determine below what is
	 * its parent node, in case we are dealing with a metadata extent, or
	 * what's the leaf (or leaves), from a fs tree, that has a file extent
	 * item pointing to it in case we are dealing with a data extent.
	 */
	ASSERT(extent_is_shared(sc) == 0);

	/*
	 * If we are here for a data extent and we have a share_check structure
	 * it means the data extent is not directly shared (does not have
	 * multiple reference items), so we have to check if a path in the fs
	 * tree (going from the root node down to the leaf that has the file
	 * extent item pointing to the data extent) is shared, that is, if any
	 * of the extent buffers in the path is referenced by other trees.
	 */
	if (sc && ctx->bytenr == sc->data_bytenr) {
		/*
		 * If our data extent is from a generation more recent than the
		 * last generation used to snapshot the root, then we know that
		 * it can not be shared through subtrees, so we can skip
		 * resolving indirect references, there's no point in
		 * determining the extent buffers for the path from the fs tree
		 * root node down to the leaf that has the file extent item that
		 * points to the data extent.
		 */
		if (sc->data_extent_gen >
		    btrfs_root_last_snapshot(&sc->root->root_item)) {
			ret = BACKREF_FOUND_NOT_SHARED;
			goto out;
		}

		/*
		 * If we are only determining if a data extent is shared or not
		 * and the corresponding file extent item is located in the same
		 * leaf as the previous file extent item, we can skip resolving
		 * indirect references for a data extent, since the fs tree path
		 * is the same (same leaf, so same path). We skip as long as the
		 * cached result for the leaf is valid and only if there's only
		 * one file extent item pointing to the data extent, because in
		 * the case of multiple file extent items, they may be located
		 * in different leaves and therefore we have multiple paths.
		 */
		if (sc->ctx->curr_leaf_bytenr == sc->ctx->prev_leaf_bytenr &&
		    sc->self_ref_count == 1) {
			bool cached;
			bool is_shared;

			cached = lookup_backref_shared_cache(sc->ctx, sc->root,
						sc->ctx->curr_leaf_bytenr,
						0, &is_shared);
			if (cached) {
				if (is_shared)
					ret = BACKREF_FOUND_SHARED;
				else
					ret = BACKREF_FOUND_NOT_SHARED;
				goto out;
			}
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(ctx->fs_info, &preftrees, path->skip_locking == 0);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

	ret = resolve_indirect_refs(ctx, path, &preftrees, sc);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first_cached(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically. Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (ctx->roots && ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(ctx->roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (!ctx->skip_inode_ref_list && !ref->inode_list &&
			    ref->level == 0) {
				struct btrfs_tree_parent_check check = { 0 };
				struct extent_buffer *eb;

				check.level = ref->level;

				eb = read_tree_block(ctx->fs_info, ref->parent,
						     &check);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				}
				if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}

				if (!path->skip_locking)
					btrfs_tree_read_lock(eb);
				ret = find_extent_in_eb(ctx, eb, &eie);
				if (!path->skip_locking)
					btrfs_tree_read_unlock(eb);
				free_extent_buffer(eb);
				if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
				    ret < 0)
					goto out;
				ref->inode_list = eie;
				/*
				 * We transferred the list ownership to the ref,
				 * so set to NULL to avoid a double free in case
				 * an error happens after this.
				 */
				eie = NULL;
			}
			ret = ulist_add_merge_ptr(ctx->refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && !ctx->skip_inode_ref_list) {
				/*
				 * We've recorded that parent, so we must extend
				 * its inode list here.
				 *
				 * However if there was corruption we may not
				 * have found an eie, return an error in this
				 * case.
				 */
				ASSERT(eie);
				if (!eie) {
					ret = -EUCLEAN;
					goto out;
				}
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
			/*
			 * We have transferred the inode list ownership from
			 * this ref to the ref we added to the 'refs' ulist.
			 * So set this ref's inode list to NULL to avoid
			 * use-after-free when our caller uses it or double
			 * frees in case an error happens before we return.
			 */
			ref->inode_list = NULL;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * Finds all leaves with a reference to the specified combination of
 * @ctx->bytenr and @ctx->extent_item_pos. The bytenr of the found leaves are
 * added to the ulist at @ctx->refs, and that ulist is allocated by this
 * function. The caller should free the ulist with free_leaf_list() if
 * @ctx->ignore_extent_item_pos is false, otherwise a simple ulist_free() is
 * enough.
 *
 * Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated.
 */
int btrfs_find_all_leafs(struct btrfs_backref_walk_ctx *ctx)
{
	int ret;

	ASSERT(ctx->refs == NULL);

	ctx->refs = ulist_alloc(GFP_NOFS);
	if (!ctx->refs)
		return -ENOMEM;

	ret = find_parent_nodes(ctx, NULL);
	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
	    (ret < 0 && ret != -ENOENT)) {
		free_leaf_list(ctx->refs);
		ctx->refs = NULL;
		return ret;
	}

	return 0;
}
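
/*
 * Minimal usage sketch for btrfs_find_all_leafs() (illustrative only, error
 * handling trimmed, and process_leaf_bytenr() is a hypothetical caller-side
 * helper):
 *
 *	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
 *	struct ulist_iterator uiter;
 *	struct ulist_node *node;
 *
 *	walk_ctx.fs_info = fs_info;
 *	walk_ctx.bytenr = bytenr;
 *	walk_ctx.ignore_extent_item_pos = true;
 *
 *	if (btrfs_find_all_leafs(&walk_ctx) == 0) {
 *		ULIST_ITER_INIT(&uiter);
 *		while ((node = ulist_next(walk_ctx.refs, &uiter)))
 *			process_leaf_bytenr(node->val);
 *		ulist_free(walk_ctx.refs);
 *	}
 *
 * The plain ulist_free() is enough here because ignore_extent_item_pos is
 * set, per the comment above.
 */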

/*
 * Walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list.
 *
 * Found roots are added to @ctx->roots, which is allocated by this function if
 * it points to NULL, in which case the caller is responsible for freeing it
 * after it's not needed anymore.
 * This function requires @ctx->refs to be NULL, as it uses it for allocating a
 * ulist to do temporary work, and frees it before returning.
 *
 * Returns 0 on success, < 0 on error.
 */
static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx)
{
	const u64 orig_bytenr = ctx->bytenr;
	const bool orig_skip_inode_ref_list = ctx->skip_inode_ref_list;
	bool roots_ulist_allocated = false;
	struct ulist_iterator uiter;
	int ret = 0;

	ASSERT(ctx->refs == NULL);

	ctx->refs = ulist_alloc(GFP_NOFS);
	if (!ctx->refs)
		return -ENOMEM;

	if (!ctx->roots) {
		ctx->roots = ulist_alloc(GFP_NOFS);
		if (!ctx->roots) {
			ulist_free(ctx->refs);
			ctx->refs = NULL;
			return -ENOMEM;
		}
		roots_ulist_allocated = true;
	}

	ctx->skip_inode_ref_list = true;

	ULIST_ITER_INIT(&uiter);
	while (1) {
		struct ulist_node *node;

		ret = find_parent_nodes(ctx, NULL);
		if (ret < 0 && ret != -ENOENT) {
			if (roots_ulist_allocated) {
				ulist_free(ctx->roots);
				ctx->roots = NULL;
			}
			break;
		}
		ret = 0;
		node = ulist_next(ctx->refs, &uiter);
		if (!node)
			break;
		ctx->bytenr = node->val;
		cond_resched();
	}

	ulist_free(ctx->refs);
	ctx->refs = NULL;
	ctx->bytenr = orig_bytenr;
	ctx->skip_inode_ref_list = orig_skip_inode_ref_list;

	return ret;
}

int btrfs_find_all_roots(struct btrfs_backref_walk_ctx *ctx,
			 bool skip_commit_root_sem)
{
	int ret;

	if (!ctx->trans && !skip_commit_root_sem)
		down_read(&ctx->fs_info->commit_root_sem);
	ret = btrfs_find_all_roots_safe(ctx);
	if (!ctx->trans && !skip_commit_root_sem)
		up_read(&ctx->fs_info->commit_root_sem);
	return ret;
}

struct btrfs_backref_share_check_ctx *btrfs_alloc_backref_share_check_ctx(void)
{
	struct btrfs_backref_share_check_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ulist_init(&ctx->refs);

	return ctx;
}

void btrfs_free_backref_share_ctx(struct btrfs_backref_share_check_ctx *ctx)
{
	if (!ctx)
		return;

	ulist_release(&ctx->refs);
	kfree(ctx);
}

/*
 * Check if a data extent is shared or not.
 *
 * @inode:       The inode whose extent we are checking.
 * @bytenr:      Logical bytenr of the extent we are checking.
 * @extent_gen:  Generation of the extent (file extent item) or 0 if it is
 *               not known.
 * @ctx:         A backref sharedness check context.
 *
 * btrfs_is_data_extent_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * This attempts to attach to the running transaction in order to account for
 * delayed refs, but continues on even when no running transaction exists.
 *
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
 */
1848int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
1849 u64 extent_gen,
1850 struct btrfs_backref_share_check_ctx *ctx)
1851{
1852 struct btrfs_backref_walk_ctx walk_ctx = { 0 };
1853 struct btrfs_root *root = inode->root;
1854 struct btrfs_fs_info *fs_info = root->fs_info;
1855 struct btrfs_trans_handle *trans;
1856 struct ulist_iterator uiter;
1857 struct ulist_node *node;
1858 struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
1859 int ret = 0;
1860 struct share_check shared = {
1861 .ctx = ctx,
1862 .root = root,
1863 .inum = btrfs_ino(inode),
1864 .data_bytenr = bytenr,
1865 .data_extent_gen = extent_gen,
1866 .share_count = 0,
1867 .self_ref_count = 0,
1868 .have_delayed_delete_refs = false,
1869 };
1870 int level;
1871 bool leaf_cached;
1872 bool leaf_is_shared;
1873
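	/*
	 * First check whether the sharedness of this exact data extent was
	 * cached by a previous call.
	 */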
1874 for (int i = 0; i < BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE; i++) {
1875 if (ctx->prev_extents_cache[i].bytenr == bytenr)
1876 return ctx->prev_extents_cache[i].is_shared;
1877 }
1878
1879 ulist_init(&ctx->refs);
1880
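	/*
	 * Try to attach to the running transaction so that delayed refs are
	 * taken into account. If there is no running transaction (-ENOENT)
	 * or the filesystem is read-only (-EROFS), fall back to the commit
	 * roots, protected by commit_root_sem.
	 */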
1881 trans = btrfs_join_transaction_nostart(root);
1882 if (IS_ERR(trans)) {
1883 if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1884 ret = PTR_ERR(trans);
1885 goto out;
1886 }
1887 trans = NULL;
1888 down_read(&fs_info->commit_root_sem);
1889 } else {
1890 btrfs_get_tree_mod_seq(fs_info, &elem);
1891 walk_ctx.time_seq = elem.seq;
1892 }
1893
1894 ctx->use_path_cache = true;
1895
1896 /*
1897 * We may have previously determined that the current leaf is shared.
1898 * If it is, then we have a data extent that is shared due to a shared
1899 * subtree (caused by snapshotting) and we don't need to check for data
1900 * backrefs. If the leaf is not shared, then we must do backref walking
1901 * to determine if the data extent is shared through reflinks.
1902 */
1903 leaf_cached = lookup_backref_shared_cache(ctx, root,
1904 ctx->curr_leaf_bytenr, 0,
1905 &leaf_is_shared);
1906 if (leaf_cached && leaf_is_shared) {
1907 ret = 1;
1908 goto out_trans;
1909 }
1910
1911 walk_ctx.skip_inode_ref_list = true;
1912 walk_ctx.trans = trans;
1913 walk_ctx.fs_info = fs_info;
1914 walk_ctx.refs = &ctx->refs;
1915
	/* -1 means we are at the bytenr of the data extent. */
1917 level = -1;
1918 ULIST_ITER_INIT(&uiter);
1919 while (1) {
1920 const unsigned long prev_ref_count = ctx->refs.nnodes;
1921
1922 walk_ctx.bytenr = bytenr;
1923 ret = find_parent_nodes(&walk_ctx, &shared);
1924 if (ret == BACKREF_FOUND_SHARED ||
1925 ret == BACKREF_FOUND_NOT_SHARED) {
1926 /* If shared must return 1, otherwise return 0. */
1927 ret = (ret == BACKREF_FOUND_SHARED) ? 1 : 0;
1928 if (level >= 0)
1929 store_backref_shared_cache(ctx, root, bytenr,
1930 level, ret == 1);
1931 break;
1932 }
1933 if (ret < 0 && ret != -ENOENT)
1934 break;
1935 ret = 0;
1936
1937 /*
1938 * More than one extent buffer (bytenr) may have been added to
1939 * the ctx->refs ulist, in which case we have to check multiple
1940 * tree paths in case the first one is not shared, so we can not
1941 * use the path cache which is made for a single path. Multiple
1942 * extent buffers at the current level happen when:
1943 *
1944 * 1) level -1, the data extent: If our data extent was not
1945 * directly shared (without multiple reference items), then
1946 * it might have a single reference item with a count > 1 for
1947 * the same offset, which means there are 2 (or more) file
1948 * extent items that point to the data extent - this happens
1949 * when a file extent item needs to be split and then one
1950 * item gets moved to another leaf due to a b+tree leaf split
1951 * when inserting some item. In this case the file extent
1952 * items may be located in different leaves and therefore
1953 * some of the leaves may be referenced through shared
1954 * subtrees while others are not. Since our extent buffer
1955 * cache only works for a single path (by far the most common
1956 * case and simpler to deal with), we can not use it if we
1957 * have multiple leaves (which implies multiple paths).
1958 *
1959 * 2) level >= 0, a tree node/leaf: We can have a mix of direct
1960 * and indirect references on a b+tree node/leaf, so we have
1961 * to check multiple paths, and the extent buffer (the
1962 * current bytenr) may be shared or not. One example is
1963 * during relocation as we may get a shared tree block ref
1964 * (direct ref) and a non-shared tree block ref (indirect
1965 * ref) for the same node/leaf.
1966 */
1967 if ((ctx->refs.nnodes - prev_ref_count) > 1)
1968 ctx->use_path_cache = false;
1969
1970 if (level >= 0)
1971 store_backref_shared_cache(ctx, root, bytenr,
1972 level, false);
1973 node = ulist_next(&ctx->refs, &uiter);
1974 if (!node)
1975 break;
1976 bytenr = node->val;
1977 if (ctx->use_path_cache) {
1978 bool is_shared;
1979 bool cached;
1980
1981 level++;
1982 cached = lookup_backref_shared_cache(ctx, root, bytenr,
1983 level, &is_shared);
1984 if (cached) {
1985 ret = (is_shared ? 1 : 0);
1986 break;
1987 }
1988 }
1989 shared.share_count = 0;
1990 shared.have_delayed_delete_refs = false;
1991 cond_resched();
1992 }
1993
1994 /*
1995 * If the path cache is disabled, then it means at some tree level we
1996 * got multiple parents due to a mix of direct and indirect backrefs or
1997 * multiple leaves with file extent items pointing to the same data
1998 * extent. We have to invalidate the cache and cache only the sharedness
1999 * result for the levels where we got only one node/reference.
2000 */
2001 if (!ctx->use_path_cache) {
2002 int i = 0;
2003
2004 level--;
2005 if (ret >= 0 && level >= 0) {
2006 bytenr = ctx->path_cache_entries[level].bytenr;
2007 ctx->use_path_cache = true;
2008 store_backref_shared_cache(ctx, root, bytenr, level, ret);
2009 i = level + 1;
2010 }
2011
2012 for ( ; i < BTRFS_MAX_LEVEL; i++)
2013 ctx->path_cache_entries[i].bytenr = 0;
2014 }
2015
2016 /*
2017 * Cache the sharedness result for the data extent if we know our inode
2018 * has more than 1 file extent item that refers to the data extent.
2019 */
2020 if (ret >= 0 && shared.self_ref_count > 1) {
2021 int slot = ctx->prev_extents_cache_slot;
2022
2023 ctx->prev_extents_cache[slot].bytenr = shared.data_bytenr;
2024 ctx->prev_extents_cache[slot].is_shared = (ret == 1);
2025
2026 slot = (slot + 1) % BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE;
2027 ctx->prev_extents_cache_slot = slot;
2028 }
2029
2030out_trans:
2031 if (trans) {
2032 btrfs_put_tree_mod_seq(fs_info, &elem);
2033 btrfs_end_transaction(trans);
2034 } else {
2035 up_read(&fs_info->commit_root_sem);
2036 }
2037out:
2038 ulist_release(&ctx->refs);
2039 ctx->prev_leaf_bytenr = ctx->curr_leaf_bytenr;
2040
2041 return ret;
2042}
2043
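/*
 * Find the first INODE_EXTREF item for inode @inode_objectid with a key
 * offset of at least @start_off. On success (return value 0), *ret_extref
 * points into the leaf held by @path and *found_off, if non-NULL, receives
 * the item's key offset. Returns -ENOENT if no such item exists.
 */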
2044int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
2045 u64 start_off, struct btrfs_path *path,
2046 struct btrfs_inode_extref **ret_extref,
2047 u64 *found_off)
2048{
2049 int ret, slot;
2050 struct btrfs_key key;
2051 struct btrfs_key found_key;
2052 struct btrfs_inode_extref *extref;
2053 const struct extent_buffer *leaf;
2054 unsigned long ptr;
2055
2056 key.objectid = inode_objectid;
2057 key.type = BTRFS_INODE_EXTREF_KEY;
2058 key.offset = start_off;
2059
2060 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2061 if (ret < 0)
2062 return ret;
2063
2064 while (1) {
2065 leaf = path->nodes[0];
2066 slot = path->slots[0];
2067 if (slot >= btrfs_header_nritems(leaf)) {
			/*
			 * If the item at offset is not found,
			 * btrfs_search_slot will point us to the slot
			 * where it should be inserted. In our case
			 * that will be the slot directly before the
			 * next INODE_EXTREF item. In the case that
			 * we're pointing to the last slot in a leaf,
			 * we must move one leaf over.
			 */
2077 ret = btrfs_next_leaf(root, path);
2078 if (ret) {
2079 if (ret >= 1)
2080 ret = -ENOENT;
2081 break;
2082 }
2083 continue;
2084 }
2085
2086 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2087
		/*
		 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have a different objectid
		 * or type then there are no more to be found in the tree and
		 * we can exit.
		 */
2094 ret = -ENOENT;
2095 if (found_key.objectid != inode_objectid)
2096 break;
2097 if (found_key.type != BTRFS_INODE_EXTREF_KEY)
2098 break;
2099
2100 ret = 0;
2101 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
2102 extref = (struct btrfs_inode_extref *)ptr;
2103 *ret_extref = extref;
2104 if (found_off)
2105 *found_off = found_key.offset;
2106 break;
2107 }
2108
2109 return ret;
2110}
2111
2112/*
 * This iterates to turn a name (from an iref/extref) into a full filesystem
 * path. Elements of the path are separated by '/' and the path is guaranteed
 * to be 0-terminated. The path is only given within the current file system,
 * so it never starts with a '/'. The caller is responsible for providing
 * "size" bytes in "dest". The dest buffer is filled backwards, and finally
 * the start point of the resulting string is returned; normally this pointer
 * is within dest.
 * In case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. That way, the caller can determine how much space would be
 * required for the path to fit into the buffer. In that case, the returned
 * value will be smaller than dest. Callers must check for this!
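 *
 * For example (illustrative numbers): the path "a/b" needs 4 bytes including
 * the terminating zero. With size == 3 the returned pointer ends up at
 * dest - 1, and in general a buffer of size + (dest - returned pointer)
 * bytes would have been large enough.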
2125 */
2126char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
2127 u32 name_len, unsigned long name_off,
2128 struct extent_buffer *eb_in, u64 parent,
2129 char *dest, u32 size)
2130{
2131 int slot;
2132 u64 next_inum;
2133 int ret;
2134 s64 bytes_left = ((s64)size) - 1;
2135 struct extent_buffer *eb = eb_in;
2136 struct btrfs_key found_key;
2137 struct btrfs_inode_ref *iref;
2138
2139 if (bytes_left >= 0)
2140 dest[bytes_left] = '\0';
2141
2142 while (1) {
2143 bytes_left -= name_len;
2144 if (bytes_left >= 0)
2145 read_extent_buffer(eb, dest + bytes_left,
2146 name_off, name_len);
2147 if (eb != eb_in) {
2148 if (!path->skip_locking)
2149 btrfs_tree_read_unlock(eb);
2150 free_extent_buffer(eb);
2151 }
2152 ret = btrfs_find_item(fs_root, path, parent, 0,
2153 BTRFS_INODE_REF_KEY, &found_key);
2154 if (ret > 0)
2155 ret = -ENOENT;
2156 if (ret)
2157 break;
2158
2159 next_inum = found_key.offset;
2160
2161 /* regular exit ahead */
2162 if (parent == next_inum)
2163 break;
2164
2165 slot = path->slots[0];
2166 eb = path->nodes[0];
2167 /* make sure we can use eb after releasing the path */
2168 if (eb != eb_in) {
2169 path->nodes[0] = NULL;
2170 path->locks[0] = 0;
2171 }
2172 btrfs_release_path(path);
2173 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2174
2175 name_len = btrfs_inode_ref_name_len(eb, iref);
2176 name_off = (unsigned long)(iref + 1);
2177
2178 parent = next_inum;
2179 --bytes_left;
2180 if (bytes_left >= 0)
2181 dest[bytes_left] = '/';
2182 }
2183
2184 btrfs_release_path(path);
2185
2186 if (ret)
2187 return ERR_PTR(ret);
2188
2189 return dest + bytes_left;
2190}
2191
/*
 * This makes the path point to (logical EXTENT_ITEM *) and stores
 * BTRFS_EXTENT_FLAG_DATA (for data extents) or BTRFS_EXTENT_FLAG_TREE_BLOCK
 * (for tree blocks) in *flags_ret. Returns 0 on success and <0 on error.
 */
2197int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
2198 struct btrfs_path *path, struct btrfs_key *found_key,
2199 u64 *flags_ret)
2200{
2201 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical);
2202 int ret;
2203 u64 flags;
2204 u64 size = 0;
2205 u32 item_size;
2206 const struct extent_buffer *eb;
2207 struct btrfs_extent_item *ei;
2208 struct btrfs_key key;
2209
2210 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2211 key.type = BTRFS_METADATA_ITEM_KEY;
2212 else
2213 key.type = BTRFS_EXTENT_ITEM_KEY;
2214 key.objectid = logical;
2215 key.offset = (u64)-1;
2216
2217 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2218 if (ret < 0)
2219 return ret;
2220 if (ret == 0) {
2221 /*
		 * Key with offset -1 found: an extent item with such an
		 * offset would have to exist, but that is outside the valid
		 * range.
2224 */
2225 return -EUCLEAN;
2226 }
2227
2228 ret = btrfs_previous_extent_item(extent_root, path, 0);
2229 if (ret) {
2230 if (ret > 0)
2231 ret = -ENOENT;
2232 return ret;
2233 }
2234 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
2235 if (found_key->type == BTRFS_METADATA_ITEM_KEY)
2236 size = fs_info->nodesize;
2237 else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
2238 size = found_key->offset;
2239
2240 if (found_key->objectid > logical ||
2241 found_key->objectid + size <= logical) {
2242 btrfs_debug(fs_info,
2243 "logical %llu is not within any extent", logical);
2244 return -ENOENT;
2245 }
2246
2247 eb = path->nodes[0];
2248 item_size = btrfs_item_size(eb, path->slots[0]);
2249
2250 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
2251 flags = btrfs_extent_flags(eb, ei);
2252
2253 btrfs_debug(fs_info,
2254 "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
2255 logical, logical - found_key->objectid, found_key->objectid,
2256 found_key->offset, flags, item_size);
2257
2258 WARN_ON(!flags_ret);
2259 if (flags_ret) {
2260 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2261 *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
2262 else if (flags & BTRFS_EXTENT_FLAG_DATA)
2263 *flags_ret = BTRFS_EXTENT_FLAG_DATA;
2264 else
2265 BUG();
2266 return 0;
2267 }
2268
2269 return -EIO;
2270}
2271
2272/*
2273 * helper function to iterate extent inline refs. ptr must point to a 0 value
2274 * for the first call and may be modified. it is used to track state.
2275 * if more refs exist, 0 is returned and the next call to
2276 * get_extent_inline_ref must pass the modified ptr parameter to get the
2277 * next ref. after the last ref was processed, 1 is returned.
2278 * returns <0 on error
2279 */
2280static int get_extent_inline_ref(unsigned long *ptr,
2281 const struct extent_buffer *eb,
2282 const struct btrfs_key *key,
2283 const struct btrfs_extent_item *ei,
2284 u32 item_size,
2285 struct btrfs_extent_inline_ref **out_eiref,
2286 int *out_type)
2287{
2288 unsigned long end;
2289 u64 flags;
2290 struct btrfs_tree_block_info *info;
2291
2292 if (!*ptr) {
2293 /* first call */
2294 flags = btrfs_extent_flags(eb, ei);
2295 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2296 if (key->type == BTRFS_METADATA_ITEM_KEY) {
2297 /* a skinny metadata extent */
2298 *out_eiref =
2299 (struct btrfs_extent_inline_ref *)(ei + 1);
2300 } else {
2301 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
2302 info = (struct btrfs_tree_block_info *)(ei + 1);
2303 *out_eiref =
2304 (struct btrfs_extent_inline_ref *)(info + 1);
2305 }
2306 } else {
2307 *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
2308 }
2309 *ptr = (unsigned long)*out_eiref;
2310 if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
2311 return -ENOENT;
2312 }
2313
2314 end = (unsigned long)ei + item_size;
2315 *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
2316 *out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
2317 BTRFS_REF_TYPE_ANY);
2318 if (*out_type == BTRFS_REF_TYPE_INVALID)
2319 return -EUCLEAN;
2320
2321 *ptr += btrfs_extent_inline_ref_size(*out_type);
2322 WARN_ON(*ptr > end);
2323 if (*ptr == end)
2324 return 1; /* last */
2325
2326 return 0;
2327}
2328
2329/*
2330 * reads the tree block backref for an extent. tree level and root are returned
2331 * through out_level and out_root. ptr must point to a 0 value for the first
2332 * call and may be modified (see get_extent_inline_ref comment).
2333 * returns 0 if data was provided, 1 if there was no more data to provide or
2334 * <0 on error.
2335 */
2336int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
2337 struct btrfs_key *key, struct btrfs_extent_item *ei,
2338 u32 item_size, u64 *out_root, u8 *out_level)
2339{
2340 int ret;
2341 int type;
2342 struct btrfs_extent_inline_ref *eiref;
2343
2344 if (*ptr == (unsigned long)-1)
2345 return 1;
2346
2347 while (1) {
2348 ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
2349 &eiref, &type);
2350 if (ret < 0)
2351 return ret;
2352
2353 if (type == BTRFS_TREE_BLOCK_REF_KEY ||
2354 type == BTRFS_SHARED_BLOCK_REF_KEY)
2355 break;
2356
2357 if (ret == 1)
2358 return 1;
2359 }
2360
2361 /* we can treat both ref types equally here */
2362 *out_root = btrfs_extent_inline_ref_offset(eb, eiref);
2363
2364 if (key->type == BTRFS_EXTENT_ITEM_KEY) {
2365 struct btrfs_tree_block_info *info;
2366
2367 info = (struct btrfs_tree_block_info *)(ei + 1);
2368 *out_level = btrfs_tree_block_level(eb, info);
2369 } else {
2370 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
2371 *out_level = (u8)key->offset;
2372 }
2373
2374 if (ret == 1)
2375 *ptr = (unsigned long)-1;
2376
2377 return 0;
2378}
2379
2380static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
2381 struct extent_inode_elem *inode_list,
2382 u64 root, u64 extent_item_objectid,
2383 iterate_extent_inodes_t *iterate, void *ctx)
2384{
2385 struct extent_inode_elem *eie;
2386 int ret = 0;
2387
2388 for (eie = inode_list; eie; eie = eie->next) {
2389 btrfs_debug(fs_info,
2390 "ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu",
2391 extent_item_objectid, eie->inum,
2392 eie->offset, root);
2393 ret = iterate(eie->inum, eie->offset, eie->num_bytes, root, ctx);
2394 if (ret) {
2395 btrfs_debug(fs_info,
2396 "stopping iteration for %llu due to ret=%d",
2397 extent_item_objectid, ret);
2398 break;
2399 }
2400 }
2401
2402 return ret;
2403}
2404
2405/*
 * Calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * When the iterator function returns a non-zero value, iteration stops.
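 *
 * Sketch of a caller (my_iterate and my_ctx are illustrative names, not
 * taken from a real caller): fill a walk context with the fs_info and the
 * extent bytenr, then
 *
 *	walk_ctx.bytenr = extent_bytenr;
 *	walk_ctx.fs_info = fs_info;
 *	ret = iterate_extent_inodes(&walk_ctx, false, my_iterate, my_ctx);
 *
 * where my_iterate(inum, offset, num_bytes, root, ctx) returns 0 to keep
 * going or BTRFS_ITERATE_EXTENT_INODES_STOP to stop early.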
2409 */
2410int iterate_extent_inodes(struct btrfs_backref_walk_ctx *ctx,
2411 bool search_commit_root,
2412 iterate_extent_inodes_t *iterate, void *user_ctx)
2413{
2414 int ret;
2415 struct ulist *refs;
2416 struct ulist_node *ref_node;
2417 struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem);
2418 struct ulist_iterator ref_uiter;
2419
2420 btrfs_debug(ctx->fs_info, "resolving all inodes for extent %llu",
2421 ctx->bytenr);
2422
2423 ASSERT(ctx->trans == NULL);
2424 ASSERT(ctx->roots == NULL);
2425
2426 if (!search_commit_root) {
2427 struct btrfs_trans_handle *trans;
2428
2429 trans = btrfs_attach_transaction(ctx->fs_info->tree_root);
2430 if (IS_ERR(trans)) {
2431 if (PTR_ERR(trans) != -ENOENT &&
2432 PTR_ERR(trans) != -EROFS)
2433 return PTR_ERR(trans);
2434 trans = NULL;
2435 }
2436 ctx->trans = trans;
2437 }
2438
2439 if (ctx->trans) {
2440 btrfs_get_tree_mod_seq(ctx->fs_info, &seq_elem);
2441 ctx->time_seq = seq_elem.seq;
2442 } else {
2443 down_read(&ctx->fs_info->commit_root_sem);
2444 }
2445
2446 ret = btrfs_find_all_leafs(ctx);
2447 if (ret)
2448 goto out;
2449 refs = ctx->refs;
2450 ctx->refs = NULL;
2451
2452 ULIST_ITER_INIT(&ref_uiter);
2453 while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
2454 const u64 leaf_bytenr = ref_node->val;
2455 struct ulist_node *root_node;
2456 struct ulist_iterator root_uiter;
2457 struct extent_inode_elem *inode_list;
2458
2459 inode_list = (struct extent_inode_elem *)(uintptr_t)ref_node->aux;
2460
2461 if (ctx->cache_lookup) {
2462 const u64 *root_ids;
2463 int root_count;
2464 bool cached;
2465
2466 cached = ctx->cache_lookup(leaf_bytenr, ctx->user_ctx,
2467 &root_ids, &root_count);
2468 if (cached) {
2469 for (int i = 0; i < root_count; i++) {
2470 ret = iterate_leaf_refs(ctx->fs_info,
2471 inode_list,
2472 root_ids[i],
2473 leaf_bytenr,
2474 iterate,
2475 user_ctx);
2476 if (ret)
2477 break;
2478 }
2479 continue;
2480 }
2481 }
2482
2483 if (!ctx->roots) {
2484 ctx->roots = ulist_alloc(GFP_NOFS);
2485 if (!ctx->roots) {
2486 ret = -ENOMEM;
2487 break;
2488 }
2489 }
2490
2491 ctx->bytenr = leaf_bytenr;
2492 ret = btrfs_find_all_roots_safe(ctx);
2493 if (ret)
2494 break;
2495
2496 if (ctx->cache_store)
2497 ctx->cache_store(leaf_bytenr, ctx->roots, ctx->user_ctx);
2498
2499 ULIST_ITER_INIT(&root_uiter);
2500 while (!ret && (root_node = ulist_next(ctx->roots, &root_uiter))) {
2501 btrfs_debug(ctx->fs_info,
2502 "root %llu references leaf %llu, data list %#llx",
2503 root_node->val, ref_node->val,
2504 ref_node->aux);
2505 ret = iterate_leaf_refs(ctx->fs_info, inode_list,
2506 root_node->val, ctx->bytenr,
2507 iterate, user_ctx);
2508 }
2509 ulist_reinit(ctx->roots);
2510 }
2511
2512 free_leaf_list(refs);
2513out:
2514 if (ctx->trans) {
2515 btrfs_put_tree_mod_seq(ctx->fs_info, &seq_elem);
2516 btrfs_end_transaction(ctx->trans);
2517 ctx->trans = NULL;
2518 } else {
2519 up_read(&ctx->fs_info->commit_root_sem);
2520 }
2521
2522 ulist_free(ctx->roots);
2523 ctx->roots = NULL;
2524
2525 if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP)
2526 ret = 0;
2527
2528 return ret;
2529}
2530
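/*
 * Pack one (inum, offset, root) triplet into the flat u64 array of the
 * btrfs_data_container handed in via @ctx (this is the format consumed by
 * the logical-to-ino ioctl). If the container is full, only account how
 * many elements and bytes were missed.
 */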
2531static int build_ino_list(u64 inum, u64 offset, u64 num_bytes, u64 root, void *ctx)
2532{
2533 struct btrfs_data_container *inodes = ctx;
2534 const size_t c = 3 * sizeof(u64);
2535
2536 if (inodes->bytes_left >= c) {
2537 inodes->bytes_left -= c;
2538 inodes->val[inodes->elem_cnt] = inum;
2539 inodes->val[inodes->elem_cnt + 1] = offset;
2540 inodes->val[inodes->elem_cnt + 2] = root;
2541 inodes->elem_cnt += 3;
2542 } else {
2543 inodes->bytes_missing += c - inodes->bytes_left;
2544 inodes->bytes_left = 0;
2545 inodes->elem_missed += 3;
2546 }
2547
2548 return 0;
2549}
2550
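/*
 * Resolve all inodes that reference the data extent at @logical and feed
 * (inum, offset, root) triplets into the btrfs_data_container passed as
 * @ctx, via build_ino_list(). Returns -EINVAL if @logical points into a
 * tree block rather than a data extent.
 */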
2551int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2552 struct btrfs_path *path,
2553 void *ctx, bool ignore_offset)
2554{
2555 struct btrfs_backref_walk_ctx walk_ctx = { 0 };
2556 int ret;
2557 u64 flags = 0;
2558 struct btrfs_key found_key;
2559 int search_commit_root = path->search_commit_root;
2560
2561 ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
2562 btrfs_release_path(path);
2563 if (ret < 0)
2564 return ret;
2565 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2566 return -EINVAL;
2567
2568 walk_ctx.bytenr = found_key.objectid;
2569 if (ignore_offset)
2570 walk_ctx.ignore_extent_item_pos = true;
2571 else
2572 walk_ctx.extent_item_pos = logical - found_key.objectid;
2573 walk_ctx.fs_info = fs_info;
2574
2575 return iterate_extent_inodes(&walk_ctx, search_commit_root,
2576 build_ino_list, ctx);
2577}
2578
2579static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2580 struct extent_buffer *eb, struct inode_fs_paths *ipath);
2581
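/*
 * Walk all INODE_REF items of inode @inum and call inode_to_path() once for
 * every name found, resolving each hard link that uses the regular ref
 * format.
 */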
2582static int iterate_inode_refs(u64 inum, struct inode_fs_paths *ipath)
2583{
2584 int ret = 0;
2585 int slot;
2586 u32 cur;
2587 u32 len;
2588 u32 name_len;
2589 u64 parent = 0;
2590 int found = 0;
2591 struct btrfs_root *fs_root = ipath->fs_root;
2592 struct btrfs_path *path = ipath->btrfs_path;
2593 struct extent_buffer *eb;
2594 struct btrfs_inode_ref *iref;
2595 struct btrfs_key found_key;
2596
2597 while (!ret) {
2598 ret = btrfs_find_item(fs_root, path, inum,
2599 parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2600 &found_key);
2601
2602 if (ret < 0)
2603 break;
2604 if (ret) {
2605 ret = found ? 0 : -ENOENT;
2606 break;
2607 }
2608 ++found;
2609
2610 parent = found_key.offset;
2611 slot = path->slots[0];
2612 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2613 if (!eb) {
2614 ret = -ENOMEM;
2615 break;
2616 }
2617 btrfs_release_path(path);
2618
2619 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2620
2621 for (cur = 0; cur < btrfs_item_size(eb, slot); cur += len) {
2622 name_len = btrfs_inode_ref_name_len(eb, iref);
2623 /* path must be released before calling iterate()! */
2624 btrfs_debug(fs_root->fs_info,
2625 "following ref at offset %u for inode %llu in tree %llu",
2626 cur, found_key.objectid,
2627 btrfs_root_id(fs_root));
2628 ret = inode_to_path(parent, name_len,
2629 (unsigned long)(iref + 1), eb, ipath);
2630 if (ret)
2631 break;
2632 len = sizeof(*iref) + name_len;
2633 iref = (struct btrfs_inode_ref *)((char *)iref + len);
2634 }
2635 free_extent_buffer(eb);
2636 }
2637
2638 btrfs_release_path(path);
2639
2640 return ret;
2641}
2642
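/*
 * Same as iterate_inode_refs(), but for INODE_EXTREF items, which are used
 * when a ref no longer fits into the regular INODE_REF item (e.g. many
 * hard links within one directory).
 */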
2643static int iterate_inode_extrefs(u64 inum, struct inode_fs_paths *ipath)
2644{
2645 int ret;
2646 int slot;
2647 u64 offset = 0;
2648 u64 parent;
2649 int found = 0;
2650 struct btrfs_root *fs_root = ipath->fs_root;
2651 struct btrfs_path *path = ipath->btrfs_path;
2652 struct extent_buffer *eb;
2653 struct btrfs_inode_extref *extref;
2654 u32 item_size;
2655 u32 cur_offset;
2656 unsigned long ptr;
2657
2658 while (1) {
2659 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2660 &offset);
2661 if (ret < 0)
2662 break;
2663 if (ret) {
2664 ret = found ? 0 : -ENOENT;
2665 break;
2666 }
2667 ++found;
2668
2669 slot = path->slots[0];
2670 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2671 if (!eb) {
2672 ret = -ENOMEM;
2673 break;
2674 }
2675 btrfs_release_path(path);
2676
2677 item_size = btrfs_item_size(eb, slot);
2678 ptr = btrfs_item_ptr_offset(eb, slot);
2679 cur_offset = 0;
2680
2681 while (cur_offset < item_size) {
2682 u32 name_len;
2683
2684 extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2685 parent = btrfs_inode_extref_parent(eb, extref);
2686 name_len = btrfs_inode_extref_name_len(eb, extref);
2687 ret = inode_to_path(parent, name_len,
2688 (unsigned long)&extref->name, eb, ipath);
2689 if (ret)
2690 break;
2691
2692 cur_offset += btrfs_inode_extref_name_len(eb, extref);
2693 cur_offset += sizeof(*extref);
2694 }
2695 free_extent_buffer(eb);
2696
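		/* Continue the search right after the extref item we just handled. */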
2697 offset++;
2698 }
2699
2700 btrfs_release_path(path);
2701
2702 return ret;
2703}
2704
2705/*
 * returns 0 if the path could be dumped (possibly truncated)
2707 * returns <0 in case of an error
2708 */
2709static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2710 struct extent_buffer *eb, struct inode_fs_paths *ipath)
2711{
2712 char *fspath;
2713 char *fspath_min;
2714 int i = ipath->fspath->elem_cnt;
2715 const int s_ptr = sizeof(char *);
2716 u32 bytes_left;
2717
2718 bytes_left = ipath->fspath->bytes_left > s_ptr ?
2719 ipath->fspath->bytes_left - s_ptr : 0;
2720
2721 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2722 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2723 name_off, eb, inum, fspath_min, bytes_left);
2724 if (IS_ERR(fspath))
2725 return PTR_ERR(fspath);
2726
2727 if (fspath > fspath_min) {
2728 ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2729 ++ipath->fspath->elem_cnt;
2730 ipath->fspath->bytes_left = fspath - fspath_min;
2731 } else {
2732 ++ipath->fspath->elem_missed;
2733 ipath->fspath->bytes_missing += fspath_min - fspath;
2734 ipath->fspath->bytes_left = 0;
2735 }
2736
2737 return 0;
2738}
2739
2740/*
 * This dumps all file system paths to the inode into the ipath struct,
 * provided it has been created large enough. Each path is zero-terminated
 * and accessed from ipath->fspath->val[i].
 * When it returns, ipath->fspath->elem_cnt paths are available in
 * ipath->fspath->val[]. When the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed,
 * otherwise it's zero. ipath->fspath->bytes_missing holds the number of
 * bytes that would have been needed to return all paths.
2749 */
2750int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2751{
2752 int ret;
2753 int found_refs = 0;
2754
2755 ret = iterate_inode_refs(inum, ipath);
2756 if (!ret)
2757 ++found_refs;
2758 else if (ret != -ENOENT)
2759 return ret;
2760
2761 ret = iterate_inode_extrefs(inum, ipath);
2762 if (ret == -ENOENT && found_refs)
2763 return 0;
2764
2765 return ret;
2766}
2767
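/*
 * Allocate a btrfs_data_container with at most @total_bytes of room.
 * Illustrative accounting (the numbers are examples only): with
 * total_bytes == 4096, data->bytes_left starts at 4096 - sizeof(*data);
 * with total_bytes smaller than the header, nothing fits and
 * data->bytes_missing records the shortfall.
 */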
2768struct btrfs_data_container *init_data_container(u32 total_bytes)
2769{
2770 struct btrfs_data_container *data;
2771 size_t alloc_bytes;
2772
2773 alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2774 data = kvzalloc(alloc_bytes, GFP_KERNEL);
2775 if (!data)
2776 return ERR_PTR(-ENOMEM);
2777
2778 if (total_bytes >= sizeof(*data))
2779 data->bytes_left = total_bytes - sizeof(*data);
2780 else
2781 data->bytes_missing = sizeof(*data) - total_bytes;
2782
2783 return data;
2784}
2785
2786/*
 * Allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed; note that the space usable for actual
 * path information will be total_bytes - sizeof(struct btrfs_data_container).
 * The returned pointer must be freed with free_ipath() in the end.
2791 */
2792struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2793 struct btrfs_path *path)
2794{
2795 struct inode_fs_paths *ifp;
2796 struct btrfs_data_container *fspath;
2797
2798 fspath = init_data_container(total_bytes);
2799 if (IS_ERR(fspath))
2800 return ERR_CAST(fspath);
2801
2802 ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2803 if (!ifp) {
2804 kvfree(fspath);
2805 return ERR_PTR(-ENOMEM);
2806 }
2807
2808 ifp->btrfs_path = path;
2809 ifp->fspath = fspath;
2810 ifp->fs_root = fs_root;
2811
2812 return ifp;
2813}
2814
2815void free_ipath(struct inode_fs_paths *ipath)
2816{
2817 if (!ipath)
2818 return;
2819 kvfree(ipath->fspath);
2820 kfree(ipath);
2821}
2822
2823struct btrfs_backref_iter *btrfs_backref_iter_alloc(struct btrfs_fs_info *fs_info)
2824{
2825 struct btrfs_backref_iter *ret;
2826
2827 ret = kzalloc(sizeof(*ret), GFP_NOFS);
2828 if (!ret)
2829 return NULL;
2830
2831 ret->path = btrfs_alloc_path();
2832 if (!ret->path) {
2833 kfree(ret);
2834 return NULL;
2835 }
2836
2837 /* Current backref iterator only supports iteration in commit root */
2838 ret->path->search_commit_root = 1;
2839 ret->path->skip_locking = 1;
2840 ret->fs_info = fs_info;
2841
2842 return ret;
2843}
2844
2845static void btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
2846{
2847 iter->bytenr = 0;
2848 iter->item_ptr = 0;
2849 iter->cur_ptr = 0;
2850 iter->end_ptr = 0;
2851 btrfs_release_path(iter->path);
2852 memset(&iter->cur_key, 0, sizeof(iter->cur_key));
2853}
2854
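/*
 * Typical use of the iterator (a sketch with error handling trimmed; the
 * process() step is illustrative):
 *
 *	ret = btrfs_backref_iter_start(iter, bytenr);
 *	while (ret == 0) {
 *		process(iter->cur_key);
 *		ret = btrfs_backref_iter_next(iter);
 *	}
 *
 * btrfs_backref_iter_next() returns >0 once all backrefs of the bytenr have
 * been visited and <0 on error.
 */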
2855int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
2856{
2857 struct btrfs_fs_info *fs_info = iter->fs_info;
2858 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
2859 struct btrfs_path *path = iter->path;
2860 struct btrfs_extent_item *ei;
2861 struct btrfs_key key;
2862 int ret;
2863
2864 key.objectid = bytenr;
2865 key.type = BTRFS_METADATA_ITEM_KEY;
2866 key.offset = (u64)-1;
2867 iter->bytenr = bytenr;
2868
2869 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2870 if (ret < 0)
2871 return ret;
2872 if (ret == 0) {
2873 /*
		 * Key with offset -1 found: an extent item with such an
		 * offset would have to exist, but that is outside the valid
		 * range.
2876 */
2877 ret = -EUCLEAN;
2878 goto release;
2879 }
2880 if (path->slots[0] == 0) {
2881 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
2882 ret = -EUCLEAN;
2883 goto release;
2884 }
2885 path->slots[0]--;
2886
2887 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2888 if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
2889 key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
2890 ret = -ENOENT;
2891 goto release;
2892 }
2893 memcpy(&iter->cur_key, &key, sizeof(key));
2894 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2895 path->slots[0]);
2896 iter->end_ptr = (u32)(iter->item_ptr +
2897 btrfs_item_size(path->nodes[0], path->slots[0]));
2898 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2899 struct btrfs_extent_item);
2900
2901 /*
	 * Only iteration of tree backrefs is supported for now.
	 *
	 * This is an extra precaution for non skinny-metadata file systems,
	 * where EXTENT_ITEM is also used for tree blocks, so we can only use
	 * the extent flags to determine if it's a tree block.
2907 */
2908 if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2909 ret = -ENOTSUPP;
2910 goto release;
2911 }
2912 iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2913
2914 /* If there is no inline backref, go search for keyed backref */
2915 if (iter->cur_ptr >= iter->end_ptr) {
2916 ret = btrfs_next_item(extent_root, path);
2917
2918 /* No inline nor keyed ref */
2919 if (ret > 0) {
2920 ret = -ENOENT;
2921 goto release;
2922 }
2923 if (ret < 0)
2924 goto release;
2925
2926 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2927 path->slots[0]);
2928 if (iter->cur_key.objectid != bytenr ||
2929 (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2930 iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2931 ret = -ENOENT;
2932 goto release;
2933 }
2934 iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2935 path->slots[0]);
2936 iter->item_ptr = iter->cur_ptr;
2937 iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size(
2938 path->nodes[0], path->slots[0]));
2939 }
2940
2941 return 0;
2942release:
2943 btrfs_backref_iter_release(iter);
2944 return ret;
2945}
2946
2947static bool btrfs_backref_iter_is_inline_ref(struct btrfs_backref_iter *iter)
2948{
2949 if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY ||
2950 iter->cur_key.type == BTRFS_METADATA_ITEM_KEY)
2951 return true;
2952 return false;
2953}
2954
2955/*
 * Go to the next backref item of the current bytenr; it can be either
 * inlined or keyed.
 *
 * The caller needs to check whether it's an inline ref or not via
 * iter->cur_key.
 *
 * Return 0 if we got the next backref without problem.
 * Return >0 if there is no extra backref for this bytenr.
 * Return <0 if something went wrong.
2964 */
2965int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
2966{
2967 struct extent_buffer *eb = iter->path->nodes[0];
2968 struct btrfs_root *extent_root;
2969 struct btrfs_path *path = iter->path;
2970 struct btrfs_extent_inline_ref *iref;
2971 int ret;
2972 u32 size;
2973
2974 if (btrfs_backref_iter_is_inline_ref(iter)) {
2975 /* We're still inside the inline refs */
2976 ASSERT(iter->cur_ptr < iter->end_ptr);
2977
2978 if (btrfs_backref_has_tree_block_info(iter)) {
2979 /* First tree block info */
2980 size = sizeof(struct btrfs_tree_block_info);
2981 } else {
2982 /* Use inline ref type to determine the size */
2983 int type;
2984
2985 iref = (struct btrfs_extent_inline_ref *)
2986 ((unsigned long)iter->cur_ptr);
2987 type = btrfs_extent_inline_ref_type(eb, iref);
2988
2989 size = btrfs_extent_inline_ref_size(type);
2990 }
2991 iter->cur_ptr += size;
2992 if (iter->cur_ptr < iter->end_ptr)
2993 return 0;
2994
2995 /* All inline items iterated, fall through */
2996 }
2997
2998 /* We're at keyed items, there is no inline item, go to the next one */
2999 extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr);
3000 ret = btrfs_next_item(extent_root, iter->path);
3001 if (ret)
3002 return ret;
3003
3004 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
3005 if (iter->cur_key.objectid != iter->bytenr ||
3006 (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
3007 iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
3008 return 1;
3009 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
3010 path->slots[0]);
3011 iter->cur_ptr = iter->item_ptr;
3012 iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0],
3013 path->slots[0]);
3014 return 0;
3015}
3016
3017void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
3018 struct btrfs_backref_cache *cache, bool is_reloc)
3019{
3020 int i;
3021
3022 cache->rb_root = RB_ROOT;
3023 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
3024 INIT_LIST_HEAD(&cache->pending[i]);
3025 INIT_LIST_HEAD(&cache->changed);
3026 INIT_LIST_HEAD(&cache->detached);
3027 INIT_LIST_HEAD(&cache->leaves);
3028 INIT_LIST_HEAD(&cache->pending_edge);
3029 INIT_LIST_HEAD(&cache->useless_node);
3030 cache->fs_info = fs_info;
3031 cache->is_reloc = is_reloc;
3032}
3033
3034struct btrfs_backref_node *btrfs_backref_alloc_node(
3035 struct btrfs_backref_cache *cache, u64 bytenr, int level)
3036{
3037 struct btrfs_backref_node *node;
3038
3039 ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
3040 node = kzalloc(sizeof(*node), GFP_NOFS);
3041 if (!node)
3042 return node;
3043
3044 INIT_LIST_HEAD(&node->list);
3045 INIT_LIST_HEAD(&node->upper);
3046 INIT_LIST_HEAD(&node->lower);
3047 RB_CLEAR_NODE(&node->rb_node);
3048 cache->nr_nodes++;
3049 node->level = level;
3050 node->bytenr = bytenr;
3051
3052 return node;
3053}
3054
3055void btrfs_backref_free_node(struct btrfs_backref_cache *cache,
3056 struct btrfs_backref_node *node)
3057{
3058 if (node) {
3059 ASSERT(list_empty(&node->list));
3060 ASSERT(list_empty(&node->lower));
3061 ASSERT(node->eb == NULL);
3062 cache->nr_nodes--;
3063 btrfs_put_root(node->root);
3064 kfree(node);
3065 }
3066}
3067
3068struct btrfs_backref_edge *btrfs_backref_alloc_edge(
3069 struct btrfs_backref_cache *cache)
3070{
3071 struct btrfs_backref_edge *edge;
3072
3073 edge = kzalloc(sizeof(*edge), GFP_NOFS);
3074 if (edge)
3075 cache->nr_edges++;
3076 return edge;
3077}
3078
3079void btrfs_backref_free_edge(struct btrfs_backref_cache *cache,
3080 struct btrfs_backref_edge *edge)
3081{
3082 if (edge) {
3083 cache->nr_edges--;
3084 kfree(edge);
3085 }
3086}
3087
3088void btrfs_backref_unlock_node_buffer(struct btrfs_backref_node *node)
3089{
3090 if (node->locked) {
3091 btrfs_tree_unlock(node->eb);
3092 node->locked = 0;
3093 }
3094}
3095
3096void btrfs_backref_drop_node_buffer(struct btrfs_backref_node *node)
3097{
3098 if (node->eb) {
3099 btrfs_backref_unlock_node_buffer(node);
3100 free_extent_buffer(node->eb);
3101 node->eb = NULL;
3102 }
3103}
3104
3105/*
 * Drop the backref node from the cache without cleaning up its child
 * edges.
 *
 * This can only be called on a node without parent edges.
 * The child edges are still kept as is.
3111 */
3112void btrfs_backref_drop_node(struct btrfs_backref_cache *tree,
3113 struct btrfs_backref_node *node)
3114{
3115 ASSERT(list_empty(&node->upper));
3116
3117 btrfs_backref_drop_node_buffer(node);
3118 list_del_init(&node->list);
3119 list_del_init(&node->lower);
3120 if (!RB_EMPTY_NODE(&node->rb_node))
3121 rb_erase(&node->rb_node, &tree->rb_root);
3122 btrfs_backref_free_node(tree, node);
3123}
3124
3125/*
3126 * Drop the backref node from cache, also cleaning up all its
3127 * upper edges and any uncached nodes in the path.
3128 *
3129 * This cleanup happens bottom up, thus the node should either
3130 * be the lowest node in the cache or a detached node.
3131 */
3132void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
3133 struct btrfs_backref_node *node)
3134{
3135 struct btrfs_backref_node *upper;
3136 struct btrfs_backref_edge *edge;
3137
3138 if (!node)
3139 return;
3140
3141 BUG_ON(!node->lowest && !node->detached);
3142 while (!list_empty(&node->upper)) {
3143 edge = list_entry(node->upper.next, struct btrfs_backref_edge,
3144 list[LOWER]);
3145 upper = edge->node[UPPER];
3146 list_del(&edge->list[LOWER]);
3147 list_del(&edge->list[UPPER]);
3148 btrfs_backref_free_edge(cache, edge);
3149
3150 /*
		 * Add the node to the leaf node list if no other child block
		 * is cached.
3153 */
3154 if (list_empty(&upper->lower)) {
3155 list_add_tail(&upper->lower, &cache->leaves);
3156 upper->lowest = 1;
3157 }
3158 }
3159
3160 btrfs_backref_drop_node(cache, node);
3161}
3162
3163/*
3164 * Release all nodes/edges from current cache
3165 */
3166void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
3167{
3168 struct btrfs_backref_node *node;
3169 int i;
3170
3171 while (!list_empty(&cache->detached)) {
3172 node = list_entry(cache->detached.next,
3173 struct btrfs_backref_node, list);
3174 btrfs_backref_cleanup_node(cache, node);
3175 }
3176
3177 while (!list_empty(&cache->leaves)) {
3178 node = list_entry(cache->leaves.next,
3179 struct btrfs_backref_node, lower);
3180 btrfs_backref_cleanup_node(cache, node);
3181 }
3182
3183 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
3184 while (!list_empty(&cache->pending[i])) {
3185 node = list_first_entry(&cache->pending[i],
3186 struct btrfs_backref_node,
3187 list);
3188 btrfs_backref_cleanup_node(cache, node);
3189 }
3190 }
3191 ASSERT(list_empty(&cache->pending_edge));
3192 ASSERT(list_empty(&cache->useless_node));
3193 ASSERT(list_empty(&cache->changed));
3194 ASSERT(list_empty(&cache->detached));
3195 ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
3196 ASSERT(!cache->nr_nodes);
3197 ASSERT(!cache->nr_edges);
3198}
3199
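/*
 * Link an edge between @lower and @upper. @link_which selects which of the
 * two list linkages (LINK_LOWER and/or LINK_UPPER) is established right
 * away; the remaining direction is hooked up later, e.g. by
 * btrfs_backref_finish_upper_links().
 */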
3200void btrfs_backref_link_edge(struct btrfs_backref_edge *edge,
3201 struct btrfs_backref_node *lower,
3202 struct btrfs_backref_node *upper,
3203 int link_which)
3204{
3205 ASSERT(upper && lower && upper->level == lower->level + 1);
3206 edge->node[LOWER] = lower;
3207 edge->node[UPPER] = upper;
3208 if (link_which & LINK_LOWER)
3209 list_add_tail(&edge->list[LOWER], &lower->upper);
3210 if (link_which & LINK_UPPER)
3211 list_add_tail(&edge->list[UPPER], &upper->lower);
3212}

/*
3214 * Handle direct tree backref
3215 *
 * A direct tree backref means the backref item shows its parent bytenr
 * directly. This is for SHARED_BLOCK_REF backrefs (keyed or inlined).
3218 *
3219 * @ref_key: The converted backref key.
3220 * For keyed backref, it's the item key.
3221 * For inlined backref, objectid is the bytenr,
3222 * type is btrfs_inline_ref_type, offset is
3223 * btrfs_inline_ref_offset.
3224 */
3225static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
3226 struct btrfs_key *ref_key,
3227 struct btrfs_backref_node *cur)
3228{
3229 struct btrfs_backref_edge *edge;
3230 struct btrfs_backref_node *upper;
3231 struct rb_node *rb_node;
3232
3233 ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
3234
3235 /* Only reloc root uses backref pointing to itself */
3236 if (ref_key->objectid == ref_key->offset) {
3237 struct btrfs_root *root;
3238
3239 cur->is_reloc_root = 1;
3240 /* Only reloc backref cache cares about a specific root */
3241 if (cache->is_reloc) {
3242 root = find_reloc_root(cache->fs_info, cur->bytenr);
3243 if (!root)
3244 return -ENOENT;
3245 cur->root = root;
3246 } else {
3247 /*
3248 * For generic purpose backref cache, reloc root node
3249 * is useless.
3250 */
3251 list_add(&cur->list, &cache->useless_node);
3252 }
3253 return 0;
3254 }
3255
3256 edge = btrfs_backref_alloc_edge(cache);
3257 if (!edge)
3258 return -ENOMEM;
3259
3260 rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
3261 if (!rb_node) {
3262 /* Parent node not yet cached */
3263 upper = btrfs_backref_alloc_node(cache, ref_key->offset,
3264 cur->level + 1);
3265 if (!upper) {
3266 btrfs_backref_free_edge(cache, edge);
3267 return -ENOMEM;
3268 }
3269
3270 /*
		 * Backrefs for the upper level block aren't cached, add the
		 * block to the pending list
3273 */
3274 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
3275 } else {
3276 /* Parent node already cached */
3277 upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
3278 ASSERT(upper->checked);
3279 INIT_LIST_HEAD(&edge->list[UPPER]);
3280 }
3281 btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
3282 return 0;
3283}
3284
3285/*
3286 * Handle indirect tree backref
3287 *
 * An indirect tree backref means we only know which tree the node belongs
 * to. We still need to do a tree search to find its parents. This is for
 * TREE_BLOCK_REF backrefs (keyed or inlined).
3291 *
3292 * @trans: Transaction handle.
3293 * @ref_key: The same as @ref_key in handle_direct_tree_backref()
3294 * @tree_key: The first key of this tree block.
 * @path: A clean (released) path, to avoid allocating a path every
 * time the function gets called.
3297 */
3298static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans,
3299 struct btrfs_backref_cache *cache,
3300 struct btrfs_path *path,
3301 struct btrfs_key *ref_key,
3302 struct btrfs_key *tree_key,
3303 struct btrfs_backref_node *cur)
3304{
3305 struct btrfs_fs_info *fs_info = cache->fs_info;
3306 struct btrfs_backref_node *upper;
3307 struct btrfs_backref_node *lower;
3308 struct btrfs_backref_edge *edge;
3309 struct extent_buffer *eb;
3310 struct btrfs_root *root;
3311 struct rb_node *rb_node;
3312 int level;
3313 bool need_check = true;
3314 int ret;
3315
3316 root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
3317 if (IS_ERR(root))
3318 return PTR_ERR(root);
3319 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
3320 cur->cowonly = 1;
3321
3322 if (btrfs_root_level(&root->root_item) == cur->level) {
3323 /* Tree root */
3324 ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
3325 /*
		 * For the reloc backref cache, we may ignore the reloc root.
		 * But for the general purpose backref cache, we can't rely on
		 * btrfs_should_ignore_reloc_root() as it may conflict with a
		 * currently running relocation and lead to a missing root.
		 *
		 * For the general purpose backref cache, reloc root detection
		 * relies completely on direct backrefs (key->offset is the
		 * parent bytenr), thus we only do such a check for the reloc
		 * cache.
3334 */
3335 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
3336 btrfs_put_root(root);
3337 list_add(&cur->list, &cache->useless_node);
3338 } else {
3339 cur->root = root;
3340 }
3341 return 0;
3342 }
3343
3344 level = cur->level + 1;
3345
3346 /* Search the tree to find parent blocks referring to the block */
3347 path->search_commit_root = 1;
3348 path->skip_locking = 1;
3349 path->lowest_level = level;
3350 ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
3351 path->lowest_level = 0;
3352 if (ret < 0) {
3353 btrfs_put_root(root);
3354 return ret;
3355 }
3356 if (ret > 0 && path->slots[level] > 0)
3357 path->slots[level]--;
3358
3359 eb = path->nodes[level];
3360 if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
3361 btrfs_err(fs_info,
3362"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
3363 cur->bytenr, level - 1, btrfs_root_id(root),
3364 tree_key->objectid, tree_key->type, tree_key->offset);
3365 btrfs_put_root(root);
3366 ret = -ENOENT;
3367 goto out;
3368 }
3369 lower = cur;
3370
3371 /* Add all nodes and edges in the path */
3372 for (; level < BTRFS_MAX_LEVEL; level++) {
3373 if (!path->nodes[level]) {
3374 ASSERT(btrfs_root_bytenr(&root->root_item) ==
3375 lower->bytenr);
3376 /* Same as previous should_ignore_reloc_root() call */
3377 if (btrfs_should_ignore_reloc_root(root) &&
3378 cache->is_reloc) {
3379 btrfs_put_root(root);
3380 list_add(&lower->list, &cache->useless_node);
3381 } else {
3382 lower->root = root;
3383 }
3384 break;
3385 }
3386
3387 edge = btrfs_backref_alloc_edge(cache);
3388 if (!edge) {
3389 btrfs_put_root(root);
3390 ret = -ENOMEM;
3391 goto out;
3392 }
3393
3394 eb = path->nodes[level];
3395 rb_node = rb_simple_search(&cache->rb_root, eb->start);
3396 if (!rb_node) {
3397 upper = btrfs_backref_alloc_node(cache, eb->start,
3398 lower->level + 1);
3399 if (!upper) {
3400 btrfs_put_root(root);
3401 btrfs_backref_free_edge(cache, edge);
3402 ret = -ENOMEM;
3403 goto out;
3404 }
3405 upper->owner = btrfs_header_owner(eb);
3406 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
3407 upper->cowonly = 1;
3408
3409 /*
3410 * If we know the block isn't shared we can avoid
3411 * checking its backrefs.
3412 */
3413 if (btrfs_block_can_be_shared(trans, root, eb))
3414 upper->checked = 0;
3415 else
3416 upper->checked = 1;
3417
3418 /*
			 * Add the block to the pending list if we need to check
			 * its backrefs. We only do this once while walking up a
			 * tree, as we will catch anything else later on.
3422 */
3423 if (!upper->checked && need_check) {
3424 need_check = false;
3425 list_add_tail(&edge->list[UPPER],
3426 &cache->pending_edge);
3427 } else {
3428 if (upper->checked)
3429 need_check = true;
3430 INIT_LIST_HEAD(&edge->list[UPPER]);
3431 }
3432 } else {
3433 upper = rb_entry(rb_node, struct btrfs_backref_node,
3434 rb_node);
3435 ASSERT(upper->checked);
3436 INIT_LIST_HEAD(&edge->list[UPPER]);
3437 if (!upper->owner)
3438 upper->owner = btrfs_header_owner(eb);
3439 }
3440 btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
3441
3442 if (rb_node) {
3443 btrfs_put_root(root);
3444 break;
3445 }
3446 lower = upper;
3447 upper = NULL;
3448 }
3449out:
3450 btrfs_release_path(path);
3451 return ret;
3452}
3453
3454/*
3455 * Add backref node @cur into @cache.
3456 *
 * NOTE: Even if the function returns 0, @cur is not yet cached as its upper
 * links aren't yet bi-directional; use btrfs_backref_finish_upper_links()
 * to finish the linkage.
3460 *
3461 * @trans: Transaction handle.
3462 * @path: Released path for indirect tree backref lookup
3463 * @iter: Released backref iter for extent tree search
3464 * @node_key: The first key of the tree block
3465 */
3466int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
3467 struct btrfs_backref_cache *cache,
3468 struct btrfs_path *path,
3469 struct btrfs_backref_iter *iter,
3470 struct btrfs_key *node_key,
3471 struct btrfs_backref_node *cur)
3472{
3473 struct btrfs_backref_edge *edge;
3474 struct btrfs_backref_node *exist;
3475 int ret;
3476
3477 ret = btrfs_backref_iter_start(iter, cur->bytenr);
3478 if (ret < 0)
3479 return ret;
3480 /*
3481 * We skip the first btrfs_tree_block_info, as we don't use the key
3482 * stored in it, but fetch it from the tree block
3483 */
3484 if (btrfs_backref_has_tree_block_info(iter)) {
3485 ret = btrfs_backref_iter_next(iter);
3486 if (ret < 0)
3487 goto out;
3488 /* No extra backref? This means the tree block is corrupted */
3489 if (ret > 0) {
3490 ret = -EUCLEAN;
3491 goto out;
3492 }
3493 }
3494 WARN_ON(cur->checked);
3495 if (!list_empty(&cur->upper)) {
3496 /*
3497 * The backref was added previously when processing backref of
3498 * type BTRFS_TREE_BLOCK_REF_KEY
3499 */
3500 ASSERT(list_is_singular(&cur->upper));
3501 edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
3502 list[LOWER]);
3503 ASSERT(list_empty(&edge->list[UPPER]));
3504 exist = edge->node[UPPER];
3505 /*
		 * Add the upper level block to the pending list if we need
		 * to check its backrefs
3508 */
3509 if (!exist->checked)
3510 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
3511 } else {
3512 exist = NULL;
3513 }
3514
3515 for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
3516 struct extent_buffer *eb;
3517 struct btrfs_key key;
3518 int type;
3519
3520 cond_resched();
3521 eb = iter->path->nodes[0];
3522
3523 key.objectid = iter->bytenr;
3524 if (btrfs_backref_iter_is_inline_ref(iter)) {
3525 struct btrfs_extent_inline_ref *iref;
3526
3527 /* Update key for inline backref */
3528 iref = (struct btrfs_extent_inline_ref *)
3529 ((unsigned long)iter->cur_ptr);
3530 type = btrfs_get_extent_inline_ref_type(eb, iref,
3531 BTRFS_REF_TYPE_BLOCK);
3532 if (type == BTRFS_REF_TYPE_INVALID) {
3533 ret = -EUCLEAN;
3534 goto out;
3535 }
3536 key.type = type;
3537 key.offset = btrfs_extent_inline_ref_offset(eb, iref);
3538 } else {
3539 key.type = iter->cur_key.type;
3540 key.offset = iter->cur_key.offset;
3541 }
3542
3543 /*
3544 * Parent node found and matches current inline ref, no need to
3545 * rebuild this node for this inline ref
3546 */
3547 if (exist &&
3548 ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
3549 exist->owner == key.offset) ||
3550 (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
3551 exist->bytenr == key.offset))) {
3552 exist = NULL;
3553 continue;
3554 }
3555
3556 /* SHARED_BLOCK_REF means key.offset is the parent bytenr */
3557 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
3558 ret = handle_direct_tree_backref(cache, &key, cur);
3559 if (ret < 0)
3560 goto out;
3561 } else if (key.type == BTRFS_TREE_BLOCK_REF_KEY) {
3562 /*
3563 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref
3564 * offset means the root objectid. We need to search
3565 * the tree to get its parent bytenr.
3566 */
3567 ret = handle_indirect_tree_backref(trans, cache, path,
3568 &key, node_key, cur);
3569 if (ret < 0)
3570 goto out;
3571 }
3572 /*
		 * Unrecognized tree backref items (if they can pass the
		 * tree-checker) are ignored.
3575 */
3576 }
3577 ret = 0;
3578 cur->checked = 1;
3579 WARN_ON(exist);
3580out:
3581 btrfs_backref_iter_release(iter);
3582 return ret;
3583}
3584
3585/*
3586 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
3587 */
3588int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
3589 struct btrfs_backref_node *start)
3590{
3591 struct list_head *useless_node = &cache->useless_node;
3592 struct btrfs_backref_edge *edge;
3593 struct rb_node *rb_node;
3594 LIST_HEAD(pending_edge);
3595
3596 ASSERT(start->checked);
3597
3598 /* Insert this node to cache if it's not COW-only */
3599 if (!start->cowonly) {
3600 rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
3601 &start->rb_node);
3602 if (rb_node)
3603 btrfs_backref_panic(cache->fs_info, start->bytenr,
3604 -EEXIST);
3605 list_add_tail(&start->lower, &cache->leaves);
3606 }
3607
3608 /*
3609 * Use breadth first search to iterate all related edges.
3610 *
3611 * The starting points are all the edges of this node
3612 */
3613 list_for_each_entry(edge, &start->upper, list[LOWER])
3614 list_add_tail(&edge->list[UPPER], &pending_edge);
3615
3616 while (!list_empty(&pending_edge)) {
3617 struct btrfs_backref_node *upper;
3618 struct btrfs_backref_node *lower;
3619
3620 edge = list_first_entry(&pending_edge,
3621 struct btrfs_backref_edge, list[UPPER]);
3622 list_del_init(&edge->list[UPPER]);
3623 upper = edge->node[UPPER];
3624 lower = edge->node[LOWER];
3625
3626 /* Parent is detached, no need to keep any edges */
3627 if (upper->detached) {
3628 list_del(&edge->list[LOWER]);
3629 btrfs_backref_free_edge(cache, edge);
3630
3631 /* Lower node is orphan, queue for cleanup */
3632 if (list_empty(&lower->upper))
3633 list_add(&lower->list, useless_node);
3634 continue;
3635 }
3636
3637 /*
3638 * All new nodes added in current build_backref_tree() haven't
3639 * been linked to the cache rb tree.
3640 * So if we have upper->rb_node populated, this means a cache
3641 * hit. We only need to link the edge, as @upper and all its
3642 * parents have already been linked.
3643 */
3644 if (!RB_EMPTY_NODE(&upper->rb_node)) {
3645 if (upper->lowest) {
3646 list_del_init(&upper->lower);
3647 upper->lowest = 0;
3648 }
3649
3650 list_add_tail(&edge->list[UPPER], &upper->lower);
3651 continue;
3652 }
3653
3654 /* Sanity check, we shouldn't have any unchecked nodes */
3655 if (!upper->checked) {
3656 ASSERT(0);
3657 return -EUCLEAN;
3658 }
3659
3660 /* Sanity check, COW-only node has non-COW-only parent */
3661 if (start->cowonly != upper->cowonly) {
3662 ASSERT(0);
3663 return -EUCLEAN;
3664 }
3665
3666 /* Only cache non-COW-only (subvolume trees) tree blocks */
3667 if (!upper->cowonly) {
3668 rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3669 &upper->rb_node);
3670 if (rb_node) {
3671 btrfs_backref_panic(cache->fs_info,
3672 upper->bytenr, -EEXIST);
3673 return -EUCLEAN;
3674 }
3675 }
3676
3677 list_add_tail(&edge->list[UPPER], &upper->lower);
3678
3679 /*
3680 * Also queue all the parent edges of this uncached node
3681 * to finish the upper linkage
3682 */
3683 list_for_each_entry(edge, &upper->upper, list[LOWER])
3684 list_add_tail(&edge->list[UPPER], &pending_edge);
3685 }
3686 return 0;
3687}
3688
3689void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
3690 struct btrfs_backref_node *node)
3691{
3692 struct btrfs_backref_node *lower;
3693 struct btrfs_backref_node *upper;
3694 struct btrfs_backref_edge *edge;
3695
3696 while (!list_empty(&cache->useless_node)) {
3697 lower = list_first_entry(&cache->useless_node,
3698 struct btrfs_backref_node, list);
3699 list_del_init(&lower->list);
3700 }
3701 while (!list_empty(&cache->pending_edge)) {
3702 edge = list_first_entry(&cache->pending_edge,
3703 struct btrfs_backref_edge, list[UPPER]);
3704 list_del(&edge->list[UPPER]);
3705 list_del(&edge->list[LOWER]);
3706 lower = edge->node[LOWER];
3707 upper = edge->node[UPPER];
3708 btrfs_backref_free_edge(cache, edge);
3709
3710 /*
3711 * Lower is no longer linked to any upper backref nodes and
3712 * isn't in the cache, we can free it ourselves.
3713 */
3714 if (list_empty(&lower->upper) &&
3715 RB_EMPTY_NODE(&lower->rb_node))
3716 list_add(&lower->list, &cache->useless_node);
3717
3718 if (!RB_EMPTY_NODE(&upper->rb_node))
3719 continue;
3720
3721 /* Add this guy's upper edges to the list to process */
3722 list_for_each_entry(edge, &upper->upper, list[LOWER])
3723 list_add_tail(&edge->list[UPPER],
3724 &cache->pending_edge);
3725 if (list_empty(&upper->upper))
3726 list_add(&upper->list, &cache->useless_node);
3727 }
3728
3729 while (!list_empty(&cache->useless_node)) {
3730 lower = list_first_entry(&cache->useless_node,
3731 struct btrfs_backref_node, list);
3732 list_del_init(&lower->list);
3733 if (lower == node)
3734 node = NULL;
3735 btrfs_backref_drop_node(cache, lower);
3736 }
3737
3738 btrfs_backref_cleanup_node(cache, node);
3739 ASSERT(list_empty(&cache->useless_node) &&
3740 list_empty(&cache->pending_edge));
3741}
148
149int __init btrfs_prelim_ref_init(void)
150{
151 btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
152 sizeof(struct prelim_ref),
153 0,
154 SLAB_MEM_SPREAD,
155 NULL);
156 if (!btrfs_prelim_ref_cache)
157 return -ENOMEM;
158 return 0;
159}
160
161void __cold btrfs_prelim_ref_exit(void)
162{
163 kmem_cache_destroy(btrfs_prelim_ref_cache);
164}
165
166static void free_pref(struct prelim_ref *ref)
167{
168 kmem_cache_free(btrfs_prelim_ref_cache, ref);
169}
170
171/*
172 * Return 0 when both refs are for the same block (and can be merged).
173 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
174 * indicates a 'higher' block.
175 */
176static int prelim_ref_compare(struct prelim_ref *ref1,
177 struct prelim_ref *ref2)
178{
179 if (ref1->level < ref2->level)
180 return -1;
181 if (ref1->level > ref2->level)
182 return 1;
183 if (ref1->root_id < ref2->root_id)
184 return -1;
185 if (ref1->root_id > ref2->root_id)
186 return 1;
187 if (ref1->key_for_search.type < ref2->key_for_search.type)
188 return -1;
189 if (ref1->key_for_search.type > ref2->key_for_search.type)
190 return 1;
191 if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
192 return -1;
193 if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
194 return 1;
195 if (ref1->key_for_search.offset < ref2->key_for_search.offset)
196 return -1;
197 if (ref1->key_for_search.offset > ref2->key_for_search.offset)
198 return 1;
199 if (ref1->parent < ref2->parent)
200 return -1;
201 if (ref1->parent > ref2->parent)
202 return 1;
203
204 return 0;
205}
206
207static void update_share_count(struct share_check *sc, int oldcount,
208 int newcount)
209{
210 if ((!sc) || (oldcount == 0 && newcount < 1))
211 return;
212
213 if (oldcount > 0 && newcount < 1)
214 sc->share_count--;
215 else if (oldcount < 1 && newcount > 0)
216 sc->share_count++;
217}
218
219/*
220 * Add @newref to the @root rbtree, merging identical refs.
221 *
222 * Callers should assume that newref has been freed after calling.
223 */
224static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
225 struct preftree *preftree,
226 struct prelim_ref *newref,
227 struct share_check *sc)
228{
229 struct rb_root_cached *root;
230 struct rb_node **p;
231 struct rb_node *parent = NULL;
232 struct prelim_ref *ref;
233 int result;
234 bool leftmost = true;
235
236 root = &preftree->root;
237 p = &root->rb_root.rb_node;
238
239 while (*p) {
240 parent = *p;
241 ref = rb_entry(parent, struct prelim_ref, rbnode);
242 result = prelim_ref_compare(ref, newref);
243 if (result < 0) {
244 p = &(*p)->rb_left;
245 } else if (result > 0) {
246 p = &(*p)->rb_right;
247 leftmost = false;
248 } else {
249 /* Identical refs, merge them and free @newref */
250 struct extent_inode_elem *eie = ref->inode_list;
251
252 while (eie && eie->next)
253 eie = eie->next;
254
255 if (!eie)
256 ref->inode_list = newref->inode_list;
257 else
258 eie->next = newref->inode_list;
259 trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
260 preftree->count);
261 /*
262 * A delayed ref can have newref->count < 0.
263 * The ref->count is updated to follow any
264 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
265 */
266 update_share_count(sc, ref->count,
267 ref->count + newref->count);
268 ref->count += newref->count;
269 free_pref(newref);
270 return;
271 }
272 }
273
274 update_share_count(sc, 0, newref->count);
275 preftree->count++;
276 trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
277 rb_link_node(&newref->rbnode, parent, p);
278 rb_insert_color_cached(&newref->rbnode, root, leftmost);
279}
280
281/*
282 * Release the entire tree. We don't care about internal consistency so
283 * just free everything and then reset the tree root.
284 */
285static void prelim_release(struct preftree *preftree)
286{
287 struct prelim_ref *ref, *next_ref;
288
289 rbtree_postorder_for_each_entry_safe(ref, next_ref,
290 &preftree->root.rb_root, rbnode)
291 free_pref(ref);
292
293 preftree->root = RB_ROOT_CACHED;
294 preftree->count = 0;
295}
296
297/*
298 * the rules for all callers of this function are:
299 * - obtaining the parent is the goal
300 * - if you add a key, you must know that it is a correct key
301 * - if you cannot add the parent or a correct key, then we will look into the
302 * block later to set a correct key
303 *
304 * delayed refs
305 * ============
306 * backref type | shared | indirect | shared | indirect
307 * information | tree | tree | data | data
308 * --------------------+--------+----------+--------+----------
309 * parent logical | y | - | - | -
310 * key to resolve | - | y | y | y
311 * tree block logical | - | - | - | -
312 * root for resolving | y | y | y | y
313 *
314 * - column 1: we've the parent -> done
315 * - column 2, 3, 4: we use the key to find the parent
316 *
317 * on disk refs (inline or keyed)
318 * ==============================
319 * backref type | shared | indirect | shared | indirect
320 * information | tree | tree | data | data
321 * --------------------+--------+----------+--------+----------
322 * parent logical | y | - | y | -
323 * key to resolve | - | - | - | y
324 * tree block logical | y | y | y | y
325 * root for resolving | - | y | y | y
326 *
327 * - column 1, 3: we've the parent -> done
328 * - column 2: we take the first key from the block to find the parent
329 * (see add_missing_keys)
330 * - column 4: we use the key to find the parent
331 *
332 * additional information that's available but not required to find the parent
333 * block might help in merging entries to gain some speed.
334 */
335static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
336 struct preftree *preftree, u64 root_id,
337 const struct btrfs_key *key, int level, u64 parent,
338 u64 wanted_disk_byte, int count,
339 struct share_check *sc, gfp_t gfp_mask)
340{
341 struct prelim_ref *ref;
342
343 if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
344 return 0;
345
346 ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
347 if (!ref)
348 return -ENOMEM;
349
350 ref->root_id = root_id;
351 if (key)
352 ref->key_for_search = *key;
353 else
354 memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
355
356 ref->inode_list = NULL;
357 ref->level = level;
358 ref->count = count;
359 ref->parent = parent;
360 ref->wanted_disk_byte = wanted_disk_byte;
361 prelim_ref_insert(fs_info, preftree, ref, sc);
362 return extent_is_shared(sc);
363}
364
365/* direct refs use root == 0, key == NULL */
366static int add_direct_ref(const struct btrfs_fs_info *fs_info,
367 struct preftrees *preftrees, int level, u64 parent,
368 u64 wanted_disk_byte, int count,
369 struct share_check *sc, gfp_t gfp_mask)
370{
371 return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
372 parent, wanted_disk_byte, count, sc, gfp_mask);
373}
374
375/* indirect refs use parent == 0 */
376static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
377 struct preftrees *preftrees, u64 root_id,
378 const struct btrfs_key *key, int level,
379 u64 wanted_disk_byte, int count,
380 struct share_check *sc, gfp_t gfp_mask)
381{
382 struct preftree *tree = &preftrees->indirect;
383
384 if (!key)
385 tree = &preftrees->indirect_missing_keys;
386 return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
387 wanted_disk_byte, count, sc, gfp_mask);
388}
389
390static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
391{
392 struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
393 struct rb_node *parent = NULL;
394 struct prelim_ref *ref = NULL;
395 struct prelim_ref target = {};
396 int result;
397
398 target.parent = bytenr;
399
400 while (*p) {
401 parent = *p;
402 ref = rb_entry(parent, struct prelim_ref, rbnode);
403 result = prelim_ref_compare(ref, &target);
404
405 if (result < 0)
406 p = &(*p)->rb_left;
407 else if (result > 0)
408 p = &(*p)->rb_right;
409 else
410 return 1;
411 }
412 return 0;
413}
414
415static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
416 struct ulist *parents,
417 struct preftrees *preftrees, struct prelim_ref *ref,
418 int level, u64 time_seq, const u64 *extent_item_pos,
419 bool ignore_offset)
420{
421 int ret = 0;
422 int slot;
423 struct extent_buffer *eb;
424 struct btrfs_key key;
425 struct btrfs_key *key_for_search = &ref->key_for_search;
426 struct btrfs_file_extent_item *fi;
427 struct extent_inode_elem *eie = NULL, *old = NULL;
428 u64 disk_byte;
429 u64 wanted_disk_byte = ref->wanted_disk_byte;
430 u64 count = 0;
431 u64 data_offset;
432
433 if (level != 0) {
434 eb = path->nodes[level];
435 ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
436 if (ret < 0)
437 return ret;
438 return 0;
439 }
440
441 /*
442 * 1. We normally enter this function with the path already pointing to
443 * the first item to check. But sometimes, we may enter it with
444 * slot == nritems.
445 * 2. We are searching for normal backref but bytenr of this leaf
446 * matches shared data backref
447 * 3. The leaf owner is not equal to the root we are searching
448 *
449 * For these cases, go to the next leaf before we continue.
450 */
451 eb = path->nodes[0];
452 if (path->slots[0] >= btrfs_header_nritems(eb) ||
453 is_shared_data_backref(preftrees, eb->start) ||
454 ref->root_id != btrfs_header_owner(eb)) {
455 if (time_seq == SEQ_LAST)
456 ret = btrfs_next_leaf(root, path);
457 else
458 ret = btrfs_next_old_leaf(root, path, time_seq);
459 }
460
461 while (!ret && count < ref->count) {
462 eb = path->nodes[0];
463 slot = path->slots[0];
464
465 btrfs_item_key_to_cpu(eb, &key, slot);
466
467 if (key.objectid != key_for_search->objectid ||
468 key.type != BTRFS_EXTENT_DATA_KEY)
469 break;
470
471 /*
472 * We are searching for normal backref but bytenr of this leaf
473 * matches shared data backref, OR
474 * the leaf owner is not equal to the root we are searching for
475 */
476 if (slot == 0 &&
477 (is_shared_data_backref(preftrees, eb->start) ||
478 ref->root_id != btrfs_header_owner(eb))) {
479 if (time_seq == SEQ_LAST)
480 ret = btrfs_next_leaf(root, path);
481 else
482 ret = btrfs_next_old_leaf(root, path, time_seq);
483 continue;
484 }
485 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
486 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
487 data_offset = btrfs_file_extent_offset(eb, fi);
488
489 if (disk_byte == wanted_disk_byte) {
490 eie = NULL;
491 old = NULL;
492 if (ref->key_for_search.offset == key.offset - data_offset)
493 count++;
494 else
495 goto next;
496 if (extent_item_pos) {
497 ret = check_extent_in_eb(&key, eb, fi,
498 *extent_item_pos,
499 &eie, ignore_offset);
500 if (ret < 0)
501 break;
502 }
503 if (ret > 0)
504 goto next;
505 ret = ulist_add_merge_ptr(parents, eb->start,
506 eie, (void **)&old, GFP_NOFS);
507 if (ret < 0)
508 break;
509 if (!ret && extent_item_pos) {
510 while (old->next)
511 old = old->next;
512 old->next = eie;
513 }
514 eie = NULL;
515 }
516next:
517 if (time_seq == SEQ_LAST)
518 ret = btrfs_next_item(root, path);
519 else
520 ret = btrfs_next_old_item(root, path, time_seq);
521 }
522
523 if (ret > 0)
524 ret = 0;
525 else if (ret < 0)
526 free_inode_elem_list(eie);
527 return ret;
528}
529
530/*
531 * resolve an indirect backref in the form (root_id, key, level)
532 * to a logical address
533 */
534static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
535 struct btrfs_path *path, u64 time_seq,
536 struct preftrees *preftrees,
537 struct prelim_ref *ref, struct ulist *parents,
538 const u64 *extent_item_pos, bool ignore_offset)
539{
540 struct btrfs_root *root;
541 struct extent_buffer *eb;
542 int ret = 0;
543 int root_level;
544 int level = ref->level;
545 struct btrfs_key search_key = ref->key_for_search;
546
547 root = btrfs_get_fs_root(fs_info, ref->root_id, false);
548 if (IS_ERR(root)) {
549 ret = PTR_ERR(root);
550 goto out_free;
551 }
552
553 if (!path->search_commit_root &&
554 test_bit(BTRFS_ROOT_DELETING, &root->state)) {
555 ret = -ENOENT;
556 goto out;
557 }
558
559 if (btrfs_is_testing(fs_info)) {
560 ret = -ENOENT;
561 goto out;
562 }
563
564 if (path->search_commit_root)
565 root_level = btrfs_header_level(root->commit_root);
566 else if (time_seq == SEQ_LAST)
567 root_level = btrfs_header_level(root->node);
568 else
569 root_level = btrfs_old_root_level(root, time_seq);
570
571 if (root_level + 1 == level)
572 goto out;
573
574 /*
575 * We can often find data backrefs with an offset that is too large
576 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
577 * subtracting a file's offset with the data offset of its
578 * corresponding extent data item. This can happen for example in the
579 * clone ioctl.
580 *
581 * So if we detect such case we set the search key's offset to zero to
582 * make sure we will find the matching file extent item at
583 * add_all_parents(), otherwise we will miss it because the offset
584 * taken form the backref is much larger then the offset of the file
585 * extent item. This can make us scan a very large number of file
586 * extent items, but at least it will not make us miss any.
587 *
588 * This is an ugly workaround for a behaviour that should have never
589 * existed, but it does and a fix for the clone ioctl would touch a lot
590 * of places, cause backwards incompatibility and would not fix the
591 * problem for extents cloned with older kernels.
592 */
593 if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
594 search_key.offset >= LLONG_MAX)
595 search_key.offset = 0;
596 path->lowest_level = level;
597 if (time_seq == SEQ_LAST)
598 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
599 else
600 ret = btrfs_search_old_slot(root, &search_key, path, time_seq);
601
602 btrfs_debug(fs_info,
603 "search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
604 ref->root_id, level, ref->count, ret,
605 ref->key_for_search.objectid, ref->key_for_search.type,
606 ref->key_for_search.offset);
607 if (ret < 0)
608 goto out;
609
610 eb = path->nodes[level];
611 while (!eb) {
612 if (WARN_ON(!level)) {
613 ret = 1;
614 goto out;
615 }
616 level--;
617 eb = path->nodes[level];
618 }
619
620 ret = add_all_parents(root, path, parents, preftrees, ref, level,
621 time_seq, extent_item_pos, ignore_offset);
622out:
623 btrfs_put_root(root);
624out_free:
625 path->lowest_level = 0;
626 btrfs_release_path(path);
627 return ret;
628}
629
630static struct extent_inode_elem *
631unode_aux_to_inode_list(struct ulist_node *node)
632{
633 if (!node)
634 return NULL;
635 return (struct extent_inode_elem *)(uintptr_t)node->aux;
636}
637
638/*
639 * We maintain three separate rbtrees: one for direct refs, one for
640 * indirect refs which have a key, and one for indirect refs which do not
641 * have a key. Each tree does merge on insertion.
642 *
643 * Once all of the references are located, we iterate over the tree of
644 * indirect refs with missing keys. An appropriate key is located and
645 * the ref is moved onto the tree for indirect refs. After all missing
646 * keys are thus located, we iterate over the indirect ref tree, resolve
647 * each reference, and then insert the resolved reference onto the
648 * direct tree (merging there too).
649 *
650 * New backrefs (i.e., for parent nodes) are added to the appropriate
651 * rbtree as they are encountered. The new backrefs are subsequently
652 * resolved as above.
653 */
654static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
655 struct btrfs_path *path, u64 time_seq,
656 struct preftrees *preftrees,
657 const u64 *extent_item_pos,
658 struct share_check *sc, bool ignore_offset)
659{
660 int err;
661 int ret = 0;
662 struct ulist *parents;
663 struct ulist_node *node;
664 struct ulist_iterator uiter;
665 struct rb_node *rnode;
666
667 parents = ulist_alloc(GFP_NOFS);
668 if (!parents)
669 return -ENOMEM;
670
671 /*
672 * We could trade memory usage for performance here by iterating
673 * the tree, allocating new refs for each insertion, and then
674 * freeing the entire indirect tree when we're done. In some test
675 * cases, the tree can grow quite large (~200k objects).
676 */
677 while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
678 struct prelim_ref *ref;
679
680 ref = rb_entry(rnode, struct prelim_ref, rbnode);
681 if (WARN(ref->parent,
682 "BUG: direct ref found in indirect tree")) {
683 ret = -EINVAL;
684 goto out;
685 }
686
687 rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
688 preftrees->indirect.count--;
689
690 if (ref->count == 0) {
691 free_pref(ref);
692 continue;
693 }
694
695 if (sc && sc->root_objectid &&
696 ref->root_id != sc->root_objectid) {
697 free_pref(ref);
698 ret = BACKREF_FOUND_SHARED;
699 goto out;
700 }
701 err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
702 ref, parents, extent_item_pos,
703 ignore_offset);
704 /*
705 * we can only tolerate ENOENT,otherwise,we should catch error
706 * and return directly.
707 */
708 if (err == -ENOENT) {
709 prelim_ref_insert(fs_info, &preftrees->direct, ref,
710 NULL);
711 continue;
712 } else if (err) {
713 free_pref(ref);
714 ret = err;
715 goto out;
716 }
717
718 /* we put the first parent into the ref at hand */
719 ULIST_ITER_INIT(&uiter);
720 node = ulist_next(parents, &uiter);
721 ref->parent = node ? node->val : 0;
722 ref->inode_list = unode_aux_to_inode_list(node);
723
724 /* Add a prelim_ref(s) for any other parent(s). */
725 while ((node = ulist_next(parents, &uiter))) {
726 struct prelim_ref *new_ref;
727
728 new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
729 GFP_NOFS);
730 if (!new_ref) {
731 free_pref(ref);
732 ret = -ENOMEM;
733 goto out;
734 }
735 memcpy(new_ref, ref, sizeof(*ref));
736 new_ref->parent = node->val;
737 new_ref->inode_list = unode_aux_to_inode_list(node);
738 prelim_ref_insert(fs_info, &preftrees->direct,
739 new_ref, NULL);
740 }
741
742 /*
743 * Now it's a direct ref, put it in the direct tree. We must
744 * do this last because the ref could be merged/freed here.
745 */
746 prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);
747
748 ulist_reinit(parents);
749 cond_resched();
750 }
751out:
752 ulist_free(parents);
753 return ret;
754}
755
756/*
757 * read tree blocks and add keys where required.
758 */
759static int add_missing_keys(struct btrfs_fs_info *fs_info,
760 struct preftrees *preftrees, bool lock)
761{
762 struct prelim_ref *ref;
763 struct extent_buffer *eb;
764 struct preftree *tree = &preftrees->indirect_missing_keys;
765 struct rb_node *node;
766
767 while ((node = rb_first_cached(&tree->root))) {
768 ref = rb_entry(node, struct prelim_ref, rbnode);
769 rb_erase_cached(node, &tree->root);
770
771 BUG_ON(ref->parent); /* should not be a direct ref */
772 BUG_ON(ref->key_for_search.type);
773 BUG_ON(!ref->wanted_disk_byte);
774
775 eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0,
776 ref->level - 1, NULL);
777 if (IS_ERR(eb)) {
778 free_pref(ref);
779 return PTR_ERR(eb);
780 } else if (!extent_buffer_uptodate(eb)) {
781 free_pref(ref);
782 free_extent_buffer(eb);
783 return -EIO;
784 }
785 if (lock)
786 btrfs_tree_read_lock(eb);
787 if (btrfs_header_level(eb) == 0)
788 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
789 else
790 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
791 if (lock)
792 btrfs_tree_read_unlock(eb);
793 free_extent_buffer(eb);
794 prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
795 cond_resched();
796 }
797 return 0;
798}
799
800/*
801 * add all currently queued delayed refs from this head whose seq nr is
802 * smaller or equal that seq to the list
803 */
804static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
805 struct btrfs_delayed_ref_head *head, u64 seq,
806 struct preftrees *preftrees, struct share_check *sc)
807{
808 struct btrfs_delayed_ref_node *node;
809 struct btrfs_delayed_extent_op *extent_op = head->extent_op;
810 struct btrfs_key key;
811 struct btrfs_key tmp_op_key;
812 struct rb_node *n;
813 int count;
814 int ret = 0;
815
816 if (extent_op && extent_op->update_key)
817 btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
818
819 spin_lock(&head->lock);
820 for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
821 node = rb_entry(n, struct btrfs_delayed_ref_node,
822 ref_node);
823 if (node->seq > seq)
824 continue;
825
826 switch (node->action) {
827 case BTRFS_ADD_DELAYED_EXTENT:
828 case BTRFS_UPDATE_DELAYED_HEAD:
829 WARN_ON(1);
830 continue;
831 case BTRFS_ADD_DELAYED_REF:
832 count = node->ref_mod;
833 break;
834 case BTRFS_DROP_DELAYED_REF:
835 count = node->ref_mod * -1;
836 break;
837 default:
838 BUG();
839 }
840 switch (node->type) {
841 case BTRFS_TREE_BLOCK_REF_KEY: {
842 /* NORMAL INDIRECT METADATA backref */
843 struct btrfs_delayed_tree_ref *ref;
844
845 ref = btrfs_delayed_node_to_tree_ref(node);
846 ret = add_indirect_ref(fs_info, preftrees, ref->root,
847 &tmp_op_key, ref->level + 1,
848 node->bytenr, count, sc,
849 GFP_ATOMIC);
850 break;
851 }
852 case BTRFS_SHARED_BLOCK_REF_KEY: {
853 /* SHARED DIRECT METADATA backref */
854 struct btrfs_delayed_tree_ref *ref;
855
856 ref = btrfs_delayed_node_to_tree_ref(node);
857
858 ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
859 ref->parent, node->bytenr, count,
860 sc, GFP_ATOMIC);
861 break;
862 }
863 case BTRFS_EXTENT_DATA_REF_KEY: {
864 /* NORMAL INDIRECT DATA backref */
865 struct btrfs_delayed_data_ref *ref;
866 ref = btrfs_delayed_node_to_data_ref(node);
867
868 key.objectid = ref->objectid;
869 key.type = BTRFS_EXTENT_DATA_KEY;
870 key.offset = ref->offset;
871
872 /*
873 * Found a inum that doesn't match our known inum, we
874 * know it's shared.
875 */
876 if (sc && sc->inum && ref->objectid != sc->inum) {
877 ret = BACKREF_FOUND_SHARED;
878 goto out;
879 }
880
881 ret = add_indirect_ref(fs_info, preftrees, ref->root,
882 &key, 0, node->bytenr, count, sc,
883 GFP_ATOMIC);
884 break;
885 }
886 case BTRFS_SHARED_DATA_REF_KEY: {
887 /* SHARED DIRECT FULL backref */
888 struct btrfs_delayed_data_ref *ref;
889
890 ref = btrfs_delayed_node_to_data_ref(node);
891
892 ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
893 node->bytenr, count, sc,
894 GFP_ATOMIC);
895 break;
896 }
897 default:
898 WARN_ON(1);
899 }
900 /*
901 * We must ignore BACKREF_FOUND_SHARED until all delayed
902 * refs have been checked.
903 */
904 if (ret && (ret != BACKREF_FOUND_SHARED))
905 break;
906 }
907 if (!ret)
908 ret = extent_is_shared(sc);
909out:
910 spin_unlock(&head->lock);
911 return ret;
912}
913
914/*
915 * add all inline backrefs for bytenr to the list
916 *
917 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
918 */
919static int add_inline_refs(const struct btrfs_fs_info *fs_info,
920 struct btrfs_path *path, u64 bytenr,
921 int *info_level, struct preftrees *preftrees,
922 struct share_check *sc)
923{
924 int ret = 0;
925 int slot;
926 struct extent_buffer *leaf;
927 struct btrfs_key key;
928 struct btrfs_key found_key;
929 unsigned long ptr;
930 unsigned long end;
931 struct btrfs_extent_item *ei;
932 u64 flags;
933 u64 item_size;
934
935 /*
936 * enumerate all inline refs
937 */
938 leaf = path->nodes[0];
939 slot = path->slots[0];
940
941 item_size = btrfs_item_size_nr(leaf, slot);
942 BUG_ON(item_size < sizeof(*ei));
943
944 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
945 flags = btrfs_extent_flags(leaf, ei);
946 btrfs_item_key_to_cpu(leaf, &found_key, slot);
947
948 ptr = (unsigned long)(ei + 1);
949 end = (unsigned long)ei + item_size;
950
951 if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
952 flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
953 struct btrfs_tree_block_info *info;
954
955 info = (struct btrfs_tree_block_info *)ptr;
956 *info_level = btrfs_tree_block_level(leaf, info);
957 ptr += sizeof(struct btrfs_tree_block_info);
958 BUG_ON(ptr > end);
959 } else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
960 *info_level = found_key.offset;
961 } else {
962 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
963 }
964
965 while (ptr < end) {
966 struct btrfs_extent_inline_ref *iref;
967 u64 offset;
968 int type;
969
970 iref = (struct btrfs_extent_inline_ref *)ptr;
971 type = btrfs_get_extent_inline_ref_type(leaf, iref,
972 BTRFS_REF_TYPE_ANY);
973 if (type == BTRFS_REF_TYPE_INVALID)
974 return -EUCLEAN;
975
976 offset = btrfs_extent_inline_ref_offset(leaf, iref);
977
978 switch (type) {
979 case BTRFS_SHARED_BLOCK_REF_KEY:
980 ret = add_direct_ref(fs_info, preftrees,
981 *info_level + 1, offset,
982 bytenr, 1, NULL, GFP_NOFS);
983 break;
984 case BTRFS_SHARED_DATA_REF_KEY: {
985 struct btrfs_shared_data_ref *sdref;
986 int count;
987
988 sdref = (struct btrfs_shared_data_ref *)(iref + 1);
989 count = btrfs_shared_data_ref_count(leaf, sdref);
990
991 ret = add_direct_ref(fs_info, preftrees, 0, offset,
992 bytenr, count, sc, GFP_NOFS);
993 break;
994 }
995 case BTRFS_TREE_BLOCK_REF_KEY:
996 ret = add_indirect_ref(fs_info, preftrees, offset,
997 NULL, *info_level + 1,
998 bytenr, 1, NULL, GFP_NOFS);
999 break;
1000 case BTRFS_EXTENT_DATA_REF_KEY: {
1001 struct btrfs_extent_data_ref *dref;
1002 int count;
1003 u64 root;
1004
1005 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1006 count = btrfs_extent_data_ref_count(leaf, dref);
1007 key.objectid = btrfs_extent_data_ref_objectid(leaf,
1008 dref);
1009 key.type = BTRFS_EXTENT_DATA_KEY;
1010 key.offset = btrfs_extent_data_ref_offset(leaf, dref);
1011
1012 if (sc && sc->inum && key.objectid != sc->inum) {
1013 ret = BACKREF_FOUND_SHARED;
1014 break;
1015 }
1016
1017 root = btrfs_extent_data_ref_root(leaf, dref);
1018
1019 ret = add_indirect_ref(fs_info, preftrees, root,
1020 &key, 0, bytenr, count,
1021 sc, GFP_NOFS);
1022 break;
1023 }
1024 default:
1025 WARN_ON(1);
1026 }
1027 if (ret)
1028 return ret;
1029 ptr += btrfs_extent_inline_ref_size(type);
1030 }
1031
1032 return 0;
1033}
1034
1035/*
1036 * add all non-inline backrefs for bytenr to the list
1037 *
1038 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
1039 */
1040static int add_keyed_refs(struct btrfs_fs_info *fs_info,
1041 struct btrfs_path *path, u64 bytenr,
1042 int info_level, struct preftrees *preftrees,
1043 struct share_check *sc)
1044{
1045 struct btrfs_root *extent_root = fs_info->extent_root;
1046 int ret;
1047 int slot;
1048 struct extent_buffer *leaf;
1049 struct btrfs_key key;
1050
1051 while (1) {
1052 ret = btrfs_next_item(extent_root, path);
1053 if (ret < 0)
1054 break;
1055 if (ret) {
1056 ret = 0;
1057 break;
1058 }
1059
1060 slot = path->slots[0];
1061 leaf = path->nodes[0];
1062 btrfs_item_key_to_cpu(leaf, &key, slot);
1063
1064 if (key.objectid != bytenr)
1065 break;
1066 if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
1067 continue;
1068 if (key.type > BTRFS_SHARED_DATA_REF_KEY)
1069 break;
1070
1071 switch (key.type) {
1072 case BTRFS_SHARED_BLOCK_REF_KEY:
1073 /* SHARED DIRECT METADATA backref */
1074 ret = add_direct_ref(fs_info, preftrees,
1075 info_level + 1, key.offset,
1076 bytenr, 1, NULL, GFP_NOFS);
1077 break;
1078 case BTRFS_SHARED_DATA_REF_KEY: {
1079 /* SHARED DIRECT FULL backref */
1080 struct btrfs_shared_data_ref *sdref;
1081 int count;
1082
1083 sdref = btrfs_item_ptr(leaf, slot,
1084 struct btrfs_shared_data_ref);
1085 count = btrfs_shared_data_ref_count(leaf, sdref);
1086 ret = add_direct_ref(fs_info, preftrees, 0,
1087 key.offset, bytenr, count,
1088 sc, GFP_NOFS);
1089 break;
1090 }
1091 case BTRFS_TREE_BLOCK_REF_KEY:
1092 /* NORMAL INDIRECT METADATA backref */
1093 ret = add_indirect_ref(fs_info, preftrees, key.offset,
1094 NULL, info_level + 1, bytenr,
1095 1, NULL, GFP_NOFS);
1096 break;
1097 case BTRFS_EXTENT_DATA_REF_KEY: {
1098 /* NORMAL INDIRECT DATA backref */
1099 struct btrfs_extent_data_ref *dref;
1100 int count;
1101 u64 root;
1102
1103 dref = btrfs_item_ptr(leaf, slot,
1104 struct btrfs_extent_data_ref);
1105 count = btrfs_extent_data_ref_count(leaf, dref);
1106 key.objectid = btrfs_extent_data_ref_objectid(leaf,
1107 dref);
1108 key.type = BTRFS_EXTENT_DATA_KEY;
1109 key.offset = btrfs_extent_data_ref_offset(leaf, dref);
1110
1111 if (sc && sc->inum && key.objectid != sc->inum) {
1112 ret = BACKREF_FOUND_SHARED;
1113 break;
1114 }
1115
1116 root = btrfs_extent_data_ref_root(leaf, dref);
1117 ret = add_indirect_ref(fs_info, preftrees, root,
1118 &key, 0, bytenr, count,
1119 sc, GFP_NOFS);
1120 break;
1121 }
1122 default:
1123 WARN_ON(1);
1124 }
1125 if (ret)
1126 return ret;
1127
1128 }
1129
1130 return ret;
1131}
1132
1133/*
1134 * this adds all existing backrefs (inline backrefs, backrefs and delayed
1135 * refs) for the given bytenr to the refs list, merges duplicates and resolves
1136 * indirect refs to their parent bytenr.
1137 * When roots are found, they're added to the roots list
1138 *
1139 * If time_seq is set to SEQ_LAST, it will not search delayed_refs, and behave
1140 * much like trans == NULL case, the difference only lies in it will not
1141 * commit root.
1142 * The special case is for qgroup to search roots in commit_transaction().
1143 *
1144 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
1145 * shared extent is detected.
1146 *
1147 * Otherwise this returns 0 for success and <0 for an error.
1148 *
1149 * If ignore_offset is set to false, only extent refs whose offsets match
1150 * extent_item_pos are returned. If true, every extent ref is returned
1151 * and extent_item_pos is ignored.
1152 *
1153 * FIXME some caching might speed things up
1154 */
1155static int find_parent_nodes(struct btrfs_trans_handle *trans,
1156 struct btrfs_fs_info *fs_info, u64 bytenr,
1157 u64 time_seq, struct ulist *refs,
1158 struct ulist *roots, const u64 *extent_item_pos,
1159 struct share_check *sc, bool ignore_offset)
1160{
1161 struct btrfs_key key;
1162 struct btrfs_path *path;
1163 struct btrfs_delayed_ref_root *delayed_refs = NULL;
1164 struct btrfs_delayed_ref_head *head;
1165 int info_level = 0;
1166 int ret;
1167 struct prelim_ref *ref;
1168 struct rb_node *node;
1169 struct extent_inode_elem *eie = NULL;
1170 struct preftrees preftrees = {
1171 .direct = PREFTREE_INIT,
1172 .indirect = PREFTREE_INIT,
1173 .indirect_missing_keys = PREFTREE_INIT
1174 };
1175
1176 key.objectid = bytenr;
1177 key.offset = (u64)-1;
1178 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1179 key.type = BTRFS_METADATA_ITEM_KEY;
1180 else
1181 key.type = BTRFS_EXTENT_ITEM_KEY;
1182
1183 path = btrfs_alloc_path();
1184 if (!path)
1185 return -ENOMEM;
1186 if (!trans) {
1187 path->search_commit_root = 1;
1188 path->skip_locking = 1;
1189 }
1190
1191 if (time_seq == SEQ_LAST)
1192 path->skip_locking = 1;
1193
1194 /*
1195 * grab both a lock on the path and a lock on the delayed ref head.
1196 * We need both to get a consistent picture of how the refs look
1197 * at a specified point in time
1198 */
1199again:
1200 head = NULL;
1201
1202 ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
1203 if (ret < 0)
1204 goto out;
1205 BUG_ON(ret == 0);
1206
1207#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1208 if (trans && likely(trans->type != __TRANS_DUMMY) &&
1209 time_seq != SEQ_LAST) {
1210#else
1211 if (trans && time_seq != SEQ_LAST) {
1212#endif
1213 /*
1214 * look if there are updates for this ref queued and lock the
1215 * head
1216 */
1217 delayed_refs = &trans->transaction->delayed_refs;
1218 spin_lock(&delayed_refs->lock);
1219 head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
1220 if (head) {
1221 if (!mutex_trylock(&head->mutex)) {
1222 refcount_inc(&head->refs);
1223 spin_unlock(&delayed_refs->lock);
1224
1225 btrfs_release_path(path);
1226
1227 /*
1228 * Mutex was contended, block until it's
1229 * released and try again
1230 */
1231 mutex_lock(&head->mutex);
1232 mutex_unlock(&head->mutex);
1233 btrfs_put_delayed_ref_head(head);
1234 goto again;
1235 }
1236 spin_unlock(&delayed_refs->lock);
1237 ret = add_delayed_refs(fs_info, head, time_seq,
1238 &preftrees, sc);
1239 mutex_unlock(&head->mutex);
1240 if (ret)
1241 goto out;
1242 } else {
1243 spin_unlock(&delayed_refs->lock);
1244 }
1245 }
1246
1247 if (path->slots[0]) {
1248 struct extent_buffer *leaf;
1249 int slot;
1250
1251 path->slots[0]--;
1252 leaf = path->nodes[0];
1253 slot = path->slots[0];
1254 btrfs_item_key_to_cpu(leaf, &key, slot);
1255 if (key.objectid == bytenr &&
1256 (key.type == BTRFS_EXTENT_ITEM_KEY ||
1257 key.type == BTRFS_METADATA_ITEM_KEY)) {
1258 ret = add_inline_refs(fs_info, path, bytenr,
1259 &info_level, &preftrees, sc);
1260 if (ret)
1261 goto out;
1262 ret = add_keyed_refs(fs_info, path, bytenr, info_level,
1263 &preftrees, sc);
1264 if (ret)
1265 goto out;
1266 }
1267 }
1268
1269 btrfs_release_path(path);
1270
1271 ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
1272 if (ret)
1273 goto out;
1274
1275 WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));
1276
1277 ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
1278 extent_item_pos, sc, ignore_offset);
1279 if (ret)
1280 goto out;
1281
1282 WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));
1283
1284 /*
1285 * This walks the tree of merged and resolved refs. Tree blocks are
1286 * read in as needed. Unique entries are added to the ulist, and
1287 * the list of found roots is updated.
1288 *
1289 * We release the entire tree in one go before returning.
1290 */
1291 node = rb_first_cached(&preftrees.direct.root);
1292 while (node) {
1293 ref = rb_entry(node, struct prelim_ref, rbnode);
1294 node = rb_next(&ref->rbnode);
1295 /*
1296 * ref->count < 0 can happen here if there are delayed
1297 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
1298 * prelim_ref_insert() relies on this when merging
1299 * identical refs to keep the overall count correct.
1300 * prelim_ref_insert() will merge only those refs
1301 * which compare identically. Any refs having
1302 * e.g. different offsets would not be merged,
1303 * and would retain their original ref->count < 0.
1304 */
1305 if (roots && ref->count && ref->root_id && ref->parent == 0) {
1306 if (sc && sc->root_objectid &&
1307 ref->root_id != sc->root_objectid) {
1308 ret = BACKREF_FOUND_SHARED;
1309 goto out;
1310 }
1311
1312 /* no parent == root of tree */
1313 ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
1314 if (ret < 0)
1315 goto out;
1316 }
1317 if (ref->count && ref->parent) {
1318 if (extent_item_pos && !ref->inode_list &&
1319 ref->level == 0) {
1320 struct extent_buffer *eb;
1321
1322 eb = read_tree_block(fs_info, ref->parent, 0,
1323 ref->level, NULL);
1324 if (IS_ERR(eb)) {
1325 ret = PTR_ERR(eb);
1326 goto out;
1327 } else if (!extent_buffer_uptodate(eb)) {
1328 free_extent_buffer(eb);
1329 ret = -EIO;
1330 goto out;
1331 }
1332
1333 if (!path->skip_locking) {
1334 btrfs_tree_read_lock(eb);
1335 btrfs_set_lock_blocking_read(eb);
1336 }
1337 ret = find_extent_in_eb(eb, bytenr,
1338 *extent_item_pos, &eie, ignore_offset);
1339 if (!path->skip_locking)
1340 btrfs_tree_read_unlock_blocking(eb);
1341 free_extent_buffer(eb);
1342 if (ret < 0)
1343 goto out;
1344 ref->inode_list = eie;
1345 }
1346 ret = ulist_add_merge_ptr(refs, ref->parent,
1347 ref->inode_list,
1348 (void **)&eie, GFP_NOFS);
1349 if (ret < 0)
1350 goto out;
1351 if (!ret && extent_item_pos) {
1352 /*
1353 * we've recorded that parent, so we must extend
1354 * its inode list here
1355 */
1356 BUG_ON(!eie);
1357 while (eie->next)
1358 eie = eie->next;
1359 eie->next = ref->inode_list;
1360 }
1361 eie = NULL;
1362 }
1363 cond_resched();
1364 }
1365
1366out:
1367 btrfs_free_path(path);
1368
1369 prelim_release(&preftrees.direct);
1370 prelim_release(&preftrees.indirect);
1371 prelim_release(&preftrees.indirect_missing_keys);
1372
1373 if (ret < 0)
1374 free_inode_elem_list(eie);
1375 return ret;
1376}
1377
1378static void free_leaf_list(struct ulist *blocks)
1379{
1380 struct ulist_node *node = NULL;
1381 struct extent_inode_elem *eie;
1382 struct ulist_iterator uiter;
1383
1384 ULIST_ITER_INIT(&uiter);
1385 while ((node = ulist_next(blocks, &uiter))) {
1386 if (!node->aux)
1387 continue;
1388 eie = unode_aux_to_inode_list(node);
1389 free_inode_elem_list(eie);
1390 node->aux = 0;
1391 }
1392
1393 ulist_free(blocks);
1394}
1395
1396/*
1397 * Finds all leafs with a reference to the specified combination of bytenr and
1398 * offset. key_list_head will point to a list of corresponding keys (caller must
1399 * free each list element). The leafs will be stored in the leafs ulist, which
1400 * must be freed with ulist_free.
1401 *
1402 * returns 0 on success, <0 on error
1403 */
1404int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
1405 struct btrfs_fs_info *fs_info, u64 bytenr,
1406 u64 time_seq, struct ulist **leafs,
1407 const u64 *extent_item_pos, bool ignore_offset)
1408{
1409 int ret;
1410
1411 *leafs = ulist_alloc(GFP_NOFS);
1412 if (!*leafs)
1413 return -ENOMEM;
1414
1415 ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1416 *leafs, NULL, extent_item_pos, NULL, ignore_offset);
1417 if (ret < 0 && ret != -ENOENT) {
1418 free_leaf_list(*leafs);
1419 return ret;
1420 }
1421
1422 return 0;
1423}
1424
1425/*
1426 * walk all backrefs for a given extent to find all roots that reference this
1427 * extent. Walking a backref means finding all extents that reference this
1428 * extent and in turn walk the backrefs of those, too. Naturally this is a
1429 * recursive process, but here it is implemented in an iterative fashion: We
1430 * find all referencing extents for the extent in question and put them on a
1431 * list. In turn, we find all referencing extents for those, further appending
1432 * to the list. The way we iterate the list allows adding more elements after
1433 * the current while iterating. The process stops when we reach the end of the
1434 * list. Found roots are added to the roots list.
1435 *
1436 * returns 0 on success, < 0 on error.
1437 */
1438static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
1439 struct btrfs_fs_info *fs_info, u64 bytenr,
1440 u64 time_seq, struct ulist **roots,
1441 bool ignore_offset)
1442{
1443 struct ulist *tmp;
1444 struct ulist_node *node = NULL;
1445 struct ulist_iterator uiter;
1446 int ret;
1447
1448 tmp = ulist_alloc(GFP_NOFS);
1449 if (!tmp)
1450 return -ENOMEM;
1451 *roots = ulist_alloc(GFP_NOFS);
1452 if (!*roots) {
1453 ulist_free(tmp);
1454 return -ENOMEM;
1455 }
1456
1457 ULIST_ITER_INIT(&uiter);
1458 while (1) {
1459 ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1460 tmp, *roots, NULL, NULL, ignore_offset);
1461 if (ret < 0 && ret != -ENOENT) {
1462 ulist_free(tmp);
1463 ulist_free(*roots);
1464 *roots = NULL;
1465 return ret;
1466 }
1467 node = ulist_next(tmp, &uiter);
1468 if (!node)
1469 break;
1470 bytenr = node->val;
1471 cond_resched();
1472 }
1473
1474 ulist_free(tmp);
1475 return 0;
1476}
1477
1478int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1479 struct btrfs_fs_info *fs_info, u64 bytenr,
1480 u64 time_seq, struct ulist **roots,
1481 bool ignore_offset)
1482{
1483 int ret;
1484
1485 if (!trans)
1486 down_read(&fs_info->commit_root_sem);
1487 ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
1488 time_seq, roots, ignore_offset);
1489 if (!trans)
1490 up_read(&fs_info->commit_root_sem);
1491 return ret;
1492}
1493
1494/**
1495 * btrfs_check_shared - tell us whether an extent is shared
1496 *
1497 * btrfs_check_shared uses the backref walking code but will short
1498 * circuit as soon as it finds a root or inode that doesn't match the
1499 * one passed in. This provides a significant performance benefit for
1500 * callers (such as fiemap) which want to know whether the extent is
1501 * shared but do not need a ref count.
1502 *
1503 * This attempts to attach to the running transaction in order to account for
1504 * delayed refs, but continues on even when no running transaction exists.
1505 *
1506 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
1507 */
1508int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
1509 struct ulist *roots, struct ulist *tmp)
1510{
1511 struct btrfs_fs_info *fs_info = root->fs_info;
1512 struct btrfs_trans_handle *trans;
1513 struct ulist_iterator uiter;
1514 struct ulist_node *node;
1515 struct seq_list elem = SEQ_LIST_INIT(elem);
1516 int ret = 0;
1517 struct share_check shared = {
1518 .root_objectid = root->root_key.objectid,
1519 .inum = inum,
1520 .share_count = 0,
1521 };
1522
1523 ulist_init(roots);
1524 ulist_init(tmp);
1525
1526 trans = btrfs_join_transaction_nostart(root);
1527 if (IS_ERR(trans)) {
1528 if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1529 ret = PTR_ERR(trans);
1530 goto out;
1531 }
1532 trans = NULL;
1533 down_read(&fs_info->commit_root_sem);
1534 } else {
1535 btrfs_get_tree_mod_seq(fs_info, &elem);
1536 }
1537
1538 ULIST_ITER_INIT(&uiter);
1539 while (1) {
1540 ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
1541 roots, NULL, &shared, false);
1542 if (ret == BACKREF_FOUND_SHARED) {
1543 /* this is the only condition under which we return 1 */
1544 ret = 1;
1545 break;
1546 }
1547 if (ret < 0 && ret != -ENOENT)
1548 break;
1549 ret = 0;
1550 node = ulist_next(tmp, &uiter);
1551 if (!node)
1552 break;
1553 bytenr = node->val;
1554 shared.share_count = 0;
1555 cond_resched();
1556 }
1557
1558 if (trans) {
1559 btrfs_put_tree_mod_seq(fs_info, &elem);
1560 btrfs_end_transaction(trans);
1561 } else {
1562 up_read(&fs_info->commit_root_sem);
1563 }
1564out:
1565 ulist_release(roots);
1566 ulist_release(tmp);
1567 return ret;
1568}
1569
1570int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
1571 u64 start_off, struct btrfs_path *path,
1572 struct btrfs_inode_extref **ret_extref,
1573 u64 *found_off)
1574{
1575 int ret, slot;
1576 struct btrfs_key key;
1577 struct btrfs_key found_key;
1578 struct btrfs_inode_extref *extref;
1579 const struct extent_buffer *leaf;
1580 unsigned long ptr;
1581
1582 key.objectid = inode_objectid;
1583 key.type = BTRFS_INODE_EXTREF_KEY;
1584 key.offset = start_off;
1585
1586 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1587 if (ret < 0)
1588 return ret;
1589
1590 while (1) {
1591 leaf = path->nodes[0];
1592 slot = path->slots[0];
1593 if (slot >= btrfs_header_nritems(leaf)) {
1594 /*
1595 * If the item at offset is not found,
1596 * btrfs_search_slot will point us to the slot
1597 * where it should be inserted. In our case
1598 * that will be the slot directly before the
1599 * next INODE_REF_KEY_V2 item. In the case
1600 * that we're pointing to the last slot in a
1601 * leaf, we must move one leaf over.
1602 */
1603 ret = btrfs_next_leaf(root, path);
1604 if (ret) {
1605 if (ret >= 1)
1606 ret = -ENOENT;
1607 break;
1608 }
1609 continue;
1610 }
1611
1612 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1613
1614 /*
1615 * Check that we're still looking at an extended ref key for
1616 * this particular objectid. If we have different
1617 * objectid or type then there are no more to be found
1618 * in the tree and we can exit.
1619 */
1620 ret = -ENOENT;
1621 if (found_key.objectid != inode_objectid)
1622 break;
1623 if (found_key.type != BTRFS_INODE_EXTREF_KEY)
1624 break;
1625
1626 ret = 0;
1627 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1628 extref = (struct btrfs_inode_extref *)ptr;
1629 *ret_extref = extref;
1630 if (found_off)
1631 *found_off = found_key.offset;
1632 break;
1633 }
1634
1635 return ret;
1636}
1637
1638/*
1639 * this iterates to turn a name (from iref/extref) into a full filesystem path.
1640 * Elements of the path are separated by '/' and the path is guaranteed to be
1641 * 0-terminated. the path is only given within the current file system.
1642 * Therefore, it never starts with a '/'. the caller is responsible to provide
1643 * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
1644 * the start point of the resulting string is returned. this pointer is within
1645 * dest, normally.
1646 * in case the path buffer would overflow, the pointer is decremented further
1647 * as if output was written to the buffer, though no more output is actually
1648 * generated. that way, the caller can determine how much space would be
1649 * required for the path to fit into the buffer. in that case, the returned
1650 * value will be smaller than dest. callers must check this!
1651 */
1652char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1653 u32 name_len, unsigned long name_off,
1654 struct extent_buffer *eb_in, u64 parent,
1655 char *dest, u32 size)
1656{
1657 int slot;
1658 u64 next_inum;
1659 int ret;
1660 s64 bytes_left = ((s64)size) - 1;
1661 struct extent_buffer *eb = eb_in;
1662 struct btrfs_key found_key;
1663 int leave_spinning = path->leave_spinning;
1664 struct btrfs_inode_ref *iref;
1665
1666 if (bytes_left >= 0)
1667 dest[bytes_left] = '\0';
1668
1669 path->leave_spinning = 1;
1670 while (1) {
1671 bytes_left -= name_len;
1672 if (bytes_left >= 0)
1673 read_extent_buffer(eb, dest + bytes_left,
1674 name_off, name_len);
1675 if (eb != eb_in) {
1676 if (!path->skip_locking)
1677 btrfs_tree_read_unlock_blocking(eb);
1678 free_extent_buffer(eb);
1679 }
1680 ret = btrfs_find_item(fs_root, path, parent, 0,
1681 BTRFS_INODE_REF_KEY, &found_key);
1682 if (ret > 0)
1683 ret = -ENOENT;
1684 if (ret)
1685 break;
1686
1687 next_inum = found_key.offset;
1688
1689 /* regular exit ahead */
1690 if (parent == next_inum)
1691 break;
1692
1693 slot = path->slots[0];
1694 eb = path->nodes[0];
1695 /* make sure we can use eb after releasing the path */
1696 if (eb != eb_in) {
1697 if (!path->skip_locking)
1698 btrfs_set_lock_blocking_read(eb);
1699 path->nodes[0] = NULL;
1700 path->locks[0] = 0;
1701 }
1702 btrfs_release_path(path);
1703 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1704
1705 name_len = btrfs_inode_ref_name_len(eb, iref);
1706 name_off = (unsigned long)(iref + 1);
1707
1708 parent = next_inum;
1709 --bytes_left;
1710 if (bytes_left >= 0)
1711 dest[bytes_left] = '/';
1712 }
1713
1714 btrfs_release_path(path);
1715 path->leave_spinning = leave_spinning;
1716
1717 if (ret)
1718 return ERR_PTR(ret);
1719
1720 return dest + bytes_left;
1721}
1722
1723/*
1724 * this makes the path point to (logical EXTENT_ITEM *)
1725 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
1726 * tree blocks and <0 on error.
1727 */
1728int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1729 struct btrfs_path *path, struct btrfs_key *found_key,
1730 u64 *flags_ret)
1731{
1732 int ret;
1733 u64 flags;
1734 u64 size = 0;
1735 u32 item_size;
1736 const struct extent_buffer *eb;
1737 struct btrfs_extent_item *ei;
1738 struct btrfs_key key;
1739
1740 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1741 key.type = BTRFS_METADATA_ITEM_KEY;
1742 else
1743 key.type = BTRFS_EXTENT_ITEM_KEY;
1744 key.objectid = logical;
1745 key.offset = (u64)-1;
1746
1747 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1748 if (ret < 0)
1749 return ret;
1750
1751 ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
1752 if (ret) {
1753 if (ret > 0)
1754 ret = -ENOENT;
1755 return ret;
1756 }
1757 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1758 if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1759 size = fs_info->nodesize;
1760 else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1761 size = found_key->offset;
1762
1763 if (found_key->objectid > logical ||
1764 found_key->objectid + size <= logical) {
1765 btrfs_debug(fs_info,
1766 "logical %llu is not within any extent", logical);
1767 return -ENOENT;
1768 }
1769
1770 eb = path->nodes[0];
1771 item_size = btrfs_item_size_nr(eb, path->slots[0]);
1772 BUG_ON(item_size < sizeof(*ei));
1773
1774 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1775 flags = btrfs_extent_flags(eb, ei);
1776
1777 btrfs_debug(fs_info,
1778 "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
1779 logical, logical - found_key->objectid, found_key->objectid,
1780 found_key->offset, flags, item_size);
1781
1782 WARN_ON(!flags_ret);
1783 if (flags_ret) {
1784 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1785 *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
1786 else if (flags & BTRFS_EXTENT_FLAG_DATA)
1787 *flags_ret = BTRFS_EXTENT_FLAG_DATA;
1788 else
1789 BUG();
1790 return 0;
1791 }
1792
1793 return -EIO;
1794}
1795
1796/*
1797 * helper function to iterate extent inline refs. ptr must point to a 0 value
1798 * for the first call and may be modified. it is used to track state.
1799 * if more refs exist, 0 is returned and the next call to
1800 * get_extent_inline_ref must pass the modified ptr parameter to get the
1801 * next ref. after the last ref was processed, 1 is returned.
1802 * returns <0 on error
1803 */
1804static int get_extent_inline_ref(unsigned long *ptr,
1805 const struct extent_buffer *eb,
1806 const struct btrfs_key *key,
1807 const struct btrfs_extent_item *ei,
1808 u32 item_size,
1809 struct btrfs_extent_inline_ref **out_eiref,
1810 int *out_type)
1811{
1812 unsigned long end;
1813 u64 flags;
1814 struct btrfs_tree_block_info *info;
1815
1816 if (!*ptr) {
1817 /* first call */
1818 flags = btrfs_extent_flags(eb, ei);
1819 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1820 if (key->type == BTRFS_METADATA_ITEM_KEY) {
1821 /* a skinny metadata extent */
1822 *out_eiref =
1823 (struct btrfs_extent_inline_ref *)(ei + 1);
1824 } else {
1825 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1826 info = (struct btrfs_tree_block_info *)(ei + 1);
1827 *out_eiref =
1828 (struct btrfs_extent_inline_ref *)(info + 1);
1829 }
1830 } else {
1831 *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
1832 }
1833 *ptr = (unsigned long)*out_eiref;
1834 if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
1835 return -ENOENT;
1836 }
1837
1838 end = (unsigned long)ei + item_size;
1839 *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
1840 *out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
1841 BTRFS_REF_TYPE_ANY);
1842 if (*out_type == BTRFS_REF_TYPE_INVALID)
1843 return -EUCLEAN;
1844
1845 *ptr += btrfs_extent_inline_ref_size(*out_type);
1846 WARN_ON(*ptr > end);
1847 if (*ptr == end)
1848 return 1; /* last */
1849
1850 return 0;
1851}
1852
1853/*
1854 * reads the tree block backref for an extent. tree level and root are returned
1855 * through out_level and out_root. ptr must point to a 0 value for the first
1856 * call and may be modified (see get_extent_inline_ref comment).
1857 * returns 0 if data was provided, 1 if there was no more data to provide or
1858 * <0 on error.
1859 */
1860int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1861 struct btrfs_key *key, struct btrfs_extent_item *ei,
1862 u32 item_size, u64 *out_root, u8 *out_level)
1863{
1864 int ret;
1865 int type;
1866 struct btrfs_extent_inline_ref *eiref;
1867
1868 if (*ptr == (unsigned long)-1)
1869 return 1;
1870
1871 while (1) {
1872 ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
1873 &eiref, &type);
1874 if (ret < 0)
1875 return ret;
1876
1877 if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1878 type == BTRFS_SHARED_BLOCK_REF_KEY)
1879 break;
1880
1881 if (ret == 1)
1882 return 1;
1883 }
1884
1885 /* we can treat both ref types equally here */
1886 *out_root = btrfs_extent_inline_ref_offset(eb, eiref);
1887
1888 if (key->type == BTRFS_EXTENT_ITEM_KEY) {
1889 struct btrfs_tree_block_info *info;
1890
1891 info = (struct btrfs_tree_block_info *)(ei + 1);
1892 *out_level = btrfs_tree_block_level(eb, info);
1893 } else {
1894 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
1895 *out_level = (u8)key->offset;
1896 }
1897
1898 if (ret == 1)
1899 *ptr = (unsigned long)-1;
1900
1901 return 0;
1902}
1903
1904static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
1905 struct extent_inode_elem *inode_list,
1906 u64 root, u64 extent_item_objectid,
1907 iterate_extent_inodes_t *iterate, void *ctx)
1908{
1909 struct extent_inode_elem *eie;
1910 int ret = 0;
1911
1912 for (eie = inode_list; eie; eie = eie->next) {
1913 btrfs_debug(fs_info,
1914 "ref for %llu resolved, key (%llu EXTEND_DATA %llu), root %llu",
1915 extent_item_objectid, eie->inum,
1916 eie->offset, root);
1917 ret = iterate(eie->inum, eie->offset, root, ctx);
1918 if (ret) {
1919 btrfs_debug(fs_info,
1920 "stopping iteration for %llu due to ret=%d",
1921 extent_item_objectid, ret);
1922 break;
1923 }
1924 }
1925
1926 return ret;
1927}
1928
1929/*
1930 * calls iterate() for every inode that references the extent identified by
1931 * the given parameters.
1932 * when the iterator function returns a non-zero value, iteration stops.
1933 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
			  u64 extent_item_objectid, u64 extent_item_pos,
			  int search_commit_root,
			  iterate_extent_inodes_t *iterate, void *ctx,
			  bool ignore_offset)
{
	int ret;
	struct btrfs_trans_handle *trans = NULL;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;

	btrfs_debug(fs_info, "resolving all inodes for extent %llu",
		    extent_item_objectid);

	if (!search_commit_root) {
		trans = btrfs_attach_transaction(fs_info->extent_root);
		if (IS_ERR(trans)) {
			if (PTR_ERR(trans) != -ENOENT &&
			    PTR_ERR(trans) != -EROFS)
				return PTR_ERR(trans);
			trans = NULL;
		}
	}

	if (trans)
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	else
		down_read(&fs_info->commit_root_sem);

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos, ignore_offset);
	if (ret)
		goto out;

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
						tree_mod_seq_elem.seq, &roots,
						ignore_offset);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			btrfs_debug(fs_info,
				    "root %llu references leaf %llu, data list %#llx",
				    root_node->val, ref_node->val,
				    ref_node->aux);
			ret = iterate_leaf_refs(fs_info,
						(struct extent_inode_elem *)
						(uintptr_t)ref_node->aux,
						root_node->val,
						extent_item_objectid,
						iterate, ctx);
		}
		ulist_free(roots);
	}

	free_leaf_list(refs);
out:
	if (trans) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_end_transaction(trans);
	} else {
		up_read(&fs_info->commit_root_sem);
	}

	return ret;
}

int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx,
				bool ignore_offset)
{
	int ret;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_release_path(path);
	if (ret < 0)
		return ret;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return -EINVAL;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
				    extent_item_pos, search_commit_root,
				    iterate, ctx, ignore_offset);

	return ret;
}

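/*
 * A hedged usage sketch for the two iterators above (not code from this
 * file; the callback name "print_inode_cb" is made up). The callback must
 * match iterate_extent_inodes_t, and returning non-zero stops the
 * iteration:
 *
 *	static int print_inode_cb(u64 inum, u64 offset, u64 root, void *ctx)
 *	{
 *		pr_info("inode %llu, offset %llu, root %llu\n",
 *			inum, offset, root);
 *		return 0;
 *	}
 *
 *	ret = iterate_inodes_from_logical(logical, fs_info, path,
 *					  print_inode_cb, NULL, false);
 */
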
typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
			      struct extent_buffer *eb, void *ctx);

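/*
 * Walk all INODE_REF items of the given inode and call @iterate for every
 * name stored in them. The path is released before each callback, so the
 * callback itself is free to search the tree.
 */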
static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
			      struct btrfs_path *path,
			      iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		ret = btrfs_find_item(fs_root, path, inum,
				      parent ? parent + 1 : 0,
				      BTRFS_INODE_REF_KEY, &found_key);

		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item = btrfs_item_nr(slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			btrfs_debug(fs_root->fs_info,
				    "following ref at offset %u for inode %llu in tree %llu",
				    cur, found_key.objectid,
				    fs_root->root_key.objectid);
			ret = iterate(parent, name_len,
				      (unsigned long)(iref + 1), eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

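/*
 * Same as iterate_inode_refs(), but walking the INODE_EXTREF items that are
 * used when a reference no longer fits into the regular inode ref item
 * (e.g. for inodes with very many hard links).
 */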
static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
				 struct btrfs_path *path,
				 iterate_irefs_t *iterate, void *ctx)
{
	int ret;
	int slot;
	u64 offset = 0;
	u64 parent;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_inode_extref *extref;
	u32 item_size;
	u32 cur_offset;
	unsigned long ptr;

	while (1) {
		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
					    &offset);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		slot = path->slots[0];
		eb = btrfs_clone_extent_buffer(path->nodes[0]);
		if (!eb) {
			ret = -ENOMEM;
			break;
		}
		btrfs_release_path(path);

		item_size = btrfs_item_size_nr(eb, slot);
		ptr = btrfs_item_ptr_offset(eb, slot);
		cur_offset = 0;

		while (cur_offset < item_size) {
			u32 name_len;

			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
			parent = btrfs_inode_extref_parent(eb, extref);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			ret = iterate(parent, name_len,
				      (unsigned long)&extref->name, eb, ctx);
			if (ret)
				break;

			cur_offset += sizeof(*extref) + name_len;
		}
		free_extent_buffer(eb);

		offset++;
	}

	btrfs_release_path(path);

	return ret;
}

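/*
 * Iterate over both the regular and the extended refs of an inode. -ENOENT
 * from one of the two lookups is fine as long as the other one found
 * references.
 */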
static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path, iterate_irefs_t *iterate,
			 void *ctx)
{
	int ret;
	int found_refs = 0;

	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
	if (!ret)
		++found_refs;
	else if (ret != -ENOENT)
		return ret;

	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
	if (ret == -ENOENT && found_refs)
		return 0;

	return ret;
}

/*
 * returns 0 if the path could be dumped (possibly truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
		     ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
				   name_off, eb, inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt paths available in
 * ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}

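/*
 * Allocate a btrfs_data_container of @total_bytes. If @total_bytes is not
 * even enough for the container header itself, record the shortfall in
 * ->bytes_missing instead of failing.
 */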
struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kvmalloc(alloc_bytes, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct btrfs_data_container).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
				  struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return ERR_CAST(fspath);

	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
	if (!ifp) {
		kvfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	if (!ipath)
		return;
	kvfree(ipath->fspath);
	kfree(ipath);
}

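/*
 * Putting the helpers above together, the typical lifecycle (a rough sketch
 * of what the INO_PATHS ioctl does, error handling trimmed) looks like:
 *
 *	ipath = init_ipath(size, fs_root, path);
 *	ret = paths_from_inode(inum, ipath);
 *	for (i = 0; i < ipath->fspath->elem_cnt; i++)
 *		pr_info("path: %s\n",
 *			(char *)(unsigned long)ipath->fspath->val[i]);
 *	free_ipath(ipath);
 */
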
struct btrfs_backref_iter *btrfs_backref_iter_alloc(
		struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
{
	struct btrfs_backref_iter *ret;

	ret = kzalloc(sizeof(*ret), gfp_flag);
	if (!ret)
		return NULL;

	ret->path = btrfs_alloc_path();
	if (!ret->path) {
		kfree(ret);
		return NULL;
	}

	/* Current backref iterator only supports iteration in commit root */
	ret->path->search_commit_root = 1;
	ret->path->skip_locking = 1;
	ret->fs_info = fs_info;

	return ret;
}

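/*
 * Position @iter at the first backref (inline or keyed) of @bytenr.
 *
 * Return 0 on success, <0 on error (e.g. -ENOENT if no backref item can be
 * found, -ENOTSUPP for data extents).
 */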
int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
{
	struct btrfs_fs_info *fs_info = iter->fs_info;
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	key.type = BTRFS_METADATA_ITEM_KEY;
	key.offset = (u64)-1;
	iter->bytenr = bytenr;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		ret = -EUCLEAN;
		goto release;
	}
	if (path->slots[0] == 0) {
		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
		ret = -EUCLEAN;
		goto release;
	}
	path->slots[0]--;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
		ret = -ENOENT;
		goto release;
	}
	memcpy(&iter->cur_key, &key, sizeof(key));
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->end_ptr = (u32)(iter->item_ptr +
			btrfs_item_size_nr(path->nodes[0], path->slots[0]));
	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_extent_item);

	/*
	 * Only iteration on tree backrefs is supported for now.
	 *
	 * This is an extra precaution for non skinny-metadata, where
	 * EXTENT_ITEM is also used for tree blocks, so we can only rely on
	 * the extent flags to determine if it's a tree block.
	 */
	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
		ret = -ENOTSUPP;
		goto release;
	}
	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));

	/* If there is no inline backref, go search for keyed backref */
	if (iter->cur_ptr >= iter->end_ptr) {
		ret = btrfs_next_item(fs_info->extent_root, path);

		/* No inline nor keyed ref */
		if (ret > 0) {
			ret = -ENOENT;
			goto release;
		}
		if (ret < 0)
			goto release;

		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
				      path->slots[0]);
		if (iter->cur_key.objectid != bytenr ||
		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
			ret = -ENOENT;
			goto release;
		}
		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
							   path->slots[0]);
		iter->item_ptr = iter->cur_ptr;
		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
				      path->nodes[0], path->slots[0]));
	}

	return 0;
release:
	btrfs_backref_iter_release(iter);
	return ret;
}

/*
 * Go to the next backref item of the current bytenr, can be either inlined or
 * keyed.
 *
 * Caller needs to check whether it's an inline ref or not via iter->cur_key.
 *
 * Return 0 if we get the next backref without problem.
 * Return >0 if there is no extra backref for this bytenr.
 * Return <0 if something went wrong.
 */
int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
{
	struct extent_buffer *eb = btrfs_backref_get_eb(iter);
	struct btrfs_path *path = iter->path;
	struct btrfs_extent_inline_ref *iref;
	int ret;
	u32 size;

	if (btrfs_backref_iter_is_inline_ref(iter)) {
		/* We're still inside the inline refs */
		ASSERT(iter->cur_ptr < iter->end_ptr);

		if (btrfs_backref_has_tree_block_info(iter)) {
			/* First tree block info */
			size = sizeof(struct btrfs_tree_block_info);
		} else {
			/* Use inline ref type to determine the size */
			int type;

			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_extent_inline_ref_type(eb, iref);

			size = btrfs_extent_inline_ref_size(type);
		}
		iter->cur_ptr += size;
		if (iter->cur_ptr < iter->end_ptr)
			return 0;

		/* All inline items iterated, fall through */
	}

	/* We're at keyed items, there is no inline item, go to the next one */
	ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
	if (ret)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
	if (iter->cur_key.objectid != iter->bytenr ||
	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
		return 1;
	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
	iter->cur_ptr = iter->item_ptr;
	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
								 path->slots[0]);
	return 0;
}

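/*
 * The three functions above form a small iterator API. A minimal loop
 * (sketch only, mirroring how the backref cache code below consumes it;
 * btrfs_backref_iter_free() is assumed to be the matching helper from
 * backref.h):
 *
 *	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
 *	if (!iter)
 *		return -ENOMEM;
 *	ret = btrfs_backref_iter_start(iter, bytenr);
 *	while (ret == 0) {
 *		// iter->cur_key describes the current backref; use
 *		// btrfs_backref_iter_is_inline_ref() to tell inline from
 *		// keyed items
 *		ret = btrfs_backref_iter_next(iter);
 *	}
 *	btrfs_backref_iter_free(iter);
 */
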
void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
			      struct btrfs_backref_cache *cache, int is_reloc)
{
	int i;

	cache->rb_root = RB_ROOT;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		INIT_LIST_HEAD(&cache->pending[i]);
	INIT_LIST_HEAD(&cache->changed);
	INIT_LIST_HEAD(&cache->detached);
	INIT_LIST_HEAD(&cache->leaves);
	INIT_LIST_HEAD(&cache->pending_edge);
	INIT_LIST_HEAD(&cache->useless_node);
	cache->fs_info = fs_info;
	cache->is_reloc = is_reloc;
}

struct btrfs_backref_node *btrfs_backref_alloc_node(
		struct btrfs_backref_cache *cache, u64 bytenr, int level)
{
	struct btrfs_backref_node *node;

	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
	node = kzalloc(sizeof(*node), GFP_NOFS);
	if (!node)
		return node;

	INIT_LIST_HEAD(&node->list);
	INIT_LIST_HEAD(&node->upper);
	INIT_LIST_HEAD(&node->lower);
	RB_CLEAR_NODE(&node->rb_node);
	cache->nr_nodes++;
	node->level = level;
	node->bytenr = bytenr;

	return node;
}

struct btrfs_backref_edge *btrfs_backref_alloc_edge(
		struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_edge *edge;

	edge = kzalloc(sizeof(*edge), GFP_NOFS);
	if (edge)
		cache->nr_edges++;
	return edge;
}

/*
 * Drop the backref node from cache, also cleaning up all its
 * upper edges and any uncached nodes in the path.
 *
 * This cleanup happens bottom up, thus the node should either
 * be the lowest node in the cache or a detached node.
 */
void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
				struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	if (!node)
		return;

	BUG_ON(!node->lowest && !node->detached);
	while (!list_empty(&node->upper)) {
		edge = list_entry(node->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		upper = edge->node[UPPER];
		list_del(&edge->list[LOWER]);
		list_del(&edge->list[UPPER]);
		btrfs_backref_free_edge(cache, edge);

		if (RB_EMPTY_NODE(&upper->rb_node)) {
			BUG_ON(!list_empty(&node->upper));
			btrfs_backref_drop_node(cache, node);
			node = upper;
			node->lowest = 1;
			continue;
		}
		/*
		 * Add the node to leaf node list if no other child block
		 * is cached.
		 */
		if (list_empty(&upper->lower)) {
			list_add_tail(&upper->lower, &cache->leaves);
			upper->lowest = 1;
		}
	}

	btrfs_backref_drop_node(cache, node);
}

/*
 * Release all nodes/edges from current cache
 */
void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
{
	struct btrfs_backref_node *node;
	int i;

	while (!list_empty(&cache->detached)) {
		node = list_entry(cache->detached.next,
				  struct btrfs_backref_node, list);
		btrfs_backref_cleanup_node(cache, node);
	}

	while (!list_empty(&cache->leaves)) {
		node = list_entry(cache->leaves.next,
				  struct btrfs_backref_node, lower);
		btrfs_backref_cleanup_node(cache, node);
	}

	cache->last_trans = 0;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
		ASSERT(list_empty(&cache->pending[i]));
	ASSERT(list_empty(&cache->pending_edge));
	ASSERT(list_empty(&cache->useless_node));
	ASSERT(list_empty(&cache->changed));
	ASSERT(list_empty(&cache->detached));
	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
	ASSERT(!cache->nr_nodes);
	ASSERT(!cache->nr_edges);
}

/*
 * Handle direct tree backref
 *
 * Direct tree backref means the backref item shows its parent bytenr
 * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
 *
 * @ref_key:	The converted backref key.
 *		For keyed backref, it's the item key.
 *		For inlined backref, objectid is the bytenr,
 *		type is btrfs_inline_ref_type, offset is
 *		btrfs_inline_ref_offset.
 */
static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
				      struct btrfs_key *ref_key,
				      struct btrfs_backref_node *cur)
{
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *upper;
	struct rb_node *rb_node;

	ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);

	/* Only reloc root uses backref pointing to itself */
	if (ref_key->objectid == ref_key->offset) {
		struct btrfs_root *root;

		cur->is_reloc_root = 1;
		/* Only reloc backref cache cares about a specific root */
		if (cache->is_reloc) {
			root = find_reloc_root(cache->fs_info, cur->bytenr);
			if (WARN_ON(!root))
				return -ENOENT;
			cur->root = root;
		} else {
			/*
			 * For generic purpose backref cache, reloc root node
			 * is useless.
			 */
			list_add(&cur->list, &cache->useless_node);
		}
		return 0;
	}

	edge = btrfs_backref_alloc_edge(cache);
	if (!edge)
		return -ENOMEM;

	rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
	if (!rb_node) {
		/* Parent node not yet cached */
		upper = btrfs_backref_alloc_node(cache, ref_key->offset,
						 cur->level + 1);
		if (!upper) {
			btrfs_backref_free_edge(cache, edge);
			return -ENOMEM;
		}

		/*
		 * Backrefs for the upper level block aren't cached, add the
		 * block to the pending list
		 */
		list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	} else {
		/* Parent node already cached */
		upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
		ASSERT(upper->checked);
		INIT_LIST_HEAD(&edge->list[UPPER]);
	}
	btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
	return 0;
}

/*
 * Handle indirect tree backref
 *
 * Indirect tree backref means we only know which tree the node belongs to.
 * We still need to do a tree search to find out the parents. This is for
 * TREE_BLOCK_REF backref (keyed or inlined).
 *
 * @ref_key:	The same as @ref_key in handle_direct_tree_backref()
 * @tree_key:	The first key of this tree block.
 * @path:	A clean (released) path, to avoid allocating a path every time
 *		the function gets called.
 */
static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
					struct btrfs_path *path,
					struct btrfs_key *ref_key,
					struct btrfs_key *tree_key,
					struct btrfs_backref_node *cur)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_node *lower;
	struct btrfs_backref_edge *edge;
	struct extent_buffer *eb;
	struct btrfs_root *root;
	struct rb_node *rb_node;
	int level;
	bool need_check = true;
	int ret;

	root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
	if (IS_ERR(root))
		return PTR_ERR(root);
	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
		cur->cowonly = 1;

	if (btrfs_root_level(&root->root_item) == cur->level) {
		/* Tree root */
		ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
		/*
		 * For reloc backref cache, we may ignore reloc root. But for
		 * general purpose backref cache, we can't rely on
		 * btrfs_should_ignore_reloc_root() as it may conflict with
		 * current running relocation and lead to missing root.
		 *
		 * For general purpose backref cache, reloc root detection is
		 * completely relying on direct backref (key->offset is parent
		 * bytenr), thus only do such check for reloc cache.
		 */
		if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
			btrfs_put_root(root);
			list_add(&cur->list, &cache->useless_node);
		} else {
			cur->root = root;
		}
		return 0;
	}

	level = cur->level + 1;

	/* Search the tree to find parent blocks referring to the block */
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->lowest_level = level;
	ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
	path->lowest_level = 0;
	if (ret < 0) {
		btrfs_put_root(root);
		return ret;
	}
	if (ret > 0 && path->slots[level] > 0)
		path->slots[level]--;

	eb = path->nodes[level];
	if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
		btrfs_err(fs_info,
"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
			  cur->bytenr, level - 1, root->root_key.objectid,
			  tree_key->objectid, tree_key->type, tree_key->offset);
		btrfs_put_root(root);
		ret = -ENOENT;
		goto out;
	}
	lower = cur;

	/* Add all nodes and edges in the path */
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level]) {
			ASSERT(btrfs_root_bytenr(&root->root_item) ==
			       lower->bytenr);
			/* Same as previous should_ignore_reloc_root() call */
			if (btrfs_should_ignore_reloc_root(root) &&
			    cache->is_reloc) {
				btrfs_put_root(root);
				list_add(&lower->list, &cache->useless_node);
			} else {
				lower->root = root;
			}
			break;
		}

		edge = btrfs_backref_alloc_edge(cache);
		if (!edge) {
			btrfs_put_root(root);
			ret = -ENOMEM;
			goto out;
		}

		eb = path->nodes[level];
		rb_node = rb_simple_search(&cache->rb_root, eb->start);
		if (!rb_node) {
			upper = btrfs_backref_alloc_node(cache, eb->start,
							 lower->level + 1);
			if (!upper) {
				btrfs_put_root(root);
				btrfs_backref_free_edge(cache, edge);
				ret = -ENOMEM;
				goto out;
			}
			upper->owner = btrfs_header_owner(eb);
			if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
				upper->cowonly = 1;

			/*
			 * If we know the block isn't shared we can avoid
			 * checking its backrefs.
			 */
			if (btrfs_block_can_be_shared(root, eb))
				upper->checked = 0;
			else
				upper->checked = 1;

			/*
			 * Add the block to pending list if we need to check its
			 * backrefs, we only do this once while walking up a
			 * tree as we will catch anything else later on.
			 */
			if (!upper->checked && need_check) {
				need_check = false;
				list_add_tail(&edge->list[UPPER],
					      &cache->pending_edge);
			} else {
				if (upper->checked)
					need_check = true;
				INIT_LIST_HEAD(&edge->list[UPPER]);
			}
		} else {
			upper = rb_entry(rb_node, struct btrfs_backref_node,
					 rb_node);
			ASSERT(upper->checked);
			INIT_LIST_HEAD(&edge->list[UPPER]);
			if (!upper->owner)
				upper->owner = btrfs_header_owner(eb);
		}
		btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);

		if (rb_node) {
			btrfs_put_root(root);
			break;
		}
		lower = upper;
		upper = NULL;
	}
out:
	btrfs_release_path(path);
	return ret;
}

/*
 * Add backref node @cur into @cache.
 *
 * NOTE: Even if the function returned 0, @cur is not yet cached as its upper
 * links aren't yet bidirectional; use btrfs_backref_finish_upper_links() to
 * finish the linkage.
 *
 * @path:	Released path for indirect tree backref lookup
 * @iter:	Released backref iter for extent tree search
 * @node_key:	The first key of the tree block
 */
int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
				struct btrfs_path *path,
				struct btrfs_backref_iter *iter,
				struct btrfs_key *node_key,
				struct btrfs_backref_node *cur)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_backref_edge *edge;
	struct btrfs_backref_node *exist;
	int ret;

	ret = btrfs_backref_iter_start(iter, cur->bytenr);
	if (ret < 0)
		return ret;
	/*
	 * We skip the first btrfs_tree_block_info, as we don't use the key
	 * stored in it, but fetch it from the tree block
	 */
	if (btrfs_backref_has_tree_block_info(iter)) {
		ret = btrfs_backref_iter_next(iter);
		if (ret < 0)
			goto out;
		/* No extra backref? This means the tree block is corrupted */
		if (ret > 0) {
			ret = -EUCLEAN;
			goto out;
		}
	}
	WARN_ON(cur->checked);
	if (!list_empty(&cur->upper)) {
		/*
		 * The backref was added previously when processing backref of
		 * type BTRFS_TREE_BLOCK_REF_KEY
		 */
		ASSERT(list_is_singular(&cur->upper));
		edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
				  list[LOWER]);
		ASSERT(list_empty(&edge->list[UPPER]));
		exist = edge->node[UPPER];
		/*
		 * Add the upper level block to pending list if we need to
		 * check its backrefs
		 */
		if (!exist->checked)
			list_add_tail(&edge->list[UPPER], &cache->pending_edge);
	} else {
		exist = NULL;
	}

	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
		struct extent_buffer *eb;
		struct btrfs_key key;
		int type;

		cond_resched();
		eb = btrfs_backref_get_eb(iter);

		key.objectid = iter->bytenr;
		if (btrfs_backref_iter_is_inline_ref(iter)) {
			struct btrfs_extent_inline_ref *iref;

			/* Update key for inline backref */
			iref = (struct btrfs_extent_inline_ref *)
				((unsigned long)iter->cur_ptr);
			type = btrfs_get_extent_inline_ref_type(eb, iref,
							BTRFS_REF_TYPE_BLOCK);
			if (type == BTRFS_REF_TYPE_INVALID) {
				ret = -EUCLEAN;
				goto out;
			}
			key.type = type;
			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
		} else {
			key.type = iter->cur_key.type;
			key.offset = iter->cur_key.offset;
		}

		/*
		 * Parent node found and matches current inline ref, no need to
		 * rebuild this node for this inline ref
		 */
		if (exist &&
		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
		      exist->owner == key.offset) ||
		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
		      exist->bytenr == key.offset))) {
			exist = NULL;
			continue;
		}

		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
			ret = handle_direct_tree_backref(cache, &key, cur);
			if (ret < 0)
				goto out;
			continue;
		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
			ret = -EINVAL;
			btrfs_print_v0_err(fs_info);
			btrfs_handle_fs_error(fs_info, ret, NULL);
			goto out;
		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
			continue;
		}

		/*
		 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
		 * means the root objectid. We need to search the tree to get
		 * its parent bytenr.
		 */
		ret = handle_indirect_tree_backref(cache, path, &key, node_key,
						   cur);
		if (ret < 0)
			goto out;
	}
	ret = 0;
	cur->checked = 1;
	WARN_ON(exist);
out:
	btrfs_backref_iter_release(iter);
	return ret;
}

/*
 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
 */
int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
				     struct btrfs_backref_node *start)
{
	struct list_head *useless_node = &cache->useless_node;
	struct btrfs_backref_edge *edge;
	struct rb_node *rb_node;
	LIST_HEAD(pending_edge);

	ASSERT(start->checked);

	/* Insert this node to cache if it's not COW-only */
	if (!start->cowonly) {
		rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
					   &start->rb_node);
		if (rb_node)
			btrfs_backref_panic(cache->fs_info, start->bytenr,
					    -EEXIST);
		list_add_tail(&start->lower, &cache->leaves);
	}

	/*
	 * Use breadth first search to iterate all related edges.
	 *
	 * The starting points are all the edges of this node
	 */
	list_for_each_entry(edge, &start->upper, list[LOWER])
		list_add_tail(&edge->list[UPPER], &pending_edge);

	while (!list_empty(&pending_edge)) {
		struct btrfs_backref_node *upper;
		struct btrfs_backref_node *lower;
		struct rb_node *rb_node;

		edge = list_first_entry(&pending_edge,
					struct btrfs_backref_edge, list[UPPER]);
		list_del_init(&edge->list[UPPER]);
		upper = edge->node[UPPER];
		lower = edge->node[LOWER];

		/* Parent is detached, no need to keep any edges */
		if (upper->detached) {
			list_del(&edge->list[LOWER]);
			btrfs_backref_free_edge(cache, edge);

			/* Lower node is orphan, queue for cleanup */
			if (list_empty(&lower->upper))
				list_add(&lower->list, useless_node);
			continue;
		}

		/*
		 * All new nodes added in current build_backref_tree() haven't
		 * been linked to the cache rb tree.
		 * So if we have upper->rb_node populated, this means a cache
		 * hit. We only need to link the edge, as @upper and all its
		 * parents have already been linked.
		 */
		if (!RB_EMPTY_NODE(&upper->rb_node)) {
			if (upper->lowest) {
				list_del_init(&upper->lower);
				upper->lowest = 0;
			}

			list_add_tail(&edge->list[UPPER], &upper->lower);
			continue;
		}

		/* Sanity check, we shouldn't have any unchecked nodes */
		if (!upper->checked) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Sanity check, COW-only node has non-COW-only parent */
		if (start->cowonly != upper->cowonly) {
			ASSERT(0);
			return -EUCLEAN;
		}

		/* Only cache non-COW-only (subvolume trees) tree blocks */
		if (!upper->cowonly) {
			rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
						   &upper->rb_node);
			if (rb_node) {
				btrfs_backref_panic(cache->fs_info,
						    upper->bytenr, -EEXIST);
				return -EUCLEAN;
			}
		}

		list_add_tail(&edge->list[UPPER], &upper->lower);

		/*
		 * Also queue all the parent edges of this uncached node
		 * to finish the upper linkage
		 */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER], &pending_edge);
	}
	return 0;
}

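/*
 * A rough sketch of how the pieces above fit together, condensed from the
 * relocation code's build_backref_tree() (illustrative only; the real
 * caller loops over all pending edges and handles many more details):
 *
 *	btrfs_backref_init_cache(fs_info, cache, is_reloc);
 *	node = btrfs_backref_alloc_node(cache, bytenr, level);
 *	ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, node);
 *	if (!ret)
 *		ret = btrfs_backref_finish_upper_links(cache, node);
 *	if (ret < 0)
 *		btrfs_backref_error_cleanup(cache, node);
 */
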
void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
				 struct btrfs_backref_node *node)
{
	struct btrfs_backref_node *lower;
	struct btrfs_backref_node *upper;
	struct btrfs_backref_edge *edge;

	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
					 struct btrfs_backref_node, list);
		list_del_init(&lower->list);
	}
	while (!list_empty(&cache->pending_edge)) {
		edge = list_first_entry(&cache->pending_edge,
					struct btrfs_backref_edge, list[UPPER]);
		list_del(&edge->list[UPPER]);
		list_del(&edge->list[LOWER]);
		lower = edge->node[LOWER];
		upper = edge->node[UPPER];
		btrfs_backref_free_edge(cache, edge);

		/*
		 * Lower is no longer linked to any upper backref nodes and
		 * isn't in the cache, we can free it ourselves.
		 */
		if (list_empty(&lower->upper) &&
		    RB_EMPTY_NODE(&lower->rb_node))
			list_add(&lower->list, &cache->useless_node);

		if (!RB_EMPTY_NODE(&upper->rb_node))
			continue;

		/* Add this guy's upper edges to the list to process */
		list_for_each_entry(edge, &upper->upper, list[LOWER])
			list_add_tail(&edge->list[UPPER],
				      &cache->pending_edge);
		if (list_empty(&upper->upper))
			list_add(&upper->list, &cache->useless_node);
	}

	while (!list_empty(&cache->useless_node)) {
		lower = list_first_entry(&cache->useless_node,
					 struct btrfs_backref_node, list);
		list_del_init(&lower->list);
		if (lower == node)
			node = NULL;
		btrfs_backref_free_node(cache, lower);
	}

	btrfs_backref_cleanup_node(cache, node);
	ASSERT(list_empty(&cache->useless_node) &&
	       list_empty(&cache->pending_edge));
}