// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
#include "misc.h"
#include "tree-mod-log.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "relocation.h"
#include "tree-checker.h"

/* Just arbitrary numbers so we can be sure one of these happened. */
#define BACKREF_FOUND_SHARED     6
#define BACKREF_FOUND_NOT_SHARED 7

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	u64 num_bytes;
	struct extent_inode_elem *next;
};

static int check_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
			      const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      struct extent_inode_elem **eie)
{
	const u64 data_len = btrfs_file_extent_num_bytes(eb, fi);
	u64 offset = key->offset;
	struct extent_inode_elem *e;
	const u64 *root_ids;
	int root_count;
	bool cached;

	if (!ctx->ignore_extent_item_pos &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;

		data_offset = btrfs_file_extent_offset(eb, fi);

		if (ctx->extent_item_pos < data_offset ||
		    ctx->extent_item_pos >= data_offset + data_len)
			return 1;
		offset += ctx->extent_item_pos - data_offset;
	}

	if (!ctx->indirect_ref_iterator || !ctx->cache_lookup)
		goto add_inode_elem;

	cached = ctx->cache_lookup(eb->start, ctx->user_ctx, &root_ids,
				   &root_count);
	if (!cached)
		goto add_inode_elem;

	for (int i = 0; i < root_count; i++) {
		int ret;

		ret = ctx->indirect_ref_iterator(key->objectid, offset,
						 data_len, root_ids[i],
						 ctx->user_ctx);
		if (ret)
			return ret;
	}

add_inode_elem:
	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = offset;
	e->num_bytes = data_len;
	*eie = e;

	return 0;
}

static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}
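
/*
 * Example (hypothetical consumer, not a caller in this file): an inode
 * list built by check_extent_in_eb() is a singly linked chain that is
 * walked and then released in one go:
 *
 *	struct extent_inode_elem *cur;
 *
 *	for (cur = eie; cur; cur = cur->next)
 *		pr_debug("inum %llu offset %llu bytes %llu\n",
 *			 cur->inum, cur->offset, cur->num_bytes);
 *	free_inode_elem_list(eie);
 */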

static int find_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
			     const struct extent_buffer *eb,
			     struct extent_inode_elem **eie)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * From the shared data ref, we only have the leaf but we need the
	 * key. Thus, we must look into all items and see that we find one
	 * (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != ctx->bytenr)
			continue;

		ret = check_extent_in_eb(ctx, &key, eb, fi, eie);
		if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
			return ret;
	}

	return 0;
}

struct preftree {
	struct rb_root_cached root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};

/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count > 0:
 * - incremented when a ref->count transitions to > 0
 * - decremented when a ref->count transitions to < 1
 */
struct share_check {
	struct btrfs_backref_share_check_ctx *ctx;
	struct btrfs_root *root;
	u64 inum;
	u64 data_bytenr;
	u64 data_extent_gen;
	/*
	 * Counts number of inodes that refer to an extent (different inodes in
	 * the same root or different roots) that we could find. The sharedness
	 * check typically stops once this counter gets greater than 1, so it
	 * may not reflect the total number of inodes.
	 */
	int share_count;
	/*
	 * The number of times we found our inode refers to the data extent we
	 * are determining the sharedness. In other words, how many file extent
	 * items we could find for our inode that point to our target data
	 * extent. The value we get here after finishing the extent sharedness
	 * check may be smaller than reality, but if it ends up being greater
	 * than 1, then we know for sure the inode has multiple file extent
	 * items that point to our data extent, and we can safely assume it's
	 * useful to cache the sharedness check result.
	 */
	int self_ref_count;
	bool have_delayed_delete_refs;
};
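
/*
 * Worked example (hypothetical scenario): for a data extent referenced by
 * two file extent items of the inode being checked plus one file extent
 * item of an inode in another root, the counters evolve as the prelim
 * refs are inserted:
 *
 *	ref for our inode, count 1   -> share_count = 1, self_ref_count = 1
 *	ref for our inode, count 1   -> share_count = 2, self_ref_count = 2
 *	ref for other inode, count 1 -> share_count = 3
 *
 * extent_is_shared() below then reports BACKREF_FOUND_SHARED as soon as
 * share_count exceeds 1.
 */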

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref),
					0,
					SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}

static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount, struct prelim_ref *newref)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;

	if (newref->root_id == sc->root->root_key.objectid &&
	    newref->wanted_disk_byte == sc->data_bytenr &&
	    newref->key_for_search.objectid == sc->inum)
		sc->self_ref_count += newref->count;
}

/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count, newref);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count, newref);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
}

/*
 * Release the entire tree. We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode) {
		free_inode_elem_list(ref->inode_list);
		free_pref(ref);
	}

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}

/*
 * The rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we have the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we have the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}

static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {};
	int result;

	target.parent = bytenr;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

		if (result < 0)
			p = &(*p)->rb_left;
		else if (result > 0)
			p = &(*p)->rb_right;
		else
			return 1;
	}
	return 0;
}

static int add_all_parents(struct btrfs_backref_walk_ctx *ctx,
			   struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;
	u8 type;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * 1. We normally enter this function with the path already pointing to
	 *    the first item to check. But sometimes, we may enter it with
	 *    slot == nritems.
	 * 2. We are searching for a normal backref but the bytenr of this leaf
	 *    matches a shared data backref.
	 * 3. The leaf owner is not equal to the root we are searching for.
	 *
	 * For these cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (ctx->time_seq == BTRFS_SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
	}

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for a normal backref but the bytenr of this
		 * leaf matches a shared data backref, OR the leaf owner is not
		 * equal to the root we are searching for.
		 */
		if (slot == 0 &&
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (ctx->time_seq == BTRFS_SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(eb, fi);
		if (type == BTRFS_FILE_EXTENT_INLINE)
			goto next;
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (!ctx->skip_inode_ref_list) {
				ret = check_extent_in_eb(ctx, &key, eb, fi, &eie);
				if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
				    ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && !ctx->skip_inode_ref_list) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (ctx->time_seq == BTRFS_SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, ctx->time_seq);
	}

	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
		free_inode_elem_list(eie);
	else if (ret > 0)
		ret = 0;

	return ret;
}

/*
 * Resolve an indirect backref in the form (root_id, key, level)
 * to a logical address.
 */
static int resolve_indirect_ref(struct btrfs_backref_walk_ctx *ctx,
				struct btrfs_path *path,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents)
{
	struct btrfs_root *root;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	struct btrfs_key search_key = ref->key_for_search;

	/*
	 * If we're search_commit_root we could possibly be holding locks on
	 * other tree nodes. This happens when qgroups do backref walks when
	 * adding new delayed refs. To deal with this we need to look in cache
	 * for the root, and if we don't find it then we need to search the
	 * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage
	 * here.
	 */
	if (path->search_commit_root)
		root = btrfs_get_fs_root_commit_root(ctx->fs_info, path, ref->root_id);
	else
		root = btrfs_get_fs_root(ctx->fs_info, ref->root_id, false);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}

	if (!path->search_commit_root &&
	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
		ret = -ENOENT;
		goto out;
	}

	if (btrfs_is_testing(ctx->fs_info)) {
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (ctx->time_seq == BTRFS_SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, ctx->time_seq);

	if (root_level + 1 == level)
		goto out;

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting a file's offset with the data offset of its
	 * corresponding extent data item. This can happen for example in the
	 * clone ioctl.
	 *
	 * So if we detect such case we set the search key's offset to zero to
	 * make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken from the backref is much larger than the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a lot
	 * of places, cause backwards incompatibility and would not fix the
	 * problem for extents cloned with older kernels.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (ctx->time_seq == BTRFS_SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, ctx->time_seq);

	btrfs_debug(ctx->fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(ctx, root, path, parents, preftrees, ref, level);
out:
	btrfs_put_root(root);
out_free:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}

static void free_leaf_list(struct ulist *ulist)
{
	struct ulist_node *node;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(ulist, &uiter)))
		free_inode_elem_list(unode_aux_to_inode_list(node));

	ulist_free(ulist);
}
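
/*
 * Example (hypothetical walk over a leaves ulist as produced by
 * btrfs_find_all_leafs(), where each node's aux carries an inode list):
 *
 *	struct ulist_iterator uiter;
 *	struct ulist_node *node;
 *
 *	ULIST_ITER_INIT(&uiter);
 *	while ((node = ulist_next(ctx->refs, &uiter))) {
 *		struct extent_inode_elem *eie = unode_aux_to_inode_list(node);
 *
 *		(node->val is the leaf bytenr, eie lists the inodes)
 *	}
 *	free_leaf_list(ctx->refs);
 */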

/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_backref_walk_ctx *ctx,
				 struct btrfs_path *path,
				 struct preftrees *preftrees,
				 struct share_check *sc)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done. In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && ref->root_id != sc->root->root_key.objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(ctx, path, preftrees, ref, parents);
		/*
		 * We can only tolerate -ENOENT, otherwise we should catch the
		 * error and return directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(ctx->fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	/*
	 * We may have inode lists attached to refs in the parents ulist, so we
	 * must free them before freeing the ulist and its refs.
	 */
	free_leaf_list(parents);
	return ret;
}

/*
 * Read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		struct btrfs_tree_parent_check check = { 0 };

		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		check.level = ref->level - 1;
		check.owner_root = ref->root_id;

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, &check);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		}
		if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}

		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}

/*
 * Add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the list.
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_key key;
	struct rb_node *n;
	int count;
	int ret = 0;

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG();
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;
			struct btrfs_key *key_ptr = NULL;

			if (head->extent_op && head->extent_op->update_key) {
				btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
				key_ptr = &key;
			}

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       key_ptr, ref->level + 1,
					       node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);

			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * If we have a share check context and a reference for
			 * another inode, we can't exit immediately. This is
			 * because even if this is a BTRFS_ADD_DELAYED_REF
			 * reference we may find next a BTRFS_DROP_DELAYED_REF
			 * which cancels out this ADD reference.
			 *
			 * If this is a DROP reference and there was no previous
			 * ADD reference, then we need to signal that when we
			 * process references from the extent tree (through
			 * add_inline_refs() and add_keyed_refs()), we should
			 * not exit early if we find a reference for another
			 * inode, because one of the delayed DROP references
			 * may cancel that reference in the extent tree.
			 */
			if (sc && count < 0)
				sc->have_delayed_delete_refs = true;

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);

	spin_unlock(&head->lock);
	return ret;
}

/*
 * Add all inline backrefs for bytenr to the list.
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(struct btrfs_backref_walk_ctx *ctx,
			   struct btrfs_path *path,
			   int *info_level, struct preftrees *preftrees,
			   struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);

	if (ctx->check_extent_item) {
		ret = ctx->check_extent_item(ctx->bytenr, ei, leaf, ctx->user_ctx);
		if (ret)
			return ret;
	}

	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(ctx->fs_info, preftrees,
					     *info_level + 1, offset,
					     ctx->bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(ctx->fs_info, preftrees, 0, offset,
					     ctx->bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(ctx->fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       ctx->bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			if (!ctx->skip_data_ref ||
			    !ctx->skip_data_ref(root, key.objectid, key.offset,
						ctx->user_ctx))
				ret = add_indirect_ref(ctx->fs_info, preftrees,
						       root, &key, 0, ctx->bytenr,
						       count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_EXTENT_OWNER_REF_KEY:
			ASSERT(btrfs_fs_incompat(ctx->fs_info, SIMPLE_QUOTA));
			break;
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * Add all non-inline backrefs for bytenr to the list.
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_backref_walk_ctx *ctx,
			  struct btrfs_root *extent_root,
			  struct btrfs_path *path,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != ctx->bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     ctx->bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, ctx->bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, ctx->bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			if (!ctx->skip_data_ref ||
			    !ctx->skip_data_ref(root, key.objectid, key.offset,
						ctx->user_ctx))
				ret = add_indirect_ref(fs_info, preftrees, root,
						       &key, 0, ctx->bytenr,
						       count, sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * The caller has joined a transaction or is holding a read lock on the
 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
 * snapshot field changing while updating or checking the cache.
 */
static bool lookup_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
					struct btrfs_root *root,
					u64 bytenr, int level, bool *is_shared)
{
	const struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_backref_shared_cache_entry *entry;

	if (!current->journal_info)
		lockdep_assert_held(&fs_info->commit_root_sem);

	if (!ctx->use_path_cache)
		return false;

	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
		return false;

	/*
	 * Level -1 is used for the data extent, which is not reliable to cache
	 * because its reference count can increase or decrease without us
	 * realizing. We cache results only for extent buffers that lead from
	 * the root node down to the leaf with the file extent item.
	 */
	ASSERT(level >= 0);

	entry = &ctx->path_cache_entries[level];

	/* Unused cache entry or being used for some other extent buffer. */
	if (entry->bytenr != bytenr)
		return false;

	/*
	 * We cached a false result, but the last snapshot generation of the
	 * root changed, so we now have a snapshot. Don't trust the result.
	 */
	if (!entry->is_shared &&
	    entry->gen != btrfs_root_last_snapshot(&root->root_item))
		return false;

	/*
	 * If we cached a true result and the last generation used for dropping
	 * a root changed, we can not trust the result, because the dropped root
	 * could be a snapshot sharing this extent buffer.
	 */
	if (entry->is_shared &&
	    entry->gen != btrfs_get_last_root_drop_gen(fs_info))
		return false;

	*is_shared = entry->is_shared;
	/*
	 * If the node at this level is shared, then all nodes below are also
	 * shared. Currently some of the nodes below may be marked as not shared
	 * because we have just switched from one leaf to another, and switched
	 * also other nodes above the leaf and below the current level, so mark
	 * them as shared.
	 */
	if (*is_shared) {
		for (int i = 0; i < level; i++) {
			ctx->path_cache_entries[i].is_shared = true;
			ctx->path_cache_entries[i].gen = entry->gen;
		}
	}

	return true;
}

/*
 * The caller has joined a transaction or is holding a read lock on the
 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
 * snapshot field changing while updating or checking the cache.
 */
static void store_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
				       struct btrfs_root *root,
				       u64 bytenr, int level, bool is_shared)
{
	const struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_backref_shared_cache_entry *entry;
	u64 gen;

	if (!current->journal_info)
		lockdep_assert_held(&fs_info->commit_root_sem);

	if (!ctx->use_path_cache)
		return;

	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
		return;

	/*
	 * Level -1 is used for the data extent, which is not reliable to cache
	 * because its reference count can increase or decrease without us
	 * realizing. We cache results only for extent buffers that lead from
	 * the root node down to the leaf with the file extent item.
	 */
	ASSERT(level >= 0);

	if (is_shared)
		gen = btrfs_get_last_root_drop_gen(fs_info);
	else
		gen = btrfs_root_last_snapshot(&root->root_item);

	entry = &ctx->path_cache_entries[level];
	entry->bytenr = bytenr;
	entry->is_shared = is_shared;
	entry->gen = gen;

	/*
	 * If we found an extent buffer is shared, set the cache result for all
	 * extent buffers below it to true. As nodes in the path are COWed,
	 * their sharedness is moved to their children, and if a leaf is COWed,
	 * then the sharedness of a data extent becomes direct, and the refcount
	 * of the data extent is increased in the extent item in the extent tree.
	 */
	if (is_shared) {
		for (int i = 0; i < level; i++) {
			entry = &ctx->path_cache_entries[i];
			entry->is_shared = is_shared;
			entry->gen = gen;
		}
	}
}

/*
 * This adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list.
 *
 * @ctx:     Backref walking context object, must be not NULL.
 * @sc:      If !NULL, then immediately return BACKREF_FOUND_SHARED when a
 *           shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_backref_walk_ctx *ctx,
			     struct share_check *sc)
{
	struct btrfs_root *root = btrfs_extent_root(ctx->fs_info, ctx->bytenr);
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT
	};

	/* Roots ulist is not needed when using a sharedness check context. */
	if (sc)
		ASSERT(ctx->roots == NULL);

	key.objectid = ctx->bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(ctx->fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!ctx->trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (ctx->time_seq == BTRFS_SEQ_LAST)
		path->skip_locking = 1;

again:
	head = NULL;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret == 0) {
		/* This shouldn't happen, indicates a bug or fs corruption. */
		ASSERT(ret != 0);
		ret = -EUCLEAN;
		goto out;
	}

	if (ctx->trans && likely(ctx->trans->type != __TRANS_DUMMY) &&
	    ctx->time_seq != BTRFS_SEQ_LAST) {
		/*
		 * We have a specific time_seq we care about and trans which
		 * means we have the path lock, we need to grab the ref head and
		 * lock it so we have a consistent view of the refs at the given
		 * time.
		 */
		delayed_refs = &ctx->trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(delayed_refs, ctx->bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref_head(head);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(ctx->fs_info, head, ctx->time_seq,
					       &preftrees, sc);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == ctx->bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(ctx, path, &info_level,
					      &preftrees, sc);
			if (ret)
				goto out;
			ret = add_keyed_refs(ctx, root, path, info_level,
					     &preftrees, sc);
			if (ret)
				goto out;
		}
	}

	/*
	 * If we have a share context and we reached here, it means the extent
	 * is not directly shared (no multiple reference items for it),
	 * otherwise we would have exited earlier with a return value of
	 * BACKREF_FOUND_SHARED after processing delayed references or while
	 * processing inline or keyed references from the extent tree.
	 * The extent may however be indirectly shared through shared subtrees
	 * as a result from creating snapshots, so we determine below what is
	 * its parent node, in case we are dealing with a metadata extent, or
	 * what's the leaf (or leaves), from a fs tree, that has a file extent
	 * item pointing to it in case we are dealing with a data extent.
	 */
	ASSERT(extent_is_shared(sc) == 0);

	/*
	 * If we are here for a data extent and we have a share_check structure
	 * it means the data extent is not directly shared (does not have
	 * multiple reference items), so we have to check if a path in the fs
	 * tree (going from the root node down to the leaf that has the file
	 * extent item pointing to the data extent) is shared, that is, if any
	 * of the extent buffers in the path is referenced by other trees.
	 */
	if (sc && ctx->bytenr == sc->data_bytenr) {
		/*
		 * If our data extent is from a generation more recent than the
		 * last generation used to snapshot the root, then we know that
		 * it can not be shared through subtrees, so we can skip
		 * resolving indirect references, there's no point in
		 * determining the extent buffers for the path from the fs tree
		 * root node down to the leaf that has the file extent item that
		 * points to the data extent.
		 */
		if (sc->data_extent_gen >
		    btrfs_root_last_snapshot(&sc->root->root_item)) {
			ret = BACKREF_FOUND_NOT_SHARED;
			goto out;
		}

		/*
		 * If we are only determining if a data extent is shared or not
		 * and the corresponding file extent item is located in the same
		 * leaf as the previous file extent item, we can skip resolving
		 * indirect references for a data extent, since the fs tree path
		 * is the same (same leaf, so same path). We skip as long as the
		 * cached result for the leaf is valid and only if there's only
		 * one file extent item pointing to the data extent, because in
		 * the case of multiple file extent items, they may be located
		 * in different leaves and therefore we have multiple paths.
		 */
		if (sc->ctx->curr_leaf_bytenr == sc->ctx->prev_leaf_bytenr &&
		    sc->self_ref_count == 1) {
			bool cached;
			bool is_shared;

			cached = lookup_backref_shared_cache(sc->ctx, sc->root,
						sc->ctx->curr_leaf_bytenr,
						0, &is_shared);
			if (cached) {
				if (is_shared)
					ret = BACKREF_FOUND_SHARED;
				else
					ret = BACKREF_FOUND_NOT_SHARED;
				goto out;
			}
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(ctx->fs_info, &preftrees, path->skip_locking == 0);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

	ret = resolve_indirect_refs(ctx, path, &preftrees, sc);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first_cached(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically. Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (ctx->roots && ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(ctx->roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (!ctx->skip_inode_ref_list && !ref->inode_list &&
			    ref->level == 0) {
				struct btrfs_tree_parent_check check = { 0 };
				struct extent_buffer *eb;

				check.level = ref->level;

				eb = read_tree_block(ctx->fs_info, ref->parent,
						     &check);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				}
				if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}

				if (!path->skip_locking)
					btrfs_tree_read_lock(eb);
				ret = find_extent_in_eb(ctx, eb, &eie);
				if (!path->skip_locking)
					btrfs_tree_read_unlock(eb);
				free_extent_buffer(eb);
				if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
				    ret < 0)
					goto out;
				ref->inode_list = eie;
				/*
				 * We transferred the list ownership to the ref,
				 * so set to NULL to avoid a double free in case
				 * an error happens after this.
				 */
				eie = NULL;
			}
			ret = ulist_add_merge_ptr(ctx->refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && !ctx->skip_inode_ref_list) {
				/*
				 * We've recorded that parent, so we must extend
				 * its inode list here.
				 *
				 * However if there was corruption we may not
				 * have found an eie, return an error in this
				 * case.
				 */
				ASSERT(eie);
				if (!eie) {
					ret = -EUCLEAN;
					goto out;
				}
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
			/*
			 * We have transferred the inode list ownership from
			 * this ref to the ref we added to the 'refs' ulist.
			 * So set this ref's inode list to NULL to avoid
			 * use-after-free when our caller uses it or double
			 * frees in case an error happens before we return.
			 */
			ref->inode_list = NULL;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * Finds all leaves with a reference to the specified combination of
 * @ctx->bytenr and @ctx->extent_item_pos. The bytenrs of the found leaves are
 * added to the ulist at @ctx->refs, and that ulist is allocated by this
 * function. The caller should free the ulist with free_leaf_list() if
 * @ctx->ignore_extent_item_pos is false, otherwise a simple ulist_free() is
 * enough.
 *
 * Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated.
 */
int btrfs_find_all_leafs(struct btrfs_backref_walk_ctx *ctx)
{
	int ret;

	ASSERT(ctx->refs == NULL);

	ctx->refs = ulist_alloc(GFP_NOFS);
	if (!ctx->refs)
		return -ENOMEM;

	ret = find_parent_nodes(ctx, NULL);
	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
	    (ret < 0 && ret != -ENOENT)) {
		free_leaf_list(ctx->refs);
		ctx->refs = NULL;
		return ret;
	}

	return 0;
}
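
/*
 * Example (hypothetical caller; fs_info, extent_bytenr and
 * file_offset_in_extent are assumed to be provided by the caller):
 *
 *	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
 *	int ret;
 *
 *	walk_ctx.fs_info = fs_info;
 *	walk_ctx.bytenr = extent_bytenr;
 *	walk_ctx.extent_item_pos = file_offset_in_extent;
 *	ret = btrfs_find_all_leafs(&walk_ctx);
 *	if (ret == 0)
 *		free_leaf_list(walk_ctx.refs);
 */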

/*
 * Walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list.
 *
 * Found roots are added to @ctx->roots, which is allocated by this function if
 * it points to NULL, in which case the caller is responsible for freeing it
 * after it's not needed anymore.
 * This function requires @ctx->refs to be NULL, as it uses it for allocating a
 * ulist to do temporary work, and frees it before returning.
 *
 * Returns 0 on success, < 0 on error.
 */
static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx)
{
	const u64 orig_bytenr = ctx->bytenr;
	const bool orig_skip_inode_ref_list = ctx->skip_inode_ref_list;
	bool roots_ulist_allocated = false;
	struct ulist_iterator uiter;
	int ret = 0;

	ASSERT(ctx->refs == NULL);

	ctx->refs = ulist_alloc(GFP_NOFS);
	if (!ctx->refs)
		return -ENOMEM;

	if (!ctx->roots) {
		ctx->roots = ulist_alloc(GFP_NOFS);
		if (!ctx->roots) {
			ulist_free(ctx->refs);
			ctx->refs = NULL;
			return -ENOMEM;
		}
		roots_ulist_allocated = true;
	}

	ctx->skip_inode_ref_list = true;

	ULIST_ITER_INIT(&uiter);
	while (1) {
		struct ulist_node *node;

		ret = find_parent_nodes(ctx, NULL);
		if (ret < 0 && ret != -ENOENT) {
			if (roots_ulist_allocated) {
				ulist_free(ctx->roots);
				ctx->roots = NULL;
			}
			break;
		}
		ret = 0;
		node = ulist_next(ctx->refs, &uiter);
		if (!node)
			break;
		ctx->bytenr = node->val;
		cond_resched();
	}

	ulist_free(ctx->refs);
	ctx->refs = NULL;
	ctx->bytenr = orig_bytenr;
	ctx->skip_inode_ref_list = orig_skip_inode_ref_list;

	return ret;
}

int btrfs_find_all_roots(struct btrfs_backref_walk_ctx *ctx,
			 bool skip_commit_root_sem)
{
	int ret;

	if (!ctx->trans && !skip_commit_root_sem)
		down_read(&ctx->fs_info->commit_root_sem);
	ret = btrfs_find_all_roots_safe(ctx);
	if (!ctx->trans && !skip_commit_root_sem)
		up_read(&ctx->fs_info->commit_root_sem);
	return ret;
}
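
/*
 * Example (hypothetical caller; fs_info and extent_bytenr are assumed to
 * be provided by the caller):
 *
 *	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
 *	int ret;
 *
 *	walk_ctx.fs_info = fs_info;
 *	walk_ctx.bytenr = extent_bytenr;
 *	ret = btrfs_find_all_roots(&walk_ctx, false);
 *	if (ret == 0) {
 *		(iterate walk_ctx.roots, each node->val is a root id)
 *		ulist_free(walk_ctx.roots);
 *	}
 */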

struct btrfs_backref_share_check_ctx *btrfs_alloc_backref_share_check_ctx(void)
{
	struct btrfs_backref_share_check_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ulist_init(&ctx->refs);

	return ctx;
}

void btrfs_free_backref_share_ctx(struct btrfs_backref_share_check_ctx *ctx)
{
	if (!ctx)
		return;

	ulist_release(&ctx->refs);
	kfree(ctx);
}
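
/*
 * Example (hypothetical fiemap-like caller pairing the context allocation
 * above with btrfs_is_data_extent_shared() below):
 *
 *	struct btrfs_backref_share_check_ctx *ctx;
 *	int shared;
 *
 *	ctx = btrfs_alloc_backref_share_check_ctx();
 *	if (!ctx)
 *		return -ENOMEM;
 *	shared = btrfs_is_data_extent_shared(inode, bytenr, extent_gen, ctx);
 *	btrfs_free_backref_share_ctx(ctx);
 */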

/*
 * Check if a data extent is shared or not.
 *
 * @inode:       The inode whose extent we are checking.
 * @bytenr:      Logical bytenr of the extent we are checking.
 * @extent_gen:  Generation of the extent (file extent item) or 0 if it is
 *               not known.
 * @ctx:         A backref sharedness check context.
 *
 * btrfs_is_data_extent_shared uses the backref walking code but will short
 * circuit as soon as it finds a root or inode that doesn't match the
 * one passed in. This provides a significant performance benefit for
 * callers (such as fiemap) which want to know whether the extent is
 * shared but do not need a ref count.
 *
 * This attempts to attach to the running transaction in order to account for
 * delayed refs, but continues on even when no running transaction exists.
 *
 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
 */
int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
				u64 extent_gen,
				struct btrfs_backref_share_check_ctx *ctx)
{
	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct ulist_iterator uiter;
	struct ulist_node *node;
	struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
	int ret = 0;
	struct share_check shared = {
		.ctx = ctx,
		.root = root,
		.inum = btrfs_ino(inode),
		.data_bytenr = bytenr,
		.data_extent_gen = extent_gen,
		.share_count = 0,
		.self_ref_count = 0,
		.have_delayed_delete_refs = false,
	};
	int level;
	bool leaf_cached;
	bool leaf_is_shared;

	for (int i = 0; i < BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE; i++) {
		if (ctx->prev_extents_cache[i].bytenr == bytenr)
			return ctx->prev_extents_cache[i].is_shared;
	}

	ulist_init(&ctx->refs);

	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
			ret = PTR_ERR(trans);
			goto out;
		}
		trans = NULL;
		down_read(&fs_info->commit_root_sem);
	} else {
		btrfs_get_tree_mod_seq(fs_info, &elem);
		walk_ctx.time_seq = elem.seq;
	}

	ctx->use_path_cache = true;

	/*
	 * We may have previously determined that the current leaf is shared.
	 * If it is, then we have a data extent that is shared due to a shared
	 * subtree (caused by snapshotting) and we don't need to check for data
	 * backrefs. If the leaf is not shared, then we must do backref walking
	 * to determine if the data extent is shared through reflinks.
	 */
	leaf_cached = lookup_backref_shared_cache(ctx, root,
						  ctx->curr_leaf_bytenr, 0,
						  &leaf_is_shared);
	if (leaf_cached && leaf_is_shared) {
		ret = 1;
		goto out_trans;
	}

	walk_ctx.skip_inode_ref_list = true;
	walk_ctx.trans = trans;
	walk_ctx.fs_info = fs_info;
	walk_ctx.refs = &ctx->refs;

	/* -1 means we are in the bytenr of the data extent. */
	level = -1;
	ULIST_ITER_INIT(&uiter);
	while (1) {
		const unsigned long prev_ref_count = ctx->refs.nnodes;

		walk_ctx.bytenr = bytenr;
		ret = find_parent_nodes(&walk_ctx, &shared);
		if (ret == BACKREF_FOUND_SHARED ||
		    ret == BACKREF_FOUND_NOT_SHARED) {
			/* If shared must return 1, otherwise return 0. */
			ret = (ret == BACKREF_FOUND_SHARED) ? 1 : 0;
			if (level >= 0)
				store_backref_shared_cache(ctx, root, bytenr,
							   level, ret == 1);
			break;
		}
		if (ret < 0 && ret != -ENOENT)
			break;
		ret = 0;

		/*
		 * More than one extent buffer (bytenr) may have been added to
		 * the ctx->refs ulist, in which case we have to check multiple
		 * tree paths in case the first one is not shared, so we can not
		 * use the path cache which is made for a single path. Multiple
		 * extent buffers at the current level happen when:
		 *
		 * 1) level -1, the data extent: If our data extent was not
		 *    directly shared (without multiple reference items), then
		 *    it might have a single reference item with a count > 1 for
		 *    the same offset, which means there are 2 (or more) file
		 *    extent items that point to the data extent - this happens
		 *    when a file extent item needs to be split and then one
		 *    item gets moved to another leaf due to a b+tree leaf split
		 *    when inserting some item. In this case the file extent
		 *    items may be located in different leaves and therefore
		 *    some of the leaves may be referenced through shared
		 *    subtrees while others are not. Since our extent buffer
		 *    cache only works for a single path (by far the most common
1964 * case and simpler to deal with), we can not use it if we
1965 * have multiple leaves (which implies multiple paths).
1966 *
1967 * 2) level >= 0, a tree node/leaf: We can have a mix of direct
1968 * and indirect references on a b+tree node/leaf, so we have
1969 * to check multiple paths, and the extent buffer (the
1970 * current bytenr) may be shared or not. One example is
1971 * during relocation as we may get a shared tree block ref
1972 * (direct ref) and a non-shared tree block ref (indirect
1973 * ref) for the same node/leaf.
1974 */
1975 if ((ctx->refs.nnodes - prev_ref_count) > 1)
1976 ctx->use_path_cache = false;
1977
1978 if (level >= 0)
1979 store_backref_shared_cache(ctx, root, bytenr,
1980 level, false);
1981 node = ulist_next(&ctx->refs, &uiter);
1982 if (!node)
1983 break;
1984 bytenr = node->val;
1985 if (ctx->use_path_cache) {
1986 bool is_shared;
1987 bool cached;
1988
1989 level++;
1990 cached = lookup_backref_shared_cache(ctx, root, bytenr,
1991 level, &is_shared);
1992 if (cached) {
1993 ret = (is_shared ? 1 : 0);
1994 break;
1995 }
1996 }
1997 shared.share_count = 0;
1998 shared.have_delayed_delete_refs = false;
1999 cond_resched();
2000 }
2001
2002 /*
2003 * If the path cache is disabled, then it means at some tree level we
2004 * got multiple parents due to a mix of direct and indirect backrefs or
2005 * multiple leaves with file extent items pointing to the same data
2006 * extent. We have to invalidate the cache and cache only the sharedness
2007 * result for the levels where we got only one node/reference.
2008 */
2009 if (!ctx->use_path_cache) {
2010 int i = 0;
2011
2012 level--;
2013 if (ret >= 0 && level >= 0) {
2014 bytenr = ctx->path_cache_entries[level].bytenr;
2015 ctx->use_path_cache = true;
2016 store_backref_shared_cache(ctx, root, bytenr, level, ret);
2017 i = level + 1;
2018 }
2019
2020 for ( ; i < BTRFS_MAX_LEVEL; i++)
2021 ctx->path_cache_entries[i].bytenr = 0;
2022 }
2023
2024 /*
2025 * Cache the sharedness result for the data extent if we know our inode
2026 * has more than 1 file extent item that refers to the data extent.
2027 */
2028 if (ret >= 0 && shared.self_ref_count > 1) {
2029 int slot = ctx->prev_extents_cache_slot;
2030
2031 ctx->prev_extents_cache[slot].bytenr = shared.data_bytenr;
2032 ctx->prev_extents_cache[slot].is_shared = (ret == 1);
2033
2034 slot = (slot + 1) % BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE;
2035 ctx->prev_extents_cache_slot = slot;
2036 }
2037
2038out_trans:
2039 if (trans) {
2040 btrfs_put_tree_mod_seq(fs_info, &elem);
2041 btrfs_end_transaction(trans);
2042 } else {
2043 up_read(&fs_info->commit_root_sem);
2044 }
2045out:
2046 ulist_release(&ctx->refs);
2047 ctx->prev_leaf_bytenr = ctx->curr_leaf_bytenr;
2048
2049 return ret;
2050}
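
/*
 * Example usage (hypothetical helper, not part of this file): a one-off
 * sharedness check. Real callers such as fiemap keep one context per inode,
 * set ctx->curr_leaf_bytenr to the leaf holding the file extent item, and
 * reuse the context across extents so its caches stay warm.
 */
static int __maybe_unused example_extent_is_shared(struct btrfs_inode *inode,
						   u64 disk_bytenr,
						   u64 extent_gen)
{
	struct btrfs_backref_share_check_ctx *ctx;
	int ret;

	ctx = btrfs_alloc_backref_share_check_ctx();
	if (!ctx)
		return -ENOMEM;

	/* 0 means not shared, 1 means shared, < 0 means error. */
	ret = btrfs_is_data_extent_shared(inode, disk_bytenr, extent_gen, ctx);

	btrfs_free_backref_share_ctx(ctx);
	return ret;
}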
2051
2052int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
2053 u64 start_off, struct btrfs_path *path,
2054 struct btrfs_inode_extref **ret_extref,
2055 u64 *found_off)
2056{
2057 int ret, slot;
2058 struct btrfs_key key;
2059 struct btrfs_key found_key;
2060 struct btrfs_inode_extref *extref;
2061 const struct extent_buffer *leaf;
2062 unsigned long ptr;
2063
2064 key.objectid = inode_objectid;
2065 key.type = BTRFS_INODE_EXTREF_KEY;
2066 key.offset = start_off;
2067
2068 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2069 if (ret < 0)
2070 return ret;
2071
2072 while (1) {
2073 leaf = path->nodes[0];
2074 slot = path->slots[0];
2075 if (slot >= btrfs_header_nritems(leaf)) {
2076 /*
2077 * If the item at offset is not found,
2078 * btrfs_search_slot will point us to the slot
2079 * where it should be inserted. In our case
2080 * that will be the slot directly before the
2081			 * next INODE_EXTREF_KEY item. In the case
2082 * that we're pointing to the last slot in a
2083 * leaf, we must move one leaf over.
2084 */
2085 ret = btrfs_next_leaf(root, path);
2086 if (ret) {
2087 if (ret >= 1)
2088 ret = -ENOENT;
2089 break;
2090 }
2091 continue;
2092 }
2093
2094 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2095
2096 /*
2097 * Check that we're still looking at an extended ref key for
2098		 * this particular objectid. If we have a different
2099		 * objectid or type then there are no more to be found
2100 * in the tree and we can exit.
2101 */
2102 ret = -ENOENT;
2103 if (found_key.objectid != inode_objectid)
2104 break;
2105 if (found_key.type != BTRFS_INODE_EXTREF_KEY)
2106 break;
2107
2108 ret = 0;
2109 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
2110 extref = (struct btrfs_inode_extref *)ptr;
2111 *ret_extref = extref;
2112 if (found_off)
2113 *found_off = found_key.offset;
2114 break;
2115 }
2116
2117 return ret;
2118}
2119
2120/*
2121 * This iterates to turn a name (from iref/extref) into a full filesystem path.
2122 * Elements of the path are separated by '/' and the path is guaranteed to be
2123 * 0-terminated. The path is only given within the current file system.
2124 * Therefore, it never starts with a '/'. The caller is responsible for
2125 * providing "size" bytes in "dest". The dest buffer is filled backwards and
2126 * the start of the resulting string is returned. Normally, this pointer is
2127 * within dest.
2128 * In case the path buffer would overflow, the pointer is decremented further
2129 * as if output was written to the buffer, though no more output is actually
2130 * generated. That way, the caller can determine how much space would be
2131 * required for the path to fit into the buffer. In that case, the returned
2132 * value will be smaller than dest. Callers must check this!
2133 */
2134char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
2135 u32 name_len, unsigned long name_off,
2136 struct extent_buffer *eb_in, u64 parent,
2137 char *dest, u32 size)
2138{
2139 int slot;
2140 u64 next_inum;
2141 int ret;
2142 s64 bytes_left = ((s64)size) - 1;
2143 struct extent_buffer *eb = eb_in;
2144 struct btrfs_key found_key;
2145 struct btrfs_inode_ref *iref;
2146
2147 if (bytes_left >= 0)
2148 dest[bytes_left] = '\0';
2149
2150 while (1) {
2151 bytes_left -= name_len;
2152 if (bytes_left >= 0)
2153 read_extent_buffer(eb, dest + bytes_left,
2154 name_off, name_len);
2155 if (eb != eb_in) {
2156 if (!path->skip_locking)
2157 btrfs_tree_read_unlock(eb);
2158 free_extent_buffer(eb);
2159 }
2160 ret = btrfs_find_item(fs_root, path, parent, 0,
2161 BTRFS_INODE_REF_KEY, &found_key);
2162 if (ret > 0)
2163 ret = -ENOENT;
2164 if (ret)
2165 break;
2166
2167 next_inum = found_key.offset;
2168
2169 /* regular exit ahead */
2170 if (parent == next_inum)
2171 break;
2172
2173 slot = path->slots[0];
2174 eb = path->nodes[0];
2175 /* make sure we can use eb after releasing the path */
2176 if (eb != eb_in) {
2177 path->nodes[0] = NULL;
2178 path->locks[0] = 0;
2179 }
2180 btrfs_release_path(path);
2181 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2182
2183 name_len = btrfs_inode_ref_name_len(eb, iref);
2184 name_off = (unsigned long)(iref + 1);
2185
2186 parent = next_inum;
2187 --bytes_left;
2188 if (bytes_left >= 0)
2189 dest[bytes_left] = '/';
2190 }
2191
2192 btrfs_release_path(path);
2193
2194 if (ret)
2195 return ERR_PTR(ret);
2196
2197 return dest + bytes_left;
2198}
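
/*
 * Example usage (illustrative only; the buffer size and error handling are
 * assumptions): resolve a name into a path and detect truncation by checking
 * whether the returned start pointer moved below the buffer.
 */
static int __maybe_unused example_ref_to_path(struct btrfs_root *fs_root,
					      struct btrfs_path *path,
					      u32 name_len,
					      unsigned long name_off,
					      struct extent_buffer *eb,
					      u64 parent)
{
	char buf[256];
	char *start;

	start = btrfs_ref_to_path(fs_root, path, name_len, name_off, eb,
				  parent, buf, sizeof(buf));
	if (IS_ERR(start))
		return PTR_ERR(start);
	/* The buffer is filled backwards; start < buf means it overflowed. */
	if (start < buf)
		return -ENAMETOOLONG;

	pr_debug("resolved path: %s\n", start);
	return 0;
}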
2199
2200/*
2201 * This makes the path point to (logical EXTENT_ITEM *).
2202 * On success it returns 0 and stores BTRFS_EXTENT_FLAG_DATA (for data) or
2203 * BTRFS_EXTENT_FLAG_TREE_BLOCK (for tree blocks) in *flags_ret; returns <0 on error.
2204 */
2205int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
2206 struct btrfs_path *path, struct btrfs_key *found_key,
2207 u64 *flags_ret)
2208{
2209 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical);
2210 int ret;
2211 u64 flags;
2212 u64 size = 0;
2213 u32 item_size;
2214 const struct extent_buffer *eb;
2215 struct btrfs_extent_item *ei;
2216 struct btrfs_key key;
2217
2218 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2219 key.type = BTRFS_METADATA_ITEM_KEY;
2220 else
2221 key.type = BTRFS_EXTENT_ITEM_KEY;
2222 key.objectid = logical;
2223 key.offset = (u64)-1;
2224
2225 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2226 if (ret < 0)
2227 return ret;
2228
2229 ret = btrfs_previous_extent_item(extent_root, path, 0);
2230 if (ret) {
2231 if (ret > 0)
2232 ret = -ENOENT;
2233 return ret;
2234 }
2235 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
2236 if (found_key->type == BTRFS_METADATA_ITEM_KEY)
2237 size = fs_info->nodesize;
2238 else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
2239 size = found_key->offset;
2240
2241 if (found_key->objectid > logical ||
2242 found_key->objectid + size <= logical) {
2243 btrfs_debug(fs_info,
2244 "logical %llu is not within any extent", logical);
2245 return -ENOENT;
2246 }
2247
2248 eb = path->nodes[0];
2249 item_size = btrfs_item_size(eb, path->slots[0]);
2250 BUG_ON(item_size < sizeof(*ei));
2251
2252 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
2253 flags = btrfs_extent_flags(eb, ei);
2254
2255 btrfs_debug(fs_info,
2256 "logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
2257 logical, logical - found_key->objectid, found_key->objectid,
2258 found_key->offset, flags, item_size);
2259
2260 WARN_ON(!flags_ret);
2261 if (flags_ret) {
2262 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2263 *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
2264 else if (flags & BTRFS_EXTENT_FLAG_DATA)
2265 *flags_ret = BTRFS_EXTENT_FLAG_DATA;
2266 else
2267 BUG();
2268 return 0;
2269 }
2270
2271 return -EIO;
2272}
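
/*
 * Example usage (hypothetical): look up the extent item covering a logical
 * address and classify it via the flag stored in *flags_ret, mirroring what
 * iterate_inodes_from_logical() does below.
 */
static int __maybe_unused example_classify_logical(struct btrfs_fs_info *fs_info,
						   u64 logical)
{
	struct btrfs_path *path;
	struct btrfs_key found_key;
	u64 flags = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
	btrfs_free_path(path);
	if (ret < 0)
		return ret;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		pr_debug("%llu is metadata\n", logical);
	else
		pr_debug("%llu is data, extent starts at %llu\n",
			 logical, found_key.objectid);
	return 0;
}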
2273
2274/*
2275 * Helper function to iterate extent inline refs. ptr must point to a 0 value
2276 * for the first call and may be modified; it is used to track state.
2277 * If more refs exist, 0 is returned and the next call to
2278 * get_extent_inline_ref must pass the modified ptr parameter to get the
2279 * next ref. After the last ref was processed, 1 is returned.
2280 * Returns <0 on error.
2281 */
2282static int get_extent_inline_ref(unsigned long *ptr,
2283 const struct extent_buffer *eb,
2284 const struct btrfs_key *key,
2285 const struct btrfs_extent_item *ei,
2286 u32 item_size,
2287 struct btrfs_extent_inline_ref **out_eiref,
2288 int *out_type)
2289{
2290 unsigned long end;
2291 u64 flags;
2292 struct btrfs_tree_block_info *info;
2293
2294 if (!*ptr) {
2295 /* first call */
2296 flags = btrfs_extent_flags(eb, ei);
2297 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2298 if (key->type == BTRFS_METADATA_ITEM_KEY) {
2299 /* a skinny metadata extent */
2300 *out_eiref =
2301 (struct btrfs_extent_inline_ref *)(ei + 1);
2302 } else {
2303 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
2304 info = (struct btrfs_tree_block_info *)(ei + 1);
2305 *out_eiref =
2306 (struct btrfs_extent_inline_ref *)(info + 1);
2307 }
2308 } else {
2309 *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
2310 }
2311 *ptr = (unsigned long)*out_eiref;
2312 if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
2313 return -ENOENT;
2314 }
2315
2316 end = (unsigned long)ei + item_size;
2317 *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
2318 *out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
2319 BTRFS_REF_TYPE_ANY);
2320 if (*out_type == BTRFS_REF_TYPE_INVALID)
2321 return -EUCLEAN;
2322
2323 *ptr += btrfs_extent_inline_ref_size(*out_type);
2324 WARN_ON(*ptr > end);
2325 if (*ptr == end)
2326 return 1; /* last */
2327
2328 return 0;
2329}
2330
2331/*
2332 * reads the tree block backref for an extent. tree level and root are returned
2333 * through out_level and out_root. ptr must point to a 0 value for the first
2334 * call and may be modified (see get_extent_inline_ref comment).
2335 * returns 0 if data was provided, 1 if there was no more data to provide or
2336 * <0 on error.
2337 */
2338int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
2339 struct btrfs_key *key, struct btrfs_extent_item *ei,
2340 u32 item_size, u64 *out_root, u8 *out_level)
2341{
2342 int ret;
2343 int type;
2344 struct btrfs_extent_inline_ref *eiref;
2345
2346 if (*ptr == (unsigned long)-1)
2347 return 1;
2348
2349 while (1) {
2350 ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
2351 &eiref, &type);
2352 if (ret < 0)
2353 return ret;
2354
2355 if (type == BTRFS_TREE_BLOCK_REF_KEY ||
2356 type == BTRFS_SHARED_BLOCK_REF_KEY)
2357 break;
2358
2359 if (ret == 1)
2360 return 1;
2361 }
2362
2363 /* we can treat both ref types equally here */
2364 *out_root = btrfs_extent_inline_ref_offset(eb, eiref);
2365
2366 if (key->type == BTRFS_EXTENT_ITEM_KEY) {
2367 struct btrfs_tree_block_info *info;
2368
2369 info = (struct btrfs_tree_block_info *)(ei + 1);
2370 *out_level = btrfs_tree_block_level(eb, info);
2371 } else {
2372 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
2373 *out_level = (u8)key->offset;
2374 }
2375
2376 if (ret == 1)
2377 *ptr = (unsigned long)-1;
2378
2379 return 0;
2380}
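
/*
 * Example usage (illustrative sketch, in the spirit of how scrub resolves
 * metadata errors): walk every tree backref of an extent item. @eb, @key,
 * @ei and @item_size are assumed to come from a prior extent_from_logical()
 * lookup plus btrfs_item_ptr().
 */
static int __maybe_unused example_resolve_tree_backrefs(struct extent_buffer *eb,
							struct btrfs_key *key,
							struct btrfs_extent_item *ei,
							u32 item_size)
{
	unsigned long ptr = 0;
	u64 root;
	u8 level;
	int ret;

	while (1) {
		ret = tree_backref_for_extent(&ptr, eb, key, ei, item_size,
					      &root, &level);
		if (ret < 0)
			return ret;
		if (ret > 0)
			return 0;	/* No more backrefs. */
		pr_debug("tree block referenced from root %llu at level %u\n",
			 root, level);
	}
}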
2381
2382static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
2383 struct extent_inode_elem *inode_list,
2384 u64 root, u64 extent_item_objectid,
2385 iterate_extent_inodes_t *iterate, void *ctx)
2386{
2387 struct extent_inode_elem *eie;
2388 int ret = 0;
2389
2390 for (eie = inode_list; eie; eie = eie->next) {
2391 btrfs_debug(fs_info,
2392			"ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
2393 extent_item_objectid, eie->inum,
2394 eie->offset, root);
2395 ret = iterate(eie->inum, eie->offset, eie->num_bytes, root, ctx);
2396 if (ret) {
2397 btrfs_debug(fs_info,
2398 "stopping iteration for %llu due to ret=%d",
2399 extent_item_objectid, ret);
2400 break;
2401 }
2402 }
2403
2404 return ret;
2405}
2406
2407/*
2408 * calls iterate() for every inode that references the extent identified by
2409 * the given parameters.
2410 * when the iterator function returns a non-zero value, iteration stops.
2411 */
2412int iterate_extent_inodes(struct btrfs_backref_walk_ctx *ctx,
2413 bool search_commit_root,
2414 iterate_extent_inodes_t *iterate, void *user_ctx)
2415{
2416 int ret;
2417 struct ulist *refs;
2418 struct ulist_node *ref_node;
2419 struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem);
2420 struct ulist_iterator ref_uiter;
2421
2422 btrfs_debug(ctx->fs_info, "resolving all inodes for extent %llu",
2423 ctx->bytenr);
2424
2425 ASSERT(ctx->trans == NULL);
2426 ASSERT(ctx->roots == NULL);
2427
2428 if (!search_commit_root) {
2429 struct btrfs_trans_handle *trans;
2430
2431 trans = btrfs_attach_transaction(ctx->fs_info->tree_root);
2432 if (IS_ERR(trans)) {
2433 if (PTR_ERR(trans) != -ENOENT &&
2434 PTR_ERR(trans) != -EROFS)
2435 return PTR_ERR(trans);
2436 trans = NULL;
2437 }
2438 ctx->trans = trans;
2439 }
2440
2441 if (ctx->trans) {
2442 btrfs_get_tree_mod_seq(ctx->fs_info, &seq_elem);
2443 ctx->time_seq = seq_elem.seq;
2444 } else {
2445 down_read(&ctx->fs_info->commit_root_sem);
2446 }
2447
2448 ret = btrfs_find_all_leafs(ctx);
2449 if (ret)
2450 goto out;
2451 refs = ctx->refs;
2452 ctx->refs = NULL;
2453
2454 ULIST_ITER_INIT(&ref_uiter);
2455 while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
2456 const u64 leaf_bytenr = ref_node->val;
2457 struct ulist_node *root_node;
2458 struct ulist_iterator root_uiter;
2459 struct extent_inode_elem *inode_list;
2460
2461 inode_list = (struct extent_inode_elem *)(uintptr_t)ref_node->aux;
2462
2463 if (ctx->cache_lookup) {
2464 const u64 *root_ids;
2465 int root_count;
2466 bool cached;
2467
2468 cached = ctx->cache_lookup(leaf_bytenr, ctx->user_ctx,
2469 &root_ids, &root_count);
2470 if (cached) {
2471 for (int i = 0; i < root_count; i++) {
2472 ret = iterate_leaf_refs(ctx->fs_info,
2473 inode_list,
2474 root_ids[i],
2475 leaf_bytenr,
2476 iterate,
2477 user_ctx);
2478 if (ret)
2479 break;
2480 }
2481 continue;
2482 }
2483 }
2484
2485 if (!ctx->roots) {
2486 ctx->roots = ulist_alloc(GFP_NOFS);
2487 if (!ctx->roots) {
2488 ret = -ENOMEM;
2489 break;
2490 }
2491 }
2492
2493 ctx->bytenr = leaf_bytenr;
2494 ret = btrfs_find_all_roots_safe(ctx);
2495 if (ret)
2496 break;
2497
2498 if (ctx->cache_store)
2499 ctx->cache_store(leaf_bytenr, ctx->roots, ctx->user_ctx);
2500
2501 ULIST_ITER_INIT(&root_uiter);
2502 while (!ret && (root_node = ulist_next(ctx->roots, &root_uiter))) {
2503 btrfs_debug(ctx->fs_info,
2504 "root %llu references leaf %llu, data list %#llx",
2505 root_node->val, ref_node->val,
2506 ref_node->aux);
2507 ret = iterate_leaf_refs(ctx->fs_info, inode_list,
2508 root_node->val, ctx->bytenr,
2509 iterate, user_ctx);
2510 }
2511 ulist_reinit(ctx->roots);
2512 }
2513
2514 free_leaf_list(refs);
2515out:
2516 if (ctx->trans) {
2517 btrfs_put_tree_mod_seq(ctx->fs_info, &seq_elem);
2518 btrfs_end_transaction(ctx->trans);
2519 ctx->trans = NULL;
2520 } else {
2521 up_read(&ctx->fs_info->commit_root_sem);
2522 }
2523
2524 ulist_free(ctx->roots);
2525 ctx->roots = NULL;
2526
2527 if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP)
2528 ret = 0;
2529
2530 return ret;
2531}
2532
2533static int build_ino_list(u64 inum, u64 offset, u64 num_bytes, u64 root, void *ctx)
2534{
2535 struct btrfs_data_container *inodes = ctx;
2536 const size_t c = 3 * sizeof(u64);
2537
2538 if (inodes->bytes_left >= c) {
2539 inodes->bytes_left -= c;
2540 inodes->val[inodes->elem_cnt] = inum;
2541 inodes->val[inodes->elem_cnt + 1] = offset;
2542 inodes->val[inodes->elem_cnt + 2] = root;
2543 inodes->elem_cnt += 3;
2544 } else {
2545 inodes->bytes_missing += c - inodes->bytes_left;
2546 inodes->bytes_left = 0;
2547 inodes->elem_missed += 3;
2548 }
2549
2550 return 0;
2551}
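
/*
 * For reference, after two refs have been resolved the container filled by
 * build_ino_list() holds
 *	val[0..2] = inum0, offset0, root0
 *	val[3..5] = inum1, offset1, root1
 * with elem_cnt == 6. This triplet layout is what the logical-to-ino ioctl
 * hands back to user space.
 */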
2552
2553int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2554 struct btrfs_path *path,
2555 void *ctx, bool ignore_offset)
2556{
2557 struct btrfs_backref_walk_ctx walk_ctx = { 0 };
2558 int ret;
2559 u64 flags = 0;
2560 struct btrfs_key found_key;
2561 int search_commit_root = path->search_commit_root;
2562
2563 ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
2564 btrfs_release_path(path);
2565 if (ret < 0)
2566 return ret;
2567 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2568 return -EINVAL;
2569
2570 walk_ctx.bytenr = found_key.objectid;
2571 if (ignore_offset)
2572 walk_ctx.ignore_extent_item_pos = true;
2573 else
2574 walk_ctx.extent_item_pos = logical - found_key.objectid;
2575 walk_ctx.fs_info = fs_info;
2576
2577 return iterate_extent_inodes(&walk_ctx, search_commit_root,
2578 build_ino_list, ctx);
2579}
2580
2581static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2582 struct extent_buffer *eb, struct inode_fs_paths *ipath);
2583
2584static int iterate_inode_refs(u64 inum, struct inode_fs_paths *ipath)
2585{
2586 int ret = 0;
2587 int slot;
2588 u32 cur;
2589 u32 len;
2590 u32 name_len;
2591 u64 parent = 0;
2592 int found = 0;
2593 struct btrfs_root *fs_root = ipath->fs_root;
2594 struct btrfs_path *path = ipath->btrfs_path;
2595 struct extent_buffer *eb;
2596 struct btrfs_inode_ref *iref;
2597 struct btrfs_key found_key;
2598
2599 while (!ret) {
2600 ret = btrfs_find_item(fs_root, path, inum,
2601 parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2602 &found_key);
2603
2604 if (ret < 0)
2605 break;
2606 if (ret) {
2607 ret = found ? 0 : -ENOENT;
2608 break;
2609 }
2610 ++found;
2611
2612 parent = found_key.offset;
2613 slot = path->slots[0];
2614 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2615 if (!eb) {
2616 ret = -ENOMEM;
2617 break;
2618 }
2619 btrfs_release_path(path);
2620
2621 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2622
2623 for (cur = 0; cur < btrfs_item_size(eb, slot); cur += len) {
2624 name_len = btrfs_inode_ref_name_len(eb, iref);
2625 /* path must be released before calling iterate()! */
2626 btrfs_debug(fs_root->fs_info,
2627 "following ref at offset %u for inode %llu in tree %llu",
2628 cur, found_key.objectid,
2629 fs_root->root_key.objectid);
2630 ret = inode_to_path(parent, name_len,
2631 (unsigned long)(iref + 1), eb, ipath);
2632 if (ret)
2633 break;
2634 len = sizeof(*iref) + name_len;
2635 iref = (struct btrfs_inode_ref *)((char *)iref + len);
2636 }
2637 free_extent_buffer(eb);
2638 }
2639
2640 btrfs_release_path(path);
2641
2642 return ret;
2643}
2644
2645static int iterate_inode_extrefs(u64 inum, struct inode_fs_paths *ipath)
2646{
2647 int ret;
2648 int slot;
2649 u64 offset = 0;
2650 u64 parent;
2651 int found = 0;
2652 struct btrfs_root *fs_root = ipath->fs_root;
2653 struct btrfs_path *path = ipath->btrfs_path;
2654 struct extent_buffer *eb;
2655 struct btrfs_inode_extref *extref;
2656 u32 item_size;
2657 u32 cur_offset;
2658 unsigned long ptr;
2659
2660 while (1) {
2661 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2662 &offset);
2663 if (ret < 0)
2664 break;
2665 if (ret) {
2666 ret = found ? 0 : -ENOENT;
2667 break;
2668 }
2669 ++found;
2670
2671 slot = path->slots[0];
2672 eb = btrfs_clone_extent_buffer(path->nodes[0]);
2673 if (!eb) {
2674 ret = -ENOMEM;
2675 break;
2676 }
2677 btrfs_release_path(path);
2678
2679 item_size = btrfs_item_size(eb, slot);
2680 ptr = btrfs_item_ptr_offset(eb, slot);
2681 cur_offset = 0;
2682
2683 while (cur_offset < item_size) {
2684 u32 name_len;
2685
2686 extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2687 parent = btrfs_inode_extref_parent(eb, extref);
2688 name_len = btrfs_inode_extref_name_len(eb, extref);
2689 ret = inode_to_path(parent, name_len,
2690 (unsigned long)&extref->name, eb, ipath);
2691 if (ret)
2692 break;
2693
2694 cur_offset += btrfs_inode_extref_name_len(eb, extref);
2695 cur_offset += sizeof(*extref);
2696 }
2697 free_extent_buffer(eb);
2698
2699 offset++;
2700 }
2701
2702 btrfs_release_path(path);
2703
2704 return ret;
2705}
2706
2707/*
2708 * returns 0 if the path could be dumped (possibly truncated)
2709 * returns <0 in case of an error
2710 */
2711static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2712 struct extent_buffer *eb, struct inode_fs_paths *ipath)
2713{
2714 char *fspath;
2715 char *fspath_min;
2716 int i = ipath->fspath->elem_cnt;
2717 const int s_ptr = sizeof(char *);
2718 u32 bytes_left;
2719
2720 bytes_left = ipath->fspath->bytes_left > s_ptr ?
2721 ipath->fspath->bytes_left - s_ptr : 0;
2722
2723 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2724 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2725 name_off, eb, inum, fspath_min, bytes_left);
2726 if (IS_ERR(fspath))
2727 return PTR_ERR(fspath);
2728
2729 if (fspath > fspath_min) {
2730 ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2731 ++ipath->fspath->elem_cnt;
2732 ipath->fspath->bytes_left = fspath - fspath_min;
2733 } else {
2734 ++ipath->fspath->elem_missed;
2735 ipath->fspath->bytes_missing += fspath_min - fspath;
2736 ipath->fspath->bytes_left = 0;
2737 }
2738
2739 return 0;
2740}
2741
2742/*
2743 * this dumps all file system paths to the inode into the ipath struct, provided
2744 * it has been created large enough. each path is zero-terminated and accessed
2745 * from ipath->fspath->val[i].
2746 * when it returns, there are ipath->fspath->elem_cnt number of paths available
2747 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
2748 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
2749 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2750 * have been needed to return all paths.
2751 */
2752int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2753{
2754 int ret;
2755 int found_refs = 0;
2756
2757 ret = iterate_inode_refs(inum, ipath);
2758 if (!ret)
2759 ++found_refs;
2760 else if (ret != -ENOENT)
2761 return ret;
2762
2763 ret = iterate_inode_extrefs(inum, ipath);
2764 if (ret == -ENOENT && found_refs)
2765 return 0;
2766
2767 return ret;
2768}
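
/*
 * Example usage (hypothetical helper; the 4096-byte container size is an
 * arbitrary choice for illustration): resolve and print every path of an
 * inode, then release the ipath and its container.
 */
static int __maybe_unused example_dump_inode_paths(struct btrfs_root *fs_root,
						   u64 inum)
{
	struct btrfs_path *path;
	struct inode_fs_paths *ipath;
	int ret;
	u32 i;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		btrfs_free_path(path);
		return PTR_ERR(ipath);
	}

	ret = paths_from_inode(inum, ipath);
	if (!ret)
		for (i = 0; i < ipath->fspath->elem_cnt; i++)
			pr_debug("path %u: %s\n", i,
				 (char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	btrfs_free_path(path);
	return ret;
}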
2769
2770struct btrfs_data_container *init_data_container(u32 total_bytes)
2771{
2772 struct btrfs_data_container *data;
2773 size_t alloc_bytes;
2774
2775 alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2776 data = kvmalloc(alloc_bytes, GFP_KERNEL);
2777 if (!data)
2778 return ERR_PTR(-ENOMEM);
2779
2780 if (total_bytes >= sizeof(*data)) {
2781 data->bytes_left = total_bytes - sizeof(*data);
2782 data->bytes_missing = 0;
2783 } else {
2784 data->bytes_missing = sizeof(*data) - total_bytes;
2785 data->bytes_left = 0;
2786 }
2787
2788 data->elem_cnt = 0;
2789 data->elem_missed = 0;
2790
2791 return data;
2792}
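
/*
 * For example, init_data_container(4096) leaves 4096 - sizeof(struct
 * btrfs_data_container) bytes usable for element data, while any
 * total_bytes smaller than the header results in bytes_left == 0 and the
 * shortfall recorded in bytes_missing.
 */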
2793
2794/*
2795 * allocates space to return multiple file system paths for an inode.
2796 * total_bytes to allocate are passed, note that space usable for actual path
2797 * information will be total_bytes - sizeof(struct btrfs_data_container).
2798 * the returned pointer must be freed with free_ipath() in the end.
2799 */
2800struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2801 struct btrfs_path *path)
2802{
2803 struct inode_fs_paths *ifp;
2804 struct btrfs_data_container *fspath;
2805
2806 fspath = init_data_container(total_bytes);
2807 if (IS_ERR(fspath))
2808 return ERR_CAST(fspath);
2809
2810 ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2811 if (!ifp) {
2812 kvfree(fspath);
2813 return ERR_PTR(-ENOMEM);
2814 }
2815
2816 ifp->btrfs_path = path;
2817 ifp->fspath = fspath;
2818 ifp->fs_root = fs_root;
2819
2820 return ifp;
2821}
2822
2823void free_ipath(struct inode_fs_paths *ipath)
2824{
2825 if (!ipath)
2826 return;
2827 kvfree(ipath->fspath);
2828 kfree(ipath);
2829}
2830
2831struct btrfs_backref_iter *btrfs_backref_iter_alloc(struct btrfs_fs_info *fs_info)
2832{
2833 struct btrfs_backref_iter *ret;
2834
2835 ret = kzalloc(sizeof(*ret), GFP_NOFS);
2836 if (!ret)
2837 return NULL;
2838
2839 ret->path = btrfs_alloc_path();
2840 if (!ret->path) {
2841 kfree(ret);
2842 return NULL;
2843 }
2844
2845 /* Current backref iterator only supports iteration in commit root */
2846 ret->path->search_commit_root = 1;
2847 ret->path->skip_locking = 1;
2848 ret->fs_info = fs_info;
2849
2850 return ret;
2851}
2852
2853int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
2854{
2855 struct btrfs_fs_info *fs_info = iter->fs_info;
2856 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
2857 struct btrfs_path *path = iter->path;
2858 struct btrfs_extent_item *ei;
2859 struct btrfs_key key;
2860 int ret;
2861
2862 key.objectid = bytenr;
2863 key.type = BTRFS_METADATA_ITEM_KEY;
2864 key.offset = (u64)-1;
2865 iter->bytenr = bytenr;
2866
2867 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2868 if (ret < 0)
2869 return ret;
2870 if (ret == 0) {
2871 ret = -EUCLEAN;
2872 goto release;
2873 }
2874 if (path->slots[0] == 0) {
2875 WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
2876 ret = -EUCLEAN;
2877 goto release;
2878 }
2879 path->slots[0]--;
2880
2881 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2882 if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
2883 key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
2884 ret = -ENOENT;
2885 goto release;
2886 }
2887 memcpy(&iter->cur_key, &key, sizeof(key));
2888 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2889 path->slots[0]);
2890 iter->end_ptr = (u32)(iter->item_ptr +
2891 btrfs_item_size(path->nodes[0], path->slots[0]));
2892 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2893 struct btrfs_extent_item);
2894
2895 /*
2896	 * Only iteration on tree backrefs is supported for now.
2897	 *
2898	 * This is an extra precaution for non skinny-metadata, where
2899	 * EXTENT_ITEM is also used for tree blocks, so we can only use
2900 * extent flags to determine if it's a tree block.
2901 */
2902 if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2903 ret = -ENOTSUPP;
2904 goto release;
2905 }
2906 iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2907
2908 /* If there is no inline backref, go search for keyed backref */
2909 if (iter->cur_ptr >= iter->end_ptr) {
2910 ret = btrfs_next_item(extent_root, path);
2911
2912 /* No inline nor keyed ref */
2913 if (ret > 0) {
2914 ret = -ENOENT;
2915 goto release;
2916 }
2917 if (ret < 0)
2918 goto release;
2919
2920 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2921 path->slots[0]);
2922 if (iter->cur_key.objectid != bytenr ||
2923 (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2924 iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2925 ret = -ENOENT;
2926 goto release;
2927 }
2928 iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2929 path->slots[0]);
2930 iter->item_ptr = iter->cur_ptr;
2931 iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size(
2932 path->nodes[0], path->slots[0]));
2933 }
2934
2935 return 0;
2936release:
2937 btrfs_backref_iter_release(iter);
2938 return ret;
2939}
2940
2941/*
2942 * Go to the next backref item of current bytenr, can be either inlined or
2943 * keyed.
2944 *
2945 * Caller needs to check whether it's inline ref or not by iter->cur_key.
2946 *
2947 * Return 0 if we get the next backref without problem.
2948 * Return >0 if there is no extra backref for this bytenr.
2949 * Return <0 if something went wrong.
2950 */
2951int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
2952{
2953 struct extent_buffer *eb = btrfs_backref_get_eb(iter);
2954 struct btrfs_root *extent_root;
2955 struct btrfs_path *path = iter->path;
2956 struct btrfs_extent_inline_ref *iref;
2957 int ret;
2958 u32 size;
2959
2960 if (btrfs_backref_iter_is_inline_ref(iter)) {
2961 /* We're still inside the inline refs */
2962 ASSERT(iter->cur_ptr < iter->end_ptr);
2963
2964 if (btrfs_backref_has_tree_block_info(iter)) {
2965 /* First tree block info */
2966 size = sizeof(struct btrfs_tree_block_info);
2967 } else {
2968 /* Use inline ref type to determine the size */
2969 int type;
2970
2971 iref = (struct btrfs_extent_inline_ref *)
2972 ((unsigned long)iter->cur_ptr);
2973 type = btrfs_extent_inline_ref_type(eb, iref);
2974
2975 size = btrfs_extent_inline_ref_size(type);
2976 }
2977 iter->cur_ptr += size;
2978 if (iter->cur_ptr < iter->end_ptr)
2979 return 0;
2980
2981 /* All inline items iterated, fall through */
2982 }
2983
2984 /* We're at keyed items, there is no inline item, go to the next one */
2985 extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr);
2986 ret = btrfs_next_item(extent_root, iter->path);
2987 if (ret)
2988 return ret;
2989
2990 btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
2991 if (iter->cur_key.objectid != iter->bytenr ||
2992 (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
2993 iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
2994 return 1;
2995 iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2996 path->slots[0]);
2997 iter->cur_ptr = iter->item_ptr;
2998 iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0],
2999 path->slots[0]);
3000 return 0;
3001}
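
/*
 * Example usage (illustrative sketch; btrfs_backref_iter_free() is the
 * release helper from backref.h): iterate all tree backref items of one
 * bytenr. Whether the current item is an inline or a keyed ref can be told
 * from btrfs_backref_iter_is_inline_ref() and iter->cur_key.
 */
static int __maybe_unused example_walk_tree_backrefs(struct btrfs_fs_info *fs_info,
						     u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	int ret;

	iter = btrfs_backref_iter_alloc(fs_info);
	if (!iter)
		return -ENOMEM;

	for (ret = btrfs_backref_iter_start(iter, bytenr);
	     ret == 0; ret = btrfs_backref_iter_next(iter)) {
		/* Inspect iter->cur_key and iter->cur_ptr here. */
	}
	if (ret > 0)	/* Ran past the last backref item. */
		ret = 0;

	btrfs_backref_iter_release(iter);
	btrfs_backref_iter_free(iter);
	return ret;
}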
3002
3003void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
3004 struct btrfs_backref_cache *cache, bool is_reloc)
3005{
3006 int i;
3007
3008 cache->rb_root = RB_ROOT;
3009 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
3010 INIT_LIST_HEAD(&cache->pending[i]);
3011 INIT_LIST_HEAD(&cache->changed);
3012 INIT_LIST_HEAD(&cache->detached);
3013 INIT_LIST_HEAD(&cache->leaves);
3014 INIT_LIST_HEAD(&cache->pending_edge);
3015 INIT_LIST_HEAD(&cache->useless_node);
3016 cache->fs_info = fs_info;
3017 cache->is_reloc = is_reloc;
3018}
3019
3020struct btrfs_backref_node *btrfs_backref_alloc_node(
3021 struct btrfs_backref_cache *cache, u64 bytenr, int level)
3022{
3023 struct btrfs_backref_node *node;
3024
3025 ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
3026 node = kzalloc(sizeof(*node), GFP_NOFS);
3027 if (!node)
3028 return node;
3029
3030 INIT_LIST_HEAD(&node->list);
3031 INIT_LIST_HEAD(&node->upper);
3032 INIT_LIST_HEAD(&node->lower);
3033 RB_CLEAR_NODE(&node->rb_node);
3034 cache->nr_nodes++;
3035 node->level = level;
3036 node->bytenr = bytenr;
3037
3038 return node;
3039}
3040
3041struct btrfs_backref_edge *btrfs_backref_alloc_edge(
3042 struct btrfs_backref_cache *cache)
3043{
3044 struct btrfs_backref_edge *edge;
3045
3046 edge = kzalloc(sizeof(*edge), GFP_NOFS);
3047 if (edge)
3048 cache->nr_edges++;
3049 return edge;
3050}
3051
3052/*
3053 * Drop the backref node from cache, also cleaning up all its
3054 * upper edges and any uncached nodes in the path.
3055 *
3056 * This cleanup happens bottom up, thus the node should either
3057 * be the lowest node in the cache or a detached node.
3058 */
3059void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
3060 struct btrfs_backref_node *node)
3061{
3062 struct btrfs_backref_node *upper;
3063 struct btrfs_backref_edge *edge;
3064
3065 if (!node)
3066 return;
3067
3068 BUG_ON(!node->lowest && !node->detached);
3069 while (!list_empty(&node->upper)) {
3070 edge = list_entry(node->upper.next, struct btrfs_backref_edge,
3071 list[LOWER]);
3072 upper = edge->node[UPPER];
3073 list_del(&edge->list[LOWER]);
3074 list_del(&edge->list[UPPER]);
3075 btrfs_backref_free_edge(cache, edge);
3076
3077 /*
3078		 * Add the node to the leaf node list if no other child block
3079		 * is cached.
3080 */
3081 if (list_empty(&upper->lower)) {
3082 list_add_tail(&upper->lower, &cache->leaves);
3083 upper->lowest = 1;
3084 }
3085 }
3086
3087 btrfs_backref_drop_node(cache, node);
3088}
3089
3090/*
3091 * Release all nodes/edges from current cache
3092 */
3093void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
3094{
3095 struct btrfs_backref_node *node;
3096 int i;
3097
3098 while (!list_empty(&cache->detached)) {
3099 node = list_entry(cache->detached.next,
3100 struct btrfs_backref_node, list);
3101 btrfs_backref_cleanup_node(cache, node);
3102 }
3103
3104 while (!list_empty(&cache->leaves)) {
3105 node = list_entry(cache->leaves.next,
3106 struct btrfs_backref_node, lower);
3107 btrfs_backref_cleanup_node(cache, node);
3108 }
3109
3110 cache->last_trans = 0;
3111
3112 for (i = 0; i < BTRFS_MAX_LEVEL; i++)
3113 ASSERT(list_empty(&cache->pending[i]));
3114 ASSERT(list_empty(&cache->pending_edge));
3115 ASSERT(list_empty(&cache->useless_node));
3116 ASSERT(list_empty(&cache->changed));
3117 ASSERT(list_empty(&cache->detached));
3118 ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
3119 ASSERT(!cache->nr_nodes);
3120 ASSERT(!cache->nr_edges);
3121}
3122
3123/*
3124 * Handle direct tree backref
3125 *
3126 * A direct tree backref means the backref item shows its parent bytenr
3127 * directly. This is for SHARED_BLOCK_REF backrefs (keyed or inlined).
3128 *
3129 * @ref_key: The converted backref key.
3130 * For keyed backref, it's the item key.
3131 * For inlined backref, objectid is the bytenr,
3132 * type is btrfs_inline_ref_type, offset is
3133 * btrfs_inline_ref_offset.
3134 */
3135static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
3136 struct btrfs_key *ref_key,
3137 struct btrfs_backref_node *cur)
3138{
3139 struct btrfs_backref_edge *edge;
3140 struct btrfs_backref_node *upper;
3141 struct rb_node *rb_node;
3142
3143 ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
3144
3145 /* Only reloc root uses backref pointing to itself */
3146 if (ref_key->objectid == ref_key->offset) {
3147 struct btrfs_root *root;
3148
3149 cur->is_reloc_root = 1;
3150 /* Only reloc backref cache cares about a specific root */
3151 if (cache->is_reloc) {
3152 root = find_reloc_root(cache->fs_info, cur->bytenr);
3153 if (!root)
3154 return -ENOENT;
3155 cur->root = root;
3156 } else {
3157 /*
3158 * For generic purpose backref cache, reloc root node
3159 * is useless.
3160 */
3161 list_add(&cur->list, &cache->useless_node);
3162 }
3163 return 0;
3164 }
3165
3166 edge = btrfs_backref_alloc_edge(cache);
3167 if (!edge)
3168 return -ENOMEM;
3169
3170 rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
3171 if (!rb_node) {
3172 /* Parent node not yet cached */
3173 upper = btrfs_backref_alloc_node(cache, ref_key->offset,
3174 cur->level + 1);
3175 if (!upper) {
3176 btrfs_backref_free_edge(cache, edge);
3177 return -ENOMEM;
3178 }
3179
3180 /*
3181		 * Backrefs for the upper level block aren't cached, add the
3182		 * block to the pending list
3183 */
3184 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
3185 } else {
3186 /* Parent node already cached */
3187 upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
3188 ASSERT(upper->checked);
3189 INIT_LIST_HEAD(&edge->list[UPPER]);
3190 }
3191 btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
3192 return 0;
3193}
3194
3195/*
3196 * Handle indirect tree backref
3197 *
3198 * An indirect tree backref means we only know which tree the node belongs to.
3199 * We still need to do a tree search to find out the parents. This is for
3200 * TREE_BLOCK_REF backref (keyed or inlined).
3201 *
3202 * @trans: Transaction handle.
3203 * @ref_key: The same as @ref_key in handle_direct_tree_backref()
3204 * @tree_key: The first key of this tree block.
3205 * @path: A clean (released) path, to avoid allocating path every time
3206 *		the function gets called.
3207 */
3208static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans,
3209 struct btrfs_backref_cache *cache,
3210 struct btrfs_path *path,
3211 struct btrfs_key *ref_key,
3212 struct btrfs_key *tree_key,
3213 struct btrfs_backref_node *cur)
3214{
3215 struct btrfs_fs_info *fs_info = cache->fs_info;
3216 struct btrfs_backref_node *upper;
3217 struct btrfs_backref_node *lower;
3218 struct btrfs_backref_edge *edge;
3219 struct extent_buffer *eb;
3220 struct btrfs_root *root;
3221 struct rb_node *rb_node;
3222 int level;
3223 bool need_check = true;
3224 int ret;
3225
3226 root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
3227 if (IS_ERR(root))
3228 return PTR_ERR(root);
3229 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
3230 cur->cowonly = 1;
3231
3232 if (btrfs_root_level(&root->root_item) == cur->level) {
3233 /* Tree root */
3234 ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
3235 /*
3236 * For reloc backref cache, we may ignore reloc root. But for
3237 * general purpose backref cache, we can't rely on
3238 * btrfs_should_ignore_reloc_root() as it may conflict with
3239 * current running relocation and lead to missing root.
3240 *
3241 * For general purpose backref cache, reloc root detection is
3242 * completely relying on direct backref (key->offset is parent
3243 * bytenr), thus only do such check for reloc cache.
3244 */
3245 if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
3246 btrfs_put_root(root);
3247 list_add(&cur->list, &cache->useless_node);
3248 } else {
3249 cur->root = root;
3250 }
3251 return 0;
3252 }
3253
3254 level = cur->level + 1;
3255
3256 /* Search the tree to find parent blocks referring to the block */
3257 path->search_commit_root = 1;
3258 path->skip_locking = 1;
3259 path->lowest_level = level;
3260 ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
3261 path->lowest_level = 0;
3262 if (ret < 0) {
3263 btrfs_put_root(root);
3264 return ret;
3265 }
3266 if (ret > 0 && path->slots[level] > 0)
3267 path->slots[level]--;
3268
3269 eb = path->nodes[level];
3270 if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
3271 btrfs_err(fs_info,
3272"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
3273 cur->bytenr, level - 1, root->root_key.objectid,
3274 tree_key->objectid, tree_key->type, tree_key->offset);
3275 btrfs_put_root(root);
3276 ret = -ENOENT;
3277 goto out;
3278 }
3279 lower = cur;
3280
3281 /* Add all nodes and edges in the path */
3282 for (; level < BTRFS_MAX_LEVEL; level++) {
3283 if (!path->nodes[level]) {
3284 ASSERT(btrfs_root_bytenr(&root->root_item) ==
3285 lower->bytenr);
3286 /* Same as previous should_ignore_reloc_root() call */
3287 if (btrfs_should_ignore_reloc_root(root) &&
3288 cache->is_reloc) {
3289 btrfs_put_root(root);
3290 list_add(&lower->list, &cache->useless_node);
3291 } else {
3292 lower->root = root;
3293 }
3294 break;
3295 }
3296
3297 edge = btrfs_backref_alloc_edge(cache);
3298 if (!edge) {
3299 btrfs_put_root(root);
3300 ret = -ENOMEM;
3301 goto out;
3302 }
3303
3304 eb = path->nodes[level];
3305 rb_node = rb_simple_search(&cache->rb_root, eb->start);
3306 if (!rb_node) {
3307 upper = btrfs_backref_alloc_node(cache, eb->start,
3308 lower->level + 1);
3309 if (!upper) {
3310 btrfs_put_root(root);
3311 btrfs_backref_free_edge(cache, edge);
3312 ret = -ENOMEM;
3313 goto out;
3314 }
3315 upper->owner = btrfs_header_owner(eb);
3316 if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
3317 upper->cowonly = 1;
3318
3319 /*
3320 * If we know the block isn't shared we can avoid
3321 * checking its backrefs.
3322 */
3323 if (btrfs_block_can_be_shared(trans, root, eb))
3324 upper->checked = 0;
3325 else
3326 upper->checked = 1;
3327
3328 /*
3329 * Add the block to pending list if we need to check its
3330 * backrefs, we only do this once while walking up a
3331 * tree as we will catch anything else later on.
3332 */
3333 if (!upper->checked && need_check) {
3334 need_check = false;
3335 list_add_tail(&edge->list[UPPER],
3336 &cache->pending_edge);
3337 } else {
3338 if (upper->checked)
3339 need_check = true;
3340 INIT_LIST_HEAD(&edge->list[UPPER]);
3341 }
3342 } else {
3343 upper = rb_entry(rb_node, struct btrfs_backref_node,
3344 rb_node);
3345 ASSERT(upper->checked);
3346 INIT_LIST_HEAD(&edge->list[UPPER]);
3347 if (!upper->owner)
3348 upper->owner = btrfs_header_owner(eb);
3349 }
3350 btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
3351
3352 if (rb_node) {
3353 btrfs_put_root(root);
3354 break;
3355 }
3356 lower = upper;
3357 upper = NULL;
3358 }
3359out:
3360 btrfs_release_path(path);
3361 return ret;
3362}
3363
3364/*
3365 * Add backref node @cur into @cache.
3366 *
3367 * NOTE: Even if the function returned 0, @cur is not yet cached as its upper
3368 * links aren't yet bi-directional. Use btrfs_backref_finish_upper_links()
3369 * to finish such linkage.
3370 *
3371 * @trans: Transaction handle.
3372 * @path: Released path for indirect tree backref lookup
3373 * @iter: Released backref iter for extent tree search
3374 * @node_key: The first key of the tree block
3375 */
3376int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
3377 struct btrfs_backref_cache *cache,
3378 struct btrfs_path *path,
3379 struct btrfs_backref_iter *iter,
3380 struct btrfs_key *node_key,
3381 struct btrfs_backref_node *cur)
3382{
3383 struct btrfs_backref_edge *edge;
3384 struct btrfs_backref_node *exist;
3385 int ret;
3386
3387 ret = btrfs_backref_iter_start(iter, cur->bytenr);
3388 if (ret < 0)
3389 return ret;
3390 /*
3391 * We skip the first btrfs_tree_block_info, as we don't use the key
3392 * stored in it, but fetch it from the tree block
3393 */
3394 if (btrfs_backref_has_tree_block_info(iter)) {
3395 ret = btrfs_backref_iter_next(iter);
3396 if (ret < 0)
3397 goto out;
3398 /* No extra backref? This means the tree block is corrupted */
3399 if (ret > 0) {
3400 ret = -EUCLEAN;
3401 goto out;
3402 }
3403 }
3404 WARN_ON(cur->checked);
3405 if (!list_empty(&cur->upper)) {
3406 /*
3407 * The backref was added previously when processing backref of
3408 * type BTRFS_TREE_BLOCK_REF_KEY
3409 */
3410 ASSERT(list_is_singular(&cur->upper));
3411 edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
3412 list[LOWER]);
3413 ASSERT(list_empty(&edge->list[UPPER]));
3414 exist = edge->node[UPPER];
3415 /*
3416		 * Add the upper level block to the pending list if we need
3417		 * to check its backrefs
3418 */
3419 if (!exist->checked)
3420 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
3421 } else {
3422 exist = NULL;
3423 }
3424
3425 for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
3426 struct extent_buffer *eb;
3427 struct btrfs_key key;
3428 int type;
3429
3430 cond_resched();
3431 eb = btrfs_backref_get_eb(iter);
3432
3433 key.objectid = iter->bytenr;
3434 if (btrfs_backref_iter_is_inline_ref(iter)) {
3435 struct btrfs_extent_inline_ref *iref;
3436
3437 /* Update key for inline backref */
3438 iref = (struct btrfs_extent_inline_ref *)
3439 ((unsigned long)iter->cur_ptr);
3440 type = btrfs_get_extent_inline_ref_type(eb, iref,
3441 BTRFS_REF_TYPE_BLOCK);
3442 if (type == BTRFS_REF_TYPE_INVALID) {
3443 ret = -EUCLEAN;
3444 goto out;
3445 }
3446 key.type = type;
3447 key.offset = btrfs_extent_inline_ref_offset(eb, iref);
3448 } else {
3449 key.type = iter->cur_key.type;
3450 key.offset = iter->cur_key.offset;
3451 }
3452
3453 /*
3454 * Parent node found and matches current inline ref, no need to
3455 * rebuild this node for this inline ref
3456 */
3457 if (exist &&
3458 ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
3459 exist->owner == key.offset) ||
3460 (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
3461 exist->bytenr == key.offset))) {
3462 exist = NULL;
3463 continue;
3464 }
3465
3466 /* SHARED_BLOCK_REF means key.offset is the parent bytenr */
3467 if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
3468 ret = handle_direct_tree_backref(cache, &key, cur);
3469 if (ret < 0)
3470 goto out;
3471 } else if (key.type == BTRFS_TREE_BLOCK_REF_KEY) {
3472 /*
3473 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref
3474 * offset means the root objectid. We need to search
3475 * the tree to get its parent bytenr.
3476 */
3477 ret = handle_indirect_tree_backref(trans, cache, path,
3478 &key, node_key, cur);
3479 if (ret < 0)
3480 goto out;
3481 }
3482 /*
3483		 * Unrecognized tree backref items (if they can pass the
3484		 * tree-checker) are ignored.
3485 */
3486 }
3487 ret = 0;
3488 cur->checked = 1;
3489 WARN_ON(exist);
3490out:
3491 btrfs_backref_iter_release(iter);
3492 return ret;
3493}
3494
3495/*
3496 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
3497 */
3498int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
3499 struct btrfs_backref_node *start)
3500{
3501 struct list_head *useless_node = &cache->useless_node;
3502 struct btrfs_backref_edge *edge;
3503 struct rb_node *rb_node;
3504 LIST_HEAD(pending_edge);
3505
3506 ASSERT(start->checked);
3507
3508 /* Insert this node to cache if it's not COW-only */
3509 if (!start->cowonly) {
3510 rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
3511 &start->rb_node);
3512 if (rb_node)
3513 btrfs_backref_panic(cache->fs_info, start->bytenr,
3514 -EEXIST);
3515 list_add_tail(&start->lower, &cache->leaves);
3516 }
3517
3518 /*
3519 * Use breadth first search to iterate all related edges.
3520 *
3521 * The starting points are all the edges of this node
3522 */
3523 list_for_each_entry(edge, &start->upper, list[LOWER])
3524 list_add_tail(&edge->list[UPPER], &pending_edge);
3525
3526 while (!list_empty(&pending_edge)) {
3527 struct btrfs_backref_node *upper;
3528 struct btrfs_backref_node *lower;
3529
3530 edge = list_first_entry(&pending_edge,
3531 struct btrfs_backref_edge, list[UPPER]);
3532 list_del_init(&edge->list[UPPER]);
3533 upper = edge->node[UPPER];
3534 lower = edge->node[LOWER];
3535
3536 /* Parent is detached, no need to keep any edges */
3537 if (upper->detached) {
3538 list_del(&edge->list[LOWER]);
3539 btrfs_backref_free_edge(cache, edge);
3540
3541 /* Lower node is orphan, queue for cleanup */
3542 if (list_empty(&lower->upper))
3543 list_add(&lower->list, useless_node);
3544 continue;
3545 }
3546
3547 /*
3548 * All new nodes added in current build_backref_tree() haven't
3549 * been linked to the cache rb tree.
3550 * So if we have upper->rb_node populated, this means a cache
3551 * hit. We only need to link the edge, as @upper and all its
3552 * parents have already been linked.
3553 */
3554 if (!RB_EMPTY_NODE(&upper->rb_node)) {
3555 if (upper->lowest) {
3556 list_del_init(&upper->lower);
3557 upper->lowest = 0;
3558 }
3559
3560 list_add_tail(&edge->list[UPPER], &upper->lower);
3561 continue;
3562 }
3563
3564 /* Sanity check, we shouldn't have any unchecked nodes */
3565 if (!upper->checked) {
3566 ASSERT(0);
3567 return -EUCLEAN;
3568 }
3569
3570 /* Sanity check, COW-only node has non-COW-only parent */
3571 if (start->cowonly != upper->cowonly) {
3572 ASSERT(0);
3573 return -EUCLEAN;
3574 }
3575
3576 /* Only cache non-COW-only (subvolume trees) tree blocks */
3577 if (!upper->cowonly) {
3578 rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3579 &upper->rb_node);
3580 if (rb_node) {
3581 btrfs_backref_panic(cache->fs_info,
3582 upper->bytenr, -EEXIST);
3583 return -EUCLEAN;
3584 }
3585 }
3586
3587 list_add_tail(&edge->list[UPPER], &upper->lower);
3588
3589 /*
3590 * Also queue all the parent edges of this uncached node
3591 * to finish the upper linkage
3592 */
3593 list_for_each_entry(edge, &upper->upper, list[LOWER])
3594 list_add_tail(&edge->list[UPPER], &pending_edge);
3595 }
3596 return 0;
3597}
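
/*
 * Putting the pieces together, the typical cache lifecycle (relocation is
 * the main user) looks roughly like:
 *
 *	btrfs_backref_init_cache(fs_info, cache, is_reloc);
 *	node = btrfs_backref_alloc_node(cache, bytenr, level);
 *	btrfs_backref_add_tree_node(trans, cache, path, iter, node_key, node);
 *	btrfs_backref_finish_upper_links(cache, node);
 *	...
 *	btrfs_backref_release_cache(cache);
 *
 * with btrfs_backref_error_cleanup() below as the bail-out path when
 * building the backref tree fails midway.
 */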
3598
3599void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
3600 struct btrfs_backref_node *node)
3601{
3602 struct btrfs_backref_node *lower;
3603 struct btrfs_backref_node *upper;
3604 struct btrfs_backref_edge *edge;
3605
3606 while (!list_empty(&cache->useless_node)) {
3607 lower = list_first_entry(&cache->useless_node,
3608 struct btrfs_backref_node, list);
3609 list_del_init(&lower->list);
3610 }
3611 while (!list_empty(&cache->pending_edge)) {
3612 edge = list_first_entry(&cache->pending_edge,
3613 struct btrfs_backref_edge, list[UPPER]);
3614 list_del(&edge->list[UPPER]);
3615 list_del(&edge->list[LOWER]);
3616 lower = edge->node[LOWER];
3617 upper = edge->node[UPPER];
3618 btrfs_backref_free_edge(cache, edge);
3619
3620 /*
3621 * Lower is no longer linked to any upper backref nodes and
3622 * isn't in the cache, we can free it ourselves.
3623 */
3624 if (list_empty(&lower->upper) &&
3625 RB_EMPTY_NODE(&lower->rb_node))
3626 list_add(&lower->list, &cache->useless_node);
3627
3628 if (!RB_EMPTY_NODE(&upper->rb_node))
3629 continue;
3630
3631 /* Add this guy's upper edges to the list to process */
3632 list_for_each_entry(edge, &upper->upper, list[LOWER])
3633 list_add_tail(&edge->list[UPPER],
3634 &cache->pending_edge);
3635 if (list_empty(&upper->upper))
3636 list_add(&upper->list, &cache->useless_node);
3637 }
3638
3639 while (!list_empty(&cache->useless_node)) {
3640 lower = list_first_entry(&cache->useless_node,
3641 struct btrfs_backref_node, list);
3642 list_del_init(&lower->list);
3643 if (lower == node)
3644 node = NULL;
3645 btrfs_backref_drop_node(cache, lower);
3646 }
3647
3648 btrfs_backref_cleanup_node(cache, node);
3649 ASSERT(list_empty(&cache->useless_node) &&
3650 list_empty(&cache->pending_edge));
3651}
1/*
2 * Copyright (C) 2011 STRATO. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19#include <linux/vmalloc.h>
20#include "ctree.h"
21#include "disk-io.h"
22#include "backref.h"
23#include "ulist.h"
24#include "transaction.h"
25#include "delayed-ref.h"
26#include "locking.h"
27
28/* Just an arbitrary number so we can be sure this happened */
29#define BACKREF_FOUND_SHARED 6
30
31struct extent_inode_elem {
32 u64 inum;
33 u64 offset;
34 struct extent_inode_elem *next;
35};
36
37static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
38 struct btrfs_file_extent_item *fi,
39 u64 extent_item_pos,
40 struct extent_inode_elem **eie)
41{
42 u64 offset = 0;
43 struct extent_inode_elem *e;
44
45 if (!btrfs_file_extent_compression(eb, fi) &&
46 !btrfs_file_extent_encryption(eb, fi) &&
47 !btrfs_file_extent_other_encoding(eb, fi)) {
48 u64 data_offset;
49 u64 data_len;
50
51 data_offset = btrfs_file_extent_offset(eb, fi);
52 data_len = btrfs_file_extent_num_bytes(eb, fi);
53
54 if (extent_item_pos < data_offset ||
55 extent_item_pos >= data_offset + data_len)
56 return 1;
57 offset = extent_item_pos - data_offset;
58 }
59
60 e = kmalloc(sizeof(*e), GFP_NOFS);
61 if (!e)
62 return -ENOMEM;
63
64 e->next = *eie;
65 e->inum = key->objectid;
66 e->offset = key->offset + offset;
67 *eie = e;
68
69 return 0;
70}
71
72static void free_inode_elem_list(struct extent_inode_elem *eie)
73{
74 struct extent_inode_elem *eie_next;
75
76 for (; eie; eie = eie_next) {
77 eie_next = eie->next;
78 kfree(eie);
79 }
80}
81
82static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte,
83 u64 extent_item_pos,
84 struct extent_inode_elem **eie)
85{
86 u64 disk_byte;
87 struct btrfs_key key;
88 struct btrfs_file_extent_item *fi;
89 int slot;
90 int nritems;
91 int extent_type;
92 int ret;
93
94 /*
95 * from the shared data ref, we only have the leaf but we need
96 * the key. thus, we must look into all items and see that we
97 * find one (some) with a reference to our extent item.
98 */
99 nritems = btrfs_header_nritems(eb);
100 for (slot = 0; slot < nritems; ++slot) {
101 btrfs_item_key_to_cpu(eb, &key, slot);
102 if (key.type != BTRFS_EXTENT_DATA_KEY)
103 continue;
104 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
105 extent_type = btrfs_file_extent_type(eb, fi);
106 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
107 continue;
108 /* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
109 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
110 if (disk_byte != wanted_disk_byte)
111 continue;
112
113 ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
114 if (ret < 0)
115 return ret;
116 }
117
118 return 0;
119}
120
121/*
122 * this structure records all encountered refs on the way up to the root
123 */
124struct __prelim_ref {
125 struct list_head list;
126 u64 root_id;
127 struct btrfs_key key_for_search;
128 int level;
129 int count;
130 struct extent_inode_elem *inode_list;
131 u64 parent;
132 u64 wanted_disk_byte;
133};
134
135static struct kmem_cache *btrfs_prelim_ref_cache;
136
137int __init btrfs_prelim_ref_init(void)
138{
139 btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
140 sizeof(struct __prelim_ref),
141 0,
142 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
143 NULL);
144 if (!btrfs_prelim_ref_cache)
145 return -ENOMEM;
146 return 0;
147}
148
149void btrfs_prelim_ref_exit(void)
150{
151 kmem_cache_destroy(btrfs_prelim_ref_cache);
152}
153
154/*
155 * the rules for all callers of this function are:
156 * - obtaining the parent is the goal
157 * - if you add a key, you must know that it is a correct key
158 * - if you cannot add the parent or a correct key, then we will look into the
159 * block later to set a correct key
160 *
161 * delayed refs
162 * ============
163 * backref type | shared | indirect | shared | indirect
164 * information | tree | tree | data | data
165 * --------------------+--------+----------+--------+----------
166 * parent logical | y | - | - | -
167 * key to resolve | - | y | y | y
168 * tree block logical | - | - | - | -
169 * root for resolving | y | y | y | y
170 *
 * - column 1: we have the parent -> done
172 * - column 2, 3, 4: we use the key to find the parent
173 *
174 * on disk refs (inline or keyed)
175 * ==============================
176 * backref type | shared | indirect | shared | indirect
177 * information | tree | tree | data | data
178 * --------------------+--------+----------+--------+----------
179 * parent logical | y | - | y | -
180 * key to resolve | - | - | - | y
181 * tree block logical | y | y | y | y
182 * root for resolving | - | y | y | y
183 *
 * - column 1, 3: we have the parent -> done
185 * - column 2: we take the first key from the block to find the parent
186 * (see __add_missing_keys)
187 * - column 4: we use the key to find the parent
188 *
189 * additional information that's available but not required to find the parent
190 * block might help in merging entries to gain some speed.
191 */
192
193static int __add_prelim_ref(struct list_head *head, u64 root_id,
194 struct btrfs_key *key, int level,
195 u64 parent, u64 wanted_disk_byte, int count,
196 gfp_t gfp_mask)
197{
198 struct __prelim_ref *ref;
199
200 if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
201 return 0;
202
203 ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
204 if (!ref)
205 return -ENOMEM;
206
207 ref->root_id = root_id;
208 if (key) {
209 ref->key_for_search = *key;
210 /*
211 * We can often find data backrefs with an offset that is too
212 * large (>= LLONG_MAX, maximum allowed file offset) due to
213 * underflows when subtracting a file's offset with the data
214 * offset of its corresponding extent data item. This can
215 * happen for example in the clone ioctl.
		 * So if we detect such a case we set the search key's offset to
		 * zero to make sure we will find the matching file extent item
		 * at add_all_parents(), otherwise we will miss it because the
		 * offset taken from the backref is much larger than the offset
		 * of the file extent item. This can make us scan a very large
		 * number of file extent items, but at least it will not make
		 * us miss any.
223 * This is an ugly workaround for a behaviour that should have
224 * never existed, but it does and a fix for the clone ioctl
225 * would touch a lot of places, cause backwards incompatibility
226 * and would not fix the problem for extents cloned with older
227 * kernels.
228 */
229 if (ref->key_for_search.type == BTRFS_EXTENT_DATA_KEY &&
230 ref->key_for_search.offset >= LLONG_MAX)
231 ref->key_for_search.offset = 0;
232 } else {
233 memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
234 }
235
236 ref->inode_list = NULL;
237 ref->level = level;
238 ref->count = count;
239 ref->parent = parent;
240 ref->wanted_disk_byte = wanted_disk_byte;
241 list_add_tail(&ref->list, head);
242
243 return 0;
244}
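
/*
 * Illustrative sketch (added for this document, not part of the original
 * file): how the columns of the table above map to __add_prelim_ref()
 * arguments. A shared tree ref carries the parent logical and needs neither
 * key nor root; an indirect tree ref carries a root and gets its key filled
 * in later by __add_missing_keys().
 */
static inline int __example_queue_tree_refs(struct list_head *prefs,
					    u64 bytenr, u64 parent,
					    u64 root_id, int level)
{
	int ret;

	/* shared tree ref: parent is known, nothing left to resolve */
	ret = __add_prelim_ref(prefs, 0, NULL, level + 1, parent, bytenr, 1,
			       GFP_NOFS);
	if (ret)
		return ret;

	/* indirect tree ref: only the root is known at this point */
	return __add_prelim_ref(prefs, root_id, NULL, level + 1, 0, bytenr, 1,
				GFP_NOFS);
}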
245
246static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
247 struct ulist *parents, struct __prelim_ref *ref,
248 int level, u64 time_seq, const u64 *extent_item_pos,
249 u64 total_refs)
250{
251 int ret = 0;
252 int slot;
253 struct extent_buffer *eb;
254 struct btrfs_key key;
255 struct btrfs_key *key_for_search = &ref->key_for_search;
256 struct btrfs_file_extent_item *fi;
257 struct extent_inode_elem *eie = NULL, *old = NULL;
258 u64 disk_byte;
259 u64 wanted_disk_byte = ref->wanted_disk_byte;
260 u64 count = 0;
261
262 if (level != 0) {
263 eb = path->nodes[level];
264 ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
265 if (ret < 0)
266 return ret;
267 return 0;
268 }
269
270 /*
271 * We normally enter this function with the path already pointing to
272 * the first item to check. But sometimes, we may enter it with
273 * slot==nritems. In that case, go to the next leaf before we continue.
274 */
275 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
276 if (time_seq == (u64)-1)
277 ret = btrfs_next_leaf(root, path);
278 else
279 ret = btrfs_next_old_leaf(root, path, time_seq);
280 }
281
282 while (!ret && count < total_refs) {
283 eb = path->nodes[0];
284 slot = path->slots[0];
285
286 btrfs_item_key_to_cpu(eb, &key, slot);
287
288 if (key.objectid != key_for_search->objectid ||
289 key.type != BTRFS_EXTENT_DATA_KEY)
290 break;
291
292 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
293 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
294
295 if (disk_byte == wanted_disk_byte) {
296 eie = NULL;
297 old = NULL;
298 count++;
299 if (extent_item_pos) {
300 ret = check_extent_in_eb(&key, eb, fi,
301 *extent_item_pos,
302 &eie);
303 if (ret < 0)
304 break;
305 }
306 if (ret > 0)
307 goto next;
308 ret = ulist_add_merge_ptr(parents, eb->start,
309 eie, (void **)&old, GFP_NOFS);
310 if (ret < 0)
311 break;
312 if (!ret && extent_item_pos) {
313 while (old->next)
314 old = old->next;
315 old->next = eie;
316 }
317 eie = NULL;
318 }
319next:
320 if (time_seq == (u64)-1)
321 ret = btrfs_next_item(root, path);
322 else
323 ret = btrfs_next_old_item(root, path, time_seq);
324 }
325
326 if (ret > 0)
327 ret = 0;
328 else if (ret < 0)
329 free_inode_elem_list(eie);
330 return ret;
331}
332
333/*
334 * resolve an indirect backref in the form (root_id, key, level)
335 * to a logical address
336 */
337static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
338 struct btrfs_path *path, u64 time_seq,
339 struct __prelim_ref *ref,
340 struct ulist *parents,
341 const u64 *extent_item_pos, u64 total_refs)
342{
343 struct btrfs_root *root;
344 struct btrfs_key root_key;
345 struct extent_buffer *eb;
346 int ret = 0;
347 int root_level;
348 int level = ref->level;
349 int index;
350
351 root_key.objectid = ref->root_id;
352 root_key.type = BTRFS_ROOT_ITEM_KEY;
353 root_key.offset = (u64)-1;
354
355 index = srcu_read_lock(&fs_info->subvol_srcu);
356
357 root = btrfs_get_fs_root(fs_info, &root_key, false);
358 if (IS_ERR(root)) {
359 srcu_read_unlock(&fs_info->subvol_srcu, index);
360 ret = PTR_ERR(root);
361 goto out;
362 }
363
364 if (btrfs_test_is_dummy_root(root)) {
365 srcu_read_unlock(&fs_info->subvol_srcu, index);
366 ret = -ENOENT;
367 goto out;
368 }
369
370 if (path->search_commit_root)
371 root_level = btrfs_header_level(root->commit_root);
372 else if (time_seq == (u64)-1)
373 root_level = btrfs_header_level(root->node);
374 else
375 root_level = btrfs_old_root_level(root, time_seq);
376
377 if (root_level + 1 == level) {
378 srcu_read_unlock(&fs_info->subvol_srcu, index);
379 goto out;
380 }
381
382 path->lowest_level = level;
383 if (time_seq == (u64)-1)
384 ret = btrfs_search_slot(NULL, root, &ref->key_for_search, path,
385 0, 0);
386 else
387 ret = btrfs_search_old_slot(root, &ref->key_for_search, path,
388 time_seq);
389
390 /* root node has been locked, we can release @subvol_srcu safely here */
391 srcu_read_unlock(&fs_info->subvol_srcu, index);
392
393 pr_debug("search slot in root %llu (level %d, ref count %d) returned "
394 "%d for key (%llu %u %llu)\n",
395 ref->root_id, level, ref->count, ret,
396 ref->key_for_search.objectid, ref->key_for_search.type,
397 ref->key_for_search.offset);
398 if (ret < 0)
399 goto out;
400
401 eb = path->nodes[level];
402 while (!eb) {
403 if (WARN_ON(!level)) {
404 ret = 1;
405 goto out;
406 }
407 level--;
408 eb = path->nodes[level];
409 }
410
411 ret = add_all_parents(root, path, parents, ref, level, time_seq,
412 extent_item_pos, total_refs);
413out:
414 path->lowest_level = 0;
415 btrfs_release_path(path);
416 return ret;
417}
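
/*
 * Example (added for clarity): an indirect backref (root 257, key
 * (258 EXTENT_DATA 0), level 0) is resolved by searching root 257 with
 * lowest_level set to 0; every leaf found that way which still references
 * wanted_disk_byte is then reported as a parent by add_all_parents().
 */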
418
419/*
420 * resolve all indirect backrefs from the list
421 */
422static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
423 struct btrfs_path *path, u64 time_seq,
424 struct list_head *head,
425 const u64 *extent_item_pos, u64 total_refs,
426 u64 root_objectid)
427{
428 int err;
429 int ret = 0;
430 struct __prelim_ref *ref;
431 struct __prelim_ref *ref_safe;
432 struct __prelim_ref *new_ref;
433 struct ulist *parents;
434 struct ulist_node *node;
435 struct ulist_iterator uiter;
436
437 parents = ulist_alloc(GFP_NOFS);
438 if (!parents)
439 return -ENOMEM;
440
441 /*
442 * _safe allows us to insert directly after the current item without
443 * iterating over the newly inserted items.
444 * we're also allowed to re-assign ref during iteration.
445 */
446 list_for_each_entry_safe(ref, ref_safe, head, list) {
447 if (ref->parent) /* already direct */
448 continue;
449 if (ref->count == 0)
450 continue;
451 if (root_objectid && ref->root_id != root_objectid) {
452 ret = BACKREF_FOUND_SHARED;
453 goto out;
454 }
455 err = __resolve_indirect_ref(fs_info, path, time_seq, ref,
456 parents, extent_item_pos,
457 total_refs);
458 /*
		 * we can only tolerate ENOENT, otherwise we should catch the
		 * error and return directly.
461 */
462 if (err == -ENOENT) {
463 continue;
464 } else if (err) {
465 ret = err;
466 goto out;
467 }
468
469 /* we put the first parent into the ref at hand */
470 ULIST_ITER_INIT(&uiter);
471 node = ulist_next(parents, &uiter);
472 ref->parent = node ? node->val : 0;
473 ref->inode_list = node ?
474 (struct extent_inode_elem *)(uintptr_t)node->aux : NULL;
475
476 /* additional parents require new refs being added here */
477 while ((node = ulist_next(parents, &uiter))) {
478 new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
479 GFP_NOFS);
480 if (!new_ref) {
481 ret = -ENOMEM;
482 goto out;
483 }
484 memcpy(new_ref, ref, sizeof(*ref));
485 new_ref->parent = node->val;
486 new_ref->inode_list = (struct extent_inode_elem *)
487 (uintptr_t)node->aux;
488 list_add(&new_ref->list, &ref->list);
489 }
490 ulist_reinit(parents);
491 }
492out:
493 ulist_free(parents);
494 return ret;
495}
496
497static inline int ref_for_same_block(struct __prelim_ref *ref1,
498 struct __prelim_ref *ref2)
499{
500 if (ref1->level != ref2->level)
501 return 0;
502 if (ref1->root_id != ref2->root_id)
503 return 0;
504 if (ref1->key_for_search.type != ref2->key_for_search.type)
505 return 0;
506 if (ref1->key_for_search.objectid != ref2->key_for_search.objectid)
507 return 0;
508 if (ref1->key_for_search.offset != ref2->key_for_search.offset)
509 return 0;
510 if (ref1->parent != ref2->parent)
511 return 0;
512
513 return 1;
514}
515
516/*
517 * read tree blocks and add keys where required.
518 */
519static int __add_missing_keys(struct btrfs_fs_info *fs_info,
520 struct list_head *head)
521{
522 struct __prelim_ref *ref;
523 struct extent_buffer *eb;
524
525 list_for_each_entry(ref, head, list) {
526 if (ref->parent)
527 continue;
528 if (ref->key_for_search.type)
529 continue;
530 BUG_ON(!ref->wanted_disk_byte);
531 eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
532 0);
533 if (IS_ERR(eb)) {
534 return PTR_ERR(eb);
535 } else if (!extent_buffer_uptodate(eb)) {
536 free_extent_buffer(eb);
537 return -EIO;
538 }
539 btrfs_tree_read_lock(eb);
540 if (btrfs_header_level(eb) == 0)
541 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
542 else
543 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
544 btrfs_tree_read_unlock(eb);
545 free_extent_buffer(eb);
546 }
547 return 0;
548}
549
550/*
551 * merge backrefs and adjust counts accordingly
552 *
553 * mode = 1: merge identical keys, if key is set
554 * FIXME: if we add more keys in __add_prelim_ref, we can merge more here.
555 * additionally, we could even add a key range for the blocks we
556 * looked into to merge even more (-> replace unresolved refs by those
557 * having a parent).
558 * mode = 2: merge identical parents
559 */
560static void __merge_refs(struct list_head *head, int mode)
561{
562 struct __prelim_ref *pos1;
563
564 list_for_each_entry(pos1, head, list) {
565 struct __prelim_ref *pos2 = pos1, *tmp;
566
567 list_for_each_entry_safe_continue(pos2, tmp, head, list) {
568 struct __prelim_ref *ref1 = pos1, *ref2 = pos2;
569 struct extent_inode_elem *eie;
570
571 if (!ref_for_same_block(ref1, ref2))
572 continue;
573 if (mode == 1) {
574 if (!ref1->parent && ref2->parent)
575 swap(ref1, ref2);
576 } else {
577 if (ref1->parent != ref2->parent)
578 continue;
579 }
580
581 eie = ref1->inode_list;
582 while (eie && eie->next)
583 eie = eie->next;
584 if (eie)
585 eie->next = ref2->inode_list;
586 else
587 ref1->inode_list = ref2->inode_list;
588 ref1->count += ref2->count;
589
590 list_del(&ref2->list);
591 kmem_cache_free(btrfs_prelim_ref_cache, ref2);
592 }
593
594 }
595}
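
/*
 * Example (added for clarity): after __add_inline_refs(), __add_keyed_refs()
 * and __add_delayed_refs() have run, the same reference can sit on the list
 * more than once, e.g. once from the extent tree and once as a delayed ref.
 * Merging sums the counts (a queued drop contributes a negative count),
 * concatenates the inode lists and frees the duplicate entry.
 */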
596
597/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to seq to the list
600 */
601static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
602 struct list_head *prefs, u64 *total_refs,
603 u64 inum)
604{
605 struct btrfs_delayed_ref_node *node;
606 struct btrfs_delayed_extent_op *extent_op = head->extent_op;
607 struct btrfs_key key;
608 struct btrfs_key op_key = {0};
609 int sgn;
610 int ret = 0;
611
612 if (extent_op && extent_op->update_key)
613 btrfs_disk_key_to_cpu(&op_key, &extent_op->key);
614
615 spin_lock(&head->lock);
616 list_for_each_entry(node, &head->ref_list, list) {
617 if (node->seq > seq)
618 continue;
619
620 switch (node->action) {
621 case BTRFS_ADD_DELAYED_EXTENT:
622 case BTRFS_UPDATE_DELAYED_HEAD:
623 WARN_ON(1);
624 continue;
625 case BTRFS_ADD_DELAYED_REF:
626 sgn = 1;
627 break;
628 case BTRFS_DROP_DELAYED_REF:
629 sgn = -1;
630 break;
631 default:
632 BUG_ON(1);
633 }
634 *total_refs += (node->ref_mod * sgn);
635 switch (node->type) {
636 case BTRFS_TREE_BLOCK_REF_KEY: {
637 struct btrfs_delayed_tree_ref *ref;
638
639 ref = btrfs_delayed_node_to_tree_ref(node);
640 ret = __add_prelim_ref(prefs, ref->root, &op_key,
641 ref->level + 1, 0, node->bytenr,
642 node->ref_mod * sgn, GFP_ATOMIC);
643 break;
644 }
645 case BTRFS_SHARED_BLOCK_REF_KEY: {
646 struct btrfs_delayed_tree_ref *ref;
647
648 ref = btrfs_delayed_node_to_tree_ref(node);
649 ret = __add_prelim_ref(prefs, 0, NULL,
650 ref->level + 1, ref->parent,
651 node->bytenr,
652 node->ref_mod * sgn, GFP_ATOMIC);
653 break;
654 }
655 case BTRFS_EXTENT_DATA_REF_KEY: {
656 struct btrfs_delayed_data_ref *ref;
657 ref = btrfs_delayed_node_to_data_ref(node);
658
659 key.objectid = ref->objectid;
660 key.type = BTRFS_EXTENT_DATA_KEY;
661 key.offset = ref->offset;
662
663 /*
			 * Found an inum that doesn't match our known inum, we
			 * know it's shared.
666 */
667 if (inum && ref->objectid != inum) {
668 ret = BACKREF_FOUND_SHARED;
669 break;
670 }
671
672 ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
673 node->bytenr,
674 node->ref_mod * sgn, GFP_ATOMIC);
675 break;
676 }
677 case BTRFS_SHARED_DATA_REF_KEY: {
678 struct btrfs_delayed_data_ref *ref;
679
680 ref = btrfs_delayed_node_to_data_ref(node);
681 ret = __add_prelim_ref(prefs, 0, NULL, 0,
682 ref->parent, node->bytenr,
683 node->ref_mod * sgn, GFP_ATOMIC);
684 break;
685 }
686 default:
687 WARN_ON(1);
688 }
689 if (ret)
690 break;
691 }
692 spin_unlock(&head->lock);
693 return ret;
694}
695
696/*
697 * add all inline backrefs for bytenr to the list
698 */
699static int __add_inline_refs(struct btrfs_fs_info *fs_info,
700 struct btrfs_path *path, u64 bytenr,
701 int *info_level, struct list_head *prefs,
702 u64 *total_refs, u64 inum)
703{
704 int ret = 0;
705 int slot;
706 struct extent_buffer *leaf;
707 struct btrfs_key key;
708 struct btrfs_key found_key;
709 unsigned long ptr;
710 unsigned long end;
711 struct btrfs_extent_item *ei;
712 u64 flags;
713 u64 item_size;
714
715 /*
716 * enumerate all inline refs
717 */
718 leaf = path->nodes[0];
719 slot = path->slots[0];
720
721 item_size = btrfs_item_size_nr(leaf, slot);
722 BUG_ON(item_size < sizeof(*ei));
723
724 ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
725 flags = btrfs_extent_flags(leaf, ei);
726 *total_refs += btrfs_extent_refs(leaf, ei);
727 btrfs_item_key_to_cpu(leaf, &found_key, slot);
728
729 ptr = (unsigned long)(ei + 1);
730 end = (unsigned long)ei + item_size;
731
732 if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
733 flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
734 struct btrfs_tree_block_info *info;
735
736 info = (struct btrfs_tree_block_info *)ptr;
737 *info_level = btrfs_tree_block_level(leaf, info);
738 ptr += sizeof(struct btrfs_tree_block_info);
739 BUG_ON(ptr > end);
740 } else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
741 *info_level = found_key.offset;
742 } else {
743 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
744 }
745
746 while (ptr < end) {
747 struct btrfs_extent_inline_ref *iref;
748 u64 offset;
749 int type;
750
751 iref = (struct btrfs_extent_inline_ref *)ptr;
752 type = btrfs_extent_inline_ref_type(leaf, iref);
753 offset = btrfs_extent_inline_ref_offset(leaf, iref);
754
755 switch (type) {
756 case BTRFS_SHARED_BLOCK_REF_KEY:
757 ret = __add_prelim_ref(prefs, 0, NULL,
758 *info_level + 1, offset,
759 bytenr, 1, GFP_NOFS);
760 break;
761 case BTRFS_SHARED_DATA_REF_KEY: {
762 struct btrfs_shared_data_ref *sdref;
763 int count;
764
765 sdref = (struct btrfs_shared_data_ref *)(iref + 1);
766 count = btrfs_shared_data_ref_count(leaf, sdref);
767 ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
768 bytenr, count, GFP_NOFS);
769 break;
770 }
771 case BTRFS_TREE_BLOCK_REF_KEY:
772 ret = __add_prelim_ref(prefs, offset, NULL,
773 *info_level + 1, 0,
774 bytenr, 1, GFP_NOFS);
775 break;
776 case BTRFS_EXTENT_DATA_REF_KEY: {
777 struct btrfs_extent_data_ref *dref;
778 int count;
779 u64 root;
780
781 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
782 count = btrfs_extent_data_ref_count(leaf, dref);
783 key.objectid = btrfs_extent_data_ref_objectid(leaf,
784 dref);
785 key.type = BTRFS_EXTENT_DATA_KEY;
786 key.offset = btrfs_extent_data_ref_offset(leaf, dref);
787
788 if (inum && key.objectid != inum) {
789 ret = BACKREF_FOUND_SHARED;
790 break;
791 }
792
793 root = btrfs_extent_data_ref_root(leaf, dref);
794 ret = __add_prelim_ref(prefs, root, &key, 0, 0,
795 bytenr, count, GFP_NOFS);
796 break;
797 }
798 default:
799 WARN_ON(1);
800 }
801 if (ret)
802 return ret;
803 ptr += btrfs_extent_inline_ref_size(type);
804 }
805
806 return 0;
807}
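
/*
 * On-disk layout walked by __add_inline_refs() (added for clarity; skinny
 * METADATA_ITEMs carry the level in the key offset and omit the block info):
 *
 *	struct btrfs_extent_item
 *	[struct btrfs_tree_block_info]	(non-skinny tree blocks only)
 *	struct btrfs_extent_inline_ref	(repeated until the end of the item;
 *					 a type byte plus offset, with an
 *					 extra payload for the data ref types)
 */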
808
809/*
810 * add all non-inline backrefs for bytenr to the list
811 */
812static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
813 struct btrfs_path *path, u64 bytenr,
814 int info_level, struct list_head *prefs, u64 inum)
815{
816 struct btrfs_root *extent_root = fs_info->extent_root;
817 int ret;
818 int slot;
819 struct extent_buffer *leaf;
820 struct btrfs_key key;
821
822 while (1) {
823 ret = btrfs_next_item(extent_root, path);
824 if (ret < 0)
825 break;
826 if (ret) {
827 ret = 0;
828 break;
829 }
830
831 slot = path->slots[0];
832 leaf = path->nodes[0];
833 btrfs_item_key_to_cpu(leaf, &key, slot);
834
835 if (key.objectid != bytenr)
836 break;
837 if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
838 continue;
839 if (key.type > BTRFS_SHARED_DATA_REF_KEY)
840 break;
841
842 switch (key.type) {
843 case BTRFS_SHARED_BLOCK_REF_KEY:
844 ret = __add_prelim_ref(prefs, 0, NULL,
845 info_level + 1, key.offset,
846 bytenr, 1, GFP_NOFS);
847 break;
848 case BTRFS_SHARED_DATA_REF_KEY: {
849 struct btrfs_shared_data_ref *sdref;
850 int count;
851
852 sdref = btrfs_item_ptr(leaf, slot,
853 struct btrfs_shared_data_ref);
854 count = btrfs_shared_data_ref_count(leaf, sdref);
855 ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
856 bytenr, count, GFP_NOFS);
857 break;
858 }
859 case BTRFS_TREE_BLOCK_REF_KEY:
860 ret = __add_prelim_ref(prefs, key.offset, NULL,
861 info_level + 1, 0,
862 bytenr, 1, GFP_NOFS);
863 break;
864 case BTRFS_EXTENT_DATA_REF_KEY: {
865 struct btrfs_extent_data_ref *dref;
866 int count;
867 u64 root;
868
869 dref = btrfs_item_ptr(leaf, slot,
870 struct btrfs_extent_data_ref);
871 count = btrfs_extent_data_ref_count(leaf, dref);
872 key.objectid = btrfs_extent_data_ref_objectid(leaf,
873 dref);
874 key.type = BTRFS_EXTENT_DATA_KEY;
875 key.offset = btrfs_extent_data_ref_offset(leaf, dref);
876
877 if (inum && key.objectid != inum) {
878 ret = BACKREF_FOUND_SHARED;
879 break;
880 }
881
882 root = btrfs_extent_data_ref_root(leaf, dref);
883 ret = __add_prelim_ref(prefs, root, &key, 0, 0,
884 bytenr, count, GFP_NOFS);
885 break;
886 }
887 default:
888 WARN_ON(1);
889 }
890 if (ret)
891 return ret;
892
893 }
894
895 return ret;
896}
897
898/*
899 * this adds all existing backrefs (inline backrefs, backrefs and delayed
900 * refs) for the given bytenr to the refs list, merges duplicates and resolves
901 * indirect refs to their parent bytenr.
902 * When roots are found, they're added to the roots list
903 *
904 * NOTE: This can return values > 0
905 *
 * If time_seq is set to (u64)-1, it will not search delayed_refs and will
 * behave much like the trans == NULL case, the only difference being that it
 * will not use the commit root.
909 * The special case is for qgroup to search roots in commit_transaction().
910 *
911 * FIXME some caching might speed things up
912 */
913static int find_parent_nodes(struct btrfs_trans_handle *trans,
914 struct btrfs_fs_info *fs_info, u64 bytenr,
915 u64 time_seq, struct ulist *refs,
916 struct ulist *roots, const u64 *extent_item_pos,
917 u64 root_objectid, u64 inum)
918{
919 struct btrfs_key key;
920 struct btrfs_path *path;
921 struct btrfs_delayed_ref_root *delayed_refs = NULL;
922 struct btrfs_delayed_ref_head *head;
923 int info_level = 0;
924 int ret;
925 struct list_head prefs_delayed;
926 struct list_head prefs;
927 struct __prelim_ref *ref;
928 struct extent_inode_elem *eie = NULL;
929 u64 total_refs = 0;
930
931 INIT_LIST_HEAD(&prefs);
932 INIT_LIST_HEAD(&prefs_delayed);
933
934 key.objectid = bytenr;
935 key.offset = (u64)-1;
936 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
937 key.type = BTRFS_METADATA_ITEM_KEY;
938 else
939 key.type = BTRFS_EXTENT_ITEM_KEY;
940
941 path = btrfs_alloc_path();
942 if (!path)
943 return -ENOMEM;
944 if (!trans) {
945 path->search_commit_root = 1;
946 path->skip_locking = 1;
947 }
948
949 if (time_seq == (u64)-1)
950 path->skip_locking = 1;
951
952 /*
953 * grab both a lock on the path and a lock on the delayed ref head.
954 * We need both to get a consistent picture of how the refs look
955 * at a specified point in time
956 */
957again:
958 head = NULL;
959
960 ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
961 if (ret < 0)
962 goto out;
963 BUG_ON(ret == 0);
964
965#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
966 if (trans && likely(trans->type != __TRANS_DUMMY) &&
967 time_seq != (u64)-1) {
968#else
969 if (trans && time_seq != (u64)-1) {
970#endif
971 /*
972 * look if there are updates for this ref queued and lock the
973 * head
974 */
975 delayed_refs = &trans->transaction->delayed_refs;
976 spin_lock(&delayed_refs->lock);
977 head = btrfs_find_delayed_ref_head(trans, bytenr);
978 if (head) {
979 if (!mutex_trylock(&head->mutex)) {
980 atomic_inc(&head->node.refs);
981 spin_unlock(&delayed_refs->lock);
982
983 btrfs_release_path(path);
984
985 /*
986 * Mutex was contended, block until it's
987 * released and try again
988 */
989 mutex_lock(&head->mutex);
990 mutex_unlock(&head->mutex);
991 btrfs_put_delayed_ref(&head->node);
992 goto again;
993 }
994 spin_unlock(&delayed_refs->lock);
995 ret = __add_delayed_refs(head, time_seq,
996 &prefs_delayed, &total_refs,
997 inum);
998 mutex_unlock(&head->mutex);
999 if (ret)
1000 goto out;
1001 } else {
1002 spin_unlock(&delayed_refs->lock);
1003 }
1004 }
1005
1006 if (path->slots[0]) {
1007 struct extent_buffer *leaf;
1008 int slot;
1009
1010 path->slots[0]--;
1011 leaf = path->nodes[0];
1012 slot = path->slots[0];
1013 btrfs_item_key_to_cpu(leaf, &key, slot);
1014 if (key.objectid == bytenr &&
1015 (key.type == BTRFS_EXTENT_ITEM_KEY ||
1016 key.type == BTRFS_METADATA_ITEM_KEY)) {
1017 ret = __add_inline_refs(fs_info, path, bytenr,
1018 &info_level, &prefs,
1019 &total_refs, inum);
1020 if (ret)
1021 goto out;
1022 ret = __add_keyed_refs(fs_info, path, bytenr,
1023 info_level, &prefs, inum);
1024 if (ret)
1025 goto out;
1026 }
1027 }
1028 btrfs_release_path(path);
1029
1030 list_splice_init(&prefs_delayed, &prefs);
1031
1032 ret = __add_missing_keys(fs_info, &prefs);
1033 if (ret)
1034 goto out;
1035
1036 __merge_refs(&prefs, 1);
1037
1038 ret = __resolve_indirect_refs(fs_info, path, time_seq, &prefs,
1039 extent_item_pos, total_refs,
1040 root_objectid);
1041 if (ret)
1042 goto out;
1043
1044 __merge_refs(&prefs, 2);
1045
1046 while (!list_empty(&prefs)) {
1047 ref = list_first_entry(&prefs, struct __prelim_ref, list);
1048 WARN_ON(ref->count < 0);
1049 if (roots && ref->count && ref->root_id && ref->parent == 0) {
1050 if (root_objectid && ref->root_id != root_objectid) {
1051 ret = BACKREF_FOUND_SHARED;
1052 goto out;
1053 }
1054
1055 /* no parent == root of tree */
1056 ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
1057 if (ret < 0)
1058 goto out;
1059 }
1060 if (ref->count && ref->parent) {
1061 if (extent_item_pos && !ref->inode_list &&
1062 ref->level == 0) {
1063 struct extent_buffer *eb;
1064
1065 eb = read_tree_block(fs_info->extent_root,
1066 ref->parent, 0);
1067 if (IS_ERR(eb)) {
1068 ret = PTR_ERR(eb);
1069 goto out;
1070 } else if (!extent_buffer_uptodate(eb)) {
1071 free_extent_buffer(eb);
1072 ret = -EIO;
1073 goto out;
1074 }
1075 btrfs_tree_read_lock(eb);
1076 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1077 ret = find_extent_in_eb(eb, bytenr,
1078 *extent_item_pos, &eie);
1079 btrfs_tree_read_unlock_blocking(eb);
1080 free_extent_buffer(eb);
1081 if (ret < 0)
1082 goto out;
1083 ref->inode_list = eie;
1084 }
1085 ret = ulist_add_merge_ptr(refs, ref->parent,
1086 ref->inode_list,
1087 (void **)&eie, GFP_NOFS);
1088 if (ret < 0)
1089 goto out;
1090 if (!ret && extent_item_pos) {
1091 /*
1092 * we've recorded that parent, so we must extend
1093 * its inode list here
1094 */
1095 BUG_ON(!eie);
1096 while (eie->next)
1097 eie = eie->next;
1098 eie->next = ref->inode_list;
1099 }
1100 eie = NULL;
1101 }
1102 list_del(&ref->list);
1103 kmem_cache_free(btrfs_prelim_ref_cache, ref);
1104 }
1105
1106out:
1107 btrfs_free_path(path);
1108 while (!list_empty(&prefs)) {
1109 ref = list_first_entry(&prefs, struct __prelim_ref, list);
1110 list_del(&ref->list);
1111 kmem_cache_free(btrfs_prelim_ref_cache, ref);
1112 }
1113 while (!list_empty(&prefs_delayed)) {
1114 ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
1115 list);
1116 list_del(&ref->list);
1117 kmem_cache_free(btrfs_prelim_ref_cache, ref);
1118 }
1119 if (ret < 0)
1120 free_inode_elem_list(eie);
1121 return ret;
1122}
1123
1124static void free_leaf_list(struct ulist *blocks)
1125{
1126 struct ulist_node *node = NULL;
1127 struct extent_inode_elem *eie;
1128 struct ulist_iterator uiter;
1129
1130 ULIST_ITER_INIT(&uiter);
1131 while ((node = ulist_next(blocks, &uiter))) {
1132 if (!node->aux)
1133 continue;
1134 eie = (struct extent_inode_elem *)(uintptr_t)node->aux;
1135 free_inode_elem_list(eie);
1136 node->aux = 0;
1137 }
1138
1139 ulist_free(blocks);
1140}
1141
/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. The leafs will be stored in the leafs ulist, which must be freed
 * with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
1150static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
1151 struct btrfs_fs_info *fs_info, u64 bytenr,
1152 u64 time_seq, struct ulist **leafs,
1153 const u64 *extent_item_pos)
1154{
1155 int ret;
1156
1157 *leafs = ulist_alloc(GFP_NOFS);
1158 if (!*leafs)
1159 return -ENOMEM;
1160
1161 ret = find_parent_nodes(trans, fs_info, bytenr,
1162 time_seq, *leafs, NULL, extent_item_pos, 0, 0);
1163 if (ret < 0 && ret != -ENOENT) {
1164 free_leaf_list(*leafs);
1165 return ret;
1166 }
1167
1168 return 0;
1169}
1170
1171/*
1172 * walk all backrefs for a given extent to find all roots that reference this
1173 * extent. Walking a backref means finding all extents that reference this
1174 * extent and in turn walk the backrefs of those, too. Naturally this is a
1175 * recursive process, but here it is implemented in an iterative fashion: We
1176 * find all referencing extents for the extent in question and put them on a
1177 * list. In turn, we find all referencing extents for those, further appending
1178 * to the list. The way we iterate the list allows adding more elements after
1179 * the current while iterating. The process stops when we reach the end of the
1180 * list. Found roots are added to the roots list.
1181 *
1182 * returns 0 on success, < 0 on error.
1183 */
1184static int __btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1185 struct btrfs_fs_info *fs_info, u64 bytenr,
1186 u64 time_seq, struct ulist **roots)
1187{
1188 struct ulist *tmp;
1189 struct ulist_node *node = NULL;
1190 struct ulist_iterator uiter;
1191 int ret;
1192
1193 tmp = ulist_alloc(GFP_NOFS);
1194 if (!tmp)
1195 return -ENOMEM;
1196 *roots = ulist_alloc(GFP_NOFS);
1197 if (!*roots) {
1198 ulist_free(tmp);
1199 return -ENOMEM;
1200 }
1201
1202 ULIST_ITER_INIT(&uiter);
1203 while (1) {
1204 ret = find_parent_nodes(trans, fs_info, bytenr,
1205 time_seq, tmp, *roots, NULL, 0, 0);
1206 if (ret < 0 && ret != -ENOENT) {
1207 ulist_free(tmp);
1208 ulist_free(*roots);
1209 return ret;
1210 }
1211 node = ulist_next(tmp, &uiter);
1212 if (!node)
1213 break;
1214 bytenr = node->val;
1215 cond_resched();
1216 }
1217
1218 ulist_free(tmp);
1219 return 0;
1220}
1221
1222int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1223 struct btrfs_fs_info *fs_info, u64 bytenr,
1224 u64 time_seq, struct ulist **roots)
1225{
1226 int ret;
1227
1228 if (!trans)
1229 down_read(&fs_info->commit_root_sem);
1230 ret = __btrfs_find_all_roots(trans, fs_info, bytenr, time_seq, roots);
1231 if (!trans)
1232 up_read(&fs_info->commit_root_sem);
1233 return ret;
1234}
1235
1236/**
1237 * btrfs_check_shared - tell us whether an extent is shared
1238 *
1239 * @trans: optional trans handle
1240 *
1241 * btrfs_check_shared uses the backref walking code but will short
1242 * circuit as soon as it finds a root or inode that doesn't match the
1243 * one passed in. This provides a significant performance benefit for
1244 * callers (such as fiemap) which want to know whether the extent is
1245 * shared but do not need a ref count.
1246 *
1247 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
1248 */
1249int btrfs_check_shared(struct btrfs_trans_handle *trans,
1250 struct btrfs_fs_info *fs_info, u64 root_objectid,
1251 u64 inum, u64 bytenr)
1252{
1253 struct ulist *tmp = NULL;
1254 struct ulist *roots = NULL;
1255 struct ulist_iterator uiter;
1256 struct ulist_node *node;
1257 struct seq_list elem = SEQ_LIST_INIT(elem);
1258 int ret = 0;
1259
1260 tmp = ulist_alloc(GFP_NOFS);
1261 roots = ulist_alloc(GFP_NOFS);
1262 if (!tmp || !roots) {
1263 ulist_free(tmp);
1264 ulist_free(roots);
1265 return -ENOMEM;
1266 }
1267
1268 if (trans)
1269 btrfs_get_tree_mod_seq(fs_info, &elem);
1270 else
1271 down_read(&fs_info->commit_root_sem);
1272 ULIST_ITER_INIT(&uiter);
1273 while (1) {
1274 ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
1275 roots, NULL, root_objectid, inum);
1276 if (ret == BACKREF_FOUND_SHARED) {
1277 /* this is the only condition under which we return 1 */
1278 ret = 1;
1279 break;
1280 }
1281 if (ret < 0 && ret != -ENOENT)
1282 break;
1283 ret = 0;
1284 node = ulist_next(tmp, &uiter);
1285 if (!node)
1286 break;
1287 bytenr = node->val;
1288 cond_resched();
1289 }
1290 if (trans)
1291 btrfs_put_tree_mod_seq(fs_info, &elem);
1292 else
1293 up_read(&fs_info->commit_root_sem);
1294 ulist_free(tmp);
1295 ulist_free(roots);
1296 return ret;
1297}
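
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * how a caller such as fiemap can use btrfs_check_shared(). With a NULL
 * trans handle the walk runs against the commit roots.
 */
static inline bool example_extent_is_shared(struct btrfs_fs_info *fs_info,
					    u64 root_objectid, u64 inum,
					    u64 bytenr)
{
	int ret;

	ret = btrfs_check_shared(NULL, fs_info, root_objectid, inum, bytenr);

	/* 1 is the only "shared" answer; errors are treated as not shared */
	return ret == 1;
}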
1298
1299int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
1300 u64 start_off, struct btrfs_path *path,
1301 struct btrfs_inode_extref **ret_extref,
1302 u64 *found_off)
1303{
1304 int ret, slot;
1305 struct btrfs_key key;
1306 struct btrfs_key found_key;
1307 struct btrfs_inode_extref *extref;
1308 struct extent_buffer *leaf;
1309 unsigned long ptr;
1310
1311 key.objectid = inode_objectid;
1312 key.type = BTRFS_INODE_EXTREF_KEY;
1313 key.offset = start_off;
1314
1315 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1316 if (ret < 0)
1317 return ret;
1318
1319 while (1) {
1320 leaf = path->nodes[0];
1321 slot = path->slots[0];
1322 if (slot >= btrfs_header_nritems(leaf)) {
1323 /*
1324 * If the item at offset is not found,
1325 * btrfs_search_slot will point us to the slot
1326 * where it should be inserted. In our case
1327 * that will be the slot directly before the
			 * next INODE_EXTREF_KEY item. In the case
1329 * that we're pointing to the last slot in a
1330 * leaf, we must move one leaf over.
1331 */
1332 ret = btrfs_next_leaf(root, path);
1333 if (ret) {
1334 if (ret >= 1)
1335 ret = -ENOENT;
1336 break;
1337 }
1338 continue;
1339 }
1340
1341 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1342
1343 /*
1344 * Check that we're still looking at an extended ref key for
		 * this particular objectid. If we have a different
		 * objectid or type then there are no more to be found
1347 * in the tree and we can exit.
1348 */
1349 ret = -ENOENT;
1350 if (found_key.objectid != inode_objectid)
1351 break;
1352 if (found_key.type != BTRFS_INODE_EXTREF_KEY)
1353 break;
1354
1355 ret = 0;
1356 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1357 extref = (struct btrfs_inode_extref *)ptr;
1358 *ret_extref = extref;
1359 if (found_off)
1360 *found_off = found_key.offset;
1361 break;
1362 }
1363
1364 return ret;
1365}
1366
1367/*
1368 * this iterates to turn a name (from iref/extref) into a full filesystem path.
1369 * Elements of the path are separated by '/' and the path is guaranteed to be
1370 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible for
 * providing "size" bytes in "dest". the dest buffer will be filled backwards.
 * finally, the start point of the resulting string is returned. this pointer
 * is within dest, normally.
1375 * in case the path buffer would overflow, the pointer is decremented further
1376 * as if output was written to the buffer, though no more output is actually
1377 * generated. that way, the caller can determine how much space would be
1378 * required for the path to fit into the buffer. in that case, the returned
1379 * value will be smaller than dest. callers must check this!
1380 */
1381char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1382 u32 name_len, unsigned long name_off,
1383 struct extent_buffer *eb_in, u64 parent,
1384 char *dest, u32 size)
1385{
1386 int slot;
1387 u64 next_inum;
1388 int ret;
1389 s64 bytes_left = ((s64)size) - 1;
1390 struct extent_buffer *eb = eb_in;
1391 struct btrfs_key found_key;
1392 int leave_spinning = path->leave_spinning;
1393 struct btrfs_inode_ref *iref;
1394
1395 if (bytes_left >= 0)
1396 dest[bytes_left] = '\0';
1397
1398 path->leave_spinning = 1;
1399 while (1) {
1400 bytes_left -= name_len;
1401 if (bytes_left >= 0)
1402 read_extent_buffer(eb, dest + bytes_left,
1403 name_off, name_len);
1404 if (eb != eb_in) {
1405 if (!path->skip_locking)
1406 btrfs_tree_read_unlock_blocking(eb);
1407 free_extent_buffer(eb);
1408 }
1409 ret = btrfs_find_item(fs_root, path, parent, 0,
1410 BTRFS_INODE_REF_KEY, &found_key);
1411 if (ret > 0)
1412 ret = -ENOENT;
1413 if (ret)
1414 break;
1415
1416 next_inum = found_key.offset;
1417
1418 /* regular exit ahead */
1419 if (parent == next_inum)
1420 break;
1421
1422 slot = path->slots[0];
1423 eb = path->nodes[0];
1424 /* make sure we can use eb after releasing the path */
1425 if (eb != eb_in) {
1426 if (!path->skip_locking)
1427 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1428 path->nodes[0] = NULL;
1429 path->locks[0] = 0;
1430 }
1431 btrfs_release_path(path);
1432 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1433
1434 name_len = btrfs_inode_ref_name_len(eb, iref);
1435 name_off = (unsigned long)(iref + 1);
1436
1437 parent = next_inum;
1438 --bytes_left;
1439 if (bytes_left >= 0)
1440 dest[bytes_left] = '/';
1441 }
1442
1443 btrfs_release_path(path);
1444 path->leave_spinning = leave_spinning;
1445
1446 if (ret)
1447 return ERR_PTR(ret);
1448
1449 return dest + bytes_left;
1450}
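
/*
 * Illustrative sketch (hypothetical helper, not part of the original file)
 * of the overflow contract described above: the name is resolved once and
 * the total size the path needs is derived from the returned pointer, so a
 * caller seeing truncation knows how big a retry buffer must be.
 */
static inline int example_path_bytes_needed(struct btrfs_root *fs_root,
					    struct btrfs_path *path,
					    u32 name_len,
					    unsigned long name_off,
					    struct extent_buffer *eb_in,
					    u64 parent, char *dest, u32 size)
{
	char *start;

	start = btrfs_ref_to_path(fs_root, path, name_len, name_off, eb_in,
				  parent, dest, size);
	if (IS_ERR(start))
		return PTR_ERR(start);

	/*
	 * start < dest means the path was truncated; either way the string
	 * ends at dest + size, so this is the full size the path requires,
	 * including the terminating zero.
	 */
	return (dest + size) - start;
}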
1451
1452/*
1453 * this makes the path point to (logical EXTENT_ITEM *)
1454 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
1455 * tree blocks and <0 on error.
1456 */
1457int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1458 struct btrfs_path *path, struct btrfs_key *found_key,
1459 u64 *flags_ret)
1460{
1461 int ret;
1462 u64 flags;
1463 u64 size = 0;
1464 u32 item_size;
1465 struct extent_buffer *eb;
1466 struct btrfs_extent_item *ei;
1467 struct btrfs_key key;
1468
1469 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1470 key.type = BTRFS_METADATA_ITEM_KEY;
1471 else
1472 key.type = BTRFS_EXTENT_ITEM_KEY;
1473 key.objectid = logical;
1474 key.offset = (u64)-1;
1475
1476 ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1477 if (ret < 0)
1478 return ret;
1479
1480 ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
1481 if (ret) {
1482 if (ret > 0)
1483 ret = -ENOENT;
1484 return ret;
1485 }
1486 btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1487 if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1488 size = fs_info->extent_root->nodesize;
1489 else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1490 size = found_key->offset;
1491
1492 if (found_key->objectid > logical ||
1493 found_key->objectid + size <= logical) {
1494 pr_debug("logical %llu is not within any extent\n", logical);
1495 return -ENOENT;
1496 }
1497
1498 eb = path->nodes[0];
1499 item_size = btrfs_item_size_nr(eb, path->slots[0]);
1500 BUG_ON(item_size < sizeof(*ei));
1501
1502 ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1503 flags = btrfs_extent_flags(eb, ei);
1504
1505 pr_debug("logical %llu is at position %llu within the extent (%llu "
1506 "EXTENT_ITEM %llu) flags %#llx size %u\n",
1507 logical, logical - found_key->objectid, found_key->objectid,
1508 found_key->offset, flags, item_size);
1509
1510 WARN_ON(!flags_ret);
1511 if (flags_ret) {
1512 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1513 *flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
1514 else if (flags & BTRFS_EXTENT_FLAG_DATA)
1515 *flags_ret = BTRFS_EXTENT_FLAG_DATA;
1516 else
1517 BUG_ON(1);
1518 return 0;
1519 }
1520
1521 return -EIO;
1522}
1523
1524/*
1525 * helper function to iterate extent inline refs. ptr must point to a 0 value
1526 * for the first call and may be modified. it is used to track state.
1527 * if more refs exist, 0 is returned and the next call to
1528 * __get_extent_inline_ref must pass the modified ptr parameter to get the
1529 * next ref. after the last ref was processed, 1 is returned.
1530 * returns <0 on error
1531 */
1532static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
1533 struct btrfs_key *key,
1534 struct btrfs_extent_item *ei, u32 item_size,
1535 struct btrfs_extent_inline_ref **out_eiref,
1536 int *out_type)
1537{
1538 unsigned long end;
1539 u64 flags;
1540 struct btrfs_tree_block_info *info;
1541
1542 if (!*ptr) {
1543 /* first call */
1544 flags = btrfs_extent_flags(eb, ei);
1545 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1546 if (key->type == BTRFS_METADATA_ITEM_KEY) {
1547 /* a skinny metadata extent */
1548 *out_eiref =
1549 (struct btrfs_extent_inline_ref *)(ei + 1);
1550 } else {
1551 WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1552 info = (struct btrfs_tree_block_info *)(ei + 1);
1553 *out_eiref =
1554 (struct btrfs_extent_inline_ref *)(info + 1);
1555 }
1556 } else {
1557 *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
1558 }
1559 *ptr = (unsigned long)*out_eiref;
1560 if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
1561 return -ENOENT;
1562 }
1563
1564 end = (unsigned long)ei + item_size;
1565 *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
1566 *out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);
1567
1568 *ptr += btrfs_extent_inline_ref_size(*out_type);
1569 WARN_ON(*ptr > end);
1570 if (*ptr == end)
1571 return 1; /* last */
1572
1573 return 0;
1574}
1575
1576/*
1577 * reads the tree block backref for an extent. tree level and root are returned
1578 * through out_level and out_root. ptr must point to a 0 value for the first
1579 * call and may be modified (see __get_extent_inline_ref comment).
1580 * returns 0 if data was provided, 1 if there was no more data to provide or
1581 * <0 on error.
1582 */
1583int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1584 struct btrfs_key *key, struct btrfs_extent_item *ei,
1585 u32 item_size, u64 *out_root, u8 *out_level)
1586{
1587 int ret;
1588 int type;
1589 struct btrfs_extent_inline_ref *eiref;
1590
1591 if (*ptr == (unsigned long)-1)
1592 return 1;
1593
1594 while (1) {
1595 ret = __get_extent_inline_ref(ptr, eb, key, ei, item_size,
1596 &eiref, &type);
1597 if (ret < 0)
1598 return ret;
1599
1600 if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1601 type == BTRFS_SHARED_BLOCK_REF_KEY)
1602 break;
1603
1604 if (ret == 1)
1605 return 1;
1606 }
1607
1608 /* we can treat both ref types equally here */
1609 *out_root = btrfs_extent_inline_ref_offset(eb, eiref);
1610
1611 if (key->type == BTRFS_EXTENT_ITEM_KEY) {
1612 struct btrfs_tree_block_info *info;
1613
1614 info = (struct btrfs_tree_block_info *)(ei + 1);
1615 *out_level = btrfs_tree_block_level(eb, info);
1616 } else {
1617 ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
1618 *out_level = (u8)key->offset;
1619 }
1620
1621 if (ret == 1)
1622 *ptr = (unsigned long)-1;
1623
1624 return 0;
1625}
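
/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * the iteration protocol of tree_backref_for_extent(). ptr must start at 0
 * and is advanced by the callee; 1 means all tree backrefs were visited.
 */
static inline int example_walk_tree_backrefs(struct extent_buffer *eb,
					     struct btrfs_key *key,
					     struct btrfs_extent_item *ei,
					     u32 item_size)
{
	unsigned long ptr = 0;
	u64 root;
	u8 level;
	int ret;

	do {
		ret = tree_backref_for_extent(&ptr, eb, key, ei, item_size,
					      &root, &level);
		if (ret < 0)
			return ret;
		if (ret == 0)
			pr_debug("tree backref: root %llu level %d\n",
				 root, level);
	} while (ret == 0);

	return 0;
}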
1626
1627static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
1628 u64 root, u64 extent_item_objectid,
1629 iterate_extent_inodes_t *iterate, void *ctx)
1630{
1631 struct extent_inode_elem *eie;
1632 int ret = 0;
1633
1634 for (eie = inode_list; eie; eie = eie->next) {
		pr_debug("ref for %llu resolved, key (%llu EXTENT_DATA %llu), "
1636 "root %llu\n", extent_item_objectid,
1637 eie->inum, eie->offset, root);
1638 ret = iterate(eie->inum, eie->offset, root, ctx);
1639 if (ret) {
1640 pr_debug("stopping iteration for %llu due to ret=%d\n",
1641 extent_item_objectid, ret);
1642 break;
1643 }
1644 }
1645
1646 return ret;
1647}
1648
1649/*
1650 * calls iterate() for every inode that references the extent identified by
1651 * the given parameters.
1652 * when the iterator function returns a non-zero value, iteration stops.
1653 */
1654int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1655 u64 extent_item_objectid, u64 extent_item_pos,
1656 int search_commit_root,
1657 iterate_extent_inodes_t *iterate, void *ctx)
1658{
1659 int ret;
1660 struct btrfs_trans_handle *trans = NULL;
1661 struct ulist *refs = NULL;
1662 struct ulist *roots = NULL;
1663 struct ulist_node *ref_node = NULL;
1664 struct ulist_node *root_node = NULL;
1665 struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
1666 struct ulist_iterator ref_uiter;
1667 struct ulist_iterator root_uiter;
1668
1669 pr_debug("resolving all inodes for extent %llu\n",
1670 extent_item_objectid);
1671
1672 if (!search_commit_root) {
1673 trans = btrfs_join_transaction(fs_info->extent_root);
1674 if (IS_ERR(trans))
1675 return PTR_ERR(trans);
1676 btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1677 } else {
1678 down_read(&fs_info->commit_root_sem);
1679 }
1680
1681 ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
1682 tree_mod_seq_elem.seq, &refs,
1683 &extent_item_pos);
1684 if (ret)
1685 goto out;
1686
1687 ULIST_ITER_INIT(&ref_uiter);
1688 while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
1689 ret = __btrfs_find_all_roots(trans, fs_info, ref_node->val,
1690 tree_mod_seq_elem.seq, &roots);
1691 if (ret)
1692 break;
1693 ULIST_ITER_INIT(&root_uiter);
1694 while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
1695 pr_debug("root %llu references leaf %llu, data list "
1696 "%#llx\n", root_node->val, ref_node->val,
1697 ref_node->aux);
1698 ret = iterate_leaf_refs((struct extent_inode_elem *)
1699 (uintptr_t)ref_node->aux,
1700 root_node->val,
1701 extent_item_objectid,
1702 iterate, ctx);
1703 }
1704 ulist_free(roots);
1705 }
1706
1707 free_leaf_list(refs);
1708out:
1709 if (!search_commit_root) {
1710 btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1711 btrfs_end_transaction(trans, fs_info->extent_root);
1712 } else {
1713 up_read(&fs_info->commit_root_sem);
1714 }
1715
1716 return ret;
1717}
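
/*
 * Illustrative sketch (hypothetical callback, not part of the original
 * file): counting how many (inode, offset, root) triples reference a data
 * extent with iterate_extent_inodes().
 */
static int example_count_one_ref(u64 inum, u64 offset, u64 root, void *ctx)
{
	u64 *count = ctx;

	(*count)++;
	return 0;	/* a non-zero return would stop the iteration */
}

static inline int example_count_extent_refs(struct btrfs_fs_info *fs_info,
					    u64 extent_item_objectid,
					    u64 extent_item_pos, u64 *count)
{
	*count = 0;
	/* search_commit_root == 0: join a transaction for a live view */
	return iterate_extent_inodes(fs_info, extent_item_objectid,
				     extent_item_pos, 0,
				     example_count_one_ref, count);
}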
1718
1719int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
1720 struct btrfs_path *path,
1721 iterate_extent_inodes_t *iterate, void *ctx)
1722{
1723 int ret;
1724 u64 extent_item_pos;
1725 u64 flags = 0;
1726 struct btrfs_key found_key;
1727 int search_commit_root = path->search_commit_root;
1728
1729 ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
1730 btrfs_release_path(path);
1731 if (ret < 0)
1732 return ret;
1733 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1734 return -EINVAL;
1735
1736 extent_item_pos = logical - found_key.objectid;
1737 ret = iterate_extent_inodes(fs_info, found_key.objectid,
1738 extent_item_pos, search_commit_root,
1739 iterate, ctx);
1740
1741 return ret;
1742}
1743
1744typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
1745 struct extent_buffer *eb, void *ctx);
1746
1747static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
1748 struct btrfs_path *path,
1749 iterate_irefs_t *iterate, void *ctx)
1750{
1751 int ret = 0;
1752 int slot;
1753 u32 cur;
1754 u32 len;
1755 u32 name_len;
1756 u64 parent = 0;
1757 int found = 0;
1758 struct extent_buffer *eb;
1759 struct btrfs_item *item;
1760 struct btrfs_inode_ref *iref;
1761 struct btrfs_key found_key;
1762
1763 while (!ret) {
1764 ret = btrfs_find_item(fs_root, path, inum,
1765 parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
1766 &found_key);
1767
1768 if (ret < 0)
1769 break;
1770 if (ret) {
1771 ret = found ? 0 : -ENOENT;
1772 break;
1773 }
1774 ++found;
1775
1776 parent = found_key.offset;
1777 slot = path->slots[0];
1778 eb = btrfs_clone_extent_buffer(path->nodes[0]);
1779 if (!eb) {
1780 ret = -ENOMEM;
1781 break;
1782 }
1783 extent_buffer_get(eb);
1784 btrfs_tree_read_lock(eb);
1785 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1786 btrfs_release_path(path);
1787
1788 item = btrfs_item_nr(slot);
1789 iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1790
1791 for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
1792 name_len = btrfs_inode_ref_name_len(eb, iref);
1793 /* path must be released before calling iterate()! */
1794 pr_debug("following ref at offset %u for inode %llu in "
1795 "tree %llu\n", cur, found_key.objectid,
1796 fs_root->objectid);
1797 ret = iterate(parent, name_len,
1798 (unsigned long)(iref + 1), eb, ctx);
1799 if (ret)
1800 break;
1801 len = sizeof(*iref) + name_len;
1802 iref = (struct btrfs_inode_ref *)((char *)iref + len);
1803 }
1804 btrfs_tree_read_unlock_blocking(eb);
1805 free_extent_buffer(eb);
1806 }
1807
1808 btrfs_release_path(path);
1809
1810 return ret;
1811}
1812
1813static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
1814 struct btrfs_path *path,
1815 iterate_irefs_t *iterate, void *ctx)
1816{
1817 int ret;
1818 int slot;
1819 u64 offset = 0;
1820 u64 parent;
1821 int found = 0;
1822 struct extent_buffer *eb;
1823 struct btrfs_inode_extref *extref;
1824 u32 item_size;
1825 u32 cur_offset;
1826 unsigned long ptr;
1827
1828 while (1) {
1829 ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
1830 &offset);
1831 if (ret < 0)
1832 break;
1833 if (ret) {
1834 ret = found ? 0 : -ENOENT;
1835 break;
1836 }
1837 ++found;
1838
1839 slot = path->slots[0];
1840 eb = btrfs_clone_extent_buffer(path->nodes[0]);
1841 if (!eb) {
1842 ret = -ENOMEM;
1843 break;
1844 }
1845 extent_buffer_get(eb);
1846
1847 btrfs_tree_read_lock(eb);
1848 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1849 btrfs_release_path(path);
1850
1851 item_size = btrfs_item_size_nr(eb, slot);
1852 ptr = btrfs_item_ptr_offset(eb, slot);
1853 cur_offset = 0;
1854
1855 while (cur_offset < item_size) {
1856 u32 name_len;
1857
1858 extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
1859 parent = btrfs_inode_extref_parent(eb, extref);
1860 name_len = btrfs_inode_extref_name_len(eb, extref);
1861 ret = iterate(parent, name_len,
1862 (unsigned long)&extref->name, eb, ctx);
1863 if (ret)
1864 break;
1865
1866 cur_offset += btrfs_inode_extref_name_len(eb, extref);
1867 cur_offset += sizeof(*extref);
1868 }
1869 btrfs_tree_read_unlock_blocking(eb);
1870 free_extent_buffer(eb);
1871
1872 offset++;
1873 }
1874
1875 btrfs_release_path(path);
1876
1877 return ret;
1878}
1879
1880static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
1881 struct btrfs_path *path, iterate_irefs_t *iterate,
1882 void *ctx)
1883{
1884 int ret;
1885 int found_refs = 0;
1886
1887 ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
1888 if (!ret)
1889 ++found_refs;
1890 else if (ret != -ENOENT)
1891 return ret;
1892
1893 ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
1894 if (ret == -ENOENT && found_refs)
1895 return 0;
1896
1897 return ret;
1898}
1899
1900/*
 * returns 0 if the path could be dumped (possibly truncated)
1902 * returns <0 in case of an error
1903 */
1904static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
1905 struct extent_buffer *eb, void *ctx)
1906{
1907 struct inode_fs_paths *ipath = ctx;
1908 char *fspath;
1909 char *fspath_min;
1910 int i = ipath->fspath->elem_cnt;
1911 const int s_ptr = sizeof(char *);
1912 u32 bytes_left;
1913
1914 bytes_left = ipath->fspath->bytes_left > s_ptr ?
1915 ipath->fspath->bytes_left - s_ptr : 0;
1916
1917 fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
1918 fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
1919 name_off, eb, inum, fspath_min, bytes_left);
1920 if (IS_ERR(fspath))
1921 return PTR_ERR(fspath);
1922
1923 if (fspath > fspath_min) {
1924 ipath->fspath->val[i] = (u64)(unsigned long)fspath;
1925 ++ipath->fspath->elem_cnt;
1926 ipath->fspath->bytes_left = fspath - fspath_min;
1927 } else {
1928 ++ipath->fspath->elem_missed;
1929 ipath->fspath->bytes_missing += fspath_min - fspath;
1930 ipath->fspath->bytes_left = 0;
1931 }
1932
1933 return 0;
1934}
1935
1936/*
1937 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
1943 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
1944 * have been needed to return all paths.
1945 */
1946int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
1947{
1948 return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
1949 inode_to_path, ipath);
1950}
1951
1952struct btrfs_data_container *init_data_container(u32 total_bytes)
1953{
1954 struct btrfs_data_container *data;
1955 size_t alloc_bytes;
1956
1957 alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
1958 data = vmalloc(alloc_bytes);
1959 if (!data)
1960 return ERR_PTR(-ENOMEM);
1961
1962 if (total_bytes >= sizeof(*data)) {
1963 data->bytes_left = total_bytes - sizeof(*data);
1964 data->bytes_missing = 0;
1965 } else {
1966 data->bytes_missing = sizeof(*data) - total_bytes;
1967 data->bytes_left = 0;
1968 }
1969
1970 data->elem_cnt = 0;
1971 data->elem_missed = 0;
1972
1973 return data;
1974}
1975
1976/*
1977 * allocates space to return multiple file system paths for an inode.
1978 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct btrfs_data_container).
1980 * the returned pointer must be freed with free_ipath() in the end.
1981 */
1982struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
1983 struct btrfs_path *path)
1984{
1985 struct inode_fs_paths *ifp;
1986 struct btrfs_data_container *fspath;
1987
1988 fspath = init_data_container(total_bytes);
1989 if (IS_ERR(fspath))
1990 return (void *)fspath;
1991
1992 ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
1993 if (!ifp) {
		vfree(fspath);	/* fspath came from vmalloc() */
1995 return ERR_PTR(-ENOMEM);
1996 }
1997
1998 ifp->btrfs_path = path;
1999 ifp->fspath = fspath;
2000 ifp->fs_root = fs_root;
2001
2002 return ifp;
2003}
2004
2005void free_ipath(struct inode_fs_paths *ipath)
2006{
2007 if (!ipath)
2008 return;
2009 vfree(ipath->fspath);
2010 kfree(ipath);
2011}
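
/*
 * Illustrative sketch (hypothetical helper, not part of the original file):
 * tying the ipath helpers above together. The 4096-byte container size is
 * an arbitrary choice for the example.
 */
static inline int example_log_inode_paths(struct btrfs_root *fs_root, u64 inum)
{
	struct btrfs_path *path;
	struct inode_fs_paths *ipath;
	u32 i;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		btrfs_free_path(path);
		return PTR_ERR(ipath);
	}

	ret = paths_from_inode(inum, ipath);
	if (!ret)
		for (i = 0; i < ipath->fspath->elem_cnt; i++)
			pr_debug("path %u: %s\n", i, (char *)(unsigned long)
				 ipath->fspath->val[i]);

	free_ipath(ipath);
	btrfs_free_path(path);
	return ret;
}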