   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2011 STRATO.  All rights reserved.
   4 */
   5
   6#include <linux/mm.h>
   7#include <linux/rbtree.h>
   8#include <trace/events/btrfs.h>
   9#include "ctree.h"
  10#include "disk-io.h"
  11#include "backref.h"
  12#include "ulist.h"
  13#include "transaction.h"
  14#include "delayed-ref.h"
  15#include "locking.h"
  16
  17/* Just an arbitrary number so we can be sure this happened */
  18#define BACKREF_FOUND_SHARED 6
  19
  20struct extent_inode_elem {
  21	u64 inum;
  22	u64 offset;
  23	struct extent_inode_elem *next;
  24};
  25
  26static int check_extent_in_eb(const struct btrfs_key *key,
  27			      const struct extent_buffer *eb,
  28			      const struct btrfs_file_extent_item *fi,
  29			      u64 extent_item_pos,
  30			      struct extent_inode_elem **eie,
  31			      bool ignore_offset)
  32{
  33	u64 offset = 0;
  34	struct extent_inode_elem *e;
  35
  36	if (!ignore_offset &&
  37	    !btrfs_file_extent_compression(eb, fi) &&
  38	    !btrfs_file_extent_encryption(eb, fi) &&
  39	    !btrfs_file_extent_other_encoding(eb, fi)) {
  40		u64 data_offset;
  41		u64 data_len;
  42
  43		data_offset = btrfs_file_extent_offset(eb, fi);
  44		data_len = btrfs_file_extent_num_bytes(eb, fi);
  45
  46		if (extent_item_pos < data_offset ||
  47		    extent_item_pos >= data_offset + data_len)
  48			return 1;
  49		offset = extent_item_pos - data_offset;
  50	}
  51
  52	e = kmalloc(sizeof(*e), GFP_NOFS);
  53	if (!e)
  54		return -ENOMEM;
  55
  56	e->next = *eie;
  57	e->inum = key->objectid;
  58	e->offset = key->offset + offset;
  59	*eie = e;
  60
  61	return 0;
  62}
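/*
 * Worked example for check_extent_in_eb(), with hypothetical numbers: a
 * regular (non-compressed, non-encrypted) file extent item with
 * btrfs_file_extent_offset() == 8192 and btrfs_file_extent_num_bytes() ==
 * 16384 covers bytes [8192, 24576) of the referenced extent.  For
 * extent_item_pos == 4096 the function returns 1 (position not covered by
 * this item); for extent_item_pos == 12288 it records an extent_inode_elem
 * with offset == key->offset + 4096.
 */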
  63
  64static void free_inode_elem_list(struct extent_inode_elem *eie)
  65{
  66	struct extent_inode_elem *eie_next;
  67
  68	for (; eie; eie = eie_next) {
  69		eie_next = eie->next;
  70		kfree(eie);
  71	}
  72}
  73
  74static int find_extent_in_eb(const struct extent_buffer *eb,
  75			     u64 wanted_disk_byte, u64 extent_item_pos,
  76			     struct extent_inode_elem **eie,
  77			     bool ignore_offset)
  78{
  79	u64 disk_byte;
  80	struct btrfs_key key;
  81	struct btrfs_file_extent_item *fi;
  82	int slot;
  83	int nritems;
  84	int extent_type;
  85	int ret;
  86
   87	/*
   88	 * From the shared data ref, we only have the leaf but we need
   89	 * the key. Thus, we must look at all items and see whether we
   90	 * find one (or several) with a reference to our extent item.
   91	 */
  92	nritems = btrfs_header_nritems(eb);
  93	for (slot = 0; slot < nritems; ++slot) {
  94		btrfs_item_key_to_cpu(eb, &key, slot);
  95		if (key.type != BTRFS_EXTENT_DATA_KEY)
  96			continue;
  97		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
  98		extent_type = btrfs_file_extent_type(eb, fi);
  99		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
 100			continue;
 101		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
 102		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
 103		if (disk_byte != wanted_disk_byte)
 104			continue;
 105
 106		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
 107		if (ret < 0)
 108			return ret;
 109	}
 110
 111	return 0;
 112}
 113
 114struct preftree {
 115	struct rb_root_cached root;
 116	unsigned int count;
 117};
 118
 119#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }
 120
 121struct preftrees {
 122	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
 123	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
 124	struct preftree indirect_missing_keys;
 125};
 126
 127/*
 128 * Checks for a shared extent during backref search.
 129 *
 130 * The share_count tracks prelim_refs (direct and indirect) having a
 131 * ref->count >0:
 132 *  - incremented when a ref->count transitions to >0
 133 *  - decremented when a ref->count transitions to <1
 134 */
 135struct share_check {
 136	u64 root_objectid;
 137	u64 inum;
 138	int share_count;
 139};
 140
 141static inline int extent_is_shared(struct share_check *sc)
 142{
 143	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
 144}
 145
 146static struct kmem_cache *btrfs_prelim_ref_cache;
 147
 148int __init btrfs_prelim_ref_init(void)
 149{
 150	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
 151					sizeof(struct prelim_ref),
 152					0,
 153					SLAB_MEM_SPREAD,
 154					NULL);
 155	if (!btrfs_prelim_ref_cache)
 156		return -ENOMEM;
 157	return 0;
 158}
 159
 160void __cold btrfs_prelim_ref_exit(void)
 161{
 162	kmem_cache_destroy(btrfs_prelim_ref_cache);
 163}
 164
 165static void free_pref(struct prelim_ref *ref)
 166{
 167	kmem_cache_free(btrfs_prelim_ref_cache, ref);
 168}
 169
 170/*
 171 * Return 0 when both refs are for the same block (and can be merged).
 172 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 173 * indicates a 'higher' block.
 174 */
 175static int prelim_ref_compare(struct prelim_ref *ref1,
 176			      struct prelim_ref *ref2)
 177{
 178	if (ref1->level < ref2->level)
 179		return -1;
 180	if (ref1->level > ref2->level)
 181		return 1;
 182	if (ref1->root_id < ref2->root_id)
 183		return -1;
 184	if (ref1->root_id > ref2->root_id)
 185		return 1;
 186	if (ref1->key_for_search.type < ref2->key_for_search.type)
 187		return -1;
 188	if (ref1->key_for_search.type > ref2->key_for_search.type)
 189		return 1;
 190	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
 191		return -1;
 192	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
 193		return 1;
 194	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
 195		return -1;
 196	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
 197		return 1;
 198	if (ref1->parent < ref2->parent)
 199		return -1;
 200	if (ref1->parent > ref2->parent)
 201		return 1;
 202
 203	return 0;
 204}
 205
 206static void update_share_count(struct share_check *sc, int oldcount,
 207			       int newcount)
 208{
 209	if ((!sc) || (oldcount == 0 && newcount < 1))
 210		return;
 211
 212	if (oldcount > 0 && newcount < 1)
 213		sc->share_count--;
 214	else if (oldcount < 1 && newcount > 0)
 215		sc->share_count++;
 216}
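/*
 * The transitions handled above, spelled out (oldcount -> newcount):
 *
 *   oldcount < 1, newcount > 0  ->  share_count++
 *   oldcount > 0, newcount < 1  ->  share_count--
 *   anything else               ->  no change
 *
 * Negative counts come from BTRFS_DROP_DELAYED_REF delayed refs (see
 * add_delayed_refs() and the merge in prelim_ref_insert()), so only the
 * sign transitions matter for the shared-extent decision.
 */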
 217
 218/*
 219 * Add @newref to the @root rbtree, merging identical refs.
 220 *
 221 * Callers should assume that newref has been freed after calling.
 222 */
 223static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
 224			      struct preftree *preftree,
 225			      struct prelim_ref *newref,
 226			      struct share_check *sc)
 227{
 228	struct rb_root_cached *root;
 229	struct rb_node **p;
 230	struct rb_node *parent = NULL;
 231	struct prelim_ref *ref;
 232	int result;
 233	bool leftmost = true;
 234
 235	root = &preftree->root;
 236	p = &root->rb_root.rb_node;
 237
 238	while (*p) {
 239		parent = *p;
 240		ref = rb_entry(parent, struct prelim_ref, rbnode);
 241		result = prelim_ref_compare(ref, newref);
 242		if (result < 0) {
 243			p = &(*p)->rb_left;
 244		} else if (result > 0) {
 245			p = &(*p)->rb_right;
 246			leftmost = false;
 247		} else {
 248			/* Identical refs, merge them and free @newref */
 249			struct extent_inode_elem *eie = ref->inode_list;
 250
 251			while (eie && eie->next)
 252				eie = eie->next;
 253
 254			if (!eie)
 255				ref->inode_list = newref->inode_list;
 256			else
 257				eie->next = newref->inode_list;
 258			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
 259						     preftree->count);
 260			/*
 261			 * A delayed ref can have newref->count < 0.
 262			 * The ref->count is updated to follow any
 263			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
 264			 */
 265			update_share_count(sc, ref->count,
 266					   ref->count + newref->count);
 267			ref->count += newref->count;
 268			free_pref(newref);
 269			return;
 270		}
 271	}
 272
 273	update_share_count(sc, 0, newref->count);
 274	preftree->count++;
 275	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
 276	rb_link_node(&newref->rbnode, parent, p);
 277	rb_insert_color_cached(&newref->rbnode, root, leftmost);
 278}
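/*
 * Merge example (hypothetical): inserting two refs that compare equal via
 * prelim_ref_compare(), say one with count == 1 from the on-disk backrefs
 * and one with count == 1 from a BTRFS_ADD_DELAYED_REF, leaves a single
 * node with count == 2 and the inode lists of both refs chained together.
 * A matching BTRFS_DROP_DELAYED_REF with count == -1 would instead bring
 * the merged count back to 0.
 */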
 279
 280/*
 281 * Release the entire tree.  We don't care about internal consistency so
 282 * just free everything and then reset the tree root.
 283 */
 284static void prelim_release(struct preftree *preftree)
 285{
 286	struct prelim_ref *ref, *next_ref;
 287
 288	rbtree_postorder_for_each_entry_safe(ref, next_ref,
 289					     &preftree->root.rb_root, rbnode)
 290		free_pref(ref);
 291
 292	preftree->root = RB_ROOT_CACHED;
 293	preftree->count = 0;
 294}
 295
 296/*
 297 * the rules for all callers of this function are:
 298 * - obtaining the parent is the goal
 299 * - if you add a key, you must know that it is a correct key
 300 * - if you cannot add the parent or a correct key, then we will look into the
 301 *   block later to set a correct key
 302 *
 303 * delayed refs
 304 * ============
 305 *        backref type | shared | indirect | shared | indirect
 306 * information         |   tree |     tree |   data |     data
 307 * --------------------+--------+----------+--------+----------
 308 *      parent logical |    y   |     -    |    -   |     -
 309 *      key to resolve |    -   |     y    |    y   |     y
 310 *  tree block logical |    -   |     -    |    -   |     -
 311 *  root for resolving |    y   |     y    |    y   |     y
 312 *
  313 * - column 1:       we have the parent -> done
 314 * - column 2, 3, 4: we use the key to find the parent
 315 *
 316 * on disk refs (inline or keyed)
 317 * ==============================
 318 *        backref type | shared | indirect | shared | indirect
 319 * information         |   tree |     tree |   data |     data
 320 * --------------------+--------+----------+--------+----------
 321 *      parent logical |    y   |     -    |    y   |     -
 322 *      key to resolve |    -   |     -    |    -   |     y
 323 *  tree block logical |    y   |     y    |    y   |     y
 324 *  root for resolving |    -   |     y    |    y   |     y
 325 *
  326 * - column 1, 3: we have the parent -> done
 327 * - column 2:    we take the first key from the block to find the parent
 328 *                (see add_missing_keys)
 329 * - column 4:    we use the key to find the parent
 330 *
 331 * additional information that's available but not required to find the parent
 332 * block might help in merging entries to gain some speed.
 333 */
 334static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
 335			  struct preftree *preftree, u64 root_id,
 336			  const struct btrfs_key *key, int level, u64 parent,
 337			  u64 wanted_disk_byte, int count,
 338			  struct share_check *sc, gfp_t gfp_mask)
 339{
 340	struct prelim_ref *ref;
 341
 342	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
 343		return 0;
 344
 345	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
 346	if (!ref)
 347		return -ENOMEM;
 348
 349	ref->root_id = root_id;
 350	if (key) {
 351		ref->key_for_search = *key;
  352		/*
  353		 * We can often find data backrefs with an offset that is too
  354		 * large (>= LLONG_MAX, maximum allowed file offset) due to
  355		 * underflows when subtracting the data offset of the
  356		 * corresponding extent data item from a file's offset. This can
  357		 * happen for example in the clone ioctl.
  358		 * So if we detect such a case we set the search key's offset to
  359		 * zero to make sure we will find the matching file extent item
  360		 * at add_all_parents(), otherwise we would miss it because the
  361		 * offset taken from the backref is much larger than the offset
  362		 * of the file extent item. This can make us scan a very large
  363		 * number of file extent items, but at least it will not make
  364		 * us miss any.
  365		 * This is an ugly workaround for a behaviour that should have
  366		 * never existed, but it does and a fix for the clone ioctl
  367		 * would touch a lot of places, cause backwards incompatibility
  368		 * and would not fix the problem for extents cloned with older
  369		 * kernels.
  370		 */
 371		if (ref->key_for_search.type == BTRFS_EXTENT_DATA_KEY &&
 372		    ref->key_for_search.offset >= LLONG_MAX)
 373			ref->key_for_search.offset = 0;
 374	} else {
 375		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
 376	}
 377
 378	ref->inode_list = NULL;
 379	ref->level = level;
 380	ref->count = count;
 381	ref->parent = parent;
 382	ref->wanted_disk_byte = wanted_disk_byte;
 383	prelim_ref_insert(fs_info, preftree, ref, sc);
 384	return extent_is_shared(sc);
 385}
 386
 387/* direct refs use root == 0, key == NULL */
 388static int add_direct_ref(const struct btrfs_fs_info *fs_info,
 389			  struct preftrees *preftrees, int level, u64 parent,
 390			  u64 wanted_disk_byte, int count,
 391			  struct share_check *sc, gfp_t gfp_mask)
 392{
 393	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
 394			      parent, wanted_disk_byte, count, sc, gfp_mask);
 395}
 396
 397/* indirect refs use parent == 0 */
 398static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
 399			    struct preftrees *preftrees, u64 root_id,
 400			    const struct btrfs_key *key, int level,
 401			    u64 wanted_disk_byte, int count,
 402			    struct share_check *sc, gfp_t gfp_mask)
 403{
 404	struct preftree *tree = &preftrees->indirect;
 405
 406	if (!key)
 407		tree = &preftrees->indirect_missing_keys;
 408	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
 409			      wanted_disk_byte, count, sc, gfp_mask);
 410}
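/*
 * The two helpers above map directly onto the table in the comment before
 * add_prelim_ref(): shared (direct) backrefs already carry the parent
 * block's logical address, so they go into preftrees->direct with root == 0
 * and key == NULL, while tree block and extent data (indirect) backrefs
 * carry a root plus an optional key and still need to be resolved to a
 * parent.  Refs without a key end up in indirect_missing_keys until
 * add_missing_keys() fills the key in from the referenced block.
 */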
 411
 412static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
 413			   struct ulist *parents, struct prelim_ref *ref,
 414			   int level, u64 time_seq, const u64 *extent_item_pos,
 415			   u64 total_refs, bool ignore_offset)
 416{
 417	int ret = 0;
 418	int slot;
 419	struct extent_buffer *eb;
 420	struct btrfs_key key;
 421	struct btrfs_key *key_for_search = &ref->key_for_search;
 422	struct btrfs_file_extent_item *fi;
 423	struct extent_inode_elem *eie = NULL, *old = NULL;
 424	u64 disk_byte;
 425	u64 wanted_disk_byte = ref->wanted_disk_byte;
 426	u64 count = 0;
 427
 428	if (level != 0) {
 429		eb = path->nodes[level];
 430		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
 431		if (ret < 0)
 432			return ret;
 433		return 0;
 434	}
 435
 436	/*
 437	 * We normally enter this function with the path already pointing to
 438	 * the first item to check. But sometimes, we may enter it with
 439	 * slot==nritems. In that case, go to the next leaf before we continue.
 440	 */
 441	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
 442		if (time_seq == SEQ_LAST)
 443			ret = btrfs_next_leaf(root, path);
 444		else
 445			ret = btrfs_next_old_leaf(root, path, time_seq);
 446	}
 447
 448	while (!ret && count < total_refs) {
 449		eb = path->nodes[0];
 450		slot = path->slots[0];
 451
 452		btrfs_item_key_to_cpu(eb, &key, slot);
 453
 454		if (key.objectid != key_for_search->objectid ||
 455		    key.type != BTRFS_EXTENT_DATA_KEY)
 456			break;
 457
 458		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
 459		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
 460
 461		if (disk_byte == wanted_disk_byte) {
 462			eie = NULL;
 463			old = NULL;
 464			count++;
 465			if (extent_item_pos) {
 466				ret = check_extent_in_eb(&key, eb, fi,
 467						*extent_item_pos,
 468						&eie, ignore_offset);
 469				if (ret < 0)
 470					break;
 471			}
 472			if (ret > 0)
 473				goto next;
 474			ret = ulist_add_merge_ptr(parents, eb->start,
 475						  eie, (void **)&old, GFP_NOFS);
 476			if (ret < 0)
 477				break;
 478			if (!ret && extent_item_pos) {
 479				while (old->next)
 480					old = old->next;
 481				old->next = eie;
 482			}
 483			eie = NULL;
 484		}
 485next:
 486		if (time_seq == SEQ_LAST)
 487			ret = btrfs_next_item(root, path);
 488		else
 489			ret = btrfs_next_old_item(root, path, time_seq);
 490	}
 491
 492	if (ret > 0)
 493		ret = 0;
 494	else if (ret < 0)
 495		free_inode_elem_list(eie);
 496	return ret;
 497}
 498
 499/*
 500 * resolve an indirect backref in the form (root_id, key, level)
 501 * to a logical address
 502 */
 503static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
 504				struct btrfs_path *path, u64 time_seq,
 505				struct prelim_ref *ref, struct ulist *parents,
 506				const u64 *extent_item_pos, u64 total_refs,
 507				bool ignore_offset)
 508{
 509	struct btrfs_root *root;
 510	struct btrfs_key root_key;
 511	struct extent_buffer *eb;
 512	int ret = 0;
 513	int root_level;
 514	int level = ref->level;
 515	int index;
 516
 517	root_key.objectid = ref->root_id;
 518	root_key.type = BTRFS_ROOT_ITEM_KEY;
 519	root_key.offset = (u64)-1;
 520
 521	index = srcu_read_lock(&fs_info->subvol_srcu);
 522
 523	root = btrfs_get_fs_root(fs_info, &root_key, false);
 524	if (IS_ERR(root)) {
 525		srcu_read_unlock(&fs_info->subvol_srcu, index);
 526		ret = PTR_ERR(root);
 527		goto out;
 528	}
 529
 530	if (btrfs_is_testing(fs_info)) {
 531		srcu_read_unlock(&fs_info->subvol_srcu, index);
 532		ret = -ENOENT;
 533		goto out;
 534	}
 535
 536	if (path->search_commit_root)
 537		root_level = btrfs_header_level(root->commit_root);
 538	else if (time_seq == SEQ_LAST)
 539		root_level = btrfs_header_level(root->node);
 540	else
 541		root_level = btrfs_old_root_level(root, time_seq);
 542
 543	if (root_level + 1 == level) {
 544		srcu_read_unlock(&fs_info->subvol_srcu, index);
 545		goto out;
 546	}
 547
 548	path->lowest_level = level;
 549	if (time_seq == SEQ_LAST)
 550		ret = btrfs_search_slot(NULL, root, &ref->key_for_search, path,
 551					0, 0);
 552	else
 553		ret = btrfs_search_old_slot(root, &ref->key_for_search, path,
 554					    time_seq);
 555
 556	/* root node has been locked, we can release @subvol_srcu safely here */
 557	srcu_read_unlock(&fs_info->subvol_srcu, index);
 558
 559	btrfs_debug(fs_info,
 560		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
 561		 ref->root_id, level, ref->count, ret,
 562		 ref->key_for_search.objectid, ref->key_for_search.type,
 563		 ref->key_for_search.offset);
 564	if (ret < 0)
 565		goto out;
 566
 567	eb = path->nodes[level];
 568	while (!eb) {
 569		if (WARN_ON(!level)) {
 570			ret = 1;
 571			goto out;
 572		}
 573		level--;
 574		eb = path->nodes[level];
 575	}
 576
 577	ret = add_all_parents(root, path, parents, ref, level, time_seq,
 578			      extent_item_pos, total_refs, ignore_offset);
 579out:
 580	path->lowest_level = 0;
 581	btrfs_release_path(path);
 582	return ret;
 583}
 584
 585static struct extent_inode_elem *
 586unode_aux_to_inode_list(struct ulist_node *node)
 587{
 588	if (!node)
 589		return NULL;
 590	return (struct extent_inode_elem *)(uintptr_t)node->aux;
 591}
 592
 593/*
 594 * We maintain three separate rbtrees: one for direct refs, one for
 595 * indirect refs which have a key, and one for indirect refs which do not
 596 * have a key. Each tree does merge on insertion.
 597 *
 598 * Once all of the references are located, we iterate over the tree of
 599 * indirect refs with missing keys. An appropriate key is located and
 600 * the ref is moved onto the tree for indirect refs. After all missing
 601 * keys are thus located, we iterate over the indirect ref tree, resolve
 602 * each reference, and then insert the resolved reference onto the
 603 * direct tree (merging there too).
 604 *
 605 * New backrefs (i.e., for parent nodes) are added to the appropriate
 606 * rbtree as they are encountered. The new backrefs are subsequently
 607 * resolved as above.
 608 */
 609static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
 610				 struct btrfs_path *path, u64 time_seq,
 611				 struct preftrees *preftrees,
 612				 const u64 *extent_item_pos, u64 total_refs,
 613				 struct share_check *sc, bool ignore_offset)
 614{
 615	int err;
 616	int ret = 0;
 617	struct ulist *parents;
 618	struct ulist_node *node;
 619	struct ulist_iterator uiter;
 620	struct rb_node *rnode;
 621
 622	parents = ulist_alloc(GFP_NOFS);
 623	if (!parents)
 624		return -ENOMEM;
 625
 626	/*
 627	 * We could trade memory usage for performance here by iterating
 628	 * the tree, allocating new refs for each insertion, and then
 629	 * freeing the entire indirect tree when we're done.  In some test
 630	 * cases, the tree can grow quite large (~200k objects).
 631	 */
 632	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
 633		struct prelim_ref *ref;
 634
 635		ref = rb_entry(rnode, struct prelim_ref, rbnode);
 636		if (WARN(ref->parent,
 637			 "BUG: direct ref found in indirect tree")) {
 638			ret = -EINVAL;
 639			goto out;
 640		}
 641
 642		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
 643		preftrees->indirect.count--;
 644
 645		if (ref->count == 0) {
 646			free_pref(ref);
 647			continue;
 648		}
 649
 650		if (sc && sc->root_objectid &&
 651		    ref->root_id != sc->root_objectid) {
 652			free_pref(ref);
 653			ret = BACKREF_FOUND_SHARED;
 654			goto out;
 655		}
 656		err = resolve_indirect_ref(fs_info, path, time_seq, ref,
 657					   parents, extent_item_pos,
 658					   total_refs, ignore_offset);
  659		/*
  660		 * We can only tolerate -ENOENT; any other error must be
  661		 * caught and returned directly.
  662		 */
 663		if (err == -ENOENT) {
 664			prelim_ref_insert(fs_info, &preftrees->direct, ref,
 665					  NULL);
 666			continue;
 667		} else if (err) {
 668			free_pref(ref);
 669			ret = err;
 670			goto out;
 671		}
 672
 673		/* we put the first parent into the ref at hand */
 674		ULIST_ITER_INIT(&uiter);
 675		node = ulist_next(parents, &uiter);
 676		ref->parent = node ? node->val : 0;
 677		ref->inode_list = unode_aux_to_inode_list(node);
 678
  679		/* Add a prelim_ref for each of the other parents. */
 680		while ((node = ulist_next(parents, &uiter))) {
 681			struct prelim_ref *new_ref;
 682
 683			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
 684						   GFP_NOFS);
 685			if (!new_ref) {
 686				free_pref(ref);
 687				ret = -ENOMEM;
 688				goto out;
 689			}
 690			memcpy(new_ref, ref, sizeof(*ref));
 691			new_ref->parent = node->val;
 692			new_ref->inode_list = unode_aux_to_inode_list(node);
 693			prelim_ref_insert(fs_info, &preftrees->direct,
 694					  new_ref, NULL);
 695		}
 696
 697		/*
 698		 * Now it's a direct ref, put it in the direct tree. We must
 699		 * do this last because the ref could be merged/freed here.
 700		 */
 701		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);
 702
 703		ulist_reinit(parents);
 704		cond_resched();
 705	}
 706out:
 707	ulist_free(parents);
 708	return ret;
 709}
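/*
 * To summarize the loop above: each indirect ref is popped off the tree,
 * resolved to the set of parent blocks via resolve_indirect_ref(), and then
 * re-inserted as one direct ref per parent found.  Only -ENOENT is tolerated
 * (the ref is kept unresolved in the direct tree); any other error aborts
 * the whole walk.
 */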
 710
 711/*
 712 * read tree blocks and add keys where required.
 713 */
 714static int add_missing_keys(struct btrfs_fs_info *fs_info,
 715			    struct preftrees *preftrees, bool lock)
 716{
 717	struct prelim_ref *ref;
 718	struct extent_buffer *eb;
 719	struct preftree *tree = &preftrees->indirect_missing_keys;
 720	struct rb_node *node;
 721
 722	while ((node = rb_first_cached(&tree->root))) {
 723		ref = rb_entry(node, struct prelim_ref, rbnode);
 724		rb_erase_cached(node, &tree->root);
 725
 726		BUG_ON(ref->parent);	/* should not be a direct ref */
 727		BUG_ON(ref->key_for_search.type);
 728		BUG_ON(!ref->wanted_disk_byte);
 729
 730		eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0,
 731				     ref->level - 1, NULL);
 732		if (IS_ERR(eb)) {
 733			free_pref(ref);
 734			return PTR_ERR(eb);
 735		} else if (!extent_buffer_uptodate(eb)) {
 736			free_pref(ref);
 737			free_extent_buffer(eb);
 738			return -EIO;
 739		}
 740		if (lock)
 741			btrfs_tree_read_lock(eb);
 742		if (btrfs_header_level(eb) == 0)
 743			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
 744		else
 745			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
 746		if (lock)
 747			btrfs_tree_read_unlock(eb);
 748		free_extent_buffer(eb);
 749		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
 750		cond_resched();
 751	}
 752	return 0;
 753}
 754
  755	/*
  756	 * Add all currently queued delayed refs from this head whose seq nr is
  757	 * smaller than or equal to seq to the preftrees.
  758	 */
 759static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
 760			    struct btrfs_delayed_ref_head *head, u64 seq,
 761			    struct preftrees *preftrees, u64 *total_refs,
 762			    struct share_check *sc)
 763{
 764	struct btrfs_delayed_ref_node *node;
 765	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
 766	struct btrfs_key key;
 767	struct btrfs_key tmp_op_key;
 768	struct rb_node *n;
 769	int count;
 770	int ret = 0;
 771
 772	if (extent_op && extent_op->update_key)
 773		btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
 774
 775	spin_lock(&head->lock);
 776	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
 777		node = rb_entry(n, struct btrfs_delayed_ref_node,
 778				ref_node);
 779		if (node->seq > seq)
 780			continue;
 781
 782		switch (node->action) {
 783		case BTRFS_ADD_DELAYED_EXTENT:
 784		case BTRFS_UPDATE_DELAYED_HEAD:
 785			WARN_ON(1);
 786			continue;
 787		case BTRFS_ADD_DELAYED_REF:
 788			count = node->ref_mod;
 789			break;
 790		case BTRFS_DROP_DELAYED_REF:
 791			count = node->ref_mod * -1;
 792			break;
 793		default:
 794			BUG();
 795		}
 796		*total_refs += count;
 797		switch (node->type) {
 798		case BTRFS_TREE_BLOCK_REF_KEY: {
 799			/* NORMAL INDIRECT METADATA backref */
 800			struct btrfs_delayed_tree_ref *ref;
 801
 802			ref = btrfs_delayed_node_to_tree_ref(node);
 803			ret = add_indirect_ref(fs_info, preftrees, ref->root,
 804					       &tmp_op_key, ref->level + 1,
 805					       node->bytenr, count, sc,
 806					       GFP_ATOMIC);
 807			break;
 808		}
 809		case BTRFS_SHARED_BLOCK_REF_KEY: {
 810			/* SHARED DIRECT METADATA backref */
 811			struct btrfs_delayed_tree_ref *ref;
 812
 813			ref = btrfs_delayed_node_to_tree_ref(node);
 814
 815			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
 816					     ref->parent, node->bytenr, count,
 817					     sc, GFP_ATOMIC);
 818			break;
 819		}
 820		case BTRFS_EXTENT_DATA_REF_KEY: {
 821			/* NORMAL INDIRECT DATA backref */
 822			struct btrfs_delayed_data_ref *ref;
 823			ref = btrfs_delayed_node_to_data_ref(node);
 824
 825			key.objectid = ref->objectid;
 826			key.type = BTRFS_EXTENT_DATA_KEY;
 827			key.offset = ref->offset;
 828
  829			/*
  830			 * Found an inum that doesn't match our known inum; we
  831			 * know it's shared.
  832			 */
 833			if (sc && sc->inum && ref->objectid != sc->inum) {
 834				ret = BACKREF_FOUND_SHARED;
 835				goto out;
 836			}
 837
 838			ret = add_indirect_ref(fs_info, preftrees, ref->root,
 839					       &key, 0, node->bytenr, count, sc,
 840					       GFP_ATOMIC);
 841			break;
 842		}
 843		case BTRFS_SHARED_DATA_REF_KEY: {
 844			/* SHARED DIRECT FULL backref */
 845			struct btrfs_delayed_data_ref *ref;
 846
 847			ref = btrfs_delayed_node_to_data_ref(node);
 848
 849			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
 850					     node->bytenr, count, sc,
 851					     GFP_ATOMIC);
 852			break;
 853		}
 854		default:
 855			WARN_ON(1);
 856		}
 857		/*
 858		 * We must ignore BACKREF_FOUND_SHARED until all delayed
 859		 * refs have been checked.
 860		 */
 861		if (ret && (ret != BACKREF_FOUND_SHARED))
 862			break;
 863	}
 864	if (!ret)
 865		ret = extent_is_shared(sc);
 866out:
 867	spin_unlock(&head->lock);
 868	return ret;
 869}
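/*
 * Counting example (hypothetical): a pending BTRFS_ADD_DELAYED_REF with
 * ref_mod == 2 contributes count == +2, and a BTRFS_DROP_DELAYED_REF with
 * ref_mod == 1 contributes count == -1, so the merged prelim_ref (and
 * *total_refs) reflect the state the extent will have once the delayed refs
 * are run.  BACKREF_FOUND_SHARED is deliberately not acted on until every
 * delayed ref of this head has been considered.
 */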
 870
 871/*
 872 * add all inline backrefs for bytenr to the list
 873 *
 874 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 875 */
 876static int add_inline_refs(const struct btrfs_fs_info *fs_info,
 877			   struct btrfs_path *path, u64 bytenr,
 878			   int *info_level, struct preftrees *preftrees,
 879			   u64 *total_refs, struct share_check *sc)
 880{
 881	int ret = 0;
 882	int slot;
 883	struct extent_buffer *leaf;
 884	struct btrfs_key key;
 885	struct btrfs_key found_key;
 886	unsigned long ptr;
 887	unsigned long end;
 888	struct btrfs_extent_item *ei;
 889	u64 flags;
 890	u64 item_size;
 891
 892	/*
 893	 * enumerate all inline refs
 894	 */
 895	leaf = path->nodes[0];
 896	slot = path->slots[0];
 897
 898	item_size = btrfs_item_size_nr(leaf, slot);
 899	BUG_ON(item_size < sizeof(*ei));
 900
 901	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
 902	flags = btrfs_extent_flags(leaf, ei);
 903	*total_refs += btrfs_extent_refs(leaf, ei);
 904	btrfs_item_key_to_cpu(leaf, &found_key, slot);
 905
 906	ptr = (unsigned long)(ei + 1);
 907	end = (unsigned long)ei + item_size;
 908
 909	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
 910	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
 911		struct btrfs_tree_block_info *info;
 912
 913		info = (struct btrfs_tree_block_info *)ptr;
 914		*info_level = btrfs_tree_block_level(leaf, info);
 915		ptr += sizeof(struct btrfs_tree_block_info);
 916		BUG_ON(ptr > end);
 917	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
 918		*info_level = found_key.offset;
 919	} else {
 920		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
 921	}
 922
 923	while (ptr < end) {
 924		struct btrfs_extent_inline_ref *iref;
 925		u64 offset;
 926		int type;
 927
 928		iref = (struct btrfs_extent_inline_ref *)ptr;
 929		type = btrfs_get_extent_inline_ref_type(leaf, iref,
 930							BTRFS_REF_TYPE_ANY);
 931		if (type == BTRFS_REF_TYPE_INVALID)
 932			return -EUCLEAN;
 933
 934		offset = btrfs_extent_inline_ref_offset(leaf, iref);
 935
 936		switch (type) {
 937		case BTRFS_SHARED_BLOCK_REF_KEY:
 938			ret = add_direct_ref(fs_info, preftrees,
 939					     *info_level + 1, offset,
 940					     bytenr, 1, NULL, GFP_NOFS);
 941			break;
 942		case BTRFS_SHARED_DATA_REF_KEY: {
 943			struct btrfs_shared_data_ref *sdref;
 944			int count;
 945
 946			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
 947			count = btrfs_shared_data_ref_count(leaf, sdref);
 948
 949			ret = add_direct_ref(fs_info, preftrees, 0, offset,
 950					     bytenr, count, sc, GFP_NOFS);
 951			break;
 952		}
 953		case BTRFS_TREE_BLOCK_REF_KEY:
 954			ret = add_indirect_ref(fs_info, preftrees, offset,
 955					       NULL, *info_level + 1,
 956					       bytenr, 1, NULL, GFP_NOFS);
 957			break;
 958		case BTRFS_EXTENT_DATA_REF_KEY: {
 959			struct btrfs_extent_data_ref *dref;
 960			int count;
 961			u64 root;
 962
 963			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
 964			count = btrfs_extent_data_ref_count(leaf, dref);
 965			key.objectid = btrfs_extent_data_ref_objectid(leaf,
 966								      dref);
 967			key.type = BTRFS_EXTENT_DATA_KEY;
 968			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
 969
 970			if (sc && sc->inum && key.objectid != sc->inum) {
 971				ret = BACKREF_FOUND_SHARED;
 972				break;
 973			}
 974
 975			root = btrfs_extent_data_ref_root(leaf, dref);
 976
 977			ret = add_indirect_ref(fs_info, preftrees, root,
 978					       &key, 0, bytenr, count,
 979					       sc, GFP_NOFS);
 980			break;
 981		}
 982		default:
 983			WARN_ON(1);
 984		}
 985		if (ret)
 986			return ret;
 987		ptr += btrfs_extent_inline_ref_size(type);
 988	}
 989
 990	return 0;
 991}
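/*
 * Layout being parsed above (non-skinny tree block extent item shown):
 *
 *   struct btrfs_extent_item
 *   [struct btrfs_tree_block_info]   <- only for EXTENT_ITEM tree blocks
 *   struct btrfs_extent_inline_ref   <- repeated until the item ends
 *   (optional payload per ref type, e.g. btrfs_extent_data_ref or
 *    btrfs_shared_data_ref)
 *
 * For METADATA_ITEM (skinny) keys the level lives in key.offset and there is
 * no btrfs_tree_block_info, which is why the branch above reads *info_level
 * from found_key.offset instead.
 */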
 992
 993/*
 994 * add all non-inline backrefs for bytenr to the list
 995 *
 996 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 997 */
 998static int add_keyed_refs(struct btrfs_fs_info *fs_info,
 999			  struct btrfs_path *path, u64 bytenr,
1000			  int info_level, struct preftrees *preftrees,
1001			  struct share_check *sc)
1002{
1003	struct btrfs_root *extent_root = fs_info->extent_root;
1004	int ret;
1005	int slot;
1006	struct extent_buffer *leaf;
1007	struct btrfs_key key;
1008
1009	while (1) {
1010		ret = btrfs_next_item(extent_root, path);
1011		if (ret < 0)
1012			break;
1013		if (ret) {
1014			ret = 0;
1015			break;
1016		}
1017
1018		slot = path->slots[0];
1019		leaf = path->nodes[0];
1020		btrfs_item_key_to_cpu(leaf, &key, slot);
1021
1022		if (key.objectid != bytenr)
1023			break;
1024		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
1025			continue;
1026		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
1027			break;
1028
1029		switch (key.type) {
1030		case BTRFS_SHARED_BLOCK_REF_KEY:
1031			/* SHARED DIRECT METADATA backref */
1032			ret = add_direct_ref(fs_info, preftrees,
1033					     info_level + 1, key.offset,
1034					     bytenr, 1, NULL, GFP_NOFS);
1035			break;
1036		case BTRFS_SHARED_DATA_REF_KEY: {
1037			/* SHARED DIRECT FULL backref */
1038			struct btrfs_shared_data_ref *sdref;
1039			int count;
1040
1041			sdref = btrfs_item_ptr(leaf, slot,
1042					      struct btrfs_shared_data_ref);
1043			count = btrfs_shared_data_ref_count(leaf, sdref);
1044			ret = add_direct_ref(fs_info, preftrees, 0,
1045					     key.offset, bytenr, count,
1046					     sc, GFP_NOFS);
1047			break;
1048		}
1049		case BTRFS_TREE_BLOCK_REF_KEY:
1050			/* NORMAL INDIRECT METADATA backref */
1051			ret = add_indirect_ref(fs_info, preftrees, key.offset,
1052					       NULL, info_level + 1, bytenr,
1053					       1, NULL, GFP_NOFS);
1054			break;
1055		case BTRFS_EXTENT_DATA_REF_KEY: {
1056			/* NORMAL INDIRECT DATA backref */
1057			struct btrfs_extent_data_ref *dref;
1058			int count;
1059			u64 root;
1060
1061			dref = btrfs_item_ptr(leaf, slot,
1062					      struct btrfs_extent_data_ref);
1063			count = btrfs_extent_data_ref_count(leaf, dref);
1064			key.objectid = btrfs_extent_data_ref_objectid(leaf,
1065								      dref);
1066			key.type = BTRFS_EXTENT_DATA_KEY;
1067			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
1068
1069			if (sc && sc->inum && key.objectid != sc->inum) {
1070				ret = BACKREF_FOUND_SHARED;
1071				break;
1072			}
1073
1074			root = btrfs_extent_data_ref_root(leaf, dref);
1075			ret = add_indirect_ref(fs_info, preftrees, root,
1076					       &key, 0, bytenr, count,
1077					       sc, GFP_NOFS);
1078			break;
1079		}
1080		default:
1081			WARN_ON(1);
1082		}
1083		if (ret)
1084			return ret;
1085
1086	}
1087
1088	return ret;
1089}
1090
1091/*
1092 * this adds all existing backrefs (inline backrefs, backrefs and delayed
1093 * refs) for the given bytenr to the refs list, merges duplicates and resolves
1094 * indirect refs to their parent bytenr.
1095 * When roots are found, they're added to the roots list
1096 *
 1097 * If time_seq is set to SEQ_LAST, it will not search delayed_refs and behaves
 1098 * much like the trans == NULL case; the only difference is that it does not
 1099 * use the commit root.
1100 * The special case is for qgroup to search roots in commit_transaction().
1101 *
1102 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
1103 * shared extent is detected.
1104 *
1105 * Otherwise this returns 0 for success and <0 for an error.
1106 *
1107 * If ignore_offset is set to false, only extent refs whose offsets match
1108 * extent_item_pos are returned.  If true, every extent ref is returned
1109 * and extent_item_pos is ignored.
1110 *
1111 * FIXME some caching might speed things up
1112 */
1113static int find_parent_nodes(struct btrfs_trans_handle *trans,
1114			     struct btrfs_fs_info *fs_info, u64 bytenr,
1115			     u64 time_seq, struct ulist *refs,
1116			     struct ulist *roots, const u64 *extent_item_pos,
1117			     struct share_check *sc, bool ignore_offset)
1118{
1119	struct btrfs_key key;
1120	struct btrfs_path *path;
1121	struct btrfs_delayed_ref_root *delayed_refs = NULL;
1122	struct btrfs_delayed_ref_head *head;
1123	int info_level = 0;
1124	int ret;
1125	struct prelim_ref *ref;
1126	struct rb_node *node;
1127	struct extent_inode_elem *eie = NULL;
1128	/* total of both direct AND indirect refs! */
1129	u64 total_refs = 0;
1130	struct preftrees preftrees = {
1131		.direct = PREFTREE_INIT,
1132		.indirect = PREFTREE_INIT,
1133		.indirect_missing_keys = PREFTREE_INIT
1134	};
1135
1136	key.objectid = bytenr;
1137	key.offset = (u64)-1;
1138	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1139		key.type = BTRFS_METADATA_ITEM_KEY;
1140	else
1141		key.type = BTRFS_EXTENT_ITEM_KEY;
1142
1143	path = btrfs_alloc_path();
1144	if (!path)
1145		return -ENOMEM;
1146	if (!trans) {
1147		path->search_commit_root = 1;
1148		path->skip_locking = 1;
1149	}
1150
1151	if (time_seq == SEQ_LAST)
1152		path->skip_locking = 1;
1153
1154	/*
1155	 * grab both a lock on the path and a lock on the delayed ref head.
1156	 * We need both to get a consistent picture of how the refs look
1157	 * at a specified point in time
1158	 */
1159again:
1160	head = NULL;
1161
1162	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
1163	if (ret < 0)
1164		goto out;
1165	BUG_ON(ret == 0);
1166
1167#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1168	if (trans && likely(trans->type != __TRANS_DUMMY) &&
1169	    time_seq != SEQ_LAST) {
1170#else
1171	if (trans && time_seq != SEQ_LAST) {
1172#endif
1173		/*
1174		 * look if there are updates for this ref queued and lock the
1175		 * head
1176		 */
1177		delayed_refs = &trans->transaction->delayed_refs;
1178		spin_lock(&delayed_refs->lock);
1179		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
1180		if (head) {
1181			if (!mutex_trylock(&head->mutex)) {
1182				refcount_inc(&head->refs);
1183				spin_unlock(&delayed_refs->lock);
1184
1185				btrfs_release_path(path);
1186
1187				/*
1188				 * Mutex was contended, block until it's
1189				 * released and try again
1190				 */
1191				mutex_lock(&head->mutex);
1192				mutex_unlock(&head->mutex);
1193				btrfs_put_delayed_ref_head(head);
1194				goto again;
1195			}
1196			spin_unlock(&delayed_refs->lock);
1197			ret = add_delayed_refs(fs_info, head, time_seq,
1198					       &preftrees, &total_refs, sc);
1199			mutex_unlock(&head->mutex);
1200			if (ret)
1201				goto out;
1202		} else {
1203			spin_unlock(&delayed_refs->lock);
1204		}
1205	}
1206
1207	if (path->slots[0]) {
1208		struct extent_buffer *leaf;
1209		int slot;
1210
1211		path->slots[0]--;
1212		leaf = path->nodes[0];
1213		slot = path->slots[0];
1214		btrfs_item_key_to_cpu(leaf, &key, slot);
1215		if (key.objectid == bytenr &&
1216		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
1217		     key.type == BTRFS_METADATA_ITEM_KEY)) {
1218			ret = add_inline_refs(fs_info, path, bytenr,
1219					      &info_level, &preftrees,
1220					      &total_refs, sc);
1221			if (ret)
1222				goto out;
1223			ret = add_keyed_refs(fs_info, path, bytenr, info_level,
1224					     &preftrees, sc);
1225			if (ret)
1226				goto out;
1227		}
1228	}
1229
1230	btrfs_release_path(path);
1231
1232	ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
1233	if (ret)
1234		goto out;
1235
1236	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));
1237
1238	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
1239				    extent_item_pos, total_refs, sc, ignore_offset);
1240	if (ret)
1241		goto out;
1242
1243	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));
1244
1245	/*
1246	 * This walks the tree of merged and resolved refs. Tree blocks are
1247	 * read in as needed. Unique entries are added to the ulist, and
1248	 * the list of found roots is updated.
1249	 *
1250	 * We release the entire tree in one go before returning.
1251	 */
1252	node = rb_first_cached(&preftrees.direct.root);
1253	while (node) {
1254		ref = rb_entry(node, struct prelim_ref, rbnode);
1255		node = rb_next(&ref->rbnode);
1256		/*
1257		 * ref->count < 0 can happen here if there are delayed
1258		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
1259		 * prelim_ref_insert() relies on this when merging
1260		 * identical refs to keep the overall count correct.
1261		 * prelim_ref_insert() will merge only those refs
1262		 * which compare identically.  Any refs having
1263		 * e.g. different offsets would not be merged,
1264		 * and would retain their original ref->count < 0.
1265		 */
1266		if (roots && ref->count && ref->root_id && ref->parent == 0) {
1267			if (sc && sc->root_objectid &&
1268			    ref->root_id != sc->root_objectid) {
1269				ret = BACKREF_FOUND_SHARED;
1270				goto out;
1271			}
1272
1273			/* no parent == root of tree */
1274			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
1275			if (ret < 0)
1276				goto out;
1277		}
1278		if (ref->count && ref->parent) {
1279			if (extent_item_pos && !ref->inode_list &&
1280			    ref->level == 0) {
1281				struct extent_buffer *eb;
1282
1283				eb = read_tree_block(fs_info, ref->parent, 0,
1284						     ref->level, NULL);
1285				if (IS_ERR(eb)) {
1286					ret = PTR_ERR(eb);
1287					goto out;
1288				} else if (!extent_buffer_uptodate(eb)) {
1289					free_extent_buffer(eb);
1290					ret = -EIO;
1291					goto out;
1292				}
1293
1294				if (!path->skip_locking) {
1295					btrfs_tree_read_lock(eb);
1296					btrfs_set_lock_blocking_read(eb);
1297				}
1298				ret = find_extent_in_eb(eb, bytenr,
1299							*extent_item_pos, &eie, ignore_offset);
1300				if (!path->skip_locking)
1301					btrfs_tree_read_unlock_blocking(eb);
1302				free_extent_buffer(eb);
1303				if (ret < 0)
1304					goto out;
1305				ref->inode_list = eie;
1306			}
1307			ret = ulist_add_merge_ptr(refs, ref->parent,
1308						  ref->inode_list,
1309						  (void **)&eie, GFP_NOFS);
1310			if (ret < 0)
1311				goto out;
1312			if (!ret && extent_item_pos) {
1313				/*
1314				 * we've recorded that parent, so we must extend
1315				 * its inode list here
1316				 */
1317				BUG_ON(!eie);
1318				while (eie->next)
1319					eie = eie->next;
1320				eie->next = ref->inode_list;
1321			}
1322			eie = NULL;
1323		}
1324		cond_resched();
1325	}
1326
1327out:
1328	btrfs_free_path(path);
1329
1330	prelim_release(&preftrees.direct);
1331	prelim_release(&preftrees.indirect);
1332	prelim_release(&preftrees.indirect_missing_keys);
1333
1334	if (ret < 0)
1335		free_inode_elem_list(eie);
1336	return ret;
1337}
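/*
 * find_parent_nodes() in short:
 *
 *  1. look up the extent item for @bytenr (and, when a transaction is given,
 *     merge in any queued delayed refs for it),
 *  2. collect inline and keyed backrefs into the three preftrees,
 *  3. fill in missing keys (add_missing_keys) and resolve indirect refs to
 *     parents (resolve_indirect_refs),
 *  4. walk the resulting direct tree, adding roots to @roots and parent
 *     blocks (plus their inode lists) to @refs.
 */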
1338
1339static void free_leaf_list(struct ulist *blocks)
1340{
1341	struct ulist_node *node = NULL;
1342	struct extent_inode_elem *eie;
1343	struct ulist_iterator uiter;
1344
1345	ULIST_ITER_INIT(&uiter);
1346	while ((node = ulist_next(blocks, &uiter))) {
1347		if (!node->aux)
1348			continue;
1349		eie = unode_aux_to_inode_list(node);
1350		free_inode_elem_list(eie);
1351		node->aux = 0;
1352	}
1353
1354	ulist_free(blocks);
1355}
1356
1357/*
1358 * Finds all leafs with a reference to the specified combination of bytenr and
1359 * offset. key_list_head will point to a list of corresponding keys (caller must
1360 * free each list element). The leafs will be stored in the leafs ulist, which
1361 * must be freed with ulist_free.
1362 *
1363 * returns 0 on success, <0 on error
1364 */
1365static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
1366				struct btrfs_fs_info *fs_info, u64 bytenr,
1367				u64 time_seq, struct ulist **leafs,
1368				const u64 *extent_item_pos, bool ignore_offset)
1369{
1370	int ret;
1371
1372	*leafs = ulist_alloc(GFP_NOFS);
1373	if (!*leafs)
1374		return -ENOMEM;
1375
1376	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1377				*leafs, NULL, extent_item_pos, NULL, ignore_offset);
1378	if (ret < 0 && ret != -ENOENT) {
1379		free_leaf_list(*leafs);
1380		return ret;
1381	}
1382
1383	return 0;
1384}
1385
1386/*
1387 * walk all backrefs for a given extent to find all roots that reference this
1388 * extent. Walking a backref means finding all extents that reference this
1389 * extent and in turn walk the backrefs of those, too. Naturally this is a
1390 * recursive process, but here it is implemented in an iterative fashion: We
1391 * find all referencing extents for the extent in question and put them on a
1392 * list. In turn, we find all referencing extents for those, further appending
1393 * to the list. The way we iterate the list allows adding more elements after
1394 * the current while iterating. The process stops when we reach the end of the
1395 * list. Found roots are added to the roots list.
1396 *
1397 * returns 0 on success, < 0 on error.
1398 */
1399static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
1400				     struct btrfs_fs_info *fs_info, u64 bytenr,
1401				     u64 time_seq, struct ulist **roots,
1402				     bool ignore_offset)
1403{
1404	struct ulist *tmp;
1405	struct ulist_node *node = NULL;
1406	struct ulist_iterator uiter;
1407	int ret;
1408
1409	tmp = ulist_alloc(GFP_NOFS);
1410	if (!tmp)
1411		return -ENOMEM;
1412	*roots = ulist_alloc(GFP_NOFS);
1413	if (!*roots) {
1414		ulist_free(tmp);
1415		return -ENOMEM;
1416	}
1417
1418	ULIST_ITER_INIT(&uiter);
1419	while (1) {
1420		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1421					tmp, *roots, NULL, NULL, ignore_offset);
1422		if (ret < 0 && ret != -ENOENT) {
1423			ulist_free(tmp);
1424			ulist_free(*roots);
1425			return ret;
1426		}
1427		node = ulist_next(tmp, &uiter);
1428		if (!node)
1429			break;
1430		bytenr = node->val;
1431		cond_resched();
1432	}
1433
1434	ulist_free(tmp);
1435	return 0;
1436}
1437
1438int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1439			 struct btrfs_fs_info *fs_info, u64 bytenr,
1440			 u64 time_seq, struct ulist **roots,
1441			 bool ignore_offset)
1442{
1443	int ret;
1444
1445	if (!trans)
1446		down_read(&fs_info->commit_root_sem);
1447	ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
1448					time_seq, roots, ignore_offset);
1449	if (!trans)
1450		up_read(&fs_info->commit_root_sem);
1451	return ret;
1452}
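/*
 * A minimal caller-side sketch for btrfs_find_all_roots() (assuming a valid
 * fs_info, an extent bytenr and a time_seq appropriate for the caller):
 *
 *	struct ulist *roots = NULL;
 *	struct ulist_iterator uiter;
 *	struct ulist_node *node;
 *	int ret;
 *
 *	ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &roots, false);
 *	if (ret < 0)
 *		return ret;
 *	ULIST_ITER_INIT(&uiter);
 *	while ((node = ulist_next(roots, &uiter)))
 *		pr_debug("root %llu references this extent\n", node->val);
 *	ulist_free(roots);
 *
 * Note that on failure the roots ulist has already been freed by
 * btrfs_find_all_roots_safe() above, so it must not be freed again.
 */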
1453
1454/**
1455 * btrfs_check_shared - tell us whether an extent is shared
1456 *
1457 * btrfs_check_shared uses the backref walking code but will short
1458 * circuit as soon as it finds a root or inode that doesn't match the
1459 * one passed in. This provides a significant performance benefit for
1460 * callers (such as fiemap) which want to know whether the extent is
1461 * shared but do not need a ref count.
1462 *
1463 * This attempts to attach to the running transaction in order to account for
1464 * delayed refs, but continues on even when no running transaction exists.
1465 *
1466 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
1467 */
1468int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
1469		struct ulist *roots, struct ulist *tmp)
1470{
1471	struct btrfs_fs_info *fs_info = root->fs_info;
1472	struct btrfs_trans_handle *trans;
1473	struct ulist_iterator uiter;
1474	struct ulist_node *node;
1475	struct seq_list elem = SEQ_LIST_INIT(elem);
1476	int ret = 0;
1477	struct share_check shared = {
1478		.root_objectid = root->root_key.objectid,
1479		.inum = inum,
1480		.share_count = 0,
1481	};
1482
1483	ulist_init(roots);
1484	ulist_init(tmp);
1485
1486	trans = btrfs_join_transaction_nostart(root);
1487	if (IS_ERR(trans)) {
1488		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1489			ret = PTR_ERR(trans);
1490			goto out;
1491		}
1492		trans = NULL;
1493		down_read(&fs_info->commit_root_sem);
1494	} else {
1495		btrfs_get_tree_mod_seq(fs_info, &elem);
1496	}
1497
1498	ULIST_ITER_INIT(&uiter);
1499	while (1) {
1500		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
1501					roots, NULL, &shared, false);
1502		if (ret == BACKREF_FOUND_SHARED) {
1503			/* this is the only condition under which we return 1 */
1504			ret = 1;
1505			break;
1506		}
1507		if (ret < 0 && ret != -ENOENT)
1508			break;
1509		ret = 0;
1510		node = ulist_next(tmp, &uiter);
1511		if (!node)
1512			break;
1513		bytenr = node->val;
1514		shared.share_count = 0;
1515		cond_resched();
1516	}
1517
1518	if (trans) {
1519		btrfs_put_tree_mod_seq(fs_info, &elem);
1520		btrfs_end_transaction(trans);
1521	} else {
1522		up_read(&fs_info->commit_root_sem);
1523	}
1524out:
1525	ulist_release(roots);
1526	ulist_release(tmp);
1527	return ret;
1528}
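/*
 * Interpreting btrfs_check_shared() (as used e.g. by fiemap to decide whether
 * to report FIEMAP_EXTENT_SHARED): 1 means some other root or inode also
 * references the extent, 0 means only the given root/inum does.  The
 * passed-in @roots and @tmp ulists are only scratch space and are released
 * before returning.
 */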
1529
1530int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
1531			  u64 start_off, struct btrfs_path *path,
1532			  struct btrfs_inode_extref **ret_extref,
1533			  u64 *found_off)
1534{
1535	int ret, slot;
1536	struct btrfs_key key;
1537	struct btrfs_key found_key;
1538	struct btrfs_inode_extref *extref;
1539	const struct extent_buffer *leaf;
1540	unsigned long ptr;
1541
1542	key.objectid = inode_objectid;
1543	key.type = BTRFS_INODE_EXTREF_KEY;
1544	key.offset = start_off;
1545
1546	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1547	if (ret < 0)
1548		return ret;
1549
1550	while (1) {
1551		leaf = path->nodes[0];
1552		slot = path->slots[0];
1553		if (slot >= btrfs_header_nritems(leaf)) {
1554			/*
1555			 * If the item at offset is not found,
1556			 * btrfs_search_slot will point us to the slot
1557			 * where it should be inserted. In our case
1558			 * that will be the slot directly before the
1559			 * next INODE_REF_KEY_V2 item. In the case
1560			 * that we're pointing to the last slot in a
1561			 * leaf, we must move one leaf over.
1562			 */
1563			ret = btrfs_next_leaf(root, path);
1564			if (ret) {
1565				if (ret >= 1)
1566					ret = -ENOENT;
1567				break;
1568			}
1569			continue;
1570		}
1571
1572		btrfs_item_key_to_cpu(leaf, &found_key, slot);
1573
1574		/*
1575		 * Check that we're still looking at an extended ref key for
1576		 * this particular objectid. If we have different
1577		 * objectid or type then there are no more to be found
1578		 * in the tree and we can exit.
1579		 */
1580		ret = -ENOENT;
1581		if (found_key.objectid != inode_objectid)
1582			break;
1583		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
1584			break;
1585
1586		ret = 0;
1587		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1588		extref = (struct btrfs_inode_extref *)ptr;
1589		*ret_extref = extref;
1590		if (found_off)
1591			*found_off = found_key.offset;
1592		break;
1593	}
1594
1595	return ret;
1596}
1597
 1598/*
 1599 * This iterates to turn a name (from an iref/extref) into a full filesystem
 1600 * path. Elements of the path are separated by '/' and the path is guaranteed
 1601 * to be 0-terminated. The path is only given within the current file system.
 1602 * Therefore, it never starts with a '/'. The caller is responsible for
 1603 * providing "size" bytes in "dest". The dest buffer is filled backwards, and
 1604 * the start point of the resulting string is returned. This pointer is
 1605 * normally within dest.
 1606 * In case the path buffer would overflow, the pointer is decremented further
 1607 * as if output was written to the buffer, though no more output is actually
 1608 * generated. That way, the caller can determine how much space would be
 1609 * required for the path to fit into the buffer. In that case, the returned
 1610 * value will be smaller than dest. Callers must check this!
 1611 */
1612char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1613			u32 name_len, unsigned long name_off,
1614			struct extent_buffer *eb_in, u64 parent,
1615			char *dest, u32 size)
1616{
1617	int slot;
1618	u64 next_inum;
1619	int ret;
1620	s64 bytes_left = ((s64)size) - 1;
1621	struct extent_buffer *eb = eb_in;
1622	struct btrfs_key found_key;
1623	int leave_spinning = path->leave_spinning;
1624	struct btrfs_inode_ref *iref;
1625
1626	if (bytes_left >= 0)
1627		dest[bytes_left] = '\0';
1628
1629	path->leave_spinning = 1;
1630	while (1) {
1631		bytes_left -= name_len;
1632		if (bytes_left >= 0)
1633			read_extent_buffer(eb, dest + bytes_left,
1634					   name_off, name_len);
1635		if (eb != eb_in) {
1636			if (!path->skip_locking)
1637				btrfs_tree_read_unlock_blocking(eb);
1638			free_extent_buffer(eb);
1639		}
1640		ret = btrfs_find_item(fs_root, path, parent, 0,
1641				BTRFS_INODE_REF_KEY, &found_key);
1642		if (ret > 0)
1643			ret = -ENOENT;
1644		if (ret)
1645			break;
1646
1647		next_inum = found_key.offset;
1648
1649		/* regular exit ahead */
1650		if (parent == next_inum)
1651			break;
1652
1653		slot = path->slots[0];
1654		eb = path->nodes[0];
1655		/* make sure we can use eb after releasing the path */
1656		if (eb != eb_in) {
1657			if (!path->skip_locking)
1658				btrfs_set_lock_blocking_read(eb);
1659			path->nodes[0] = NULL;
1660			path->locks[0] = 0;
1661		}
1662		btrfs_release_path(path);
1663		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1664
1665		name_len = btrfs_inode_ref_name_len(eb, iref);
1666		name_off = (unsigned long)(iref + 1);
1667
1668		parent = next_inum;
1669		--bytes_left;
1670		if (bytes_left >= 0)
1671			dest[bytes_left] = '/';
1672	}
1673
1674	btrfs_release_path(path);
1675	path->leave_spinning = leave_spinning;
1676
1677	if (ret)
1678		return ERR_PTR(ret);
1679
1680	return dest + bytes_left;
1681}
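/*
 * Backward-fill example for btrfs_ref_to_path() (hypothetical names): for an
 * inode reached as "a/b/c" and size == 32, the buffer ends up holding
 * "a/b/c\0" in its last six bytes and the returned pointer is dest + 26.
 * With size == 4 the same walk would return dest - 2: smaller than dest,
 * telling the caller that two more bytes (six in total) are needed.
 */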
1682
1683/*
1684 * this makes the path point to (logical EXTENT_ITEM *)
1685 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
1686 * tree blocks and <0 on error.
1687 */
1688int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1689			struct btrfs_path *path, struct btrfs_key *found_key,
1690			u64 *flags_ret)
1691{
1692	int ret;
1693	u64 flags;
1694	u64 size = 0;
1695	u32 item_size;
1696	const struct extent_buffer *eb;
1697	struct btrfs_extent_item *ei;
1698	struct btrfs_key key;
1699
1700	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1701		key.type = BTRFS_METADATA_ITEM_KEY;
1702	else
1703		key.type = BTRFS_EXTENT_ITEM_KEY;
1704	key.objectid = logical;
1705	key.offset = (u64)-1;
1706
1707	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1708	if (ret < 0)
1709		return ret;
1710
1711	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
1712	if (ret) {
1713		if (ret > 0)
1714			ret = -ENOENT;
1715		return ret;
1716	}
1717	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1718	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1719		size = fs_info->nodesize;
1720	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1721		size = found_key->offset;
1722
1723	if (found_key->objectid > logical ||
1724	    found_key->objectid + size <= logical) {
1725		btrfs_debug(fs_info,
1726			"logical %llu is not within any extent", logical);
1727		return -ENOENT;
1728	}
1729
1730	eb = path->nodes[0];
1731	item_size = btrfs_item_size_nr(eb, path->slots[0]);
1732	BUG_ON(item_size < sizeof(*ei));
1733
1734	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1735	flags = btrfs_extent_flags(eb, ei);
1736
1737	btrfs_debug(fs_info,
1738		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
1739		 logical, logical - found_key->objectid, found_key->objectid,
1740		 found_key->offset, flags, item_size);
1741
1742	WARN_ON(!flags_ret);
1743	if (flags_ret) {
1744		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1745			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
1746		else if (flags & BTRFS_EXTENT_FLAG_DATA)
1747			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
1748		else
1749			BUG();
1750		return 0;
1751	}
1752
1753	return -EIO;
1754}
1755
 1756/*
 1757 * Helper function to iterate extent inline refs. ptr must point to a 0 value
 1758 * for the first call and may be modified. It is used to track state.
 1759 * If more refs exist, 0 is returned and the next call to
 1760 * get_extent_inline_ref must pass the modified ptr parameter to get the
 1761 * next ref. After the last ref was processed, 1 is returned.
 1762 * Returns <0 on error.
 1763 */
1764static int get_extent_inline_ref(unsigned long *ptr,
1765				 const struct extent_buffer *eb,
1766				 const struct btrfs_key *key,
1767				 const struct btrfs_extent_item *ei,
1768				 u32 item_size,
1769				 struct btrfs_extent_inline_ref **out_eiref,
1770				 int *out_type)
1771{
1772	unsigned long end;
1773	u64 flags;
1774	struct btrfs_tree_block_info *info;
1775
1776	if (!*ptr) {
1777		/* first call */
1778		flags = btrfs_extent_flags(eb, ei);
1779		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1780			if (key->type == BTRFS_METADATA_ITEM_KEY) {
1781				/* a skinny metadata extent */
1782				*out_eiref =
1783				     (struct btrfs_extent_inline_ref *)(ei + 1);
1784			} else {
1785				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1786				info = (struct btrfs_tree_block_info *)(ei + 1);
1787				*out_eiref =
1788				   (struct btrfs_extent_inline_ref *)(info + 1);
1789			}
1790		} else {
1791			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
1792		}
1793		*ptr = (unsigned long)*out_eiref;
1794		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
1795			return -ENOENT;
1796	}
1797
1798	end = (unsigned long)ei + item_size;
1799	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
1800	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
1801						     BTRFS_REF_TYPE_ANY);
1802	if (*out_type == BTRFS_REF_TYPE_INVALID)
1803		return -EUCLEAN;
1804
1805	*ptr += btrfs_extent_inline_ref_size(*out_type);
1806	WARN_ON(*ptr > end);
1807	if (*ptr == end)
1808		return 1; /* last */
1809
1810	return 0;
1811}
1812
 1813/*
 1814 * Reads the tree block backref for an extent. Tree level and root are returned
 1815 * through out_level and out_root. ptr must point to a 0 value for the first
 1816 * call and may be modified (see the get_extent_inline_ref comment).
 1817 * Returns 0 if data was provided, 1 if there was no more data to provide or
 1818 * <0 on error.
 1819 */
1820int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1821			    struct btrfs_key *key, struct btrfs_extent_item *ei,
1822			    u32 item_size, u64 *out_root, u8 *out_level)
1823{
1824	int ret;
1825	int type;
1826	struct btrfs_extent_inline_ref *eiref;
1827
1828	if (*ptr == (unsigned long)-1)
1829		return 1;
1830
1831	while (1) {
1832		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
1833					      &eiref, &type);
1834		if (ret < 0)
1835			return ret;
1836
1837		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1838		    type == BTRFS_SHARED_BLOCK_REF_KEY)
1839			break;
1840
1841		if (ret == 1)
1842			return 1;
1843	}
1844
1845	/* we can treat both ref types equally here */
1846	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
1847
1848	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
1849		struct btrfs_tree_block_info *info;
1850
1851		info = (struct btrfs_tree_block_info *)(ei + 1);
1852		*out_level = btrfs_tree_block_level(eb, info);
1853	} else {
1854		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
1855		*out_level = (u8)key->offset;
1856	}
1857
1858	if (ret == 1)
1859		*ptr = (unsigned long)-1;
1860
1861	return 0;
1862}
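/*
 * Editorial example (not part of the original file): a hedged sketch of a
 * caller that walks every tree block backref of a metadata extent via
 * tree_backref_for_extent(). The function name and the debug output are
 * hypothetical; the loop shape follows the iteration contract described
 * in the comment above.
 */
static void print_tree_block_owners(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb,
				    struct btrfs_key *key,
				    struct btrfs_extent_item *ei,
				    u32 item_size)
{
	unsigned long ptr = 0;	/* iteration state, must start at 0 */
	u64 root;
	u8 level;
	int ret;

	while (1) {
		ret = tree_backref_for_extent(&ptr, eb, key, ei, item_size,
					      &root, &level);
		if (ret > 0)
			break;	/* no more tree block backrefs */
		if (ret < 0) {
			btrfs_debug(fs_info, "backref walk failed: %d", ret);
			break;
		}
		btrfs_debug(fs_info,
			    "extent %llu referenced from root %llu at level %d",
			    key->objectid, root, level);
	}
}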
1863
1864static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
1865			     struct extent_inode_elem *inode_list,
1866			     u64 root, u64 extent_item_objectid,
1867			     iterate_extent_inodes_t *iterate, void *ctx)
1868{
1869	struct extent_inode_elem *eie;
1870	int ret = 0;
1871
1872	for (eie = inode_list; eie; eie = eie->next) {
1873		btrfs_debug(fs_info,
1874			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
1875			    extent_item_objectid, eie->inum,
1876			    eie->offset, root);
1877		ret = iterate(eie->inum, eie->offset, root, ctx);
1878		if (ret) {
1879			btrfs_debug(fs_info,
1880				    "stopping iteration for %llu due to ret=%d",
1881				    extent_item_objectid, ret);
1882			break;
1883		}
1884	}
1885
1886	return ret;
1887}
1888
1889/*
1890 * calls iterate() for every inode that references the extent identified by
1891 * the given parameters.
1892 * when the iterator function returns a non-zero value, iteration stops.
1893 */
1894int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1895				u64 extent_item_objectid, u64 extent_item_pos,
1896				int search_commit_root,
1897				iterate_extent_inodes_t *iterate, void *ctx,
1898				bool ignore_offset)
1899{
1900	int ret;
1901	struct btrfs_trans_handle *trans = NULL;
1902	struct ulist *refs = NULL;
1903	struct ulist *roots = NULL;
1904	struct ulist_node *ref_node = NULL;
1905	struct ulist_node *root_node = NULL;
1906	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
1907	struct ulist_iterator ref_uiter;
1908	struct ulist_iterator root_uiter;
1909
1910	btrfs_debug(fs_info, "resolving all inodes for extent %llu",
1911			extent_item_objectid);
1912
1913	if (!search_commit_root) {
1914		trans = btrfs_attach_transaction(fs_info->extent_root);
1915		if (IS_ERR(trans)) {
1916			if (PTR_ERR(trans) != -ENOENT &&
1917			    PTR_ERR(trans) != -EROFS)
1918				return PTR_ERR(trans);
1919			trans = NULL;
1920		}
1921	}
1922
1923	if (trans)
1924		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1925	else
1926		down_read(&fs_info->commit_root_sem);
1927
1928	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
1929				   tree_mod_seq_elem.seq, &refs,
1930				   &extent_item_pos, ignore_offset);
1931	if (ret)
1932		goto out;
1933
1934	ULIST_ITER_INIT(&ref_uiter);
1935	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
1936		ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
1937						tree_mod_seq_elem.seq, &roots,
1938						ignore_offset);
1939		if (ret)
1940			break;
1941		ULIST_ITER_INIT(&root_uiter);
1942		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
1943			btrfs_debug(fs_info,
1944				    "root %llu references leaf %llu, data list %#llx",
1945				    root_node->val, ref_node->val,
1946				    ref_node->aux);
1947			ret = iterate_leaf_refs(fs_info,
1948						(struct extent_inode_elem *)
1949						(uintptr_t)ref_node->aux,
1950						root_node->val,
1951						extent_item_objectid,
1952						iterate, ctx);
1953		}
1954		ulist_free(roots);
1955	}
1956
1957	free_leaf_list(refs);
1958out:
1959	if (trans) {
1960		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1961		btrfs_end_transaction(trans);
1962	} else {
1963		up_read(&fs_info->commit_root_sem);
1964	}
1965
1966	return ret;
1967}
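/*
 * Editorial example (not part of the original file): a minimal sketch of a
 * callback for iterate_extent_inodes() and a hypothetical caller. The
 * callback signature is derived from how iterate_leaf_refs() invokes
 * iterate() above; the counting context is made up for illustration.
 */
struct count_inodes_ctx {
	u64 nr_refs;
};

static int count_extent_inode_refs(u64 inum, u64 offset, u64 root, void *ctx)
{
	struct count_inodes_ctx *cnt = ctx;

	cnt->nr_refs++;
	return 0;	/* returning non-zero would stop the iteration */
}

static int count_inodes_for_extent(struct btrfs_fs_info *fs_info,
				   u64 extent_bytenr, u64 extent_offset,
				   u64 *nr_refs)
{
	struct count_inodes_ctx cnt = { 0 };
	int ret;

	/*
	 * Search the commit root (1) and only report file extents that
	 * cover extent_offset within the extent item.
	 */
	ret = iterate_extent_inodes(fs_info, extent_bytenr, extent_offset,
				    1, count_extent_inode_refs, &cnt, false);
	*nr_refs = cnt.nr_refs;
	return ret;
}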
1968
1969int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
1970				struct btrfs_path *path,
1971				iterate_extent_inodes_t *iterate, void *ctx,
1972				bool ignore_offset)
1973{
1974	int ret;
1975	u64 extent_item_pos;
1976	u64 flags = 0;
1977	struct btrfs_key found_key;
1978	int search_commit_root = path->search_commit_root;
1979
1980	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
1981	btrfs_release_path(path);
1982	if (ret < 0)
1983		return ret;
1984	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1985		return -EINVAL;
1986
1987	extent_item_pos = logical - found_key.objectid;
1988	ret = iterate_extent_inodes(fs_info, found_key.objectid,
1989					extent_item_pos, search_commit_root,
1990					iterate, ctx, ignore_offset);
1991
1992	return ret;
1993}
1994
1995typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
1996			      struct extent_buffer *eb, void *ctx);
1997
1998static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
1999			      struct btrfs_path *path,
2000			      iterate_irefs_t *iterate, void *ctx)
2001{
2002	int ret = 0;
2003	int slot;
2004	u32 cur;
2005	u32 len;
2006	u32 name_len;
2007	u64 parent = 0;
2008	int found = 0;
2009	struct extent_buffer *eb;
2010	struct btrfs_item *item;
2011	struct btrfs_inode_ref *iref;
2012	struct btrfs_key found_key;
2013
2014	while (!ret) {
2015		ret = btrfs_find_item(fs_root, path, inum,
2016				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2017				&found_key);
2018
2019		if (ret < 0)
2020			break;
2021		if (ret) {
2022			ret = found ? 0 : -ENOENT;
2023			break;
2024		}
2025		++found;
2026
2027		parent = found_key.offset;
2028		slot = path->slots[0];
2029		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2030		if (!eb) {
2031			ret = -ENOMEM;
2032			break;
2033		}
2034		btrfs_release_path(path);
2035
2036		item = btrfs_item_nr(slot);
2037		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2038
2039		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
2040			name_len = btrfs_inode_ref_name_len(eb, iref);
2041			/* path must be released before calling iterate()! */
2042			btrfs_debug(fs_root->fs_info,
2043				"following ref at offset %u for inode %llu in tree %llu",
2044				cur, found_key.objectid,
2045				fs_root->root_key.objectid);
2046			ret = iterate(parent, name_len,
2047				      (unsigned long)(iref + 1), eb, ctx);
2048			if (ret)
2049				break;
2050			len = sizeof(*iref) + name_len;
2051			iref = (struct btrfs_inode_ref *)((char *)iref + len);
2052		}
2053		free_extent_buffer(eb);
2054	}
2055
2056	btrfs_release_path(path);
2057
2058	return ret;
2059}
2060
2061static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
2062				 struct btrfs_path *path,
2063				 iterate_irefs_t *iterate, void *ctx)
2064{
2065	int ret;
2066	int slot;
2067	u64 offset = 0;
2068	u64 parent;
2069	int found = 0;
2070	struct extent_buffer *eb;
2071	struct btrfs_inode_extref *extref;
2072	u32 item_size;
2073	u32 cur_offset;
2074	unsigned long ptr;
2075
2076	while (1) {
2077		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2078					    &offset);
2079		if (ret < 0)
2080			break;
2081		if (ret) {
2082			ret = found ? 0 : -ENOENT;
2083			break;
2084		}
2085		++found;
2086
2087		slot = path->slots[0];
2088		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2089		if (!eb) {
2090			ret = -ENOMEM;
2091			break;
2092		}
2093		btrfs_release_path(path);
2094
2095		item_size = btrfs_item_size_nr(eb, slot);
2096		ptr = btrfs_item_ptr_offset(eb, slot);
2097		cur_offset = 0;
2098
2099		while (cur_offset < item_size) {
2100			u32 name_len;
2101
2102			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2103			parent = btrfs_inode_extref_parent(eb, extref);
2104			name_len = btrfs_inode_extref_name_len(eb, extref);
2105			ret = iterate(parent, name_len,
2106				      (unsigned long)&extref->name, eb, ctx);
2107			if (ret)
2108				break;
2109
2110			cur_offset += name_len;
2111			cur_offset += sizeof(*extref);
2112		}
2113		free_extent_buffer(eb);
2114
2115		offset++;
2116	}
2117
2118	btrfs_release_path(path);
2119
2120	return ret;
2121}
2122
2123static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
2124			 struct btrfs_path *path, iterate_irefs_t *iterate,
2125			 void *ctx)
2126{
2127	int ret;
2128	int found_refs = 0;
2129
2130	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
2131	if (!ret)
2132		++found_refs;
2133	else if (ret != -ENOENT)
2134		return ret;
2135
2136	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
2137	if (ret == -ENOENT && found_refs)
2138		return 0;
2139
2140	return ret;
2141}
2142
2143/*
2144 * returns 0 if the path could be dumped (possibly truncated)
2145 * returns <0 in case of an error
2146 */
2147static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2148			 struct extent_buffer *eb, void *ctx)
2149{
2150	struct inode_fs_paths *ipath = ctx;
2151	char *fspath;
2152	char *fspath_min;
2153	int i = ipath->fspath->elem_cnt;
2154	const int s_ptr = sizeof(char *);
2155	u32 bytes_left;
2156
2157	bytes_left = ipath->fspath->bytes_left > s_ptr ?
2158					ipath->fspath->bytes_left - s_ptr : 0;
2159
2160	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2161	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2162				   name_off, eb, inum, fspath_min, bytes_left);
2163	if (IS_ERR(fspath))
2164		return PTR_ERR(fspath);
2165
2166	if (fspath > fspath_min) {
2167		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2168		++ipath->fspath->elem_cnt;
2169		ipath->fspath->bytes_left = fspath - fspath_min;
2170	} else {
2171		++ipath->fspath->elem_missed;
2172		ipath->fspath->bytes_missing += fspath_min - fspath;
2173		ipath->fspath->bytes_left = 0;
2174	}
2175
2176	return 0;
2177}
2178
2179/*
2180 * this dumps all file system paths to the inode into the ipath struct, provided
2181 * it has been created large enough. each path is zero-terminated and accessed
2182 * from ipath->fspath->val[i].
2183 * when it returns, there are ipath->fspath->elem_cnt number of paths available
2184 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
2185 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
2186 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2187 * have been needed to return all paths.
2188 */
2189int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2190{
2191	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
2192			     inode_to_path, ipath);
2193}
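/*
 * Editorial example (not part of the original file): a hedged sketch of
 * consuming the results that paths_from_inode() leaves in the ipath, per
 * the comment above. The printing helper is hypothetical; the
 * elem_cnt/val[] and elem_missed/bytes_missing fields are the ones used
 * in this file.
 */
static void print_ipath_results(struct btrfs_fs_info *fs_info,
				struct inode_fs_paths *ipath)
{
	u32 i;

	for (i = 0; i < ipath->fspath->elem_cnt; i++)
		btrfs_debug(fs_info, "path %u: %s", i,
			    (char *)(unsigned long)ipath->fspath->val[i]);

	if (ipath->fspath->elem_missed)
		btrfs_debug(fs_info,
			    "%u paths did not fit, %u more bytes were needed",
			    ipath->fspath->elem_missed,
			    ipath->fspath->bytes_missing);
}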
2194
2195struct btrfs_data_container *init_data_container(u32 total_bytes)
2196{
2197	struct btrfs_data_container *data;
2198	size_t alloc_bytes;
2199
2200	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2201	data = kvmalloc(alloc_bytes, GFP_KERNEL);
2202	if (!data)
2203		return ERR_PTR(-ENOMEM);
2204
2205	if (total_bytes >= sizeof(*data)) {
2206		data->bytes_left = total_bytes - sizeof(*data);
2207		data->bytes_missing = 0;
2208	} else {
2209		data->bytes_missing = sizeof(*data) - total_bytes;
2210		data->bytes_left = 0;
2211	}
2212
2213	data->elem_cnt = 0;
2214	data->elem_missed = 0;
2215
2216	return data;
2217}
2218
2219/*
2220 * allocates space to return multiple file system paths for an inode.
2221 * total_bytes to allocate are passed; note that the space usable for actual path
2222 * information will be total_bytes - sizeof(struct btrfs_data_container).
2223 * the returned pointer must be freed with free_ipath() in the end.
2224 */
2225struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2226					struct btrfs_path *path)
2227{
2228	struct inode_fs_paths *ifp;
2229	struct btrfs_data_container *fspath;
2230
2231	fspath = init_data_container(total_bytes);
2232	if (IS_ERR(fspath))
2233		return ERR_CAST(fspath);
2234
2235	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2236	if (!ifp) {
2237		kvfree(fspath);
2238		return ERR_PTR(-ENOMEM);
2239	}
2240
2241	ifp->btrfs_path = path;
2242	ifp->fspath = fspath;
2243	ifp->fs_root = fs_root;
2244
2245	return ifp;
2246}
2247
2248void free_ipath(struct inode_fs_paths *ipath)
2249{
2250	if (!ipath)
2251		return;
2252	kvfree(ipath->fspath);
2253	kfree(ipath);
2254}
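/*
 * Editorial example (not part of the original file): a minimal end-to-end
 * sketch of the ipath helpers above, loosely modeled on how the
 * inode-to-path ioctl uses them. The function name, the 4096-byte buffer
 * size and the -ENOSPC policy are hypothetical.
 */
static int resolve_inode_paths(struct btrfs_root *fs_root, u64 inum)
{
	struct btrfs_path *path;
	struct inode_fs_paths *ipath;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* 4096 bytes shared by the btrfs_data_container header and the paths */
	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		ret = PTR_ERR(ipath);
		goto out_path;
	}

	ret = paths_from_inode(inum, ipath);
	if (!ret && ipath->fspath->elem_missed)
		ret = -ENOSPC;	/* bytes_missing says how much more was needed */

	free_ipath(ipath);
out_path:
	btrfs_free_path(path);
	return ret;
}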