   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2011 STRATO.  All rights reserved.
   4 */
   5
   6#include <linux/mm.h>
   7#include <linux/rbtree.h>
   8#include <trace/events/btrfs.h>
   9#include "ctree.h"
  10#include "disk-io.h"
  11#include "backref.h"
  12#include "ulist.h"
  13#include "transaction.h"
  14#include "delayed-ref.h"
  15#include "locking.h"
  16#include "misc.h"
  17#include "tree-mod-log.h"
  18#include "fs.h"
  19#include "accessors.h"
  20#include "extent-tree.h"
  21#include "relocation.h"
  22#include "tree-checker.h"
  23
  24/* Just arbitrary numbers so we can be sure one of these happened. */
  25#define BACKREF_FOUND_SHARED     6
  26#define BACKREF_FOUND_NOT_SHARED 7
  27
  28struct extent_inode_elem {
  29	u64 inum;
  30	u64 offset;
  31	u64 num_bytes;
  32	struct extent_inode_elem *next;
  33};
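
/*
 * For example, a chain built by check_extent_in_eb() below may look like:
 *
 *   {inum=257, offset=0, num_bytes=4096} -> {inum=258, offset=8192, ...} -> NULL
 *
 * New elements are prepended to the list, and free_inode_elem_list() releases
 * the whole chain by following ->next.
 */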
  34
  35static int check_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
  36			      const struct btrfs_key *key,
  37			      const struct extent_buffer *eb,
  38			      const struct btrfs_file_extent_item *fi,
  39			      struct extent_inode_elem **eie)
  40{
  41	const u64 data_len = btrfs_file_extent_num_bytes(eb, fi);
  42	u64 offset = key->offset;
  43	struct extent_inode_elem *e;
  44	const u64 *root_ids;
  45	int root_count;
  46	bool cached;
  47
  48	if (!ctx->ignore_extent_item_pos &&
  49	    !btrfs_file_extent_compression(eb, fi) &&
  50	    !btrfs_file_extent_encryption(eb, fi) &&
  51	    !btrfs_file_extent_other_encoding(eb, fi)) {
  52		u64 data_offset;
  53
  54		data_offset = btrfs_file_extent_offset(eb, fi);
  55
  56		if (ctx->extent_item_pos < data_offset ||
  57		    ctx->extent_item_pos >= data_offset + data_len)
  58			return 1;
  59		offset += ctx->extent_item_pos - data_offset;
  60	}
  61
  62	if (!ctx->indirect_ref_iterator || !ctx->cache_lookup)
  63		goto add_inode_elem;
  64
  65	cached = ctx->cache_lookup(eb->start, ctx->user_ctx, &root_ids,
  66				   &root_count);
  67	if (!cached)
  68		goto add_inode_elem;
  69
  70	for (int i = 0; i < root_count; i++) {
  71		int ret;
  72
  73		ret = ctx->indirect_ref_iterator(key->objectid, offset,
  74						 data_len, root_ids[i],
  75						 ctx->user_ctx);
  76		if (ret)
  77			return ret;
  78	}
  79
  80add_inode_elem:
  81	e = kmalloc(sizeof(*e), GFP_NOFS);
  82	if (!e)
  83		return -ENOMEM;
  84
  85	e->next = *eie;
  86	e->inum = key->objectid;
  87	e->offset = offset;
  88	e->num_bytes = data_len;
  89	*eie = e;
  90
  91	return 0;
  92}
  93
  94static void free_inode_elem_list(struct extent_inode_elem *eie)
  95{
  96	struct extent_inode_elem *eie_next;
  97
  98	for (; eie; eie = eie_next) {
  99		eie_next = eie->next;
 100		kfree(eie);
 101	}
 102}
 103
 104static int find_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
 105			     const struct extent_buffer *eb,
 106			     struct extent_inode_elem **eie)
 107{
 108	u64 disk_byte;
 109	struct btrfs_key key;
 110	struct btrfs_file_extent_item *fi;
 111	int slot;
 112	int nritems;
 113	int extent_type;
 114	int ret;
 115
 116	/*
 117	 * From the shared data ref, we only have the leaf but we need
 118	 * the key. Thus, we must look into all items and check whether
 119	 * we find one (or more) with a reference to our extent item.
 120	 */
 121	nritems = btrfs_header_nritems(eb);
 122	for (slot = 0; slot < nritems; ++slot) {
 123		btrfs_item_key_to_cpu(eb, &key, slot);
 124		if (key.type != BTRFS_EXTENT_DATA_KEY)
 125			continue;
 126		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
 127		extent_type = btrfs_file_extent_type(eb, fi);
 128		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
 129			continue;
 130		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
 131		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
 132		if (disk_byte != ctx->bytenr)
 133			continue;
 134
 135		ret = check_extent_in_eb(ctx, &key, eb, fi, eie);
 136		if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
 137			return ret;
 138	}
 139
 140	return 0;
 141}
 142
 143struct preftree {
 144	struct rb_root_cached root;
 145	unsigned int count;
 146};
 147
 148#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }
 149
 150struct preftrees {
 151	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
 152	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
 153	struct preftree indirect_missing_keys;
 154};
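
/*
 * Sketch of how refs flow through these trees: a shared data backref, whose
 * parent leaf bytenr is already known, goes straight into @direct; a tree
 * block ref that only names a root starts in @indirect_missing_keys, gets its
 * key filled in by add_missing_keys() and moves to @indirect; finally,
 * resolve_indirect_refs() turns @indirect entries into @direct ones.
 */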
 155
 156/*
 157 * Checks for a shared extent during backref search.
 158 *
 159 * The share_count tracks prelim_refs (direct and indirect) having a
 160 * ref->count >0:
 161 *  - incremented when a ref->count transitions to >0
 162 *  - decremented when a ref->count transitions to <1
 163 */
 164struct share_check {
 165	struct btrfs_backref_share_check_ctx *ctx;
 166	struct btrfs_root *root;
 167	u64 inum;
 168	u64 data_bytenr;
 169	u64 data_extent_gen;
 170	/*
 171	 * Counts number of inodes that refer to an extent (different inodes in
 172	 * the same root or different roots) that we could find. The sharedness
 173	 * check typically stops once this counter gets greater than 1, so it
 174	 * may not reflect the total number of inodes.
 175	 */
 176	int share_count;
 177	/*
 178	 * The number of times we found our inode refers to the data extent we
 179	 * are determining the sharedness. In other words, how many file extent
 180	 * items we could find for our inode that point to our target data
 181	 * extent. The value we get here after finishing the extent sharedness
 182	 * check may be smaller than reality, but if it ends up being greater
 183	 * than 1, then we know for sure the inode has multiple file extent
 184	 * items that point to our data extent, and we can safely assume
 185	 * it's useful to cache the sharedness check result.
 186	 */
 187	int self_ref_count;
 188	bool have_delayed_delete_refs;
 189};
 190
 191static inline int extent_is_shared(struct share_check *sc)
 192{
 193	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
 194}
 195
 196static struct kmem_cache *btrfs_prelim_ref_cache;
 197
 198int __init btrfs_prelim_ref_init(void)
 199{
 200	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
 201					sizeof(struct prelim_ref), 0, 0, NULL);
 202	if (!btrfs_prelim_ref_cache)
 203		return -ENOMEM;
 204	return 0;
 205}
 206
 207void __cold btrfs_prelim_ref_exit(void)
 208{
 209	kmem_cache_destroy(btrfs_prelim_ref_cache);
 210}
 211
 212static void free_pref(struct prelim_ref *ref)
 213{
 214	kmem_cache_free(btrfs_prelim_ref_cache, ref);
 215}
 216
 217/*
 218 * Return 0 when both refs are for the same block (and can be merged).
 219 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 220 * indicates a 'higher' block.
 221 */
 222static int prelim_ref_compare(const struct prelim_ref *ref1,
 223			      const struct prelim_ref *ref2)
 224{
 225	if (ref1->level < ref2->level)
 226		return -1;
 227	if (ref1->level > ref2->level)
 228		return 1;
 229	if (ref1->root_id < ref2->root_id)
 230		return -1;
 231	if (ref1->root_id > ref2->root_id)
 232		return 1;
 233	if (ref1->key_for_search.type < ref2->key_for_search.type)
 234		return -1;
 235	if (ref1->key_for_search.type > ref2->key_for_search.type)
 236		return 1;
 237	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
 238		return -1;
 239	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
 240		return 1;
 241	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
 242		return -1;
 243	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
 244		return 1;
 245	if (ref1->parent < ref2->parent)
 246		return -1;
 247	if (ref1->parent > ref2->parent)
 248		return 1;
 249
 250	return 0;
 251}
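
/*
 * Worked example for the comparator above: two refs that agree on level,
 * root_id and key_for_search but have different parents compare non-zero and
 * remain separate rbtree nodes, while fully identical refs compare as 0 and
 * are merged (their counts added up) by prelim_ref_insert().
 */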
 252
 253static void update_share_count(struct share_check *sc, int oldcount,
 254			       int newcount, const struct prelim_ref *newref)
 255{
 256	if ((!sc) || (oldcount == 0 && newcount < 1))
 257		return;
 258
 259	if (oldcount > 0 && newcount < 1)
 260		sc->share_count--;
 261	else if (oldcount < 1 && newcount > 0)
 262		sc->share_count++;
 263
 264	if (newref->root_id == btrfs_root_id(sc->root) &&
 265	    newref->wanted_disk_byte == sc->data_bytenr &&
 266	    newref->key_for_search.objectid == sc->inum)
 267		sc->self_ref_count += newref->count;
 268}
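
/*
 * Example of the transitions handled above: merging a delayed DROP ref
 * (count == -1) into a ref with count == 1 moves that count from 1 to 0 and
 * decrements sc->share_count, while the opposite ADD transition (0 -> 1)
 * increments it. This keeps share_count consistent even when delayed refs
 * temporarily drive a ref's count negative.
 */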
 269
 270/*
 271 * Add @newref to the @root rbtree, merging identical refs.
 272 *
 273 * Callers should assume that newref has been freed after calling.
 274 */
 275static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
 276			      struct preftree *preftree,
 277			      struct prelim_ref *newref,
 278			      struct share_check *sc)
 279{
 280	struct rb_root_cached *root;
 281	struct rb_node **p;
 282	struct rb_node *parent = NULL;
 283	struct prelim_ref *ref;
 284	int result;
 285	bool leftmost = true;
 286
 287	root = &preftree->root;
 288	p = &root->rb_root.rb_node;
 289
 290	while (*p) {
 291		parent = *p;
 292		ref = rb_entry(parent, struct prelim_ref, rbnode);
 293		result = prelim_ref_compare(ref, newref);
 294		if (result < 0) {
 295			p = &(*p)->rb_left;
 296		} else if (result > 0) {
 297			p = &(*p)->rb_right;
 298			leftmost = false;
 299		} else {
 300			/* Identical refs, merge them and free @newref */
 301			struct extent_inode_elem *eie = ref->inode_list;
 302
 303			while (eie && eie->next)
 304				eie = eie->next;
 305
 306			if (!eie)
 307				ref->inode_list = newref->inode_list;
 308			else
 309				eie->next = newref->inode_list;
 310			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
 311						     preftree->count);
 312			/*
 313			 * A delayed ref can have newref->count < 0.
 314			 * The ref->count is updated to follow any
 315			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
 316			 */
 317			update_share_count(sc, ref->count,
 318					   ref->count + newref->count, newref);
 319			ref->count += newref->count;
 320			free_pref(newref);
 321			return;
 322		}
 323	}
 324
 325	update_share_count(sc, 0, newref->count, newref);
 326	preftree->count++;
 327	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
 328	rb_link_node(&newref->rbnode, parent, p);
 329	rb_insert_color_cached(&newref->rbnode, root, leftmost);
 330}
 331
 332/*
 333 * Release the entire tree.  We don't care about internal consistency so
 334 * just free everything and then reset the tree root.
 335 */
 336static void prelim_release(struct preftree *preftree)
 337{
 338	struct prelim_ref *ref, *next_ref;
 339
 340	rbtree_postorder_for_each_entry_safe(ref, next_ref,
 341					     &preftree->root.rb_root, rbnode) {
 342		free_inode_elem_list(ref->inode_list);
 343		free_pref(ref);
 344	}
 345
 346	preftree->root = RB_ROOT_CACHED;
 347	preftree->count = 0;
 348}
 349
 350/*
 351 * the rules for all callers of this function are:
 352 * - obtaining the parent is the goal
 353 * - if you add a key, you must know that it is a correct key
 354 * - if you cannot add the parent or a correct key, then we will look into the
 355 *   block later to set a correct key
 356 *
 357 * delayed refs
 358 * ============
 359 *        backref type | shared | indirect | shared | indirect
 360 * information         |   tree |     tree |   data |     data
 361 * --------------------+--------+----------+--------+----------
 362 *      parent logical |    y   |     -    |    -   |     -
 363 *      key to resolve |    -   |     y    |    y   |     y
 364 *  tree block logical |    -   |     -    |    -   |     -
 365 *  root for resolving |    y   |     y    |    y   |     y
 366 *
 367 * - column 1:       we have the parent -> done
 368 * - column 2, 3, 4: we use the key to find the parent
 369 *
 370 * on disk refs (inline or keyed)
 371 * ==============================
 372 *        backref type | shared | indirect | shared | indirect
 373 * information         |   tree |     tree |   data |     data
 374 * --------------------+--------+----------+--------+----------
 375 *      parent logical |    y   |     -    |    y   |     -
 376 *      key to resolve |    -   |     -    |    -   |     y
 377 *  tree block logical |    y   |     y    |    y   |     y
 378 *  root for resolving |    -   |     y    |    y   |     y
 379 *
 380 * - column 1, 3: we have the parent -> done
 381 * - column 2:    we take the first key from the block to find the parent
 382 *                (see add_missing_keys)
 383 * - column 4:    we use the key to find the parent
 384 *
 385 * additional information that's available but not required to find the parent
 386 * block might help in merging entries to gain some speed.
 387 */
 388static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
 389			  struct preftree *preftree, u64 root_id,
 390			  const struct btrfs_key *key, int level, u64 parent,
 391			  u64 wanted_disk_byte, int count,
 392			  struct share_check *sc, gfp_t gfp_mask)
 393{
 394	struct prelim_ref *ref;
 395
 396	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
 397		return 0;
 398
 399	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
 400	if (!ref)
 401		return -ENOMEM;
 402
 403	ref->root_id = root_id;
 404	if (key)
 405		ref->key_for_search = *key;
 406	else
 407		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
 408
 409	ref->inode_list = NULL;
 410	ref->level = level;
 411	ref->count = count;
 412	ref->parent = parent;
 413	ref->wanted_disk_byte = wanted_disk_byte;
 414	prelim_ref_insert(fs_info, preftree, ref, sc);
 415	return extent_is_shared(sc);
 416}
 417
 418/* direct refs use root == 0, key == NULL */
 419static int add_direct_ref(const struct btrfs_fs_info *fs_info,
 420			  struct preftrees *preftrees, int level, u64 parent,
 421			  u64 wanted_disk_byte, int count,
 422			  struct share_check *sc, gfp_t gfp_mask)
 423{
 424	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
 425			      parent, wanted_disk_byte, count, sc, gfp_mask);
 426}
 427
 428/* indirect refs use parent == 0 */
 429static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
 430			    struct preftrees *preftrees, u64 root_id,
 431			    const struct btrfs_key *key, int level,
 432			    u64 wanted_disk_byte, int count,
 433			    struct share_check *sc, gfp_t gfp_mask)
 434{
 435	struct preftree *tree = &preftrees->indirect;
 436
 437	if (!key)
 438		tree = &preftrees->indirect_missing_keys;
 439	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
 440			      wanted_disk_byte, count, sc, gfp_mask);
 441}
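
/*
 * To illustrate the two helpers above: a shared tree block ref whose parent
 * node sits at @parent is queued as
 *
 *   add_direct_ref(fs_info, preftrees, level + 1, parent, bytenr, 1, sc, gfp);
 *
 * while a tree block ref that only names its root, with no key available yet,
 * becomes
 *
 *   add_indirect_ref(fs_info, preftrees, root_id, NULL, level + 1, bytenr, 1,
 *                    sc, gfp);
 *
 * and lands in the indirect_missing_keys tree until add_missing_keys() reads
 * the first key from the block.
 */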
 442
 443static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
 444{
 445	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
 446	struct rb_node *parent = NULL;
 447	struct prelim_ref *ref = NULL;
 448	struct prelim_ref target = {};
 449	int result;
 450
 451	target.parent = bytenr;
 452
 453	while (*p) {
 454		parent = *p;
 455		ref = rb_entry(parent, struct prelim_ref, rbnode);
 456		result = prelim_ref_compare(ref, &target);
 457
 458		if (result < 0)
 459			p = &(*p)->rb_left;
 460		else if (result > 0)
 461			p = &(*p)->rb_right;
 462		else
 463			return 1;
 464	}
 465	return 0;
 466}
 467
 468static int add_all_parents(struct btrfs_backref_walk_ctx *ctx,
 469			   struct btrfs_root *root, struct btrfs_path *path,
 470			   struct ulist *parents,
 471			   struct preftrees *preftrees, struct prelim_ref *ref,
 472			   int level)
 473{
 474	int ret = 0;
 475	int slot;
 476	struct extent_buffer *eb;
 477	struct btrfs_key key;
 478	struct btrfs_key *key_for_search = &ref->key_for_search;
 479	struct btrfs_file_extent_item *fi;
 480	struct extent_inode_elem *eie = NULL, *old = NULL;
 481	u64 disk_byte;
 482	u64 wanted_disk_byte = ref->wanted_disk_byte;
 483	u64 count = 0;
 484	u64 data_offset;
 485	u8 type;
 486
 487	if (level != 0) {
 488		eb = path->nodes[level];
 489		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
 490		if (ret < 0)
 491			return ret;
 492		return 0;
 493	}
 494
 495	/*
 496	 * 1. We normally enter this function with the path already pointing to
 497	 *    the first item to check. But sometimes, we may enter it with
 498	 *    slot == nritems.
 499	 * 2. We are searching for a normal backref but the bytenr of this
 500	 *    leaf matches a shared data backref
 501	 * 3. The leaf owner is not equal to the root we are searching for
 502	 *
 503	 * For these cases, go to the next leaf before we continue.
 504	 */
 505	eb = path->nodes[0];
 506	if (path->slots[0] >= btrfs_header_nritems(eb) ||
 507	    is_shared_data_backref(preftrees, eb->start) ||
 508	    ref->root_id != btrfs_header_owner(eb)) {
 509		if (ctx->time_seq == BTRFS_SEQ_LAST)
 510			ret = btrfs_next_leaf(root, path);
 511		else
 512			ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
 513	}
 514
 515	while (!ret && count < ref->count) {
 516		eb = path->nodes[0];
 517		slot = path->slots[0];
 518
 519		btrfs_item_key_to_cpu(eb, &key, slot);
 520
 521		if (key.objectid != key_for_search->objectid ||
 522		    key.type != BTRFS_EXTENT_DATA_KEY)
 523			break;
 524
 525		/*
 526		 * We are searching for a normal backref but the bytenr of this
 527		 * leaf matches a shared data backref, OR
 528		 * the leaf owner is not equal to the root we are searching for
 529		 */
 530		if (slot == 0 &&
 531		    (is_shared_data_backref(preftrees, eb->start) ||
 532		     ref->root_id != btrfs_header_owner(eb))) {
 533			if (ctx->time_seq == BTRFS_SEQ_LAST)
 534				ret = btrfs_next_leaf(root, path);
 535			else
 536				ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
 537			continue;
 538		}
 539		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
 540		type = btrfs_file_extent_type(eb, fi);
 541		if (type == BTRFS_FILE_EXTENT_INLINE)
 542			goto next;
 543		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
 544		data_offset = btrfs_file_extent_offset(eb, fi);
 545
 546		if (disk_byte == wanted_disk_byte) {
 547			eie = NULL;
 548			old = NULL;
 549			if (ref->key_for_search.offset == key.offset - data_offset)
 550				count++;
 551			else
 552				goto next;
 553			if (!ctx->skip_inode_ref_list) {
 554				ret = check_extent_in_eb(ctx, &key, eb, fi, &eie);
 555				if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
 556				    ret < 0)
 557					break;
 558			}
 559			if (ret > 0)
 560				goto next;
 561			ret = ulist_add_merge_ptr(parents, eb->start,
 562						  eie, (void **)&old, GFP_NOFS);
 563			if (ret < 0)
 564				break;
 565			if (!ret && !ctx->skip_inode_ref_list) {
 566				while (old->next)
 567					old = old->next;
 568				old->next = eie;
 569			}
 570			eie = NULL;
 571		}
 572next:
 573		if (ctx->time_seq == BTRFS_SEQ_LAST)
 574			ret = btrfs_next_item(root, path);
 575		else
 576			ret = btrfs_next_old_item(root, path, ctx->time_seq);
 577	}
 578
 579	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
 580		free_inode_elem_list(eie);
 581	else if (ret > 0)
 582		ret = 0;
 583
 584	return ret;
 585}
 586
 587/*
 588 * resolve an indirect backref in the form (root_id, key, level)
 589 * to a logical address
 590 */
 591static int resolve_indirect_ref(struct btrfs_backref_walk_ctx *ctx,
 592				struct btrfs_path *path,
 593				struct preftrees *preftrees,
 594				struct prelim_ref *ref, struct ulist *parents)
 595{
 596	struct btrfs_root *root;
 597	struct extent_buffer *eb;
 598	int ret = 0;
 599	int root_level;
 600	int level = ref->level;
 601	struct btrfs_key search_key = ref->key_for_search;
 602
 603	/*
 604	 * If we're search_commit_root we could possibly be holding locks on
 605	 * other tree nodes.  This happens when qgroups do backref walks when
 606	 * adding new delayed refs.  To deal with this we need to look in cache
 607	 * for the root, and if we don't find it then we need to search the
 608	 * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage
 609	 * here.
 610	 */
 611	if (path->search_commit_root)
 612		root = btrfs_get_fs_root_commit_root(ctx->fs_info, path, ref->root_id);
 613	else
 614		root = btrfs_get_fs_root(ctx->fs_info, ref->root_id, false);
 615	if (IS_ERR(root)) {
 616		ret = PTR_ERR(root);
 617		goto out_free;
 618	}
 619
 620	if (!path->search_commit_root &&
 621	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
 622		ret = -ENOENT;
 623		goto out;
 624	}
 625
 626	if (btrfs_is_testing(ctx->fs_info)) {
 627		ret = -ENOENT;
 628		goto out;
 629	}
 630
 631	if (path->search_commit_root)
 632		root_level = btrfs_header_level(root->commit_root);
 633	else if (ctx->time_seq == BTRFS_SEQ_LAST)
 634		root_level = btrfs_header_level(root->node);
 635	else
 636		root_level = btrfs_old_root_level(root, ctx->time_seq);
 637
 638	if (root_level + 1 == level)
 639		goto out;
 640
 641	/*
 642	 * We can often find data backrefs with an offset that is too large
 643	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
 644	 * subtracting the data offset of its corresponding extent data item
 645	 * from a file offset. This can happen for example in the
 646	 * clone ioctl.
 647	 *
 648	 * So if we detect such a case we set the search key's offset to zero to
 649	 * make sure we will find the matching file extent item at
 650	 * add_all_parents(), otherwise we will miss it because the offset
 651	 * taken from the backref is much larger than the offset of the file
 652	 * extent item. This can make us scan a very large number of file
 653	 * extent items, but at least it will not make us miss any.
 654	 *
 655	 * This is an ugly workaround for a behaviour that should have never
 656	 * existed, but it does and a fix for the clone ioctl would touch a lot
 657	 * of places, cause backwards incompatibility and would not fix the
 658	 * problem for extents cloned with older kernels.
 659	 */
 660	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
 661	    search_key.offset >= LLONG_MAX)
 662		search_key.offset = 0;
 663	path->lowest_level = level;
 664	if (ctx->time_seq == BTRFS_SEQ_LAST)
 665		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
 666	else
 667		ret = btrfs_search_old_slot(root, &search_key, path, ctx->time_seq);
 668
 669	btrfs_debug(ctx->fs_info,
 670		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
 671		 ref->root_id, level, ref->count, ret,
 672		 ref->key_for_search.objectid, ref->key_for_search.type,
 673		 ref->key_for_search.offset);
 674	if (ret < 0)
 675		goto out;
 676
 677	eb = path->nodes[level];
 678	while (!eb) {
 679		if (WARN_ON(!level)) {
 680			ret = 1;
 681			goto out;
 682		}
 683		level--;
 684		eb = path->nodes[level];
 685	}
 686
 687	ret = add_all_parents(ctx, root, path, parents, preftrees, ref, level);
 688out:
 689	btrfs_put_root(root);
 690out_free:
 691	path->lowest_level = 0;
 692	btrfs_release_path(path);
 693	return ret;
 694}
 695
 696static struct extent_inode_elem *
 697unode_aux_to_inode_list(struct ulist_node *node)
 698{
 699	if (!node)
 700		return NULL;
 701	return (struct extent_inode_elem *)(uintptr_t)node->aux;
 702}
 703
 704static void free_leaf_list(struct ulist *ulist)
 705{
 706	struct ulist_node *node;
 707	struct ulist_iterator uiter;
 708
 709	ULIST_ITER_INIT(&uiter);
 710	while ((node = ulist_next(ulist, &uiter)))
 711		free_inode_elem_list(unode_aux_to_inode_list(node));
 712
 713	ulist_free(ulist);
 714}
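
/*
 * The cast in unode_aux_to_inode_list() works because inode lists are stashed
 * in the ulist nodes' aux field by the ulist_add_merge_ptr() calls in
 * add_all_parents() and find_parent_nodes(). That is also why a plain
 * ulist_free() is not enough here: every aux pointer must be freed first.
 */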
 715
 716/*
 717 * We maintain three separate rbtrees: one for direct refs, one for
 718 * indirect refs which have a key, and one for indirect refs which do not
 719 * have a key. Each tree does merge on insertion.
 720 *
 721 * Once all of the references are located, we iterate over the tree of
 722 * indirect refs with missing keys. An appropriate key is located and
 723 * the ref is moved onto the tree for indirect refs. After all missing
 724 * keys are thus located, we iterate over the indirect ref tree, resolve
 725 * each reference, and then insert the resolved reference onto the
 726 * direct tree (merging there too).
 727 *
 728 * New backrefs (i.e., for parent nodes) are added to the appropriate
 729 * rbtree as they are encountered. The new backrefs are subsequently
 730 * resolved as above.
 731 */
 732static int resolve_indirect_refs(struct btrfs_backref_walk_ctx *ctx,
 733				 struct btrfs_path *path,
 734				 struct preftrees *preftrees,
 735				 struct share_check *sc)
 736{
 737	int err;
 738	int ret = 0;
 739	struct ulist *parents;
 740	struct ulist_node *node;
 741	struct ulist_iterator uiter;
 742	struct rb_node *rnode;
 743
 744	parents = ulist_alloc(GFP_NOFS);
 745	if (!parents)
 746		return -ENOMEM;
 747
 748	/*
 749	 * We could trade memory usage for performance here by iterating
 750	 * the tree, allocating new refs for each insertion, and then
 751	 * freeing the entire indirect tree when we're done.  In some test
 752	 * cases, the tree can grow quite large (~200k objects).
 753	 */
 754	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
 755		struct prelim_ref *ref;
 756
 757		ref = rb_entry(rnode, struct prelim_ref, rbnode);
 758		if (WARN(ref->parent,
 759			 "BUG: direct ref found in indirect tree")) {
 760			ret = -EINVAL;
 761			goto out;
 762		}
 763
 764		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
 765		preftrees->indirect.count--;
 766
 767		if (ref->count == 0) {
 768			free_pref(ref);
 769			continue;
 770		}
 771
 772		if (sc && ref->root_id != btrfs_root_id(sc->root)) {
 773			free_pref(ref);
 774			ret = BACKREF_FOUND_SHARED;
 775			goto out;
 776		}
 777		err = resolve_indirect_ref(ctx, path, preftrees, ref, parents);
 778		/*
 779		 * We can only tolerate ENOENT; otherwise we should catch the
 780		 * error and return directly.
 781		 */
 782		if (err == -ENOENT) {
 783			prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref,
 784					  NULL);
 785			continue;
 786		} else if (err) {
 787			free_pref(ref);
 788			ret = err;
 789			goto out;
 790		}
 791
 792		/* we put the first parent into the ref at hand */
 793		ULIST_ITER_INIT(&uiter);
 794		node = ulist_next(parents, &uiter);
 795		ref->parent = node ? node->val : 0;
 796		ref->inode_list = unode_aux_to_inode_list(node);
 797
 798		/* Add a prelim_ref for each additional parent. */
 799		while ((node = ulist_next(parents, &uiter))) {
 800			struct prelim_ref *new_ref;
 801
 802			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
 803						   GFP_NOFS);
 804			if (!new_ref) {
 805				free_pref(ref);
 806				ret = -ENOMEM;
 807				goto out;
 808			}
 809			memcpy(new_ref, ref, sizeof(*ref));
 810			new_ref->parent = node->val;
 811			new_ref->inode_list = unode_aux_to_inode_list(node);
 812			prelim_ref_insert(ctx->fs_info, &preftrees->direct,
 813					  new_ref, NULL);
 814		}
 815
 816		/*
 817		 * Now it's a direct ref, put it in the direct tree. We must
 818		 * do this last because the ref could be merged/freed here.
 819		 */
 820		prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref, NULL);
 821
 822		ulist_reinit(parents);
 823		cond_resched();
 824	}
 825out:
 826	/*
 827	 * We may have inode lists attached to refs in the parents ulist, so we
 828	 * must free them before freeing the ulist and its refs.
 829	 */
 830	free_leaf_list(parents);
 831	return ret;
 832}
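
/*
 * Note on the ENOENT tolerance above: an indirect ref whose root cannot be
 * resolved (e.g. a root that is being deleted, see resolve_indirect_ref())
 * is kept as an unresolved entry in the direct tree instead of aborting the
 * whole walk, so the overall ref counts stay consistent.
 */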
 833
 834/*
 835 * read tree blocks and add keys where required.
 836 */
 837static int add_missing_keys(struct btrfs_fs_info *fs_info,
 838			    struct preftrees *preftrees, bool lock)
 839{
 840	struct prelim_ref *ref;
 841	struct extent_buffer *eb;
 842	struct preftree *tree = &preftrees->indirect_missing_keys;
 843	struct rb_node *node;
 844
 845	while ((node = rb_first_cached(&tree->root))) {
 846		struct btrfs_tree_parent_check check = { 0 };
 847
 848		ref = rb_entry(node, struct prelim_ref, rbnode);
 849		rb_erase_cached(node, &tree->root);
 850
 851		BUG_ON(ref->parent);	/* should not be a direct ref */
 852		BUG_ON(ref->key_for_search.type);
 853		BUG_ON(!ref->wanted_disk_byte);
 854
 855		check.level = ref->level - 1;
 856		check.owner_root = ref->root_id;
 857
 858		eb = read_tree_block(fs_info, ref->wanted_disk_byte, &check);
 859		if (IS_ERR(eb)) {
 860			free_pref(ref);
 861			return PTR_ERR(eb);
 862		}
 863		if (!extent_buffer_uptodate(eb)) {
 864			free_pref(ref);
 865			free_extent_buffer(eb);
 866			return -EIO;
 867		}
 868
 869		if (lock)
 870			btrfs_tree_read_lock(eb);
 871		if (btrfs_header_level(eb) == 0)
 872			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
 873		else
 874			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
 875		if (lock)
 876			btrfs_tree_read_unlock(eb);
 877		free_extent_buffer(eb);
 878		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
 879		cond_resched();
 880	}
 881	return 0;
 882}
 883
 884/*
 885 * add all currently queued delayed refs from this head whose seq nr is
 886 * smaller than or equal to that seq to the list
 887 */
 888static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
 889			    struct btrfs_delayed_ref_head *head, u64 seq,
 890			    struct preftrees *preftrees, struct share_check *sc)
 891{
 892	struct btrfs_delayed_ref_node *node;
 893	struct btrfs_key key;
 894	struct rb_node *n;
 895	int count;
 896	int ret = 0;
 897
 898	spin_lock(&head->lock);
 899	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
 900		node = rb_entry(n, struct btrfs_delayed_ref_node,
 901				ref_node);
 902		if (node->seq > seq)
 903			continue;
 904
 905		switch (node->action) {
 906		case BTRFS_ADD_DELAYED_EXTENT:
 907		case BTRFS_UPDATE_DELAYED_HEAD:
 908			WARN_ON(1);
 909			continue;
 910		case BTRFS_ADD_DELAYED_REF:
 911			count = node->ref_mod;
 912			break;
 913		case BTRFS_DROP_DELAYED_REF:
 914			count = node->ref_mod * -1;
 915			break;
 916		default:
 917			BUG();
 918		}
 919		switch (node->type) {
 920		case BTRFS_TREE_BLOCK_REF_KEY: {
 921			/* NORMAL INDIRECT METADATA backref */
 922			struct btrfs_key *key_ptr = NULL;
 923			/* The owner of a tree block ref is the level. */
 924			int level = btrfs_delayed_ref_owner(node);
 925
 926			if (head->extent_op && head->extent_op->update_key) {
 927				btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
 928				key_ptr = &key;
 929			}
 930
 931			ret = add_indirect_ref(fs_info, preftrees, node->ref_root,
 932					       key_ptr, level + 1, node->bytenr,
 933					       count, sc, GFP_ATOMIC);
 934			break;
 935		}
 936		case BTRFS_SHARED_BLOCK_REF_KEY: {
 937			/*
 938			 * SHARED DIRECT METADATA backref
 939			 *
 940			 * The owner of a tree block ref is the level.
 941			 */
 942			int level = btrfs_delayed_ref_owner(node);
 943
 944			ret = add_direct_ref(fs_info, preftrees, level + 1,
 945					     node->parent, node->bytenr, count,
 946					     sc, GFP_ATOMIC);
 947			break;
 948		}
 949		case BTRFS_EXTENT_DATA_REF_KEY: {
 950			/* NORMAL INDIRECT DATA backref */
 951			key.objectid = btrfs_delayed_ref_owner(node);
 952			key.type = BTRFS_EXTENT_DATA_KEY;
 953			key.offset = btrfs_delayed_ref_offset(node);
 954
 955			/*
 956			 * If we have a share check context and a reference for
 957			 * another inode, we can't exit immediately. This is
 958			 * because even if this is a BTRFS_ADD_DELAYED_REF
 959			 * reference we may find next a BTRFS_DROP_DELAYED_REF
 960			 * which cancels out this ADD reference.
 961			 *
 962			 * If this is a DROP reference and there was no previous
 963			 * ADD reference, then we need to signal that when we
 964			 * process references from the extent tree (through
 965			 * add_inline_refs() and add_keyed_refs()), we should
 966			 * not exit early if we find a reference for another
 967			 * inode, because one of the delayed DROP references
 968			 * may cancel that reference in the extent tree.
 969			 */
 970			if (sc && count < 0)
 971				sc->have_delayed_delete_refs = true;
 972
 973			ret = add_indirect_ref(fs_info, preftrees, node->ref_root,
 974					       &key, 0, node->bytenr, count, sc,
 975					       GFP_ATOMIC);
 976			break;
 977		}
 978		case BTRFS_SHARED_DATA_REF_KEY: {
 979			/* SHARED DIRECT FULL backref */
 980			ret = add_direct_ref(fs_info, preftrees, 0, node->parent,
 981					     node->bytenr, count, sc,
 982					     GFP_ATOMIC);
 983			break;
 984		}
 985		default:
 986			WARN_ON(1);
 987		}
 988		/*
 989		 * We must ignore BACKREF_FOUND_SHARED until all delayed
 990		 * refs have been checked.
 991		 */
 992		if (ret && (ret != BACKREF_FOUND_SHARED))
 993			break;
 994	}
 995	if (!ret)
 996		ret = extent_is_shared(sc);
 997
 998	spin_unlock(&head->lock);
 999	return ret;
1000}
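
/*
 * Example of why BACKREF_FOUND_SHARED must be deferred above: a data extent
 * with one reference on disk plus a queued BTRFS_DROP_DELAYED_REF
 * (count == -1) for the same (root, objectid, offset) merges to a net count
 * of 0, so concluding "shared" before all delayed refs were processed could
 * be wrong.
 */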
1001
1002/*
1003 * add all inline backrefs for bytenr to the list
1004 *
1005 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
1006 */
1007static int add_inline_refs(struct btrfs_backref_walk_ctx *ctx,
1008			   struct btrfs_path *path,
1009			   int *info_level, struct preftrees *preftrees,
1010			   struct share_check *sc)
1011{
1012	int ret = 0;
1013	int slot;
1014	struct extent_buffer *leaf;
1015	struct btrfs_key key;
1016	struct btrfs_key found_key;
1017	unsigned long ptr;
1018	unsigned long end;
1019	struct btrfs_extent_item *ei;
1020	u64 flags;
1021	u64 item_size;
1022
1023	/*
1024	 * enumerate all inline refs
1025	 */
1026	leaf = path->nodes[0];
1027	slot = path->slots[0];
1028
1029	item_size = btrfs_item_size(leaf, slot);
1030	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
1031
1032	if (ctx->check_extent_item) {
1033		ret = ctx->check_extent_item(ctx->bytenr, ei, leaf, ctx->user_ctx);
1034		if (ret)
1035			return ret;
1036	}
1037
1038	flags = btrfs_extent_flags(leaf, ei);
1039	btrfs_item_key_to_cpu(leaf, &found_key, slot);
1040
1041	ptr = (unsigned long)(ei + 1);
1042	end = (unsigned long)ei + item_size;
1043
1044	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
1045	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1046		struct btrfs_tree_block_info *info;
1047
1048		info = (struct btrfs_tree_block_info *)ptr;
1049		*info_level = btrfs_tree_block_level(leaf, info);
1050		ptr += sizeof(struct btrfs_tree_block_info);
1051		BUG_ON(ptr > end);
1052	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
1053		*info_level = found_key.offset;
1054	} else {
1055		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
1056	}
1057
1058	while (ptr < end) {
1059		struct btrfs_extent_inline_ref *iref;
1060		u64 offset;
1061		int type;
1062
1063		iref = (struct btrfs_extent_inline_ref *)ptr;
1064		type = btrfs_get_extent_inline_ref_type(leaf, iref,
1065							BTRFS_REF_TYPE_ANY);
1066		if (type == BTRFS_REF_TYPE_INVALID)
1067			return -EUCLEAN;
1068
1069		offset = btrfs_extent_inline_ref_offset(leaf, iref);
1070
1071		switch (type) {
1072		case BTRFS_SHARED_BLOCK_REF_KEY:
1073			ret = add_direct_ref(ctx->fs_info, preftrees,
1074					     *info_level + 1, offset,
1075					     ctx->bytenr, 1, NULL, GFP_NOFS);
1076			break;
1077		case BTRFS_SHARED_DATA_REF_KEY: {
1078			struct btrfs_shared_data_ref *sdref;
1079			int count;
1080
1081			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
1082			count = btrfs_shared_data_ref_count(leaf, sdref);
1083
1084			ret = add_direct_ref(ctx->fs_info, preftrees, 0, offset,
1085					     ctx->bytenr, count, sc, GFP_NOFS);
1086			break;
1087		}
1088		case BTRFS_TREE_BLOCK_REF_KEY:
1089			ret = add_indirect_ref(ctx->fs_info, preftrees, offset,
1090					       NULL, *info_level + 1,
1091					       ctx->bytenr, 1, NULL, GFP_NOFS);
1092			break;
1093		case BTRFS_EXTENT_DATA_REF_KEY: {
1094			struct btrfs_extent_data_ref *dref;
1095			int count;
1096			u64 root;
1097
1098			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1099			count = btrfs_extent_data_ref_count(leaf, dref);
1100			key.objectid = btrfs_extent_data_ref_objectid(leaf,
1101								      dref);
1102			key.type = BTRFS_EXTENT_DATA_KEY;
1103			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
1104
1105			if (sc && key.objectid != sc->inum &&
1106			    !sc->have_delayed_delete_refs) {
1107				ret = BACKREF_FOUND_SHARED;
1108				break;
1109			}
1110
1111			root = btrfs_extent_data_ref_root(leaf, dref);
1112
1113			if (!ctx->skip_data_ref ||
1114			    !ctx->skip_data_ref(root, key.objectid, key.offset,
1115						ctx->user_ctx))
1116				ret = add_indirect_ref(ctx->fs_info, preftrees,
1117						       root, &key, 0, ctx->bytenr,
1118						       count, sc, GFP_NOFS);
1119			break;
1120		}
1121		case BTRFS_EXTENT_OWNER_REF_KEY:
1122			ASSERT(btrfs_fs_incompat(ctx->fs_info, SIMPLE_QUOTA));
1123			break;
1124		default:
1125			WARN_ON(1);
1126		}
1127		if (ret)
1128			return ret;
1129		ptr += btrfs_extent_inline_ref_size(type);
1130	}
1131
1132	return 0;
1133}
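
/*
 * The [ptr, end) walk above covers the inline ref area of the extent item,
 * which is laid out roughly as:
 *
 *   btrfs_extent_item | [btrfs_tree_block_info] | inline ref | inline ref ...
 *
 * where the btrfs_tree_block_info part exists only for non-skinny metadata
 * extents (BTRFS_EXTENT_ITEM_KEY with BTRFS_EXTENT_FLAG_TREE_BLOCK), which is
 * why @ptr is advanced past it before the loop starts.
 */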
1134
1135/*
1136 * add all non-inline backrefs for bytenr to the list
1137 *
1138 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
1139 */
1140static int add_keyed_refs(struct btrfs_backref_walk_ctx *ctx,
1141			  struct btrfs_root *extent_root,
1142			  struct btrfs_path *path,
1143			  int info_level, struct preftrees *preftrees,
1144			  struct share_check *sc)
1145{
1146	struct btrfs_fs_info *fs_info = extent_root->fs_info;
1147	int ret;
1148	int slot;
1149	struct extent_buffer *leaf;
1150	struct btrfs_key key;
1151
1152	while (1) {
1153		ret = btrfs_next_item(extent_root, path);
1154		if (ret < 0)
1155			break;
1156		if (ret) {
1157			ret = 0;
1158			break;
1159		}
1160
1161		slot = path->slots[0];
1162		leaf = path->nodes[0];
1163		btrfs_item_key_to_cpu(leaf, &key, slot);
1164
1165		if (key.objectid != ctx->bytenr)
1166			break;
1167		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
1168			continue;
1169		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
1170			break;
1171
1172		switch (key.type) {
1173		case BTRFS_SHARED_BLOCK_REF_KEY:
1174			/* SHARED DIRECT METADATA backref */
1175			ret = add_direct_ref(fs_info, preftrees,
1176					     info_level + 1, key.offset,
1177					     ctx->bytenr, 1, NULL, GFP_NOFS);
1178			break;
1179		case BTRFS_SHARED_DATA_REF_KEY: {
1180			/* SHARED DIRECT FULL backref */
1181			struct btrfs_shared_data_ref *sdref;
1182			int count;
1183
1184			sdref = btrfs_item_ptr(leaf, slot,
1185					      struct btrfs_shared_data_ref);
1186			count = btrfs_shared_data_ref_count(leaf, sdref);
1187			ret = add_direct_ref(fs_info, preftrees, 0,
1188					     key.offset, ctx->bytenr, count,
1189					     sc, GFP_NOFS);
1190			break;
1191		}
1192		case BTRFS_TREE_BLOCK_REF_KEY:
1193			/* NORMAL INDIRECT METADATA backref */
1194			ret = add_indirect_ref(fs_info, preftrees, key.offset,
1195					       NULL, info_level + 1, ctx->bytenr,
1196					       1, NULL, GFP_NOFS);
1197			break;
1198		case BTRFS_EXTENT_DATA_REF_KEY: {
1199			/* NORMAL INDIRECT DATA backref */
1200			struct btrfs_extent_data_ref *dref;
1201			int count;
1202			u64 root;
1203
1204			dref = btrfs_item_ptr(leaf, slot,
1205					      struct btrfs_extent_data_ref);
1206			count = btrfs_extent_data_ref_count(leaf, dref);
1207			key.objectid = btrfs_extent_data_ref_objectid(leaf,
1208								      dref);
1209			key.type = BTRFS_EXTENT_DATA_KEY;
1210			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
1211
1212			if (sc && key.objectid != sc->inum &&
1213			    !sc->have_delayed_delete_refs) {
1214				ret = BACKREF_FOUND_SHARED;
1215				break;
1216			}
1217
1218			root = btrfs_extent_data_ref_root(leaf, dref);
1219
1220			if (!ctx->skip_data_ref ||
1221			    !ctx->skip_data_ref(root, key.objectid, key.offset,
1222						ctx->user_ctx))
1223				ret = add_indirect_ref(fs_info, preftrees, root,
1224						       &key, 0, ctx->bytenr,
1225						       count, sc, GFP_NOFS);
1226			break;
1227		}
1228		default:
1229			WARN_ON(1);
1230		}
1231		if (ret)
1232			return ret;
1233
1234	}
1235
1236	return ret;
1237}
1238
1239/*
1240 * The caller has joined a transaction or is holding a read lock on the
1241 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
1242 * snapshot field changing while updating or checking the cache.
1243 */
1244static bool lookup_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
1245					struct btrfs_root *root,
1246					u64 bytenr, int level, bool *is_shared)
1247{
1248	const struct btrfs_fs_info *fs_info = root->fs_info;
1249	struct btrfs_backref_shared_cache_entry *entry;
1250
1251	if (!current->journal_info)
1252		lockdep_assert_held(&fs_info->commit_root_sem);
1253
1254	if (!ctx->use_path_cache)
1255		return false;
1256
1257	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
1258		return false;
1259
1260	/*
1261	 * Level -1 is used for the data extent, which is not reliable to cache
1262	 * because its reference count can increase or decrease without us
1263	 * realizing. We cache results only for extent buffers that lead from
1264	 * the root node down to the leaf with the file extent item.
1265	 */
1266	ASSERT(level >= 0);
1267
1268	entry = &ctx->path_cache_entries[level];
1269
1270	/* Unused cache entry or being used for some other extent buffer. */
1271	if (entry->bytenr != bytenr)
1272		return false;
1273
1274	/*
1275	 * We cached a false result, but the last snapshot generation of the
1276	 * root changed, so we now have a snapshot. Don't trust the result.
1277	 */
1278	if (!entry->is_shared &&
1279	    entry->gen != btrfs_root_last_snapshot(&root->root_item))
1280		return false;
1281
1282	/*
1283	 * If we cached a true result and the last generation used for dropping
1284	 * a root changed, we can not trust the result, because the dropped root
1285	 * could be a snapshot sharing this extent buffer.
1286	 */
1287	if (entry->is_shared &&
1288	    entry->gen != btrfs_get_last_root_drop_gen(fs_info))
1289		return false;
1290
1291	*is_shared = entry->is_shared;
1292	/*
1293	 * If the node at this level is shared, then all nodes below are also
1294	 * shared. Currently some of the nodes below may be marked as not shared
1295	 * because we have just switched from one leaf to another, which also
1296	 * switched other nodes above the leaf and below the current level, so
1297	 * mark them as shared.
1298	 */
1299	if (*is_shared) {
1300		for (int i = 0; i < level; i++) {
1301			ctx->path_cache_entries[i].is_shared = true;
1302			ctx->path_cache_entries[i].gen = entry->gen;
1303		}
1304	}
1305
1306	return true;
1307}
1308
1309/*
1310 * The caller has joined a transaction or is holding a read lock on the
1311 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
1312 * snapshot field changing while updating or checking the cache.
1313 */
1314static void store_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
1315				       struct btrfs_root *root,
1316				       u64 bytenr, int level, bool is_shared)
1317{
1318	const struct btrfs_fs_info *fs_info = root->fs_info;
1319	struct btrfs_backref_shared_cache_entry *entry;
1320	u64 gen;
1321
1322	if (!current->journal_info)
1323		lockdep_assert_held(&fs_info->commit_root_sem);
1324
1325	if (!ctx->use_path_cache)
1326		return;
1327
1328	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
1329		return;
1330
1331	/*
1332	 * Level -1 is used for the data extent, which is not reliable to cache
1333	 * because its reference count can increase or decrease without us
1334	 * realizing. We cache results only for extent buffers that lead from
1335	 * the root node down to the leaf with the file extent item.
1336	 */
1337	ASSERT(level >= 0);
1338
1339	if (is_shared)
1340		gen = btrfs_get_last_root_drop_gen(fs_info);
1341	else
1342		gen = btrfs_root_last_snapshot(&root->root_item);
1343
1344	entry = &ctx->path_cache_entries[level];
1345	entry->bytenr = bytenr;
1346	entry->is_shared = is_shared;
1347	entry->gen = gen;
1348
1349	/*
1350	 * If we found an extent buffer is shared, set the cache result for all
1351	 * extent buffers below it to true. As nodes in the path are COWed,
1352	 * their sharedness is moved to their children, and if a leaf is COWed,
1353	 * then the sharedness of a data extent becomes direct, and the refcount
1354	 * of the data extent is increased in the extent item in the extent tree.
1355	 */
1356	if (is_shared) {
1357		for (int i = 0; i < level; i++) {
1358			entry = &ctx->path_cache_entries[i];
1359			entry->is_shared = is_shared;
1360			entry->gen = gen;
1361		}
1362	}
1363}
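
/*
 * Example of the downward propagation above: if the path for a data extent is
 * root -> node A -> leaf B and node A is found to be shared, the cache
 * entries for all levels below A are set to shared as well, so a later lookup
 * for leaf B can short-circuit without walking the backrefs again.
 */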
1364
1365/*
1366 * this adds all existing backrefs (inline backrefs, backrefs and delayed
1367 * refs) for the given bytenr to the refs list, merges duplicates and resolves
1368 * indirect refs to their parent bytenr.
1369 * When roots are found, they're added to the roots list
1370 *
1371 * @ctx:     Backref walking context object, must not be NULL.
1372 * @sc:      If !NULL, then immediately return BACKREF_FOUND_SHARED when a
1373 *           shared extent is detected.
1374 *
1375 * Otherwise this returns 0 for success and <0 for an error.
1376 *
1377 * FIXME some caching might speed things up
1378 */
1379static int find_parent_nodes(struct btrfs_backref_walk_ctx *ctx,
1380			     struct share_check *sc)
1381{
1382	struct btrfs_root *root = btrfs_extent_root(ctx->fs_info, ctx->bytenr);
1383	struct btrfs_key key;
1384	struct btrfs_path *path;
1385	struct btrfs_delayed_ref_root *delayed_refs = NULL;
1386	struct btrfs_delayed_ref_head *head;
1387	int info_level = 0;
1388	int ret;
1389	struct prelim_ref *ref;
1390	struct rb_node *node;
1391	struct extent_inode_elem *eie = NULL;
1392	struct preftrees preftrees = {
1393		.direct = PREFTREE_INIT,
1394		.indirect = PREFTREE_INIT,
1395		.indirect_missing_keys = PREFTREE_INIT
1396	};
1397
1398	/* Roots ulist is not needed when using a sharedness check context. */
1399	if (sc)
1400		ASSERT(ctx->roots == NULL);
1401
1402	key.objectid = ctx->bytenr;
1403	key.offset = (u64)-1;
1404	if (btrfs_fs_incompat(ctx->fs_info, SKINNY_METADATA))
1405		key.type = BTRFS_METADATA_ITEM_KEY;
1406	else
1407		key.type = BTRFS_EXTENT_ITEM_KEY;
1408
1409	path = btrfs_alloc_path();
1410	if (!path)
1411		return -ENOMEM;
1412	if (!ctx->trans) {
1413		path->search_commit_root = 1;
1414		path->skip_locking = 1;
1415	}
1416
1417	if (ctx->time_seq == BTRFS_SEQ_LAST)
1418		path->skip_locking = 1;
1419
1420again:
1421	head = NULL;
1422
1423	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1424	if (ret < 0)
1425		goto out;
1426	if (ret == 0) {
1427		/*
1428		 * Key with offset -1 found, there would have to exist an extent
1429		 * item with such offset, but this is out of the valid range.
1430		 */
1431		ret = -EUCLEAN;
1432		goto out;
1433	}
1434
1435	if (ctx->trans && likely(ctx->trans->type != __TRANS_DUMMY) &&
1436	    ctx->time_seq != BTRFS_SEQ_LAST) {
1437		/*
1438		 * We have a specific time_seq we care about and trans which
1439		 * means we have the path lock, we need to grab the ref head and
1440		 * lock it so we have a consistent view of the refs at the given
1441		 * time.
1442		 */
1443		delayed_refs = &ctx->trans->transaction->delayed_refs;
1444		spin_lock(&delayed_refs->lock);
1445		head = btrfs_find_delayed_ref_head(ctx->fs_info, delayed_refs,
1446						   ctx->bytenr);
1447		if (head) {
1448			if (!mutex_trylock(&head->mutex)) {
1449				refcount_inc(&head->refs);
1450				spin_unlock(&delayed_refs->lock);
1451
1452				btrfs_release_path(path);
1453
1454				/*
1455				 * Mutex was contended, block until it's
1456				 * released and try again
1457				 */
1458				mutex_lock(&head->mutex);
1459				mutex_unlock(&head->mutex);
1460				btrfs_put_delayed_ref_head(head);
1461				goto again;
1462			}
1463			spin_unlock(&delayed_refs->lock);
1464			ret = add_delayed_refs(ctx->fs_info, head, ctx->time_seq,
1465					       &preftrees, sc);
1466			mutex_unlock(&head->mutex);
1467			if (ret)
1468				goto out;
1469		} else {
1470			spin_unlock(&delayed_refs->lock);
1471		}
1472	}
1473
1474	if (path->slots[0]) {
1475		struct extent_buffer *leaf;
1476		int slot;
1477
1478		path->slots[0]--;
1479		leaf = path->nodes[0];
1480		slot = path->slots[0];
1481		btrfs_item_key_to_cpu(leaf, &key, slot);
1482		if (key.objectid == ctx->bytenr &&
1483		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
1484		     key.type == BTRFS_METADATA_ITEM_KEY)) {
1485			ret = add_inline_refs(ctx, path, &info_level,
1486					      &preftrees, sc);
1487			if (ret)
1488				goto out;
1489			ret = add_keyed_refs(ctx, root, path, info_level,
1490					     &preftrees, sc);
1491			if (ret)
1492				goto out;
1493		}
1494	}
1495
1496	/*
1497	 * If we have a share context and we reached here, it means the extent
1498	 * is not directly shared (no multiple reference items for it),
1499	 * otherwise we would have exited earlier with a return value of
1500	 * BACKREF_FOUND_SHARED after processing delayed references or while
1501	 * processing inline or keyed references from the extent tree.
1502	 * The extent may however be indirectly shared through shared subtrees
1503	 * as a result from creating snapshots, so we determine below what is
1504	 * its parent node, in case we are dealing with a metadata extent, or
1505	 * what's the leaf (or leaves), from a fs tree, that has a file extent
1506	 * item pointing to it in case we are dealing with a data extent.
1507	 */
1508	ASSERT(extent_is_shared(sc) == 0);
1509
1510	/*
1511	 * If we are here for a data extent and we have a share_check structure
1512	 * it means the data extent is not directly shared (does not have
1513	 * multiple reference items), so we have to check if a path in the fs
1514	 * tree (going from the root node down to the leaf that has the file
1515	 * extent item pointing to the data extent) is shared, that is, if any
1516	 * of the extent buffers in the path is referenced by other trees.
1517	 */
1518	if (sc && ctx->bytenr == sc->data_bytenr) {
1519		/*
1520		 * If our data extent is from a generation more recent than the
1521		 * last generation used to snapshot the root, then we know that
1522		 * it can not be shared through subtrees, so we can skip
1523		 * resolving indirect references, there's no point in
1524		 * determining the extent buffers for the path from the fs tree
1525		 * root node down to the leaf that has the file extent item that
1526		 * points to the data extent.
1527		 */
1528		if (sc->data_extent_gen >
1529		    btrfs_root_last_snapshot(&sc->root->root_item)) {
1530			ret = BACKREF_FOUND_NOT_SHARED;
1531			goto out;
1532		}
1533
1534		/*
1535		 * If we are only determining if a data extent is shared or not
1536		 * and the corresponding file extent item is located in the same
1537		 * leaf as the previous file extent item, we can skip resolving
1538		 * indirect references for a data extent, since the fs tree path
1539		 * is the same (same leaf, so same path). We skip as long as the
1540		 * cached result for the leaf is valid and only if there's only
1541		 * one file extent item pointing to the data extent, because in
1542		 * the case of multiple file extent items, they may be located
1543		 * in different leaves and therefore we have multiple paths.
1544		 */
1545		if (sc->ctx->curr_leaf_bytenr == sc->ctx->prev_leaf_bytenr &&
1546		    sc->self_ref_count == 1) {
1547			bool cached;
1548			bool is_shared;
1549
1550			cached = lookup_backref_shared_cache(sc->ctx, sc->root,
1551						     sc->ctx->curr_leaf_bytenr,
1552						     0, &is_shared);
1553			if (cached) {
1554				if (is_shared)
1555					ret = BACKREF_FOUND_SHARED;
1556				else
1557					ret = BACKREF_FOUND_NOT_SHARED;
1558				goto out;
1559			}
1560		}
1561	}
1562
1563	btrfs_release_path(path);
1564
1565	ret = add_missing_keys(ctx->fs_info, &preftrees, path->skip_locking == 0);
1566	if (ret)
1567		goto out;
1568
1569	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));
1570
1571	ret = resolve_indirect_refs(ctx, path, &preftrees, sc);
1572	if (ret)
1573		goto out;
1574
1575	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));
1576
1577	/*
1578	 * This walks the tree of merged and resolved refs. Tree blocks are
1579	 * read in as needed. Unique entries are added to the ulist, and
1580	 * the list of found roots is updated.
1581	 *
1582	 * We release the entire tree in one go before returning.
1583	 */
1584	node = rb_first_cached(&preftrees.direct.root);
1585	while (node) {
1586		ref = rb_entry(node, struct prelim_ref, rbnode);
1587		node = rb_next(&ref->rbnode);
1588		/*
1589		 * ref->count < 0 can happen here if there are delayed
1590		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
1591		 * prelim_ref_insert() relies on this when merging
1592		 * identical refs to keep the overall count correct.
1593		 * prelim_ref_insert() will merge only those refs
1594		 * which compare identically.  Any refs having
1595		 * e.g. different offsets would not be merged,
1596		 * and would retain their original ref->count < 0.
1597		 */
1598		if (ctx->roots && ref->count && ref->root_id && ref->parent == 0) {
1599			/* no parent == root of tree */
1600			ret = ulist_add(ctx->roots, ref->root_id, 0, GFP_NOFS);
1601			if (ret < 0)
1602				goto out;
1603		}
1604		if (ref->count && ref->parent) {
1605			if (!ctx->skip_inode_ref_list && !ref->inode_list &&
1606			    ref->level == 0) {
1607				struct btrfs_tree_parent_check check = { 0 };
1608				struct extent_buffer *eb;
1609
1610				check.level = ref->level;
1611
1612				eb = read_tree_block(ctx->fs_info, ref->parent,
1613						     &check);
1614				if (IS_ERR(eb)) {
1615					ret = PTR_ERR(eb);
1616					goto out;
1617				}
1618				if (!extent_buffer_uptodate(eb)) {
1619					free_extent_buffer(eb);
1620					ret = -EIO;
1621					goto out;
1622				}
1623
1624				if (!path->skip_locking)
1625					btrfs_tree_read_lock(eb);
1626				ret = find_extent_in_eb(ctx, eb, &eie);
1627				if (!path->skip_locking)
1628					btrfs_tree_read_unlock(eb);
1629				free_extent_buffer(eb);
1630				if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
1631				    ret < 0)
1632					goto out;
1633				ref->inode_list = eie;
1634				/*
1635				 * We transferred the list ownership to the ref,
1636				 * so set to NULL to avoid a double free in case
1637				 * an error happens after this.
1638				 */
1639				eie = NULL;
1640			}
1641			ret = ulist_add_merge_ptr(ctx->refs, ref->parent,
1642						  ref->inode_list,
1643						  (void **)&eie, GFP_NOFS);
1644			if (ret < 0)
1645				goto out;
1646			if (!ret && !ctx->skip_inode_ref_list) {
1647				/*
1648				 * We've recorded that parent, so we must extend
1649				 * its inode list here.
1650				 *
1651				 * However if there was corruption we may not
1652				 * have found an eie, return an error in this
1653				 * case.
1654				 */
1655				ASSERT(eie);
1656				if (!eie) {
1657					ret = -EUCLEAN;
1658					goto out;
1659				}
1660				while (eie->next)
1661					eie = eie->next;
1662				eie->next = ref->inode_list;
1663			}
1664			eie = NULL;
1665			/*
1666			 * We have transferred the inode list ownership from
1667			 * this ref to the ref we added to the 'refs' ulist.
1668			 * So set this ref's inode list to NULL to avoid
1669			 * use-after-free when our caller uses it or double
1670			 * frees in case an error happens before we return.
1671			 */
1672			ref->inode_list = NULL;
1673		}
1674		cond_resched();
1675	}
1676
1677out:
1678	btrfs_free_path(path);
1679
1680	prelim_release(&preftrees.direct);
1681	prelim_release(&preftrees.indirect);
1682	prelim_release(&preftrees.indirect_missing_keys);
1683
1684	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
1685		free_inode_elem_list(eie);
1686	return ret;
1687}
1688
1689/*
1690 * Finds all leaves with a reference to the specified combination of
1691 * @ctx->bytenr and @ctx->extent_item_pos. The bytenr of the found leaves are
1692 * added to the ulist at @ctx->refs, and that ulist is allocated by this
1693 * function. The caller should free the ulist with free_leaf_list() if
1694 * @ctx->ignore_extent_item_pos is false, otherwise a simple ulist_free() is
1695 * enough.
1696 *
1697 * Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated.
1698 */
1699int btrfs_find_all_leafs(struct btrfs_backref_walk_ctx *ctx)
1700{
1701	int ret;
1702
1703	ASSERT(ctx->refs == NULL);
1704
1705	ctx->refs = ulist_alloc(GFP_NOFS);
1706	if (!ctx->refs)
1707		return -ENOMEM;
1708
1709	ret = find_parent_nodes(ctx, NULL);
1710	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
1711	    (ret < 0 && ret != -ENOENT)) {
1712		free_leaf_list(ctx->refs);
1713		ctx->refs = NULL;
1714		return ret;
1715	}
1716
1717	return 0;
1718}
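
#if 0
/*
 * Minimal usage sketch for btrfs_find_all_leafs() (hypothetical helper, not
 * part of this file): collect and print the bytenr of every leaf referencing
 * the data extent at @bytenr, without filtering by extent offset. It assumes
 * the caller satisfies the locking rules of find_parent_nodes(), e.g. by
 * holding fs_info->commit_root_sem when not attached to a transaction.
 */
static int example_print_leaves(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
	struct ulist_iterator uiter;
	struct ulist_node *node;
	int ret;

	walk_ctx.fs_info = fs_info;
	walk_ctx.bytenr = bytenr;
	/* Walk the current tree state, skipping the tree mod log. */
	walk_ctx.time_seq = BTRFS_SEQ_LAST;
	walk_ctx.ignore_extent_item_pos = true;

	ret = btrfs_find_all_leafs(&walk_ctx);
	if (ret)
		return ret;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(walk_ctx.refs, &uiter)))
		pr_info("leaf at bytenr %llu\n", node->val);

	/* free_leaf_list() also releases any inode lists attached to refs. */
	free_leaf_list(walk_ctx.refs);
	return 0;
}
#endif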
1719
1720/*
1721 * Walk all backrefs for a given extent to find all roots that reference this
1722 * extent. Walking a backref means finding all extents that reference this
1723 * extent and in turn walk the backrefs of those, too. Naturally this is a
1724 * recursive process, but here it is implemented in an iterative fashion: We
1725 * find all referencing extents for the extent in question and put them on a
1726 * list. In turn, we find all referencing extents for those, further appending
1727 * to the list. The way we iterate the list allows adding more elements after
1728 * the current one while iterating. The process stops when we reach the end of the
1729 * list.
1730 *
1731 * Found roots are added to @ctx->roots, which is allocated by this function if
1732 * it points to NULL, in which case the caller is responsible for freeing it
1733 * after it's not needed anymore.
1734 * This function requires @ctx->refs to be NULL, as it uses it for allocating a
1735 * ulist to do temporary work, and frees it before returning.
1736 *
1737 * Returns 0 on success, < 0 on error.
1738 */
1739static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx)
1740{
1741	const u64 orig_bytenr = ctx->bytenr;
1742	const bool orig_skip_inode_ref_list = ctx->skip_inode_ref_list;
1743	bool roots_ulist_allocated = false;
1744	struct ulist_iterator uiter;
1745	int ret = 0;
1746
1747	ASSERT(ctx->refs == NULL);
1748
1749	ctx->refs = ulist_alloc(GFP_NOFS);
1750	if (!ctx->refs)
1751		return -ENOMEM;
1752
1753	if (!ctx->roots) {
1754		ctx->roots = ulist_alloc(GFP_NOFS);
1755		if (!ctx->roots) {
1756			ulist_free(ctx->refs);
1757			ctx->refs = NULL;
1758			return -ENOMEM;
1759		}
1760		roots_ulist_allocated = true;
1761	}
1762
1763	ctx->skip_inode_ref_list = true;
1764
1765	ULIST_ITER_INIT(&uiter);
1766	while (1) {
1767		struct ulist_node *node;
1768
1769		ret = find_parent_nodes(ctx, NULL);
1770		if (ret < 0 && ret != -ENOENT) {
1771			if (roots_ulist_allocated) {
1772				ulist_free(ctx->roots);
1773				ctx->roots = NULL;
1774			}
1775			break;
1776		}
1777		ret = 0;
1778		node = ulist_next(ctx->refs, &uiter);
1779		if (!node)
1780			break;
1781		ctx->bytenr = node->val;
1782		cond_resched();
1783	}
1784
1785	ulist_free(ctx->refs);
1786	ctx->refs = NULL;
1787	ctx->bytenr = orig_bytenr;
1788	ctx->skip_inode_ref_list = orig_skip_inode_ref_list;
1789
1790	return ret;
1791}
1792
1793int btrfs_find_all_roots(struct btrfs_backref_walk_ctx *ctx,
1794			 bool skip_commit_root_sem)
1795{
1796	int ret;
1797
1798	if (!ctx->trans && !skip_commit_root_sem)
1799		down_read(&ctx->fs_info->commit_root_sem);
1800	ret = btrfs_find_all_roots_safe(ctx);
1801	if (!ctx->trans && !skip_commit_root_sem)
1802		up_read(&ctx->fs_info->commit_root_sem);
1803	return ret;
1804}
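
/*
 * Example (illustrative sketch only, not part of the kernel tree): counting
 * the roots that reference an extent. With no transaction attached, the walk
 * is done on the commit roots.
 */
static int __maybe_unused example_count_roots(struct btrfs_fs_info *fs_info,
					      u64 bytenr, unsigned long *count)
{
	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
	int ret;

	walk_ctx.fs_info = fs_info;
	walk_ctx.bytenr = bytenr;

	/* @roots is NULL, so btrfs_find_all_roots() allocates it for us. */
	ret = btrfs_find_all_roots(&walk_ctx, false);
	if (ret < 0)
		return ret;

	*count = walk_ctx.roots->nnodes;
	ulist_free(walk_ctx.roots);
	return 0;
}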
1805
1806struct btrfs_backref_share_check_ctx *btrfs_alloc_backref_share_check_ctx(void)
1807{
1808	struct btrfs_backref_share_check_ctx *ctx;
1809
1810	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1811	if (!ctx)
1812		return NULL;
1813
1814	ulist_init(&ctx->refs);
1815
1816	return ctx;
1817}
1818
1819void btrfs_free_backref_share_ctx(struct btrfs_backref_share_check_ctx *ctx)
1820{
1821	if (!ctx)
1822		return;
1823
1824	ulist_release(&ctx->refs);
1825	kfree(ctx);
1826}
1827
1828/*
1829 * Check if a data extent is shared or not.
1830 *
1831 * @inode:       The inode whose extent we are checking.
1832 * @bytenr:      Logical bytenr of the extent we are checking.
1833 * @extent_gen:  Generation of the extent (file extent item) or 0 if it is
1834 *               not known.
1835 * @ctx:         A backref sharedness check context.
1836 *
1837 * btrfs_is_data_extent_shared uses the backref walking code but will short
1838 * circuit as soon as it finds a root or inode that doesn't match the
1839 * one passed in. This provides a significant performance benefit for
1840 * callers (such as fiemap) which want to know whether the extent is
1841 * shared but do not need a ref count.
1842 *
1843 * This attempts to attach to the running transaction in order to account for
1844 * delayed refs, but continues on even when no running transaction exists.
1845 *
1846 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
1847 */
1848int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
1849				u64 extent_gen,
1850				struct btrfs_backref_share_check_ctx *ctx)
1851{
1852	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
1853	struct btrfs_root *root = inode->root;
1854	struct btrfs_fs_info *fs_info = root->fs_info;
1855	struct btrfs_trans_handle *trans;
1856	struct ulist_iterator uiter;
1857	struct ulist_node *node;
1858	struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
1859	int ret = 0;
1860	struct share_check shared = {
1861		.ctx = ctx,
1862		.root = root,
1863		.inum = btrfs_ino(inode),
1864		.data_bytenr = bytenr,
1865		.data_extent_gen = extent_gen,
1866		.share_count = 0,
1867		.self_ref_count = 0,
1868		.have_delayed_delete_refs = false,
1869	};
1870	int level;
1871	bool leaf_cached;
1872	bool leaf_is_shared;
1873
1874	for (int i = 0; i < BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE; i++) {
1875		if (ctx->prev_extents_cache[i].bytenr == bytenr)
1876			return ctx->prev_extents_cache[i].is_shared;
1877	}
1878
1879	ulist_init(&ctx->refs);
1880
1881	trans = btrfs_join_transaction_nostart(root);
1882	if (IS_ERR(trans)) {
1883		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1884			ret = PTR_ERR(trans);
1885			goto out;
1886		}
1887		trans = NULL;
1888		down_read(&fs_info->commit_root_sem);
1889	} else {
1890		btrfs_get_tree_mod_seq(fs_info, &elem);
1891		walk_ctx.time_seq = elem.seq;
1892	}
1893
1894	ctx->use_path_cache = true;
1895
1896	/*
1897	 * We may have previously determined that the current leaf is shared.
1898	 * If it is, then we have a data extent that is shared due to a shared
1899	 * subtree (caused by snapshotting) and we don't need to check for data
1900	 * backrefs. If the leaf is not shared, then we must do backref walking
1901	 * to determine if the data extent is shared through reflinks.
1902	 */
1903	leaf_cached = lookup_backref_shared_cache(ctx, root,
1904						  ctx->curr_leaf_bytenr, 0,
1905						  &leaf_is_shared);
1906	if (leaf_cached && leaf_is_shared) {
1907		ret = 1;
1908		goto out_trans;
1909	}
1910
1911	walk_ctx.skip_inode_ref_list = true;
1912	walk_ctx.trans = trans;
1913	walk_ctx.fs_info = fs_info;
1914	walk_ctx.refs = &ctx->refs;
1915
1916	/* -1 means we are at the bytenr of the data extent. */
1917	level = -1;
1918	ULIST_ITER_INIT(&uiter);
1919	while (1) {
1920		const unsigned long prev_ref_count = ctx->refs.nnodes;
1921
1922		walk_ctx.bytenr = bytenr;
1923		ret = find_parent_nodes(&walk_ctx, &shared);
1924		if (ret == BACKREF_FOUND_SHARED ||
1925		    ret == BACKREF_FOUND_NOT_SHARED) {
1926			/* If shared, return 1, otherwise return 0. */
1927			ret = (ret == BACKREF_FOUND_SHARED) ? 1 : 0;
1928			if (level >= 0)
1929				store_backref_shared_cache(ctx, root, bytenr,
1930							   level, ret == 1);
1931			break;
1932		}
1933		if (ret < 0 && ret != -ENOENT)
1934			break;
1935		ret = 0;
1936
1937		/*
1938		 * More than one extent buffer (bytenr) may have been added to
1939		 * the ctx->refs ulist, in which case we have to check multiple
1940		 * tree paths in case the first one is not shared, so we can not
1941		 * use the path cache which is made for a single path. Multiple
1942		 * extent buffers at the current level happen when:
1943		 *
1944		 * 1) level -1, the data extent: If our data extent was not
1945		 *    directly shared (without multiple reference items), then
1946		 *    it might have a single reference item with a count > 1 for
1947		 *    the same offset, which means there are 2 (or more) file
1948		 *    extent items that point to the data extent - this happens
1949		 *    when a file extent item needs to be split and then one
1950		 *    item gets moved to another leaf due to a b+tree leaf split
1951		 *    when inserting some item. In this case the file extent
1952		 *    items may be located in different leaves and therefore
1953		 *    some of the leaves may be referenced through shared
1954		 *    subtrees while others are not. Since our extent buffer
1955		 *    cache only works for a single path (by far the most common
1956		 *    case and simpler to deal with), we can not use it if we
1957		 *    have multiple leaves (which implies multiple paths).
1958		 *
1959		 * 2) level >= 0, a tree node/leaf: We can have a mix of direct
1960		 *    and indirect references on a b+tree node/leaf, so we have
1961		 *    to check multiple paths, and the extent buffer (the
1962		 *    current bytenr) may be shared or not. One example is
1963		 *    during relocation as we may get a shared tree block ref
1964		 *    (direct ref) and a non-shared tree block ref (indirect
1965		 *    ref) for the same node/leaf.
1966		 */
1967		if ((ctx->refs.nnodes - prev_ref_count) > 1)
1968			ctx->use_path_cache = false;
1969
1970		if (level >= 0)
1971			store_backref_shared_cache(ctx, root, bytenr,
1972						   level, false);
1973		node = ulist_next(&ctx->refs, &uiter);
1974		if (!node)
1975			break;
1976		bytenr = node->val;
1977		if (ctx->use_path_cache) {
1978			bool is_shared;
1979			bool cached;
1980
1981			level++;
1982			cached = lookup_backref_shared_cache(ctx, root, bytenr,
1983							     level, &is_shared);
1984			if (cached) {
1985				ret = (is_shared ? 1 : 0);
1986				break;
1987			}
1988		}
1989		shared.share_count = 0;
1990		shared.have_delayed_delete_refs = false;
1991		cond_resched();
1992	}
1993
1994	/*
1995	 * If the path cache is disabled, then it means at some tree level we
1996	 * got multiple parents due to a mix of direct and indirect backrefs or
1997	 * multiple leaves with file extent items pointing to the same data
1998	 * extent. We have to invalidate the cache and cache only the sharedness
1999	 * result for the levels where we got only one node/reference.
2000	 */
2001	if (!ctx->use_path_cache) {
2002		int i = 0;
2003
2004		level--;
2005		if (ret >= 0 && level >= 0) {
2006			bytenr = ctx->path_cache_entries[level].bytenr;
2007			ctx->use_path_cache = true;
2008			store_backref_shared_cache(ctx, root, bytenr, level, ret);
2009			i = level + 1;
2010		}
2011
2012		for ( ; i < BTRFS_MAX_LEVEL; i++)
2013			ctx->path_cache_entries[i].bytenr = 0;
2014	}
2015
2016	/*
2017	 * Cache the sharedness result for the data extent if we know our inode
2018	 * has more than 1 file extent item that refers to the data extent.
2019	 */
2020	if (ret >= 0 && shared.self_ref_count > 1) {
2021		int slot = ctx->prev_extents_cache_slot;
2022
2023		ctx->prev_extents_cache[slot].bytenr = shared.data_bytenr;
2024		ctx->prev_extents_cache[slot].is_shared = (ret == 1);
2025
2026		slot = (slot + 1) % BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE;
2027		ctx->prev_extents_cache_slot = slot;
2028	}
2029
2030out_trans:
2031	if (trans) {
2032		btrfs_put_tree_mod_seq(fs_info, &elem);
2033		btrfs_end_transaction(trans);
2034	} else {
2035		up_read(&fs_info->commit_root_sem);
2036	}
2037out:
2038	ulist_release(&ctx->refs);
2039	ctx->prev_leaf_bytenr = ctx->curr_leaf_bytenr;
2040
2041	return ret;
2042}
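
/*
 * Example (illustrative sketch only, not part of the kernel tree): how a
 * fiemap-like caller might use the sharedness check. @disk_bytenr and
 * @extent_gen are assumed to come from a file extent item the caller is
 * currently processing.
 */
static int __maybe_unused example_check_shared(struct btrfs_inode *inode,
					       u64 disk_bytenr, u64 extent_gen)
{
	struct btrfs_backref_share_check_ctx *ctx;
	int ret;

	ctx = btrfs_alloc_backref_share_check_ctx();
	if (!ctx)
		return -ENOMEM;

	ret = btrfs_is_data_extent_shared(inode, disk_bytenr, extent_gen, ctx);
	if (ret == 1)
		pr_debug("extent %llu is shared\n", disk_bytenr);

	btrfs_free_backref_share_ctx(ctx);
	/* 0 means not shared, 1 means shared, < 0 is an error. */
	return ret;
}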
2043
2044int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
2045			  u64 start_off, struct btrfs_path *path,
2046			  struct btrfs_inode_extref **ret_extref,
2047			  u64 *found_off)
2048{
2049	int ret, slot;
2050	struct btrfs_key key;
2051	struct btrfs_key found_key;
2052	struct btrfs_inode_extref *extref;
2053	const struct extent_buffer *leaf;
2054	unsigned long ptr;
2055
2056	key.objectid = inode_objectid;
2057	key.type = BTRFS_INODE_EXTREF_KEY;
2058	key.offset = start_off;
2059
2060	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2061	if (ret < 0)
2062		return ret;
2063
2064	while (1) {
2065		leaf = path->nodes[0];
2066		slot = path->slots[0];
2067		if (slot >= btrfs_header_nritems(leaf)) {
2068			/*
2069			 * If the item at offset is not found,
2070			 * btrfs_search_slot will point us to the slot
2071			 * where it should be inserted. In our case
2072			 * that will be the slot directly before the
2073			 * next BTRFS_INODE_EXTREF_KEY item. In the case
2074			 * that we're pointing to the last slot in a
2075			 * leaf, we must move one leaf over.
2076			 */
2077			ret = btrfs_next_leaf(root, path);
2078			if (ret) {
2079				if (ret >= 1)
2080					ret = -ENOENT;
2081				break;
2082			}
2083			continue;
2084		}
2085
2086		btrfs_item_key_to_cpu(leaf, &found_key, slot);
2087
2088		/*
2089		 * Check that we're still looking at an extended ref key for
2090		 * this particular objectid. If we have different
2091		 * objectid or type then there are no more to be found
2092		 * in the tree and we can exit.
2093		 */
2094		ret = -ENOENT;
2095		if (found_key.objectid != inode_objectid)
2096			break;
2097		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
2098			break;
2099
2100		ret = 0;
2101		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
2102		extref = (struct btrfs_inode_extref *)ptr;
2103		*ret_extref = extref;
2104		if (found_off)
2105			*found_off = found_key.offset;
2106		break;
2107	}
2108
2109	return ret;
2110}
2111
2112/*
2113 * This iterates to turn a name (from iref/extref) into a full filesystem
2114 * path. Elements of the path are separated by '/' and the path is guaranteed
2115 * to be 0-terminated. The path is only given within the current file system.
2116 * Therefore, it never starts with a '/'. The caller is responsible for
2117 * providing "size" bytes in "dest". The dest buffer is filled backwards and,
2118 * finally, the start point of the resulting string is returned. This pointer
2119 * is normally within dest.
2120 * In case the path buffer would overflow, the pointer is decremented further
2121 * as if output was written to the buffer, though no more output is actually
2122 * generated. That way, the caller can determine how much space would be
2123 * required for the path to fit into the buffer. In that case, the returned
2124 * value will be smaller than dest. Callers must check this!
2125 */
2126char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
2127			u32 name_len, unsigned long name_off,
2128			struct extent_buffer *eb_in, u64 parent,
2129			char *dest, u32 size)
2130{
2131	int slot;
2132	u64 next_inum;
2133	int ret;
2134	s64 bytes_left = ((s64)size) - 1;
2135	struct extent_buffer *eb = eb_in;
2136	struct btrfs_key found_key;
2137	struct btrfs_inode_ref *iref;
2138
2139	if (bytes_left >= 0)
2140		dest[bytes_left] = '\0';
2141
2142	while (1) {
2143		bytes_left -= name_len;
2144		if (bytes_left >= 0)
2145			read_extent_buffer(eb, dest + bytes_left,
2146					   name_off, name_len);
2147		if (eb != eb_in) {
2148			if (!path->skip_locking)
2149				btrfs_tree_read_unlock(eb);
2150			free_extent_buffer(eb);
2151		}
2152		ret = btrfs_find_item(fs_root, path, parent, 0,
2153				BTRFS_INODE_REF_KEY, &found_key);
2154		if (ret > 0)
2155			ret = -ENOENT;
2156		if (ret)
2157			break;
2158
2159		next_inum = found_key.offset;
2160
2161		/* regular exit ahead */
2162		if (parent == next_inum)
2163			break;
2164
2165		slot = path->slots[0];
2166		eb = path->nodes[0];
2167		/* make sure we can use eb after releasing the path */
2168		if (eb != eb_in) {
2169			path->nodes[0] = NULL;
2170			path->locks[0] = 0;
2171		}
2172		btrfs_release_path(path);
2173		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2174
2175		name_len = btrfs_inode_ref_name_len(eb, iref);
2176		name_off = (unsigned long)(iref + 1);
2177
2178		parent = next_inum;
2179		--bytes_left;
2180		if (bytes_left >= 0)
2181			dest[bytes_left] = '/';
2182	}
2183
2184	btrfs_release_path(path);
2185
2186	if (ret)
2187		return ERR_PTR(ret);
2188
2189	return dest + bytes_left;
2190}
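
/*
 * Example (illustrative, not from the kernel tree): per the comment above,
 * a caller can detect a too-small buffer from the returned pointer and
 * compute the size that would have been needed:
 *
 *	start = btrfs_ref_to_path(fs_root, path, name_len, name_off,
 *				  eb, parent, dest, size);
 *	if (IS_ERR(start))
 *		return PTR_ERR(start);
 *	if (start < dest)
 *		needed = size + (dest - start);
 *
 * where "needed" is the buffer size that would have fit the whole path. The
 * variable names are only for illustration.
 */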
2191
2192/*
2193 * This makes the path point to the extent item for @logical (logical
2194 * EXTENT_ITEM *). Returns BTRFS_EXTENT_FLAG_DATA for data extents,
2195 * BTRFS_EXTENT_FLAG_TREE_BLOCK for tree blocks and <0 on error.
2196 */
2197int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
2198			struct btrfs_path *path, struct btrfs_key *found_key,
2199			u64 *flags_ret)
2200{
2201	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical);
2202	int ret;
2203	u64 flags;
2204	u64 size = 0;
2205	u32 item_size;
2206	const struct extent_buffer *eb;
2207	struct btrfs_extent_item *ei;
2208	struct btrfs_key key;
2209
2210	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2211		key.type = BTRFS_METADATA_ITEM_KEY;
2212	else
2213		key.type = BTRFS_EXTENT_ITEM_KEY;
2214	key.objectid = logical;
2215	key.offset = (u64)-1;
2216
2217	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2218	if (ret < 0)
2219		return ret;
2220	if (ret == 0) {
2221		/*
2222		 * Key with offset -1 found, there would have to exist an extent
2223		 * item with such offset, but this is out of the valid range.
2224		 */
2225		return -EUCLEAN;
2226	}
2227
2228	ret = btrfs_previous_extent_item(extent_root, path, 0);
2229	if (ret) {
2230		if (ret > 0)
2231			ret = -ENOENT;
2232		return ret;
2233	}
2234	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
2235	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
2236		size = fs_info->nodesize;
2237	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
2238		size = found_key->offset;
2239
2240	if (found_key->objectid > logical ||
2241	    found_key->objectid + size <= logical) {
2242		btrfs_debug(fs_info,
2243			"logical %llu is not within any extent", logical);
2244		return -ENOENT;
2245	}
2246
2247	eb = path->nodes[0];
2248	item_size = btrfs_item_size(eb, path->slots[0]);
2249
2250	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
2251	flags = btrfs_extent_flags(eb, ei);
2252
2253	btrfs_debug(fs_info,
2254		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
2255		 logical, logical - found_key->objectid, found_key->objectid,
2256		 found_key->offset, flags, item_size);
2257
2258	WARN_ON(!flags_ret);
2259	if (flags_ret) {
2260		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2261			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
2262		else if (flags & BTRFS_EXTENT_FLAG_DATA)
2263			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
2264		else
2265			BUG();
2266		return 0;
2267	}
2268
2269	return -EIO;
2270}
2271
2272/*
2273 * Helper function to iterate extent inline refs. ptr must point to a 0 value
2274 * for the first call and may be modified. It is used to track state.
2275 * If more refs exist, 0 is returned and the next call to
2276 * get_extent_inline_ref must pass the modified ptr parameter to get the
2277 * next ref. After the last ref was processed, 1 is returned.
2278 * Returns <0 on error.
2279 */
2280static int get_extent_inline_ref(unsigned long *ptr,
2281				 const struct extent_buffer *eb,
2282				 const struct btrfs_key *key,
2283				 const struct btrfs_extent_item *ei,
2284				 u32 item_size,
2285				 struct btrfs_extent_inline_ref **out_eiref,
2286				 int *out_type)
2287{
2288	unsigned long end;
2289	u64 flags;
2290	struct btrfs_tree_block_info *info;
2291
2292	if (!*ptr) {
2293		/* first call */
2294		flags = btrfs_extent_flags(eb, ei);
2295		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2296			if (key->type == BTRFS_METADATA_ITEM_KEY) {
2297				/* a skinny metadata extent */
2298				*out_eiref =
2299				     (struct btrfs_extent_inline_ref *)(ei + 1);
2300			} else {
2301				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
2302				info = (struct btrfs_tree_block_info *)(ei + 1);
2303				*out_eiref =
2304				   (struct btrfs_extent_inline_ref *)(info + 1);
2305			}
2306		} else {
2307			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
2308		}
2309		*ptr = (unsigned long)*out_eiref;
2310		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
2311			return -ENOENT;
2312	}
2313
2314	end = (unsigned long)ei + item_size;
2315	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
2316	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
2317						     BTRFS_REF_TYPE_ANY);
2318	if (*out_type == BTRFS_REF_TYPE_INVALID)
2319		return -EUCLEAN;
2320
2321	*ptr += btrfs_extent_inline_ref_size(*out_type);
2322	WARN_ON(*ptr > end);
2323	if (*ptr == end)
2324		return 1; /* last */
2325
2326	return 0;
2327}
2328
2329/*
2330 * Reads the tree block backref for an extent. Tree level and root are
2331 * returned through out_level and out_root. ptr must point to a 0 value for
2332 * the first call and may be modified (see the get_extent_inline_ref comment).
2333 * Returns 0 if data was provided, 1 if there was no more data to provide or
2334 * <0 on error.
2335 */
2336int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
2337			    struct btrfs_key *key, struct btrfs_extent_item *ei,
2338			    u32 item_size, u64 *out_root, u8 *out_level)
2339{
2340	int ret;
2341	int type;
2342	struct btrfs_extent_inline_ref *eiref;
2343
2344	if (*ptr == (unsigned long)-1)
2345		return 1;
2346
2347	while (1) {
2348		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
2349					      &eiref, &type);
2350		if (ret < 0)
2351			return ret;
2352
2353		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
2354		    type == BTRFS_SHARED_BLOCK_REF_KEY)
2355			break;
2356
2357		if (ret == 1)
2358			return 1;
2359	}
2360
2361	/* we can treat both ref types equally here */
2362	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
2363
2364	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
2365		struct btrfs_tree_block_info *info;
2366
2367		info = (struct btrfs_tree_block_info *)(ei + 1);
2368		*out_level = btrfs_tree_block_level(eb, info);
2369	} else {
2370		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
2371		*out_level = (u8)key->offset;
2372	}
2373
2374	if (ret == 1)
2375		*ptr = (unsigned long)-1;
2376
2377	return 0;
2378}
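
/*
 * Example (illustrative sketch only, not part of the kernel tree): callers
 * such as scrub drive tree_backref_for_extent() in a loop, starting with
 * *ptr == 0, until it returns non-zero. @eb, @key, @ei and @item_size are
 * assumed to describe an extent item found via extent_from_logical().
 */
static int __maybe_unused example_walk_tree_backrefs(struct extent_buffer *eb,
						     struct btrfs_key *key,
						     struct btrfs_extent_item *ei,
						     u32 item_size)
{
	unsigned long ptr = 0;
	u64 root;
	u8 level;
	int ret;

	do {
		ret = tree_backref_for_extent(&ptr, eb, key, ei, item_size,
					      &root, &level);
		if (ret < 0)
			return ret;
		if (ret == 0)
			pr_debug("tree backref: root %llu level %u\n",
				 root, level);
	} while (ret == 0);

	return 0;
}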
2379
2380static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
2381			     struct extent_inode_elem *inode_list,
2382			     u64 root, u64 extent_item_objectid,
2383			     iterate_extent_inodes_t *iterate, void *ctx)
2384{
2385	struct extent_inode_elem *eie;
2386	int ret = 0;
2387
2388	for (eie = inode_list; eie; eie = eie->next) {
2389		btrfs_debug(fs_info,
2390			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
2391			    extent_item_objectid, eie->inum,
2392			    eie->offset, root);
2393		ret = iterate(eie->inum, eie->offset, eie->num_bytes, root, ctx);
2394		if (ret) {
2395			btrfs_debug(fs_info,
2396				    "stopping iteration for %llu due to ret=%d",
2397				    extent_item_objectid, ret);
2398			break;
2399		}
2400	}
2401
2402	return ret;
2403}
2404
2405/*
2406 * Calls iterate() for every inode that references the extent identified by
2407 * the given parameters.
2408 * When the iterator function returns a non-zero value, iteration stops.
2409 */
2410int iterate_extent_inodes(struct btrfs_backref_walk_ctx *ctx,
2411			  bool search_commit_root,
2412			  iterate_extent_inodes_t *iterate, void *user_ctx)
2413{
2414	int ret;
2415	struct ulist *refs;
2416	struct ulist_node *ref_node;
2417	struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem);
2418	struct ulist_iterator ref_uiter;
2419
2420	btrfs_debug(ctx->fs_info, "resolving all inodes for extent %llu",
2421		    ctx->bytenr);
2422
2423	ASSERT(ctx->trans == NULL);
2424	ASSERT(ctx->roots == NULL);
2425
2426	if (!search_commit_root) {
2427		struct btrfs_trans_handle *trans;
2428
2429		trans = btrfs_attach_transaction(ctx->fs_info->tree_root);
2430		if (IS_ERR(trans)) {
2431			if (PTR_ERR(trans) != -ENOENT &&
2432			    PTR_ERR(trans) != -EROFS)
2433				return PTR_ERR(trans);
2434			trans = NULL;
2435		}
2436		ctx->trans = trans;
2437	}
2438
2439	if (ctx->trans) {
2440		btrfs_get_tree_mod_seq(ctx->fs_info, &seq_elem);
2441		ctx->time_seq = seq_elem.seq;
2442	} else {
2443		down_read(&ctx->fs_info->commit_root_sem);
2444	}
2445
2446	ret = btrfs_find_all_leafs(ctx);
2447	if (ret)
2448		goto out;
2449	refs = ctx->refs;
2450	ctx->refs = NULL;
2451
2452	ULIST_ITER_INIT(&ref_uiter);
2453	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
2454		const u64 leaf_bytenr = ref_node->val;
2455		struct ulist_node *root_node;
2456		struct ulist_iterator root_uiter;
2457		struct extent_inode_elem *inode_list;
2458
2459		inode_list = (struct extent_inode_elem *)(uintptr_t)ref_node->aux;
2460
2461		if (ctx->cache_lookup) {
2462			const u64 *root_ids;
2463			int root_count;
2464			bool cached;
2465
2466			cached = ctx->cache_lookup(leaf_bytenr, ctx->user_ctx,
2467						   &root_ids, &root_count);
2468			if (cached) {
2469				for (int i = 0; i < root_count; i++) {
2470					ret = iterate_leaf_refs(ctx->fs_info,
2471								inode_list,
2472								root_ids[i],
2473								leaf_bytenr,
2474								iterate,
2475								user_ctx);
2476					if (ret)
2477						break;
2478				}
2479				continue;
2480			}
2481		}
2482
2483		if (!ctx->roots) {
2484			ctx->roots = ulist_alloc(GFP_NOFS);
2485			if (!ctx->roots) {
2486				ret = -ENOMEM;
2487				break;
2488			}
2489		}
2490
2491		ctx->bytenr = leaf_bytenr;
2492		ret = btrfs_find_all_roots_safe(ctx);
2493		if (ret)
2494			break;
2495
2496		if (ctx->cache_store)
2497			ctx->cache_store(leaf_bytenr, ctx->roots, ctx->user_ctx);
2498
2499		ULIST_ITER_INIT(&root_uiter);
2500		while (!ret && (root_node = ulist_next(ctx->roots, &root_uiter))) {
2501			btrfs_debug(ctx->fs_info,
2502				    "root %llu references leaf %llu, data list %#llx",
2503				    root_node->val, ref_node->val,
2504				    ref_node->aux);
2505			ret = iterate_leaf_refs(ctx->fs_info, inode_list,
2506						root_node->val, ctx->bytenr,
2507						iterate, user_ctx);
2508		}
2509		ulist_reinit(ctx->roots);
2510	}
2511
2512	free_leaf_list(refs);
2513out:
2514	if (ctx->trans) {
2515		btrfs_put_tree_mod_seq(ctx->fs_info, &seq_elem);
2516		btrfs_end_transaction(ctx->trans);
2517		ctx->trans = NULL;
2518	} else {
2519		up_read(&ctx->fs_info->commit_root_sem);
2520	}
2521
2522	ulist_free(ctx->roots);
2523	ctx->roots = NULL;
2524
2525	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP)
2526		ret = 0;
2527
2528	return ret;
2529}
2530
2531static int build_ino_list(u64 inum, u64 offset, u64 num_bytes, u64 root, void *ctx)
2532{
2533	struct btrfs_data_container *inodes = ctx;
2534	const size_t c = 3 * sizeof(u64);
2535
2536	if (inodes->bytes_left >= c) {
2537		inodes->bytes_left -= c;
2538		inodes->val[inodes->elem_cnt] = inum;
2539		inodes->val[inodes->elem_cnt + 1] = offset;
2540		inodes->val[inodes->elem_cnt + 2] = root;
2541		inodes->elem_cnt += 3;
2542	} else {
2543		inodes->bytes_missing += c - inodes->bytes_left;
2544		inodes->bytes_left = 0;
2545		inodes->elem_missed += 3;
2546	}
2547
2548	return 0;
2549}
2550
2551int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2552				struct btrfs_path *path,
2553				void *ctx, bool ignore_offset)
2554{
2555	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
2556	int ret;
2557	u64 flags = 0;
2558	struct btrfs_key found_key;
2559	int search_commit_root = path->search_commit_root;
2560
2561	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
2562	btrfs_release_path(path);
2563	if (ret < 0)
2564		return ret;
2565	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2566		return -EINVAL;
2567
2568	walk_ctx.bytenr = found_key.objectid;
2569	if (ignore_offset)
2570		walk_ctx.ignore_extent_item_pos = true;
2571	else
2572		walk_ctx.extent_item_pos = logical - found_key.objectid;
2573	walk_ctx.fs_info = fs_info;
2574
2575	return iterate_extent_inodes(&walk_ctx, search_commit_root,
2576				     build_ino_list, ctx);
2577}
2578
2579static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2580			 struct extent_buffer *eb, struct inode_fs_paths *ipath);
2581
2582static int iterate_inode_refs(u64 inum, struct inode_fs_paths *ipath)
2583{
2584	int ret = 0;
2585	int slot;
2586	u32 cur;
2587	u32 len;
2588	u32 name_len;
2589	u64 parent = 0;
2590	int found = 0;
2591	struct btrfs_root *fs_root = ipath->fs_root;
2592	struct btrfs_path *path = ipath->btrfs_path;
2593	struct extent_buffer *eb;
2594	struct btrfs_inode_ref *iref;
2595	struct btrfs_key found_key;
2596
2597	while (!ret) {
2598		ret = btrfs_find_item(fs_root, path, inum,
2599				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2600				&found_key);
2601
2602		if (ret < 0)
2603			break;
2604		if (ret) {
2605			ret = found ? 0 : -ENOENT;
2606			break;
2607		}
2608		++found;
2609
2610		parent = found_key.offset;
2611		slot = path->slots[0];
2612		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2613		if (!eb) {
2614			ret = -ENOMEM;
2615			break;
2616		}
2617		btrfs_release_path(path);
2618
2619		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2620
2621		for (cur = 0; cur < btrfs_item_size(eb, slot); cur += len) {
2622			name_len = btrfs_inode_ref_name_len(eb, iref);
2623			/* path must be released before calling iterate()! */
2624			btrfs_debug(fs_root->fs_info,
2625				"following ref at offset %u for inode %llu in tree %llu",
2626				cur, found_key.objectid,
2627				btrfs_root_id(fs_root));
2628			ret = inode_to_path(parent, name_len,
2629				      (unsigned long)(iref + 1), eb, ipath);
2630			if (ret)
2631				break;
2632			len = sizeof(*iref) + name_len;
2633			iref = (struct btrfs_inode_ref *)((char *)iref + len);
2634		}
2635		free_extent_buffer(eb);
2636	}
2637
2638	btrfs_release_path(path);
2639
2640	return ret;
2641}
2642
2643static int iterate_inode_extrefs(u64 inum, struct inode_fs_paths *ipath)
2644{
2645	int ret;
2646	int slot;
2647	u64 offset = 0;
2648	u64 parent;
2649	int found = 0;
2650	struct btrfs_root *fs_root = ipath->fs_root;
2651	struct btrfs_path *path = ipath->btrfs_path;
2652	struct extent_buffer *eb;
2653	struct btrfs_inode_extref *extref;
2654	u32 item_size;
2655	u32 cur_offset;
2656	unsigned long ptr;
2657
2658	while (1) {
2659		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2660					    &offset);
2661		if (ret < 0)
2662			break;
2663		if (ret) {
2664			ret = found ? 0 : -ENOENT;
2665			break;
2666		}
2667		++found;
2668
2669		slot = path->slots[0];
2670		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2671		if (!eb) {
2672			ret = -ENOMEM;
2673			break;
2674		}
2675		btrfs_release_path(path);
2676
2677		item_size = btrfs_item_size(eb, slot);
2678		ptr = btrfs_item_ptr_offset(eb, slot);
2679		cur_offset = 0;
2680
2681		while (cur_offset < item_size) {
2682			u32 name_len;
2683
2684			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2685			parent = btrfs_inode_extref_parent(eb, extref);
2686			name_len = btrfs_inode_extref_name_len(eb, extref);
2687			ret = inode_to_path(parent, name_len,
2688				      (unsigned long)&extref->name, eb, ipath);
2689			if (ret)
2690				break;
2691
2692			cur_offset += btrfs_inode_extref_name_len(eb, extref);
2693			cur_offset += sizeof(*extref);
2694		}
2695		free_extent_buffer(eb);
2696
2697		offset++;
2698	}
2699
2700	btrfs_release_path(path);
2701
2702	return ret;
2703}
2704
2705/*
2706 * Returns 0 if the path could be dumped (possibly truncated).
2707 * Returns <0 in case of an error.
2708 */
2709static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2710			 struct extent_buffer *eb, struct inode_fs_paths *ipath)
2711{
2712	char *fspath;
2713	char *fspath_min;
2714	int i = ipath->fspath->elem_cnt;
2715	const int s_ptr = sizeof(char *);
2716	u32 bytes_left;
2717
2718	bytes_left = ipath->fspath->bytes_left > s_ptr ?
2719					ipath->fspath->bytes_left - s_ptr : 0;
2720
2721	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2722	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2723				   name_off, eb, inum, fspath_min, bytes_left);
2724	if (IS_ERR(fspath))
2725		return PTR_ERR(fspath);
2726
2727	if (fspath > fspath_min) {
2728		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2729		++ipath->fspath->elem_cnt;
2730		ipath->fspath->bytes_left = fspath - fspath_min;
2731	} else {
2732		++ipath->fspath->elem_missed;
2733		ipath->fspath->bytes_missing += fspath_min - fspath;
2734		ipath->fspath->bytes_left = 0;
2735	}
2736
2737	return 0;
2738}
2739
2740/*
2741 * This dumps all file system paths to the inode into the ipath struct,
2742 * provided it has been created large enough. Each path is zero-terminated
2743 * and accessed from ipath->fspath->val[i].
2744 * When it returns, there are ipath->fspath->elem_cnt paths available in
2745 * ipath->fspath->val[]. When the allocated space wasn't sufficient, the
2746 * number of missed paths is recorded in ipath->fspath->elem_missed,
2747 * otherwise it's zero. ipath->fspath->bytes_missing holds the number of
2748 * bytes that would have been needed to return all paths.
2749 */
2750int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2751{
2752	int ret;
2753	int found_refs = 0;
2754
2755	ret = iterate_inode_refs(inum, ipath);
2756	if (!ret)
2757		++found_refs;
2758	else if (ret != -ENOENT)
2759		return ret;
2760
2761	ret = iterate_inode_extrefs(inum, ipath);
2762	if (ret == -ENOENT && found_refs)
2763		return 0;
2764
2765	return ret;
2766}
2767
2768struct btrfs_data_container *init_data_container(u32 total_bytes)
2769{
2770	struct btrfs_data_container *data;
2771	size_t alloc_bytes;
2772
2773	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2774	data = kvzalloc(alloc_bytes, GFP_KERNEL);
2775	if (!data)
2776		return ERR_PTR(-ENOMEM);
2777
2778	if (total_bytes >= sizeof(*data))
2779		data->bytes_left = total_bytes - sizeof(*data);
2780	else
2781		data->bytes_missing = sizeof(*data) - total_bytes;
2782
2783	return data;
2784}
2785
2786/*
2787 * Allocates space to return multiple file system paths for an inode.
2788 * total_bytes to allocate are passed, note that space usable for actual path
2789 * information will be total_bytes - sizeof(struct btrfs_data_container).
2790 * The returned pointer must be freed with free_ipath() in the end.
2791 */
2792struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2793					struct btrfs_path *path)
2794{
2795	struct inode_fs_paths *ifp;
2796	struct btrfs_data_container *fspath;
2797
2798	fspath = init_data_container(total_bytes);
2799	if (IS_ERR(fspath))
2800		return ERR_CAST(fspath);
2801
2802	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2803	if (!ifp) {
2804		kvfree(fspath);
2805		return ERR_PTR(-ENOMEM);
2806	}
2807
2808	ifp->btrfs_path = path;
2809	ifp->fspath = fspath;
2810	ifp->fs_root = fs_root;
2811
2812	return ifp;
2813}
2814
2815void free_ipath(struct inode_fs_paths *ipath)
2816{
2817	if (!ipath)
2818		return;
2819	kvfree(ipath->fspath);
2820	kfree(ipath);
2821}
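
/*
 * Example (illustrative sketch only, not part of the kernel tree): the usual
 * ipath workflow, similar to what the INO_PATHS ioctl does. The 4096-byte
 * container size is an arbitrary choice for the example.
 */
static int __maybe_unused example_dump_paths(struct btrfs_root *fs_root,
					     u64 inum)
{
	struct btrfs_path *path;
	struct inode_fs_paths *ipath;
	int ret;
	u32 i;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		btrfs_free_path(path);
		return PTR_ERR(ipath);
	}

	ret = paths_from_inode(inum, ipath);
	if (!ret)
		for (i = 0; i < ipath->fspath->elem_cnt; i++)
			pr_debug("path %u: %s\n", i,
				 (char *)(unsigned long)ipath->fspath->val[i]);

	free_ipath(ipath);
	btrfs_free_path(path);
	return ret;
}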
2822
2823struct btrfs_backref_iter *btrfs_backref_iter_alloc(struct btrfs_fs_info *fs_info)
2824{
2825	struct btrfs_backref_iter *ret;
2826
2827	ret = kzalloc(sizeof(*ret), GFP_NOFS);
2828	if (!ret)
2829		return NULL;
2830
2831	ret->path = btrfs_alloc_path();
2832	if (!ret->path) {
2833		kfree(ret);
2834		return NULL;
2835	}
2836
2837	/* The current backref iterator only supports iteration in the commit root */
2838	ret->path->search_commit_root = 1;
2839	ret->path->skip_locking = 1;
2840	ret->fs_info = fs_info;
2841
2842	return ret;
2843}
2844
2845static void btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
2846{
2847	iter->bytenr = 0;
2848	iter->item_ptr = 0;
2849	iter->cur_ptr = 0;
2850	iter->end_ptr = 0;
2851	btrfs_release_path(iter->path);
2852	memset(&iter->cur_key, 0, sizeof(iter->cur_key));
2853}
2854
2855int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
2856{
2857	struct btrfs_fs_info *fs_info = iter->fs_info;
2858	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
2859	struct btrfs_path *path = iter->path;
2860	struct btrfs_extent_item *ei;
2861	struct btrfs_key key;
2862	int ret;
2863
2864	key.objectid = bytenr;
2865	key.type = BTRFS_METADATA_ITEM_KEY;
2866	key.offset = (u64)-1;
2867	iter->bytenr = bytenr;
2868
2869	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2870	if (ret < 0)
2871		return ret;
2872	if (ret == 0) {
2873		/*
2874		 * Key with offset -1 found, there would have to exist an extent
2875		 * item with such offset, but this is out of the valid range.
2876		 */
2877		ret = -EUCLEAN;
2878		goto release;
2879	}
2880	if (path->slots[0] == 0) {
2881		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
2882		ret = -EUCLEAN;
2883		goto release;
2884	}
2885	path->slots[0]--;
2886
2887	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2888	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
2889	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
2890		ret = -ENOENT;
2891		goto release;
2892	}
2893	memcpy(&iter->cur_key, &key, sizeof(key));
2894	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2895						    path->slots[0]);
2896	iter->end_ptr = (u32)(iter->item_ptr +
2897			btrfs_item_size(path->nodes[0], path->slots[0]));
2898	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2899			    struct btrfs_extent_item);
2900
2901	/*
2902	 * Only iteration of tree backrefs is supported yet.
2903	 *
2904	 * This is an extra precaution for non skinny-metadata, where
2905	 * EXTENT_ITEM is also used for tree blocks, so we can only use the
2906	 * extent flags to determine if it's a tree block.
2907	 */
2908	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2909		ret = -ENOTSUPP;
2910		goto release;
2911	}
2912	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2913
2914	/* If there is no inline backref, go search for keyed backref */
2915	if (iter->cur_ptr >= iter->end_ptr) {
2916		ret = btrfs_next_item(extent_root, path);
2917
2918		/* No inline nor keyed ref */
2919		if (ret > 0) {
2920			ret = -ENOENT;
2921			goto release;
2922		}
2923		if (ret < 0)
2924			goto release;
2925
2926		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2927				path->slots[0]);
2928		if (iter->cur_key.objectid != bytenr ||
2929		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2930		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2931			ret = -ENOENT;
2932			goto release;
2933		}
2934		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2935							   path->slots[0]);
2936		iter->item_ptr = iter->cur_ptr;
2937		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size(
2938				      path->nodes[0], path->slots[0]));
2939	}
2940
2941	return 0;
2942release:
2943	btrfs_backref_iter_release(iter);
2944	return ret;
2945}
2946
2947static bool btrfs_backref_iter_is_inline_ref(struct btrfs_backref_iter *iter)
2948{
2949	if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY ||
2950	    iter->cur_key.type == BTRFS_METADATA_ITEM_KEY)
2951		return true;
2952	return false;
2953}
2954
2955/*
2956 * Go to the next backref item of the current bytenr, which can be either
2957 * inlined or keyed.
2958 *
2959 * The caller needs to check whether it's an inline ref or not via
2960 * iter->cur_key.
2961 *
2962 * Return 0 if we got the next backref without problem. Return >0 if there
2963 * is no extra backref for this bytenr. Return <0 if something went wrong.
2964 */
2965int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
2966{
2967	struct extent_buffer *eb = iter->path->nodes[0];
2968	struct btrfs_root *extent_root;
2969	struct btrfs_path *path = iter->path;
2970	struct btrfs_extent_inline_ref *iref;
2971	int ret;
2972	u32 size;
2973
2974	if (btrfs_backref_iter_is_inline_ref(iter)) {
2975		/* We're still inside the inline refs */
2976		ASSERT(iter->cur_ptr < iter->end_ptr);
2977
2978		if (btrfs_backref_has_tree_block_info(iter)) {
2979			/* First tree block info */
2980			size = sizeof(struct btrfs_tree_block_info);
2981		} else {
2982			/* Use inline ref type to determine the size */
2983			int type;
2984
2985			iref = (struct btrfs_extent_inline_ref *)
2986				((unsigned long)iter->cur_ptr);
2987			type = btrfs_extent_inline_ref_type(eb, iref);
2988
2989			size = btrfs_extent_inline_ref_size(type);
2990		}
2991		iter->cur_ptr += size;
2992		if (iter->cur_ptr < iter->end_ptr)
2993			return 0;
2994
2995		/* All inline items iterated, fall through */
2996	}
2997
2998	/* We're at keyed items, there is no inline item, go to the next one */
2999	extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr);
3000	ret = btrfs_next_item(extent_root, iter->path);
3001	if (ret)
3002		return ret;
3003
3004	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
3005	if (iter->cur_key.objectid != iter->bytenr ||
3006	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
3007	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
3008		return 1;
3009	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
3010					path->slots[0]);
3011	iter->cur_ptr = iter->item_ptr;
3012	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0],
3013						path->slots[0]);
3014	return 0;
3015}
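
/*
 * Example (illustrative sketch only, not part of the kernel tree): iterating
 * all tree backrefs of a tree block, roughly the way relocation drives the
 * iterator.
 */
static int __maybe_unused example_iterate_backrefs(struct btrfs_fs_info *fs_info,
						   u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	int ret;

	iter = btrfs_backref_iter_alloc(fs_info);
	if (!iter)
		return -ENOMEM;

	for (ret = btrfs_backref_iter_start(iter, bytenr);
	     ret == 0; ret = btrfs_backref_iter_next(iter)) {
		/* iter->cur_key tells whether the ref is inline or keyed. */
		pr_debug("backref key (%llu %u %llu)\n",
			 iter->cur_key.objectid, iter->cur_key.type,
			 iter->cur_key.offset);
	}

	/* ret > 0 only means we ran out of backrefs for this bytenr. */
	if (ret > 0)
		ret = 0;
	btrfs_backref_iter_free(iter);
	return ret;
}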
3016
3017void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
3018			      struct btrfs_backref_cache *cache, bool is_reloc)
3019{
3020	int i;
3021
3022	cache->rb_root = RB_ROOT;
3023	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
3024		INIT_LIST_HEAD(&cache->pending[i]);
3025	INIT_LIST_HEAD(&cache->changed);
3026	INIT_LIST_HEAD(&cache->detached);
3027	INIT_LIST_HEAD(&cache->leaves);
3028	INIT_LIST_HEAD(&cache->pending_edge);
3029	INIT_LIST_HEAD(&cache->useless_node);
3030	cache->fs_info = fs_info;
3031	cache->is_reloc = is_reloc;
3032}
3033
3034struct btrfs_backref_node *btrfs_backref_alloc_node(
3035		struct btrfs_backref_cache *cache, u64 bytenr, int level)
3036{
3037	struct btrfs_backref_node *node;
3038
3039	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
3040	node = kzalloc(sizeof(*node), GFP_NOFS);
3041	if (!node)
3042		return node;
3043
3044	INIT_LIST_HEAD(&node->list);
3045	INIT_LIST_HEAD(&node->upper);
3046	INIT_LIST_HEAD(&node->lower);
3047	RB_CLEAR_NODE(&node->rb_node);
3048	cache->nr_nodes++;
3049	node->level = level;
3050	node->bytenr = bytenr;
3051
3052	return node;
3053}
3054
3055void btrfs_backref_free_node(struct btrfs_backref_cache *cache,
3056			     struct btrfs_backref_node *node)
3057{
3058	if (node) {
3059		ASSERT(list_empty(&node->list));
3060		ASSERT(list_empty(&node->lower));
3061		ASSERT(node->eb == NULL);
3062		cache->nr_nodes--;
3063		btrfs_put_root(node->root);
3064		kfree(node);
3065	}
3066}
3067
3068struct btrfs_backref_edge *btrfs_backref_alloc_edge(
3069		struct btrfs_backref_cache *cache)
3070{
3071	struct btrfs_backref_edge *edge;
3072
3073	edge = kzalloc(sizeof(*edge), GFP_NOFS);
3074	if (edge)
3075		cache->nr_edges++;
3076	return edge;
3077}
3078
3079void btrfs_backref_free_edge(struct btrfs_backref_cache *cache,
3080			     struct btrfs_backref_edge *edge)
3081{
3082	if (edge) {
3083		cache->nr_edges--;
3084		kfree(edge);
3085	}
3086}
3087
3088void btrfs_backref_unlock_node_buffer(struct btrfs_backref_node *node)
3089{
3090	if (node->locked) {
3091		btrfs_tree_unlock(node->eb);
3092		node->locked = 0;
3093	}
3094}
3095
3096void btrfs_backref_drop_node_buffer(struct btrfs_backref_node *node)
3097{
3098	if (node->eb) {
3099		btrfs_backref_unlock_node_buffer(node);
3100		free_extent_buffer(node->eb);
3101		node->eb = NULL;
3102	}
3103}
3104
3105/*
3106 * Drop the backref node from the cache without cleaning up its child
3107 * edges.
3108 *
3109 * This can only be called on a node without parent edges.
3110 * The child edges are still kept as is.
3111 */
3112void btrfs_backref_drop_node(struct btrfs_backref_cache *tree,
3113			     struct btrfs_backref_node *node)
3114{
3115	ASSERT(list_empty(&node->upper));
3116
3117	btrfs_backref_drop_node_buffer(node);
3118	list_del_init(&node->list);
3119	list_del_init(&node->lower);
3120	if (!RB_EMPTY_NODE(&node->rb_node))
3121		rb_erase(&node->rb_node, &tree->rb_root);
3122	btrfs_backref_free_node(tree, node);
3123}
3124
3125/*
3126 * Drop the backref node from cache, also cleaning up all its
3127 * upper edges and any uncached nodes in the path.
3128 *
3129 * This cleanup happens bottom up, thus the node should either
3130 * be the lowest node in the cache or a detached node.
3131 */
3132void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
3133				struct btrfs_backref_node *node)
3134{
3135	struct btrfs_backref_node *upper;
3136	struct btrfs_backref_edge *edge;
3137
3138	if (!node)
3139		return;
3140
3141	BUG_ON(!node->lowest && !node->detached);
3142	while (!list_empty(&node->upper)) {
3143		edge = list_entry(node->upper.next, struct btrfs_backref_edge,
3144				  list[LOWER]);
3145		upper = edge->node[UPPER];
3146		list_del(&edge->list[LOWER]);
3147		list_del(&edge->list[UPPER]);
3148		btrfs_backref_free_edge(cache, edge);
3149
3150		/*
3151		 * Add the node to the leaf node list if no other child block
3152		 * is cached.
3153		 */
3154		if (list_empty(&upper->lower)) {
3155			list_add_tail(&upper->lower, &cache->leaves);
3156			upper->lowest = 1;
3157		}
3158	}
3159
3160	btrfs_backref_drop_node(cache, node);
3161}
3162
3163/*
3164 * Release all nodes/edges from the current cache.
3165 */
3166void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
3167{
3168	struct btrfs_backref_node *node;
3169	int i;
3170
3171	while (!list_empty(&cache->detached)) {
3172		node = list_entry(cache->detached.next,
3173				  struct btrfs_backref_node, list);
3174		btrfs_backref_cleanup_node(cache, node);
3175	}
3176
3177	while (!list_empty(&cache->leaves)) {
3178		node = list_entry(cache->leaves.next,
3179				  struct btrfs_backref_node, lower);
3180		btrfs_backref_cleanup_node(cache, node);
3181	}
3182
3183	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
3184		while (!list_empty(&cache->pending[i])) {
3185			node = list_first_entry(&cache->pending[i],
3186						struct btrfs_backref_node,
3187						list);
3188			btrfs_backref_cleanup_node(cache, node);
3189		}
3190	}
3191	ASSERT(list_empty(&cache->pending_edge));
3192	ASSERT(list_empty(&cache->useless_node));
3193	ASSERT(list_empty(&cache->changed));
3194	ASSERT(list_empty(&cache->detached));
3195	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
3196	ASSERT(!cache->nr_nodes);
3197	ASSERT(!cache->nr_edges);
3198}
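
/*
 * Example (illustrative, not from the kernel tree): the expected lifecycle of
 * a backref cache, as relocation uses it:
 *
 *	struct btrfs_backref_cache cache;
 *
 *	btrfs_backref_init_cache(fs_info, &cache, true);
 *	(build the backref tree via btrfs_backref_add_tree_node() and
 *	 btrfs_backref_finish_upper_links(), then consume it)
 *	btrfs_backref_release_cache(&cache);
 *
 * Relocation actually embeds the cache in its reloc_control, the stack
 * variable here is just for illustration.
 */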
3199
3200void btrfs_backref_link_edge(struct btrfs_backref_edge *edge,
3201			     struct btrfs_backref_node *lower,
3202			     struct btrfs_backref_node *upper,
3203			     int link_which)
3204{
3205	ASSERT(upper && lower && upper->level == lower->level + 1);
3206	edge->node[LOWER] = lower;
3207	edge->node[UPPER] = upper;
3208	if (link_which & LINK_LOWER)
3209		list_add_tail(&edge->list[LOWER], &lower->upper);
3210	if (link_which & LINK_UPPER)
3211		list_add_tail(&edge->list[UPPER], &upper->lower);
3212}
3213/*
3214 * Handle direct tree backref
3215 *
3216 * Direct tree backref means the backref item shows its parent bytenr
3217 * directly. This is for the SHARED_BLOCK_REF backref (keyed or inlined).
3218 *
3219 * @ref_key:	The converted backref key.
3220 *		For keyed backref, it's the item key.
3221 *		For inlined backref, objectid is the bytenr,
3222 *		type is btrfs_inline_ref_type, offset is
3223 *		btrfs_inline_ref_offset.
3224 */
3225static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
3226				      struct btrfs_key *ref_key,
3227				      struct btrfs_backref_node *cur)
3228{
3229	struct btrfs_backref_edge *edge;
3230	struct btrfs_backref_node *upper;
3231	struct rb_node *rb_node;
3232
3233	ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
3234
3235	/* Only reloc root uses backref pointing to itself */
3236	if (ref_key->objectid == ref_key->offset) {
3237		struct btrfs_root *root;
3238
3239		cur->is_reloc_root = 1;
3240		/* Only reloc backref cache cares about a specific root */
3241		if (cache->is_reloc) {
3242			root = find_reloc_root(cache->fs_info, cur->bytenr);
3243			if (!root)
3244				return -ENOENT;
3245			cur->root = root;
3246		} else {
3247			/*
3248			 * For generic purpose backref cache, reloc root node
3249			 * is useless.
3250			 */
3251			list_add(&cur->list, &cache->useless_node);
3252		}
3253		return 0;
3254	}
3255
3256	edge = btrfs_backref_alloc_edge(cache);
3257	if (!edge)
3258		return -ENOMEM;
3259
3260	rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
3261	if (!rb_node) {
3262		/* Parent node not yet cached */
3263		upper = btrfs_backref_alloc_node(cache, ref_key->offset,
3264					   cur->level + 1);
3265		if (!upper) {
3266			btrfs_backref_free_edge(cache, edge);
3267			return -ENOMEM;
3268		}
3269
3270		/*
3271		 * Backrefs for the upper level block aren't cached, add the
3272		 * block to the pending list
3273		 */
3274		list_add_tail(&edge->list[UPPER], &cache->pending_edge);
3275	} else {
3276		/* Parent node already cached */
3277		upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
3278		ASSERT(upper->checked);
3279		INIT_LIST_HEAD(&edge->list[UPPER]);
3280	}
3281	btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
3282	return 0;
3283}
3284
3285/*
3286 * Handle indirect tree backref
3287 *
3288 * Indirect tree backref means we only know which tree the node belongs to.
3289 * We still need to do a tree search to find out the parents. This is for the
3290 * TREE_BLOCK_REF backref (keyed or inlined).
3291 *
3292 * @trans:	Transaction handle.
3293 * @ref_key:	The same as @ref_key in handle_direct_tree_backref()
3294 * @tree_key:	The first key of this tree block.
3295 * @path:	A clean (released) path, to avoid allocating a path every time
3296 *		the function gets called.
3297 */
3298static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans,
3299					struct btrfs_backref_cache *cache,
3300					struct btrfs_path *path,
3301					struct btrfs_key *ref_key,
3302					struct btrfs_key *tree_key,
3303					struct btrfs_backref_node *cur)
3304{
3305	struct btrfs_fs_info *fs_info = cache->fs_info;
3306	struct btrfs_backref_node *upper;
3307	struct btrfs_backref_node *lower;
3308	struct btrfs_backref_edge *edge;
3309	struct extent_buffer *eb;
3310	struct btrfs_root *root;
3311	struct rb_node *rb_node;
3312	int level;
3313	bool need_check = true;
3314	int ret;
3315
3316	root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
3317	if (IS_ERR(root))
3318		return PTR_ERR(root);
3319	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
3320		cur->cowonly = 1;
3321
3322	if (btrfs_root_level(&root->root_item) == cur->level) {
3323		/* Tree root */
3324		ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
3325		/*
3326		 * For reloc backref cache, we may ignore reloc root.  But for
3327		 * general purpose backref cache, we can't rely on
3328		 * btrfs_should_ignore_reloc_root() as it may conflict with
3329		 * current running relocation and lead to missing root.
3330		 *
3331		 * For general purpose backref cache, reloc root detection is
3332		 * completely relying on direct backref (key->offset is parent
3333		 * bytenr), thus only do such check for reloc cache.
3334		 */
3335		if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
3336			btrfs_put_root(root);
3337			list_add(&cur->list, &cache->useless_node);
3338		} else {
3339			cur->root = root;
3340		}
3341		return 0;
3342	}
3343
3344	level = cur->level + 1;
3345
3346	/* Search the tree to find parent blocks referring to the block */
3347	path->search_commit_root = 1;
3348	path->skip_locking = 1;
3349	path->lowest_level = level;
3350	ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
3351	path->lowest_level = 0;
3352	if (ret < 0) {
3353		btrfs_put_root(root);
3354		return ret;
3355	}
3356	if (ret > 0 && path->slots[level] > 0)
3357		path->slots[level]--;
3358
3359	eb = path->nodes[level];
3360	if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
3361		btrfs_err(fs_info,
3362"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
3363			  cur->bytenr, level - 1, btrfs_root_id(root),
3364			  tree_key->objectid, tree_key->type, tree_key->offset);
3365		btrfs_put_root(root);
3366		ret = -ENOENT;
3367		goto out;
3368	}
3369	lower = cur;
3370
3371	/* Add all nodes and edges in the path */
3372	for (; level < BTRFS_MAX_LEVEL; level++) {
3373		if (!path->nodes[level]) {
3374			ASSERT(btrfs_root_bytenr(&root->root_item) ==
3375			       lower->bytenr);
3376			/* Same as previous should_ignore_reloc_root() call */
3377			if (btrfs_should_ignore_reloc_root(root) &&
3378			    cache->is_reloc) {
3379				btrfs_put_root(root);
3380				list_add(&lower->list, &cache->useless_node);
3381			} else {
3382				lower->root = root;
3383			}
3384			break;
3385		}
3386
3387		edge = btrfs_backref_alloc_edge(cache);
3388		if (!edge) {
3389			btrfs_put_root(root);
3390			ret = -ENOMEM;
3391			goto out;
3392		}
3393
3394		eb = path->nodes[level];
3395		rb_node = rb_simple_search(&cache->rb_root, eb->start);
3396		if (!rb_node) {
3397			upper = btrfs_backref_alloc_node(cache, eb->start,
3398							 lower->level + 1);
3399			if (!upper) {
3400				btrfs_put_root(root);
3401				btrfs_backref_free_edge(cache, edge);
3402				ret = -ENOMEM;
3403				goto out;
3404			}
3405			upper->owner = btrfs_header_owner(eb);
3406			if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
3407				upper->cowonly = 1;
3408
3409			/*
3410			 * If we know the block isn't shared we can avoid
3411			 * checking its backrefs.
3412			 */
3413			if (btrfs_block_can_be_shared(trans, root, eb))
3414				upper->checked = 0;
3415			else
3416				upper->checked = 1;
3417
3418			/*
3419			 * Add the block to pending list if we need to check its
3420			 * backrefs, we only do this once while walking up a
3421			 * tree as we will catch anything else later on.
3422			 */
3423			if (!upper->checked && need_check) {
3424				need_check = false;
3425				list_add_tail(&edge->list[UPPER],
3426					      &cache->pending_edge);
3427			} else {
3428				if (upper->checked)
3429					need_check = true;
3430				INIT_LIST_HEAD(&edge->list[UPPER]);
3431			}
3432		} else {
3433			upper = rb_entry(rb_node, struct btrfs_backref_node,
3434					 rb_node);
3435			ASSERT(upper->checked);
3436			INIT_LIST_HEAD(&edge->list[UPPER]);
3437			if (!upper->owner)
3438				upper->owner = btrfs_header_owner(eb);
3439		}
3440		btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
3441
3442		if (rb_node) {
3443			btrfs_put_root(root);
3444			break;
3445		}
3446		lower = upper;
3447		upper = NULL;
3448	}
3449out:
3450	btrfs_release_path(path);
3451	return ret;
3452}
3453
3454/*
3455 * Add backref node @cur into @cache.
3456 *
3457 * NOTE: Even if the function returned 0, @cur is not yet cached as its upper
3458 *	 links aren't yet bi-directional. Use
3459 *	 btrfs_backref_finish_upper_links() to finish such linkage.
3460 *
3461 * @trans:	Transaction handle.
3462 * @path:	Released path for indirect tree backref lookup
3463 * @iter:	Released backref iter for extent tree search
3464 * @node_key:	The first key of the tree block
3465 */
3466int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
3467				struct btrfs_backref_cache *cache,
3468				struct btrfs_path *path,
3469				struct btrfs_backref_iter *iter,
3470				struct btrfs_key *node_key,
3471				struct btrfs_backref_node *cur)
3472{
3473	struct btrfs_backref_edge *edge;
3474	struct btrfs_backref_node *exist;
3475	int ret;
3476
3477	ret = btrfs_backref_iter_start(iter, cur->bytenr);
3478	if (ret < 0)
3479		return ret;
3480	/*
3481	 * We skip the first btrfs_tree_block_info, as we don't use the key
3482	 * stored in it, but fetch it from the tree block
3483	 */
3484	if (btrfs_backref_has_tree_block_info(iter)) {
3485		ret = btrfs_backref_iter_next(iter);
3486		if (ret < 0)
3487			goto out;
3488		/* No extra backref? This means the tree block is corrupted */
3489		if (ret > 0) {
3490			ret = -EUCLEAN;
3491			goto out;
3492		}
3493	}
3494	WARN_ON(cur->checked);
3495	if (!list_empty(&cur->upper)) {
3496		/*
3497		 * The backref was added previously when processing backref of
3498		 * type BTRFS_TREE_BLOCK_REF_KEY
3499		 */
3500		ASSERT(list_is_singular(&cur->upper));
3501		edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
3502				  list[LOWER]);
3503		ASSERT(list_empty(&edge->list[UPPER]));
3504		exist = edge->node[UPPER];
3505		/*
3506		 * Add the upper level block to the pending list if we need to
3507		 * check its backrefs
3508		 */
3509		if (!exist->checked)
3510			list_add_tail(&edge->list[UPPER], &cache->pending_edge);
3511	} else {
3512		exist = NULL;
3513	}
3514
3515	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
3516		struct extent_buffer *eb;
3517		struct btrfs_key key;
3518		int type;
3519
3520		cond_resched();
3521		eb = iter->path->nodes[0];
3522
3523		key.objectid = iter->bytenr;
3524		if (btrfs_backref_iter_is_inline_ref(iter)) {
3525			struct btrfs_extent_inline_ref *iref;
3526
3527			/* Update key for inline backref */
3528			iref = (struct btrfs_extent_inline_ref *)
3529				((unsigned long)iter->cur_ptr);
3530			type = btrfs_get_extent_inline_ref_type(eb, iref,
3531							BTRFS_REF_TYPE_BLOCK);
3532			if (type == BTRFS_REF_TYPE_INVALID) {
3533				ret = -EUCLEAN;
3534				goto out;
3535			}
3536			key.type = type;
3537			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
3538		} else {
3539			key.type = iter->cur_key.type;
3540			key.offset = iter->cur_key.offset;
3541		}
3542
3543		/*
3544		 * Parent node found and matches current inline ref, no need to
3545		 * rebuild this node for this inline ref
3546		 */
3547		if (exist &&
3548		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
3549		      exist->owner == key.offset) ||
3550		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
3551		      exist->bytenr == key.offset))) {
3552			exist = NULL;
3553			continue;
3554		}
3555
3556		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
3557		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
3558			ret = handle_direct_tree_backref(cache, &key, cur);
3559			if (ret < 0)
3560				goto out;
3561		} else if (key.type == BTRFS_TREE_BLOCK_REF_KEY) {
3562			/*
3563			 * For BTRFS_TREE_BLOCK_REF_KEY the ref offset is the
3564			 * root objectid, so we need to search that tree to
3565			 * find the block's parent bytenr.
3566			 */
3567			ret = handle_indirect_tree_backref(trans, cache, path,
3568							   &key, node_key, cur);
3569			if (ret < 0)
3570				goto out;
3571		}
3572		/*
3573		 * Unrecognized tree backref items (if they can pass the
3574		 * tree-checker) are ignored.
3575		 */
3576	}
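	/*
	 * All backref items were iterated without error, so the node is
	 * fully checked.  A non-NULL @exist at this point means the
	 * pre-existing edge matched no on-disk backref, hence the warning.
	 */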
3577	ret = 0;
3578	cur->checked = 1;
3579	WARN_ON(exist);
3580out:
3581	btrfs_backref_iter_release(iter);
3582	return ret;
3583}
3584
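/*
 * Usage sketch, condensed from how a caller such as relocation's
 * build_backref_tree() is expected to drive the function above
 * (illustrative only; error handling and iteration details trimmed,
 * @cur starts at the initial @node and follows pending edges):
 *
 *	do {
 *		ret = btrfs_backref_add_tree_node(trans, cache, path, iter,
 *						  node_key, cur);
 *		if (ret < 0)
 *			goto out;
 *		edge = list_first_entry_or_null(&cache->pending_edge,
 *				struct btrfs_backref_edge, list[UPPER]);
 *		if (edge) {
 *			list_del_init(&edge->list[UPPER]);
 *			cur = edge->node[UPPER];
 *		}
 *	} while (edge);
 *
 *	ret = btrfs_backref_finish_upper_links(cache, node);
 * out:
 *	if (ret < 0)
 *		btrfs_backref_error_cleanup(cache, node);
 */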
3585/*
3586 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
3587 */
3588int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
3589				     struct btrfs_backref_node *start)
3590{
3591	struct list_head *useless_node = &cache->useless_node;
3592	struct btrfs_backref_edge *edge;
3593	struct rb_node *rb_node;
3594	LIST_HEAD(pending_edge);
3595
3596	ASSERT(start->checked);
3597
3598	/* Insert this node into the cache if it's not COW-only */
3599	if (!start->cowonly) {
3600		rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
3601					   &start->rb_node);
3602		if (rb_node)
3603			btrfs_backref_panic(cache->fs_info, start->bytenr,
3604					    -EEXIST);
3605		list_add_tail(&start->lower, &cache->leaves);
3606	}
3607
3608	/*
3609	 * Use breadth-first search to iterate all related edges.
3610	 *
3611	 * The starting points are all the edges of this node
3612	 */
3613	list_for_each_entry(edge, &start->upper, list[LOWER])
3614		list_add_tail(&edge->list[UPPER], &pending_edge);
3615
3616	while (!list_empty(&pending_edge)) {
3617		struct btrfs_backref_node *upper;
3618		struct btrfs_backref_node *lower;
3619
3620		edge = list_first_entry(&pending_edge,
3621				struct btrfs_backref_edge, list[UPPER]);
3622		list_del_init(&edge->list[UPPER]);
3623		upper = edge->node[UPPER];
3624		lower = edge->node[LOWER];
3625
3626		/* Parent is detached, no need to keep any edges */
3627		if (upper->detached) {
3628			list_del(&edge->list[LOWER]);
3629			btrfs_backref_free_edge(cache, edge);
3630
3631			/* Lower node is an orphan, queue it for cleanup */
3632			if (list_empty(&lower->upper))
3633				list_add(&lower->list, useless_node);
3634			continue;
3635		}
3636
3637		/*
3638		 * All new nodes added by the current build_backref_tree() haven't
3639		 * been linked to the cache rb tree.
3640		 * So if we have upper->rb_node populated, this means a cache
3641		 * hit. We only need to link the edge, as @upper and all its
3642		 * parents have already been linked.
3643		 */
3644		if (!RB_EMPTY_NODE(&upper->rb_node)) {
3645			if (upper->lowest) {
3646				list_del_init(&upper->lower);
3647				upper->lowest = 0;
3648			}
3649
3650			list_add_tail(&edge->list[UPPER], &upper->lower);
3651			continue;
3652		}
3653
3654		/* Sanity check, we shouldn't have any unchecked nodes */
3655		if (!upper->checked) {
3656			ASSERT(0);
3657			return -EUCLEAN;
3658		}
3659
3660		/* Sanity check, COW-only node has non-COW-only parent */
3661		if (start->cowonly != upper->cowonly) {
3662			ASSERT(0);
3663			return -EUCLEAN;
3664		}
3665
3666		/* Only cache non-COW-only (subvolume trees) tree blocks */
3667		if (!upper->cowonly) {
3668			rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3669						   &upper->rb_node);
3670			if (rb_node) {
3671				btrfs_backref_panic(cache->fs_info,
3672						upper->bytenr, -EEXIST);
3673				return -EUCLEAN;
3674			}
3675		}
3676
3677		list_add_tail(&edge->list[UPPER], &upper->lower);
3678
3679		/*
3680		 * Also queue all the parent edges of this uncached node
3681		 * to finish the upper linkage
3682		 */
3683		list_for_each_entry(edge, &upper->upper, list[LOWER])
3684			list_add_tail(&edge->list[UPPER], &pending_edge);
3685	}
3686	return 0;
3687}
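/*
 * For reference, the "bi-directional" linkage the function above completes
 * is the same one btrfs_backref_link_edge() in backref.h sets up; roughly:
 *
 *	edge->node[LOWER] = lower;
 *	edge->node[UPPER] = upper;
 *	list_add_tail(&edge->list[LOWER], &lower->upper);
 *	list_add_tail(&edge->list[UPPER], &upper->lower);
 *
 * i.e. every edge lives on the child's ->upper list and on the parent's
 * ->lower list at the same time.
 */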
3688
3689void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
3690				 struct btrfs_backref_node *node)
3691{
3692	struct btrfs_backref_node *lower;
3693	struct btrfs_backref_node *upper;
3694	struct btrfs_backref_edge *edge;
3695
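	/*
	 * Discard the current useless_node queue first: nodes that are
	 * really orphaned get re-queued below while pending_edge is
	 * drained, and are freed in the final loop.
	 */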
3696	while (!list_empty(&cache->useless_node)) {
3697		lower = list_first_entry(&cache->useless_node,
3698				   struct btrfs_backref_node, list);
3699		list_del_init(&lower->list);
3700	}
3701	while (!list_empty(&cache->pending_edge)) {
3702		edge = list_first_entry(&cache->pending_edge,
3703				struct btrfs_backref_edge, list[UPPER]);
3704		list_del(&edge->list[UPPER]);
3705		list_del(&edge->list[LOWER]);
3706		lower = edge->node[LOWER];
3707		upper = edge->node[UPPER];
3708		btrfs_backref_free_edge(cache, edge);
3709
3710		/*
3711		 * Lower is no longer linked to any upper backref nodes and
3712		 * isn't in the cache, so we can free it ourselves.
3713		 */
3714		if (list_empty(&lower->upper) &&
3715		    RB_EMPTY_NODE(&lower->rb_node))
3716			list_add(&lower->list, &cache->useless_node);
3717
3718		if (!RB_EMPTY_NODE(&upper->rb_node))
3719			continue;
3720
3721		/* Add this node's upper edges to the list to process */
3722		list_for_each_entry(edge, &upper->upper, list[LOWER])
3723			list_add_tail(&edge->list[UPPER],
3724				      &cache->pending_edge);
3725		if (list_empty(&upper->upper))
3726			list_add(&upper->list, &cache->useless_node);
3727	}
3728
3729	while (!list_empty(&cache->useless_node)) {
3730		lower = list_first_entry(&cache->useless_node,
3731				   struct btrfs_backref_node, list);
3732		list_del_init(&lower->list);
3733		if (lower == node)
3734			node = NULL;
3735		btrfs_backref_drop_node(cache, lower);
3736	}
3737
3738	btrfs_backref_cleanup_node(cache, node);
3739	ASSERT(list_empty(&cache->useless_node) &&
3740	       list_empty(&cache->pending_edge));
3741}
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2011 STRATO.  All rights reserved.
   4 */
   5
   6#include <linux/mm.h>
   7#include <linux/rbtree.h>
   8#include <trace/events/btrfs.h>
   9#include "ctree.h"
  10#include "disk-io.h"
  11#include "backref.h"
  12#include "ulist.h"
  13#include "transaction.h"
  14#include "delayed-ref.h"
  15#include "locking.h"
  16#include "misc.h"
  17#include "tree-mod-log.h"
  18
  19/* Just an arbitrary number so we can be sure this happened */
  20#define BACKREF_FOUND_SHARED 6
 
 
 
 
 
 
  21
  22struct extent_inode_elem {
  23	u64 inum;
  24	u64 offset;
 
  25	struct extent_inode_elem *next;
  26};
  27
  28static int check_extent_in_eb(const struct btrfs_key *key,
 
  29			      const struct extent_buffer *eb,
  30			      const struct btrfs_file_extent_item *fi,
  31			      u64 extent_item_pos,
  32			      struct extent_inode_elem **eie,
  33			      bool ignore_offset)
  34{
  35	u64 offset = 0;
 
  36	struct extent_inode_elem *e;
 
 
 
  37
  38	if (!ignore_offset &&
  39	    !btrfs_file_extent_compression(eb, fi) &&
  40	    !btrfs_file_extent_encryption(eb, fi) &&
  41	    !btrfs_file_extent_other_encoding(eb, fi)) {
  42		u64 data_offset;
  43		u64 data_len;
  44
  45		data_offset = btrfs_file_extent_offset(eb, fi);
  46		data_len = btrfs_file_extent_num_bytes(eb, fi);
  47
  48		if (extent_item_pos < data_offset ||
  49		    extent_item_pos >= data_offset + data_len)
  50			return 1;
  51		offset = extent_item_pos - data_offset;
  52	}
  53
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  54	e = kmalloc(sizeof(*e), GFP_NOFS);
  55	if (!e)
  56		return -ENOMEM;
  57
  58	e->next = *eie;
  59	e->inum = key->objectid;
  60	e->offset = key->offset + offset;
 
  61	*eie = e;
  62
  63	return 0;
  64}
  65
  66static void free_inode_elem_list(struct extent_inode_elem *eie)
  67{
  68	struct extent_inode_elem *eie_next;
  69
  70	for (; eie; eie = eie_next) {
  71		eie_next = eie->next;
  72		kfree(eie);
  73	}
  74}
  75
  76static int find_extent_in_eb(const struct extent_buffer *eb,
  77			     u64 wanted_disk_byte, u64 extent_item_pos,
  78			     struct extent_inode_elem **eie,
  79			     bool ignore_offset)
  80{
  81	u64 disk_byte;
  82	struct btrfs_key key;
  83	struct btrfs_file_extent_item *fi;
  84	int slot;
  85	int nritems;
  86	int extent_type;
  87	int ret;
  88
  89	/*
  90	 * from the shared data ref, we only have the leaf but we need
  91	 * the key. thus, we must look into all items and see that we
  92	 * find one (some) with a reference to our extent item.
  93	 */
  94	nritems = btrfs_header_nritems(eb);
  95	for (slot = 0; slot < nritems; ++slot) {
  96		btrfs_item_key_to_cpu(eb, &key, slot);
  97		if (key.type != BTRFS_EXTENT_DATA_KEY)
  98			continue;
  99		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
 100		extent_type = btrfs_file_extent_type(eb, fi);
 101		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
 102			continue;
 103		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
 104		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
 105		if (disk_byte != wanted_disk_byte)
 106			continue;
 107
 108		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
 109		if (ret < 0)
 110			return ret;
 111	}
 112
 113	return 0;
 114}
 115
 116struct preftree {
 117	struct rb_root_cached root;
 118	unsigned int count;
 119};
 120
 121#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }
 122
 123struct preftrees {
 124	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
 125	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
 126	struct preftree indirect_missing_keys;
 127};
 128
 129/*
 130 * Checks for a shared extent during backref search.
 131 *
 132 * The share_count tracks prelim_refs (direct and indirect) having a
 133 * ref->count >0:
 134 *  - incremented when a ref->count transitions to >0
 135 *  - decremented when a ref->count transitions to <1
 136 */
 137struct share_check {
 138	u64 root_objectid;
 
 139	u64 inum;
 
 
 
 
 
 
 
 
 140	int share_count;
 
 
 
 
 
 
 
 
 
 
 
 
 141};
 142
 143static inline int extent_is_shared(struct share_check *sc)
 144{
 145	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
 146}
 147
 148static struct kmem_cache *btrfs_prelim_ref_cache;
 149
 150int __init btrfs_prelim_ref_init(void)
 151{
 152	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
 153					sizeof(struct prelim_ref),
 154					0,
 155					SLAB_MEM_SPREAD,
 156					NULL);
 157	if (!btrfs_prelim_ref_cache)
 158		return -ENOMEM;
 159	return 0;
 160}
 161
 162void __cold btrfs_prelim_ref_exit(void)
 163{
 164	kmem_cache_destroy(btrfs_prelim_ref_cache);
 165}
 166
 167static void free_pref(struct prelim_ref *ref)
 168{
 169	kmem_cache_free(btrfs_prelim_ref_cache, ref);
 170}
 171
 172/*
 173 * Return 0 when both refs are for the same block (and can be merged).
 174 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 175 * indicates a 'higher' block.
 176 */
 177static int prelim_ref_compare(struct prelim_ref *ref1,
 178			      struct prelim_ref *ref2)
 179{
 180	if (ref1->level < ref2->level)
 181		return -1;
 182	if (ref1->level > ref2->level)
 183		return 1;
 184	if (ref1->root_id < ref2->root_id)
 185		return -1;
 186	if (ref1->root_id > ref2->root_id)
 187		return 1;
 188	if (ref1->key_for_search.type < ref2->key_for_search.type)
 189		return -1;
 190	if (ref1->key_for_search.type > ref2->key_for_search.type)
 191		return 1;
 192	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
 193		return -1;
 194	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
 195		return 1;
 196	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
 197		return -1;
 198	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
 199		return 1;
 200	if (ref1->parent < ref2->parent)
 201		return -1;
 202	if (ref1->parent > ref2->parent)
 203		return 1;
 204
 205	return 0;
 206}
 207
 208static void update_share_count(struct share_check *sc, int oldcount,
 209			       int newcount)
 210{
 211	if ((!sc) || (oldcount == 0 && newcount < 1))
 212		return;
 213
 214	if (oldcount > 0 && newcount < 1)
 215		sc->share_count--;
 216	else if (oldcount < 1 && newcount > 0)
 217		sc->share_count++;
 
 
 
 
 
 218}
 219
 220/*
 221 * Add @newref to the @root rbtree, merging identical refs.
 222 *
 223 * Callers should assume that newref has been freed after calling.
 224 */
 225static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
 226			      struct preftree *preftree,
 227			      struct prelim_ref *newref,
 228			      struct share_check *sc)
 229{
 230	struct rb_root_cached *root;
 231	struct rb_node **p;
 232	struct rb_node *parent = NULL;
 233	struct prelim_ref *ref;
 234	int result;
 235	bool leftmost = true;
 236
 237	root = &preftree->root;
 238	p = &root->rb_root.rb_node;
 239
 240	while (*p) {
 241		parent = *p;
 242		ref = rb_entry(parent, struct prelim_ref, rbnode);
 243		result = prelim_ref_compare(ref, newref);
 244		if (result < 0) {
 245			p = &(*p)->rb_left;
 246		} else if (result > 0) {
 247			p = &(*p)->rb_right;
 248			leftmost = false;
 249		} else {
 250			/* Identical refs, merge them and free @newref */
 251			struct extent_inode_elem *eie = ref->inode_list;
 252
 253			while (eie && eie->next)
 254				eie = eie->next;
 255
 256			if (!eie)
 257				ref->inode_list = newref->inode_list;
 258			else
 259				eie->next = newref->inode_list;
 260			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
 261						     preftree->count);
 262			/*
 263			 * A delayed ref can have newref->count < 0.
 264			 * The ref->count is updated to follow any
 265			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
 266			 */
 267			update_share_count(sc, ref->count,
 268					   ref->count + newref->count);
 269			ref->count += newref->count;
 270			free_pref(newref);
 271			return;
 272		}
 273	}
 274
 275	update_share_count(sc, 0, newref->count);
 276	preftree->count++;
 277	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
 278	rb_link_node(&newref->rbnode, parent, p);
 279	rb_insert_color_cached(&newref->rbnode, root, leftmost);
 280}
 281
 282/*
 283 * Release the entire tree.  We don't care about internal consistency so
 284 * just free everything and then reset the tree root.
 285 */
 286static void prelim_release(struct preftree *preftree)
 287{
 288	struct prelim_ref *ref, *next_ref;
 289
 290	rbtree_postorder_for_each_entry_safe(ref, next_ref,
 291					     &preftree->root.rb_root, rbnode)
 
 292		free_pref(ref);
 
 293
 294	preftree->root = RB_ROOT_CACHED;
 295	preftree->count = 0;
 296}
 297
 298/*
 299 * the rules for all callers of this function are:
 300 * - obtaining the parent is the goal
 301 * - if you add a key, you must know that it is a correct key
 302 * - if you cannot add the parent or a correct key, then we will look into the
 303 *   block later to set a correct key
 304 *
 305 * delayed refs
 306 * ============
 307 *        backref type | shared | indirect | shared | indirect
 308 * information         |   tree |     tree |   data |     data
 309 * --------------------+--------+----------+--------+----------
 310 *      parent logical |    y   |     -    |    -   |     -
 311 *      key to resolve |    -   |     y    |    y   |     y
 312 *  tree block logical |    -   |     -    |    -   |     -
 313 *  root for resolving |    y   |     y    |    y   |     y
 314 *
 315 * - column 1:       we've the parent -> done
 316 * - column 2, 3, 4: we use the key to find the parent
 317 *
 318 * on disk refs (inline or keyed)
 319 * ==============================
 320 *        backref type | shared | indirect | shared | indirect
 321 * information         |   tree |     tree |   data |     data
 322 * --------------------+--------+----------+--------+----------
 323 *      parent logical |    y   |     -    |    y   |     -
 324 *      key to resolve |    -   |     -    |    -   |     y
 325 *  tree block logical |    y   |     y    |    y   |     y
 326 *  root for resolving |    -   |     y    |    y   |     y
 327 *
 328 * - column 1, 3: we've the parent -> done
 329 * - column 2:    we take the first key from the block to find the parent
 330 *                (see add_missing_keys)
 331 * - column 4:    we use the key to find the parent
 332 *
 333 * additional information that's available but not required to find the parent
 334 * block might help in merging entries to gain some speed.
 335 */
 336static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
 337			  struct preftree *preftree, u64 root_id,
 338			  const struct btrfs_key *key, int level, u64 parent,
 339			  u64 wanted_disk_byte, int count,
 340			  struct share_check *sc, gfp_t gfp_mask)
 341{
 342	struct prelim_ref *ref;
 343
 344	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
 345		return 0;
 346
 347	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
 348	if (!ref)
 349		return -ENOMEM;
 350
 351	ref->root_id = root_id;
 352	if (key)
 353		ref->key_for_search = *key;
 354	else
 355		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
 356
 357	ref->inode_list = NULL;
 358	ref->level = level;
 359	ref->count = count;
 360	ref->parent = parent;
 361	ref->wanted_disk_byte = wanted_disk_byte;
 362	prelim_ref_insert(fs_info, preftree, ref, sc);
 363	return extent_is_shared(sc);
 364}
 365
 366/* direct refs use root == 0, key == NULL */
 367static int add_direct_ref(const struct btrfs_fs_info *fs_info,
 368			  struct preftrees *preftrees, int level, u64 parent,
 369			  u64 wanted_disk_byte, int count,
 370			  struct share_check *sc, gfp_t gfp_mask)
 371{
 372	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
 373			      parent, wanted_disk_byte, count, sc, gfp_mask);
 374}
 375
 376/* indirect refs use parent == 0 */
 377static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
 378			    struct preftrees *preftrees, u64 root_id,
 379			    const struct btrfs_key *key, int level,
 380			    u64 wanted_disk_byte, int count,
 381			    struct share_check *sc, gfp_t gfp_mask)
 382{
 383	struct preftree *tree = &preftrees->indirect;
 384
 385	if (!key)
 386		tree = &preftrees->indirect_missing_keys;
 387	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
 388			      wanted_disk_byte, count, sc, gfp_mask);
 389}
 390
 391static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
 392{
 393	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
 394	struct rb_node *parent = NULL;
 395	struct prelim_ref *ref = NULL;
 396	struct prelim_ref target = {};
 397	int result;
 398
 399	target.parent = bytenr;
 400
 401	while (*p) {
 402		parent = *p;
 403		ref = rb_entry(parent, struct prelim_ref, rbnode);
 404		result = prelim_ref_compare(ref, &target);
 405
 406		if (result < 0)
 407			p = &(*p)->rb_left;
 408		else if (result > 0)
 409			p = &(*p)->rb_right;
 410		else
 411			return 1;
 412	}
 413	return 0;
 414}
 415
 416static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
 
 417			   struct ulist *parents,
 418			   struct preftrees *preftrees, struct prelim_ref *ref,
 419			   int level, u64 time_seq, const u64 *extent_item_pos,
 420			   bool ignore_offset)
 421{
 422	int ret = 0;
 423	int slot;
 424	struct extent_buffer *eb;
 425	struct btrfs_key key;
 426	struct btrfs_key *key_for_search = &ref->key_for_search;
 427	struct btrfs_file_extent_item *fi;
 428	struct extent_inode_elem *eie = NULL, *old = NULL;
 429	u64 disk_byte;
 430	u64 wanted_disk_byte = ref->wanted_disk_byte;
 431	u64 count = 0;
 432	u64 data_offset;
 
 433
 434	if (level != 0) {
 435		eb = path->nodes[level];
 436		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
 437		if (ret < 0)
 438			return ret;
 439		return 0;
 440	}
 441
 442	/*
 443	 * 1. We normally enter this function with the path already pointing to
 444	 *    the first item to check. But sometimes, we may enter it with
 445	 *    slot == nritems.
 446	 * 2. We are searching for normal backref but bytenr of this leaf
 447	 *    matches shared data backref
 448	 * 3. The leaf owner is not equal to the root we are searching
 449	 *
 450	 * For these cases, go to the next leaf before we continue.
 451	 */
 452	eb = path->nodes[0];
 453	if (path->slots[0] >= btrfs_header_nritems(eb) ||
 454	    is_shared_data_backref(preftrees, eb->start) ||
 455	    ref->root_id != btrfs_header_owner(eb)) {
 456		if (time_seq == BTRFS_SEQ_LAST)
 457			ret = btrfs_next_leaf(root, path);
 458		else
 459			ret = btrfs_next_old_leaf(root, path, time_seq);
 460	}
 461
 462	while (!ret && count < ref->count) {
 463		eb = path->nodes[0];
 464		slot = path->slots[0];
 465
 466		btrfs_item_key_to_cpu(eb, &key, slot);
 467
 468		if (key.objectid != key_for_search->objectid ||
 469		    key.type != BTRFS_EXTENT_DATA_KEY)
 470			break;
 471
 472		/*
 473		 * We are searching for normal backref but bytenr of this leaf
 474		 * matches shared data backref, OR
 475		 * the leaf owner is not equal to the root we are searching for
 476		 */
 477		if (slot == 0 &&
 478		    (is_shared_data_backref(preftrees, eb->start) ||
 479		     ref->root_id != btrfs_header_owner(eb))) {
 480			if (time_seq == BTRFS_SEQ_LAST)
 481				ret = btrfs_next_leaf(root, path);
 482			else
 483				ret = btrfs_next_old_leaf(root, path, time_seq);
 484			continue;
 485		}
 486		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
 
 
 
 487		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
 488		data_offset = btrfs_file_extent_offset(eb, fi);
 489
 490		if (disk_byte == wanted_disk_byte) {
 491			eie = NULL;
 492			old = NULL;
 493			if (ref->key_for_search.offset == key.offset - data_offset)
 494				count++;
 495			else
 496				goto next;
 497			if (extent_item_pos) {
 498				ret = check_extent_in_eb(&key, eb, fi,
 499						*extent_item_pos,
 500						&eie, ignore_offset);
 501				if (ret < 0)
 502					break;
 503			}
 504			if (ret > 0)
 505				goto next;
 506			ret = ulist_add_merge_ptr(parents, eb->start,
 507						  eie, (void **)&old, GFP_NOFS);
 508			if (ret < 0)
 509				break;
 510			if (!ret && extent_item_pos) {
 511				while (old->next)
 512					old = old->next;
 513				old->next = eie;
 514			}
 515			eie = NULL;
 516		}
 517next:
 518		if (time_seq == BTRFS_SEQ_LAST)
 519			ret = btrfs_next_item(root, path);
 520		else
 521			ret = btrfs_next_old_item(root, path, time_seq);
 522	}
 523
 524	if (ret > 0)
 
 
 525		ret = 0;
 526	else if (ret < 0)
 527		free_inode_elem_list(eie);
 528	return ret;
 529}
 530
 531/*
 532 * resolve an indirect backref in the form (root_id, key, level)
 533 * to a logical address
 534 */
 535static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
 536				struct btrfs_path *path, u64 time_seq,
 537				struct preftrees *preftrees,
 538				struct prelim_ref *ref, struct ulist *parents,
 539				const u64 *extent_item_pos, bool ignore_offset)
 540{
 541	struct btrfs_root *root;
 542	struct extent_buffer *eb;
 543	int ret = 0;
 544	int root_level;
 545	int level = ref->level;
 546	struct btrfs_key search_key = ref->key_for_search;
 547
 548	/*
 549	 * If we're search_commit_root we could possibly be holding locks on
 550	 * other tree nodes.  This happens when qgroups does backref walks when
 551	 * adding new delayed refs.  To deal with this we need to look in cache
 552	 * for the root, and if we don't find it then we need to search the
 553	 * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage
 554	 * here.
 555	 */
 556	if (path->search_commit_root)
 557		root = btrfs_get_fs_root_commit_root(fs_info, path, ref->root_id);
 558	else
 559		root = btrfs_get_fs_root(fs_info, ref->root_id, false);
 560	if (IS_ERR(root)) {
 561		ret = PTR_ERR(root);
 562		goto out_free;
 563	}
 564
 565	if (!path->search_commit_root &&
 566	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
 567		ret = -ENOENT;
 568		goto out;
 569	}
 570
 571	if (btrfs_is_testing(fs_info)) {
 572		ret = -ENOENT;
 573		goto out;
 574	}
 575
 576	if (path->search_commit_root)
 577		root_level = btrfs_header_level(root->commit_root);
 578	else if (time_seq == BTRFS_SEQ_LAST)
 579		root_level = btrfs_header_level(root->node);
 580	else
 581		root_level = btrfs_old_root_level(root, time_seq);
 582
 583	if (root_level + 1 == level)
 584		goto out;
 585
 586	/*
 587	 * We can often find data backrefs with an offset that is too large
 588	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
 589	 * subtracting a file's offset with the data offset of its
 590	 * corresponding extent data item. This can happen for example in the
 591	 * clone ioctl.
 592	 *
 593	 * So if we detect such case we set the search key's offset to zero to
 594	 * make sure we will find the matching file extent item at
 595	 * add_all_parents(), otherwise we will miss it because the offset
 596	 * taken form the backref is much larger then the offset of the file
 597	 * extent item. This can make us scan a very large number of file
 598	 * extent items, but at least it will not make us miss any.
 599	 *
 600	 * This is an ugly workaround for a behaviour that should have never
 601	 * existed, but it does and a fix for the clone ioctl would touch a lot
 602	 * of places, cause backwards incompatibility and would not fix the
 603	 * problem for extents cloned with older kernels.
 604	 */
 605	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
 606	    search_key.offset >= LLONG_MAX)
 607		search_key.offset = 0;
 608	path->lowest_level = level;
 609	if (time_seq == BTRFS_SEQ_LAST)
 610		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
 611	else
 612		ret = btrfs_search_old_slot(root, &search_key, path, time_seq);
 613
 614	btrfs_debug(fs_info,
 615		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
 616		 ref->root_id, level, ref->count, ret,
 617		 ref->key_for_search.objectid, ref->key_for_search.type,
 618		 ref->key_for_search.offset);
 619	if (ret < 0)
 620		goto out;
 621
 622	eb = path->nodes[level];
 623	while (!eb) {
 624		if (WARN_ON(!level)) {
 625			ret = 1;
 626			goto out;
 627		}
 628		level--;
 629		eb = path->nodes[level];
 630	}
 631
 632	ret = add_all_parents(root, path, parents, preftrees, ref, level,
 633			      time_seq, extent_item_pos, ignore_offset);
 634out:
 635	btrfs_put_root(root);
 636out_free:
 637	path->lowest_level = 0;
 638	btrfs_release_path(path);
 639	return ret;
 640}
 641
 642static struct extent_inode_elem *
 643unode_aux_to_inode_list(struct ulist_node *node)
 644{
 645	if (!node)
 646		return NULL;
 647	return (struct extent_inode_elem *)(uintptr_t)node->aux;
 648}
 649
 
 
 
 
 
 
 
 
 
 
 
 
 650/*
 651 * We maintain three separate rbtrees: one for direct refs, one for
 652 * indirect refs which have a key, and one for indirect refs which do not
 653 * have a key. Each tree does merge on insertion.
 654 *
 655 * Once all of the references are located, we iterate over the tree of
 656 * indirect refs with missing keys. An appropriate key is located and
 657 * the ref is moved onto the tree for indirect refs. After all missing
 658 * keys are thus located, we iterate over the indirect ref tree, resolve
 659 * each reference, and then insert the resolved reference onto the
 660 * direct tree (merging there too).
 661 *
 662 * New backrefs (i.e., for parent nodes) are added to the appropriate
 663 * rbtree as they are encountered. The new backrefs are subsequently
 664 * resolved as above.
 665 */
 666static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
 667				 struct btrfs_path *path, u64 time_seq,
 668				 struct preftrees *preftrees,
 669				 const u64 *extent_item_pos,
 670				 struct share_check *sc, bool ignore_offset)
 671{
 672	int err;
 673	int ret = 0;
 674	struct ulist *parents;
 675	struct ulist_node *node;
 676	struct ulist_iterator uiter;
 677	struct rb_node *rnode;
 678
 679	parents = ulist_alloc(GFP_NOFS);
 680	if (!parents)
 681		return -ENOMEM;
 682
 683	/*
 684	 * We could trade memory usage for performance here by iterating
 685	 * the tree, allocating new refs for each insertion, and then
 686	 * freeing the entire indirect tree when we're done.  In some test
 687	 * cases, the tree can grow quite large (~200k objects).
 688	 */
 689	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
 690		struct prelim_ref *ref;
 691
 692		ref = rb_entry(rnode, struct prelim_ref, rbnode);
 693		if (WARN(ref->parent,
 694			 "BUG: direct ref found in indirect tree")) {
 695			ret = -EINVAL;
 696			goto out;
 697		}
 698
 699		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
 700		preftrees->indirect.count--;
 701
 702		if (ref->count == 0) {
 703			free_pref(ref);
 704			continue;
 705		}
 706
 707		if (sc && sc->root_objectid &&
 708		    ref->root_id != sc->root_objectid) {
 709			free_pref(ref);
 710			ret = BACKREF_FOUND_SHARED;
 711			goto out;
 712		}
 713		err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
 714					   ref, parents, extent_item_pos,
 715					   ignore_offset);
 716		/*
 717		 * we can only tolerate ENOENT,otherwise,we should catch error
 718		 * and return directly.
 719		 */
 720		if (err == -ENOENT) {
 721			prelim_ref_insert(fs_info, &preftrees->direct, ref,
 722					  NULL);
 723			continue;
 724		} else if (err) {
 725			free_pref(ref);
 726			ret = err;
 727			goto out;
 728		}
 729
 730		/* we put the first parent into the ref at hand */
 731		ULIST_ITER_INIT(&uiter);
 732		node = ulist_next(parents, &uiter);
 733		ref->parent = node ? node->val : 0;
 734		ref->inode_list = unode_aux_to_inode_list(node);
 735
 736		/* Add a prelim_ref(s) for any other parent(s). */
 737		while ((node = ulist_next(parents, &uiter))) {
 738			struct prelim_ref *new_ref;
 739
 740			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
 741						   GFP_NOFS);
 742			if (!new_ref) {
 743				free_pref(ref);
 744				ret = -ENOMEM;
 745				goto out;
 746			}
 747			memcpy(new_ref, ref, sizeof(*ref));
 748			new_ref->parent = node->val;
 749			new_ref->inode_list = unode_aux_to_inode_list(node);
 750			prelim_ref_insert(fs_info, &preftrees->direct,
 751					  new_ref, NULL);
 752		}
 753
 754		/*
 755		 * Now it's a direct ref, put it in the direct tree. We must
 756		 * do this last because the ref could be merged/freed here.
 757		 */
 758		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);
 759
 760		ulist_reinit(parents);
 761		cond_resched();
 762	}
 763out:
 764	ulist_free(parents);
 
 
 
 
 765	return ret;
 766}
 767
 768/*
 769 * read tree blocks and add keys where required.
 770 */
 771static int add_missing_keys(struct btrfs_fs_info *fs_info,
 772			    struct preftrees *preftrees, bool lock)
 773{
 774	struct prelim_ref *ref;
 775	struct extent_buffer *eb;
 776	struct preftree *tree = &preftrees->indirect_missing_keys;
 777	struct rb_node *node;
 778
 779	while ((node = rb_first_cached(&tree->root))) {
 
 
 780		ref = rb_entry(node, struct prelim_ref, rbnode);
 781		rb_erase_cached(node, &tree->root);
 782
 783		BUG_ON(ref->parent);	/* should not be a direct ref */
 784		BUG_ON(ref->key_for_search.type);
 785		BUG_ON(!ref->wanted_disk_byte);
 786
 787		eb = read_tree_block(fs_info, ref->wanted_disk_byte,
 788				     ref->root_id, 0, ref->level - 1, NULL);
 
 
 789		if (IS_ERR(eb)) {
 790			free_pref(ref);
 791			return PTR_ERR(eb);
 792		} else if (!extent_buffer_uptodate(eb)) {
 
 793			free_pref(ref);
 794			free_extent_buffer(eb);
 795			return -EIO;
 796		}
 
 797		if (lock)
 798			btrfs_tree_read_lock(eb);
 799		if (btrfs_header_level(eb) == 0)
 800			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
 801		else
 802			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
 803		if (lock)
 804			btrfs_tree_read_unlock(eb);
 805		free_extent_buffer(eb);
 806		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
 807		cond_resched();
 808	}
 809	return 0;
 810}
 811
 812/*
 813 * add all currently queued delayed refs from this head whose seq nr is
 814 * smaller or equal that seq to the list
 815 */
 816static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
 817			    struct btrfs_delayed_ref_head *head, u64 seq,
 818			    struct preftrees *preftrees, struct share_check *sc)
 819{
 820	struct btrfs_delayed_ref_node *node;
 821	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
 822	struct btrfs_key key;
 823	struct btrfs_key tmp_op_key;
 824	struct rb_node *n;
 825	int count;
 826	int ret = 0;
 827
 828	if (extent_op && extent_op->update_key)
 829		btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
 830
 831	spin_lock(&head->lock);
 832	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
 833		node = rb_entry(n, struct btrfs_delayed_ref_node,
 834				ref_node);
 835		if (node->seq > seq)
 836			continue;
 837
 838		switch (node->action) {
 839		case BTRFS_ADD_DELAYED_EXTENT:
 840		case BTRFS_UPDATE_DELAYED_HEAD:
 841			WARN_ON(1);
 842			continue;
 843		case BTRFS_ADD_DELAYED_REF:
 844			count = node->ref_mod;
 845			break;
 846		case BTRFS_DROP_DELAYED_REF:
 847			count = node->ref_mod * -1;
 848			break;
 849		default:
 850			BUG();
 851		}
 852		switch (node->type) {
 853		case BTRFS_TREE_BLOCK_REF_KEY: {
 854			/* NORMAL INDIRECT METADATA backref */
 855			struct btrfs_delayed_tree_ref *ref;
 
 
 
 
 
 
 
 856
 857			ref = btrfs_delayed_node_to_tree_ref(node);
 858			ret = add_indirect_ref(fs_info, preftrees, ref->root,
 859					       &tmp_op_key, ref->level + 1,
 860					       node->bytenr, count, sc,
 861					       GFP_ATOMIC);
 862			break;
 863		}
 864		case BTRFS_SHARED_BLOCK_REF_KEY: {
 865			/* SHARED DIRECT METADATA backref */
 866			struct btrfs_delayed_tree_ref *ref;
 867
 868			ref = btrfs_delayed_node_to_tree_ref(node);
 
 
 869
 870			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
 871					     ref->parent, node->bytenr, count,
 872					     sc, GFP_ATOMIC);
 873			break;
 874		}
 875		case BTRFS_EXTENT_DATA_REF_KEY: {
 876			/* NORMAL INDIRECT DATA backref */
 877			struct btrfs_delayed_data_ref *ref;
 878			ref = btrfs_delayed_node_to_data_ref(node);
 879
 880			key.objectid = ref->objectid;
 881			key.type = BTRFS_EXTENT_DATA_KEY;
 882			key.offset = ref->offset;
 883
 884			/*
 885			 * Found a inum that doesn't match our known inum, we
 886			 * know it's shared.
 
 
 
 
 
 
 
 
 
 
 
 887			 */
 888			if (sc && sc->inum && ref->objectid != sc->inum) {
 889				ret = BACKREF_FOUND_SHARED;
 890				goto out;
 891			}
 892
 893			ret = add_indirect_ref(fs_info, preftrees, ref->root,
 894					       &key, 0, node->bytenr, count, sc,
 895					       GFP_ATOMIC);
 896			break;
 897		}
 898		case BTRFS_SHARED_DATA_REF_KEY: {
 899			/* SHARED DIRECT FULL backref */
 900			struct btrfs_delayed_data_ref *ref;
 901
 902			ref = btrfs_delayed_node_to_data_ref(node);
 903
 904			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
 905					     node->bytenr, count, sc,
 906					     GFP_ATOMIC);
 907			break;
 908		}
 909		default:
 910			WARN_ON(1);
 911		}
 912		/*
 913		 * We must ignore BACKREF_FOUND_SHARED until all delayed
 914		 * refs have been checked.
 915		 */
 916		if (ret && (ret != BACKREF_FOUND_SHARED))
 917			break;
 918	}
 919	if (!ret)
 920		ret = extent_is_shared(sc);
 921out:
 922	spin_unlock(&head->lock);
 923	return ret;
 924}
 925
 926/*
 927 * add all inline backrefs for bytenr to the list
 928 *
 929 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 930 */
 931static int add_inline_refs(const struct btrfs_fs_info *fs_info,
 932			   struct btrfs_path *path, u64 bytenr,
 933			   int *info_level, struct preftrees *preftrees,
 934			   struct share_check *sc)
 935{
 936	int ret = 0;
 937	int slot;
 938	struct extent_buffer *leaf;
 939	struct btrfs_key key;
 940	struct btrfs_key found_key;
 941	unsigned long ptr;
 942	unsigned long end;
 943	struct btrfs_extent_item *ei;
 944	u64 flags;
 945	u64 item_size;
 946
 947	/*
 948	 * enumerate all inline refs
 949	 */
 950	leaf = path->nodes[0];
 951	slot = path->slots[0];
 952
 953	item_size = btrfs_item_size_nr(leaf, slot);
 954	BUG_ON(item_size < sizeof(*ei));
 
 
 
 
 
 
 955
 956	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
 957	flags = btrfs_extent_flags(leaf, ei);
 958	btrfs_item_key_to_cpu(leaf, &found_key, slot);
 959
 960	ptr = (unsigned long)(ei + 1);
 961	end = (unsigned long)ei + item_size;
 962
 963	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
 964	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
 965		struct btrfs_tree_block_info *info;
 966
 967		info = (struct btrfs_tree_block_info *)ptr;
 968		*info_level = btrfs_tree_block_level(leaf, info);
 969		ptr += sizeof(struct btrfs_tree_block_info);
 970		BUG_ON(ptr > end);
 971	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
 972		*info_level = found_key.offset;
 973	} else {
 974		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
 975	}
 976
 977	while (ptr < end) {
 978		struct btrfs_extent_inline_ref *iref;
 979		u64 offset;
 980		int type;
 981
 982		iref = (struct btrfs_extent_inline_ref *)ptr;
 983		type = btrfs_get_extent_inline_ref_type(leaf, iref,
 984							BTRFS_REF_TYPE_ANY);
 985		if (type == BTRFS_REF_TYPE_INVALID)
 986			return -EUCLEAN;
 987
 988		offset = btrfs_extent_inline_ref_offset(leaf, iref);
 989
 990		switch (type) {
 991		case BTRFS_SHARED_BLOCK_REF_KEY:
 992			ret = add_direct_ref(fs_info, preftrees,
 993					     *info_level + 1, offset,
 994					     bytenr, 1, NULL, GFP_NOFS);
 995			break;
 996		case BTRFS_SHARED_DATA_REF_KEY: {
 997			struct btrfs_shared_data_ref *sdref;
 998			int count;
 999
1000			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
1001			count = btrfs_shared_data_ref_count(leaf, sdref);
1002
1003			ret = add_direct_ref(fs_info, preftrees, 0, offset,
1004					     bytenr, count, sc, GFP_NOFS);
1005			break;
1006		}
1007		case BTRFS_TREE_BLOCK_REF_KEY:
1008			ret = add_indirect_ref(fs_info, preftrees, offset,
1009					       NULL, *info_level + 1,
1010					       bytenr, 1, NULL, GFP_NOFS);
1011			break;
1012		case BTRFS_EXTENT_DATA_REF_KEY: {
1013			struct btrfs_extent_data_ref *dref;
1014			int count;
1015			u64 root;
1016
1017			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1018			count = btrfs_extent_data_ref_count(leaf, dref);
1019			key.objectid = btrfs_extent_data_ref_objectid(leaf,
1020								      dref);
1021			key.type = BTRFS_EXTENT_DATA_KEY;
1022			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
1023
1024			if (sc && sc->inum && key.objectid != sc->inum) {
 
1025				ret = BACKREF_FOUND_SHARED;
1026				break;
1027			}
1028
1029			root = btrfs_extent_data_ref_root(leaf, dref);
1030
1031			ret = add_indirect_ref(fs_info, preftrees, root,
1032					       &key, 0, bytenr, count,
1033					       sc, GFP_NOFS);
 
 
 
1034			break;
1035		}
 
 
 
1036		default:
1037			WARN_ON(1);
1038		}
1039		if (ret)
1040			return ret;
1041		ptr += btrfs_extent_inline_ref_size(type);
1042	}
1043
1044	return 0;
1045}
1046
1047/*
1048 * add all non-inline backrefs for bytenr to the list
1049 *
1050 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
1051 */
1052static int add_keyed_refs(struct btrfs_fs_info *fs_info,
1053			  struct btrfs_path *path, u64 bytenr,
 
1054			  int info_level, struct preftrees *preftrees,
1055			  struct share_check *sc)
1056{
1057	struct btrfs_root *extent_root = fs_info->extent_root;
1058	int ret;
1059	int slot;
1060	struct extent_buffer *leaf;
1061	struct btrfs_key key;
1062
1063	while (1) {
1064		ret = btrfs_next_item(extent_root, path);
1065		if (ret < 0)
1066			break;
1067		if (ret) {
1068			ret = 0;
1069			break;
1070		}
1071
1072		slot = path->slots[0];
1073		leaf = path->nodes[0];
1074		btrfs_item_key_to_cpu(leaf, &key, slot);
1075
1076		if (key.objectid != bytenr)
1077			break;
1078		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
1079			continue;
1080		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
1081			break;
1082
1083		switch (key.type) {
1084		case BTRFS_SHARED_BLOCK_REF_KEY:
1085			/* SHARED DIRECT METADATA backref */
1086			ret = add_direct_ref(fs_info, preftrees,
1087					     info_level + 1, key.offset,
1088					     bytenr, 1, NULL, GFP_NOFS);
1089			break;
1090		case BTRFS_SHARED_DATA_REF_KEY: {
1091			/* SHARED DIRECT FULL backref */
1092			struct btrfs_shared_data_ref *sdref;
1093			int count;
1094
1095			sdref = btrfs_item_ptr(leaf, slot,
1096					      struct btrfs_shared_data_ref);
1097			count = btrfs_shared_data_ref_count(leaf, sdref);
1098			ret = add_direct_ref(fs_info, preftrees, 0,
1099					     key.offset, bytenr, count,
1100					     sc, GFP_NOFS);
1101			break;
1102		}
1103		case BTRFS_TREE_BLOCK_REF_KEY:
1104			/* NORMAL INDIRECT METADATA backref */
1105			ret = add_indirect_ref(fs_info, preftrees, key.offset,
1106					       NULL, info_level + 1, bytenr,
1107					       1, NULL, GFP_NOFS);
1108			break;
1109		case BTRFS_EXTENT_DATA_REF_KEY: {
1110			/* NORMAL INDIRECT DATA backref */
1111			struct btrfs_extent_data_ref *dref;
1112			int count;
1113			u64 root;
1114
1115			dref = btrfs_item_ptr(leaf, slot,
1116					      struct btrfs_extent_data_ref);
1117			count = btrfs_extent_data_ref_count(leaf, dref);
1118			key.objectid = btrfs_extent_data_ref_objectid(leaf,
1119								      dref);
1120			key.type = BTRFS_EXTENT_DATA_KEY;
1121			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
1122
1123			if (sc && sc->inum && key.objectid != sc->inum) {
 
1124				ret = BACKREF_FOUND_SHARED;
1125				break;
1126			}
1127
1128			root = btrfs_extent_data_ref_root(leaf, dref);
1129			ret = add_indirect_ref(fs_info, preftrees, root,
1130					       &key, 0, bytenr, count,
1131					       sc, GFP_NOFS);
 
 
 
 
1132			break;
1133		}
1134		default:
1135			WARN_ON(1);
1136		}
1137		if (ret)
1138			return ret;
1139
1140	}
1141
1142	return ret;
1143}
1144
1145/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1146 * this adds all existing backrefs (inline backrefs, backrefs and delayed
1147 * refs) for the given bytenr to the refs list, merges duplicates and resolves
1148 * indirect refs to their parent bytenr.
1149 * When roots are found, they're added to the roots list
1150 *
1151 * If time_seq is set to BTRFS_SEQ_LAST, it will not search delayed_refs, and
1152 * behave much like trans == NULL case, the difference only lies in it will not
1153 * commit root.
1154 * The special case is for qgroup to search roots in commit_transaction().
1155 *
1156 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
1157 * shared extent is detected.
1158 *
1159 * Otherwise this returns 0 for success and <0 for an error.
1160 *
1161 * If ignore_offset is set to false, only extent refs whose offsets match
1162 * extent_item_pos are returned.  If true, every extent ref is returned
1163 * and extent_item_pos is ignored.
1164 *
1165 * FIXME some caching might speed things up
1166 */
1167static int find_parent_nodes(struct btrfs_trans_handle *trans,
1168			     struct btrfs_fs_info *fs_info, u64 bytenr,
1169			     u64 time_seq, struct ulist *refs,
1170			     struct ulist *roots, const u64 *extent_item_pos,
1171			     struct share_check *sc, bool ignore_offset)
1172{
 
1173	struct btrfs_key key;
1174	struct btrfs_path *path;
1175	struct btrfs_delayed_ref_root *delayed_refs = NULL;
1176	struct btrfs_delayed_ref_head *head;
1177	int info_level = 0;
1178	int ret;
1179	struct prelim_ref *ref;
1180	struct rb_node *node;
1181	struct extent_inode_elem *eie = NULL;
1182	struct preftrees preftrees = {
1183		.direct = PREFTREE_INIT,
1184		.indirect = PREFTREE_INIT,
1185		.indirect_missing_keys = PREFTREE_INIT
1186	};
1187
1188	key.objectid = bytenr;
 
 
 
 
1189	key.offset = (u64)-1;
1190	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1191		key.type = BTRFS_METADATA_ITEM_KEY;
1192	else
1193		key.type = BTRFS_EXTENT_ITEM_KEY;
1194
1195	path = btrfs_alloc_path();
1196	if (!path)
1197		return -ENOMEM;
1198	if (!trans) {
1199		path->search_commit_root = 1;
1200		path->skip_locking = 1;
1201	}
1202
1203	if (time_seq == BTRFS_SEQ_LAST)
1204		path->skip_locking = 1;
1205
1206	/*
1207	 * grab both a lock on the path and a lock on the delayed ref head.
1208	 * We need both to get a consistent picture of how the refs look
1209	 * at a specified point in time
1210	 */
1211again:
1212	head = NULL;
1213
1214	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
1215	if (ret < 0)
1216		goto out;
1217	BUG_ON(ret == 0);
 
 
 
 
 
 
 
1218
1219#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1220	if (trans && likely(trans->type != __TRANS_DUMMY) &&
1221	    time_seq != BTRFS_SEQ_LAST) {
1222#else
1223	if (trans && time_seq != BTRFS_SEQ_LAST) {
1224#endif
1225		/*
1226		 * look if there are updates for this ref queued and lock the
1227		 * head
 
 
1228		 */
1229		delayed_refs = &trans->transaction->delayed_refs;
1230		spin_lock(&delayed_refs->lock);
1231		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
 
1232		if (head) {
1233			if (!mutex_trylock(&head->mutex)) {
1234				refcount_inc(&head->refs);
1235				spin_unlock(&delayed_refs->lock);
1236
1237				btrfs_release_path(path);
1238
1239				/*
1240				 * Mutex was contended, block until it's
1241				 * released and try again
1242				 */
1243				mutex_lock(&head->mutex);
1244				mutex_unlock(&head->mutex);
1245				btrfs_put_delayed_ref_head(head);
1246				goto again;
1247			}
1248			spin_unlock(&delayed_refs->lock);
1249			ret = add_delayed_refs(fs_info, head, time_seq,
1250					       &preftrees, sc);
1251			mutex_unlock(&head->mutex);
1252			if (ret)
1253				goto out;
1254		} else {
1255			spin_unlock(&delayed_refs->lock);
1256		}
1257	}
1258
1259	if (path->slots[0]) {
1260		struct extent_buffer *leaf;
1261		int slot;
1262
1263		path->slots[0]--;
1264		leaf = path->nodes[0];
1265		slot = path->slots[0];
1266		btrfs_item_key_to_cpu(leaf, &key, slot);
1267		if (key.objectid == bytenr &&
1268		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
1269		     key.type == BTRFS_METADATA_ITEM_KEY)) {
1270			ret = add_inline_refs(fs_info, path, bytenr,
1271					      &info_level, &preftrees, sc);
1272			if (ret)
1273				goto out;
1274			ret = add_keyed_refs(fs_info, path, bytenr, info_level,
1275					     &preftrees, sc);
1276			if (ret)
1277				goto out;
1278		}
1279	}
1280
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1281	btrfs_release_path(path);
1282
1283	ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
1284	if (ret)
1285		goto out;
1286
1287	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));
1288
1289	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
1290				    extent_item_pos, sc, ignore_offset);
1291	if (ret)
1292		goto out;
1293
1294	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));
1295
1296	/*
1297	 * This walks the tree of merged and resolved refs. Tree blocks are
1298	 * read in as needed. Unique entries are added to the ulist, and
1299	 * the list of found roots is updated.
1300	 *
1301	 * We release the entire tree in one go before returning.
1302	 */
1303	node = rb_first_cached(&preftrees.direct.root);
1304	while (node) {
1305		ref = rb_entry(node, struct prelim_ref, rbnode);
1306		node = rb_next(&ref->rbnode);
1307		/*
1308		 * ref->count < 0 can happen here if there are delayed
1309		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
1310		 * prelim_ref_insert() relies on this when merging
1311		 * identical refs to keep the overall count correct.
1312		 * prelim_ref_insert() will merge only those refs
1313		 * which compare identically.  Any refs having
1314		 * e.g. different offsets would not be merged,
1315		 * and would retain their original ref->count < 0.
1316		 */
1317		if (roots && ref->count && ref->root_id && ref->parent == 0) {
1318			if (sc && sc->root_objectid &&
1319			    ref->root_id != sc->root_objectid) {
1320				ret = BACKREF_FOUND_SHARED;
1321				goto out;
1322			}
1323
1324			/* no parent == root of tree */
1325			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
1326			if (ret < 0)
1327				goto out;
1328		}
1329		if (ref->count && ref->parent) {
1330			if (extent_item_pos && !ref->inode_list &&
1331			    ref->level == 0) {
 
1332				struct extent_buffer *eb;
1333
1334				eb = read_tree_block(fs_info, ref->parent, 0,
1335						     0, ref->level, NULL);
 
 
1336				if (IS_ERR(eb)) {
1337					ret = PTR_ERR(eb);
1338					goto out;
1339				} else if (!extent_buffer_uptodate(eb)) {
 
1340					free_extent_buffer(eb);
1341					ret = -EIO;
1342					goto out;
1343				}
1344
1345				if (!path->skip_locking)
1346					btrfs_tree_read_lock(eb);
1347				ret = find_extent_in_eb(eb, bytenr,
1348							*extent_item_pos, &eie, ignore_offset);
1349				if (!path->skip_locking)
1350					btrfs_tree_read_unlock(eb);
1351				free_extent_buffer(eb);
1352				if (ret < 0)
 
1353					goto out;
1354				ref->inode_list = eie;
 
 
 
 
 
 
1355			}
1356			ret = ulist_add_merge_ptr(refs, ref->parent,
1357						  ref->inode_list,
1358						  (void **)&eie, GFP_NOFS);
1359			if (ret < 0)
1360				goto out;
1361			if (!ret && extent_item_pos) {
1362				/*
1363				 * we've recorded that parent, so we must extend
1364				 * its inode list here
 
 
 
 
1365				 */
1366				BUG_ON(!eie);
 
 
 
 
1367				while (eie->next)
1368					eie = eie->next;
1369				eie->next = ref->inode_list;
1370			}
1371			eie = NULL;
 
 
 
 
 
 
 
 
1372		}
1373		cond_resched();
1374	}
1375
1376out:
1377	btrfs_free_path(path);
1378
1379	prelim_release(&preftrees.direct);
1380	prelim_release(&preftrees.indirect);
1381	prelim_release(&preftrees.indirect_missing_keys);
1382
1383	if (ret < 0)
1384		free_inode_elem_list(eie);
1385	return ret;
1386}
1387
1388static void free_leaf_list(struct ulist *blocks)
1389{
1390	struct ulist_node *node = NULL;
1391	struct extent_inode_elem *eie;
1392	struct ulist_iterator uiter;
1393
1394	ULIST_ITER_INIT(&uiter);
1395	while ((node = ulist_next(blocks, &uiter))) {
1396		if (!node->aux)
1397			continue;
1398		eie = unode_aux_to_inode_list(node);
1399		free_inode_elem_list(eie);
1400		node->aux = 0;
1401	}
1402
1403	ulist_free(blocks);
1404}
1405
1406/*
1407 * Finds all leafs with a reference to the specified combination of bytenr and
1408 * offset. key_list_head will point to a list of corresponding keys (caller must
1409 * free each list element). The leafs will be stored in the leafs ulist, which
1410 * must be freed with ulist_free.
 
 
1411 *
1412 * returns 0 on success, <0 on error
1413 */
1414int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
1415			 struct btrfs_fs_info *fs_info, u64 bytenr,
1416			 u64 time_seq, struct ulist **leafs,
1417			 const u64 *extent_item_pos, bool ignore_offset)
1418{
1419	int ret;
1420
1421	*leafs = ulist_alloc(GFP_NOFS);
1422	if (!*leafs)
 
 
1423		return -ENOMEM;
1424
1425	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1426				*leafs, NULL, extent_item_pos, NULL, ignore_offset);
1427	if (ret < 0 && ret != -ENOENT) {
1428		free_leaf_list(*leafs);
 
1429		return ret;
1430	}
1431
1432	return 0;
1433}
1434
1435/*
1436 * walk all backrefs for a given extent to find all roots that reference this
1437 * extent. Walking a backref means finding all extents that reference this
1438 * extent and in turn walk the backrefs of those, too. Naturally this is a
1439 * recursive process, but here it is implemented in an iterative fashion: We
1440 * find all referencing extents for the extent in question and put them on a
1441 * list. In turn, we find all referencing extents for those, further appending
1442 * to the list. The way we iterate the list allows adding more elements after
1443 * the current while iterating. The process stops when we reach the end of the
1444 * list. Found roots are added to the roots list.
1445 *
1446 * returns 0 on success, < 0 on error.
 
 
 
 
 
 
1447 */
1448static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
1449				     struct btrfs_fs_info *fs_info, u64 bytenr,
1450				     u64 time_seq, struct ulist **roots,
1451				     bool ignore_offset)
1452{
1453	struct ulist *tmp;
1454	struct ulist_node *node = NULL;
 
1455	struct ulist_iterator uiter;
1456	int ret;
 
 
1457
1458	tmp = ulist_alloc(GFP_NOFS);
1459	if (!tmp)
1460		return -ENOMEM;
1461	*roots = ulist_alloc(GFP_NOFS);
1462	if (!*roots) {
1463		ulist_free(tmp);
1464		return -ENOMEM;
 
 
 
 
 
 
 
 
 
1465	}
1466
 
 
1467	ULIST_ITER_INIT(&uiter);
1468	while (1) {
1469		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1470					tmp, *roots, NULL, NULL, ignore_offset);
 
1471		if (ret < 0 && ret != -ENOENT) {
1472			ulist_free(tmp);
1473			ulist_free(*roots);
1474			*roots = NULL;
1475			return ret;
 
1476		}
1477		node = ulist_next(tmp, &uiter);
 
1478		if (!node)
1479			break;
1480		bytenr = node->val;
1481		cond_resched();
1482	}
1483
1484	ulist_free(tmp);
1485	return 0;
 
 
 
 
1486}
1487
1488int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1489			 struct btrfs_fs_info *fs_info, u64 bytenr,
1490			 u64 time_seq, struct ulist **roots,
1491			 bool ignore_offset, bool skip_commit_root_sem)
1492{
1493	int ret;
1494
1495	if (!trans && !skip_commit_root_sem)
1496		down_read(&fs_info->commit_root_sem);
1497	ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
1498					time_seq, roots, ignore_offset);
1499	if (!trans && !skip_commit_root_sem)
1500		up_read(&fs_info->commit_root_sem);
1501	return ret;
1502}
1503
1504/**
1505 * Check if an extent is shared or not
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1506 *
1507 * @root:   root inode belongs to
1508 * @inum:   inode number of the inode whose extent we are checking
1509 * @bytenr: logical bytenr of the extent we are checking
1510 * @roots:  list of roots this extent is shared among
1511 * @tmp:    temporary list used for iteration
1512 *
1513 * btrfs_check_shared uses the backref walking code but will short
1514 * circuit as soon as it finds a root or inode that doesn't match the
1515 * one passed in. This provides a significant performance benefit for
1516 * callers (such as fiemap) which want to know whether the extent is
1517 * shared but do not need a ref count.
1518 *
1519 * This attempts to attach to the running transaction in order to account for
1520 * delayed refs, but continues on even when no running transaction exists.
1521 *
1522 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
1523 */
1524int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
1525		struct ulist *roots, struct ulist *tmp)
 
1526{
 
 
1527	struct btrfs_fs_info *fs_info = root->fs_info;
1528	struct btrfs_trans_handle *trans;
1529	struct ulist_iterator uiter;
1530	struct ulist_node *node;
1531	struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
1532	int ret = 0;
1533	struct share_check shared = {
1534		.root_objectid = root->root_key.objectid,
1535		.inum = inum,
 
 
 
1536		.share_count = 0,
 
 
1537	};
 
 
 
 
 
 
 
 
1538
1539	ulist_init(roots);
1540	ulist_init(tmp);
1541
1542	trans = btrfs_join_transaction_nostart(root);
1543	if (IS_ERR(trans)) {
1544		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1545			ret = PTR_ERR(trans);
1546			goto out;
1547		}
1548		trans = NULL;
1549		down_read(&fs_info->commit_root_sem);
1550	} else {
1551		btrfs_get_tree_mod_seq(fs_info, &elem);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1552	}
1553
 
 
 
 
 
 
 
1554	ULIST_ITER_INIT(&uiter);
1555	while (1) {
1556		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
1557					roots, NULL, &shared, false);
1558		if (ret == BACKREF_FOUND_SHARED) {
1559			/* this is the only condition under which we return 1 */
1560			ret = 1;
 
 
 
 
 
 
1561			break;
1562		}
1563		if (ret < 0 && ret != -ENOENT)
1564			break;
1565		ret = 0;
1566		node = ulist_next(tmp, &uiter);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1567		if (!node)
1568			break;
1569		bytenr = node->val;
 
 
 
 
 
 
 
 
 
 
 
 
1570		shared.share_count = 0;
 
1571		cond_resched();
1572	}
1573
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1574	if (trans) {
1575		btrfs_put_tree_mod_seq(fs_info, &elem);
1576		btrfs_end_transaction(trans);
1577	} else {
1578		up_read(&fs_info->commit_root_sem);
1579	}
1580out:
1581	ulist_release(roots);
1582	ulist_release(tmp);
 
1583	return ret;
1584}
1585
1586int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
1587			  u64 start_off, struct btrfs_path *path,
1588			  struct btrfs_inode_extref **ret_extref,
1589			  u64 *found_off)
1590{
1591	int ret, slot;
1592	struct btrfs_key key;
1593	struct btrfs_key found_key;
1594	struct btrfs_inode_extref *extref;
1595	const struct extent_buffer *leaf;
1596	unsigned long ptr;
1597
1598	key.objectid = inode_objectid;
1599	key.type = BTRFS_INODE_EXTREF_KEY;
1600	key.offset = start_off;
1601
1602	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1603	if (ret < 0)
1604		return ret;
1605
1606	while (1) {
1607		leaf = path->nodes[0];
1608		slot = path->slots[0];
1609		if (slot >= btrfs_header_nritems(leaf)) {
1610			/*
1611			 * If the item at offset is not found,
1612			 * btrfs_search_slot will point us to the slot
1613			 * where it should be inserted. In our case
1614			 * that will be the slot directly before the
1615			 * next INODE_EXTREF_KEY item. In the case
1616			 * that we're pointing to the last slot in a
1617			 * leaf, we must move one leaf over.
1618			 */
1619			ret = btrfs_next_leaf(root, path);
1620			if (ret) {
1621				if (ret >= 1)
1622					ret = -ENOENT;
1623				break;
1624			}
1625			continue;
1626		}
1627
1628		btrfs_item_key_to_cpu(leaf, &found_key, slot);
1629
1630		/*
1631		 * Check that we're still looking at an extended ref key for
1632		 * this particular objectid. If we have different
1633		 * objectid or type then there are no more to be found
1634		 * in the tree and we can exit.
1635		 */
1636		ret = -ENOENT;
1637		if (found_key.objectid != inode_objectid)
1638			break;
1639		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
1640			break;
1641
1642		ret = 0;
1643		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1644		extref = (struct btrfs_inode_extref *)ptr;
1645		*ret_extref = extref;
1646		if (found_off)
1647			*found_off = found_key.offset;
1648		break;
1649	}
1650
1651	return ret;
1652}
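
/*
 * Sketch of walking all extended refs of an inode with this helper,
 * mirroring what iterate_inode_extrefs() below does (error handling
 * elided):
 *
 *	u64 offset = 0;
 *
 *	while (btrfs_find_one_extref(root, inum, offset, path,
 *				     &extref, &offset) == 0) {
 *		... process extref ...
 *		btrfs_release_path(path);
 *		offset++;
 *	}
 */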
1653
1654/*
1655 * This iterates to turn a name (from iref/extref) into a full filesystem
1656 * path. Elements of the path are separated by '/' and the path is
1657 * guaranteed to be 0-terminated. The path is only given within the current
1658 * file system, therefore it never starts with a '/'. The caller is
1659 * responsible for providing "size" bytes in "dest". The dest buffer is
1660 * filled backwards and the start pointer of the resulting string is
1661 * returned; normally, this pointer is within dest.
1662 * In case the path buffer would overflow, the pointer is decremented
1663 * further as if output had been written to the buffer, though no more
1664 * output is actually generated. That way, the caller can determine how much
1665 * space would be required for the path to fit into the buffer. In that
1666 * case, the returned value will be smaller than dest. Callers must check this!
1667 */
1668char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1669			u32 name_len, unsigned long name_off,
1670			struct extent_buffer *eb_in, u64 parent,
1671			char *dest, u32 size)
1672{
1673	int slot;
1674	u64 next_inum;
1675	int ret;
1676	s64 bytes_left = ((s64)size) - 1;
1677	struct extent_buffer *eb = eb_in;
1678	struct btrfs_key found_key;
1679	struct btrfs_inode_ref *iref;
1680
1681	if (bytes_left >= 0)
1682		dest[bytes_left] = '\0';
1683
1684	while (1) {
1685		bytes_left -= name_len;
1686		if (bytes_left >= 0)
1687			read_extent_buffer(eb, dest + bytes_left,
1688					   name_off, name_len);
1689		if (eb != eb_in) {
1690			if (!path->skip_locking)
1691				btrfs_tree_read_unlock(eb);
1692			free_extent_buffer(eb);
1693		}
1694		ret = btrfs_find_item(fs_root, path, parent, 0,
1695				BTRFS_INODE_REF_KEY, &found_key);
1696		if (ret > 0)
1697			ret = -ENOENT;
1698		if (ret)
1699			break;
1700
1701		next_inum = found_key.offset;
1702
1703		/* regular exit ahead */
1704		if (parent == next_inum)
1705			break;
1706
1707		slot = path->slots[0];
1708		eb = path->nodes[0];
1709		/* make sure we can use eb after releasing the path */
1710		if (eb != eb_in) {
1711			path->nodes[0] = NULL;
1712			path->locks[0] = 0;
1713		}
1714		btrfs_release_path(path);
1715		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1716
1717		name_len = btrfs_inode_ref_name_len(eb, iref);
1718		name_off = (unsigned long)(iref + 1);
1719
1720		parent = next_inum;
1721		--bytes_left;
1722		if (bytes_left >= 0)
1723			dest[bytes_left] = '/';
1724	}
1725
1726	btrfs_release_path(path);
1727
1728	if (ret)
1729		return ERR_PTR(ret);
1730
1731	return dest + bytes_left;
1732}
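
/*
 * Sketch of the overflow check callers are expected to do (compare
 * inode_to_path() below): a returned pointer below 'dest' means the
 * path did not fit and (dest + size) - fspath bytes would be needed:
 *
 *	fspath = btrfs_ref_to_path(fs_root, path, name_len, name_off,
 *				   eb, parent, dest, size);
 *	if (!IS_ERR(fspath) && fspath < dest)
 *		bytes_needed = dest + size - fspath;
 */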
1733
1734/*
1735 * This makes the path point to (logical EXTENT_ITEM *).
1736 * Returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
1737 * tree blocks and <0 on error.
1738 */
1739int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1740			struct btrfs_path *path, struct btrfs_key *found_key,
1741			u64 *flags_ret)
1742{
1743	int ret;
1744	u64 flags;
1745	u64 size = 0;
1746	u32 item_size;
1747	const struct extent_buffer *eb;
1748	struct btrfs_extent_item *ei;
1749	struct btrfs_key key;
1750
1751	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1752		key.type = BTRFS_METADATA_ITEM_KEY;
1753	else
1754		key.type = BTRFS_EXTENT_ITEM_KEY;
1755	key.objectid = logical;
1756	key.offset = (u64)-1;
1757
1758	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1759	if (ret < 0)
1760		return ret;
1761
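	/*
	 * We searched with the largest offset for this objectid, so the
	 * slot now points just past the last extent item that could cover
	 * 'logical'; step back to it.
	 */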
1762	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
1763	if (ret) {
1764		if (ret > 0)
1765			ret = -ENOENT;
1766		return ret;
1767	}
1768	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1769	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1770		size = fs_info->nodesize;
1771	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1772		size = found_key->offset;
1773
1774	if (found_key->objectid > logical ||
1775	    found_key->objectid + size <= logical) {
1776		btrfs_debug(fs_info,
1777			"logical %llu is not within any extent", logical);
1778		return -ENOENT;
1779	}
1780
1781	eb = path->nodes[0];
1782	item_size = btrfs_item_size_nr(eb, path->slots[0]);
1783	BUG_ON(item_size < sizeof(*ei));
1784
1785	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1786	flags = btrfs_extent_flags(eb, ei);
1787
1788	btrfs_debug(fs_info,
1789		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
1790		 logical, logical - found_key->objectid, found_key->objectid,
1791		 found_key->offset, flags, item_size);
1792
1793	WARN_ON(!flags_ret);
1794	if (flags_ret) {
1795		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1796			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
1797		else if (flags & BTRFS_EXTENT_FLAG_DATA)
1798			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
1799		else
1800			BUG();
1801		return 0;
1802	}
1803
1804	return -EIO;
1805}
1806
1807/*
1808 * Helper function to iterate extent inline refs. ptr must point to a 0 value
1809 * for the first call and may be modified; it is used to track state.
1810 * If more refs exist, 0 is returned and the next call to
1811 * get_extent_inline_ref must pass the modified ptr parameter to get the
1812 * next ref. After the last ref has been processed, 1 is returned.
1813 * Returns <0 on error.
1814 */
1815static int get_extent_inline_ref(unsigned long *ptr,
1816				 const struct extent_buffer *eb,
1817				 const struct btrfs_key *key,
1818				 const struct btrfs_extent_item *ei,
1819				 u32 item_size,
1820				 struct btrfs_extent_inline_ref **out_eiref,
1821				 int *out_type)
1822{
1823	unsigned long end;
1824	u64 flags;
1825	struct btrfs_tree_block_info *info;
1826
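	/*
	 * Layout of the extent item body we are walking; for a non-skinny
	 * tree block a btrfs_tree_block_info sits between the extent item
	 * and the first inline ref:
	 *
	 *	[btrfs_extent_item][btrfs_tree_block_info?][inline refs...]
	 */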
1827	if (!*ptr) {
1828		/* first call */
1829		flags = btrfs_extent_flags(eb, ei);
1830		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1831			if (key->type == BTRFS_METADATA_ITEM_KEY) {
1832				/* a skinny metadata extent */
1833				*out_eiref =
1834				     (struct btrfs_extent_inline_ref *)(ei + 1);
1835			} else {
1836				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1837				info = (struct btrfs_tree_block_info *)(ei + 1);
1838				*out_eiref =
1839				   (struct btrfs_extent_inline_ref *)(info + 1);
1840			}
1841		} else {
1842			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
1843		}
1844		*ptr = (unsigned long)*out_eiref;
1845		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
1846			return -ENOENT;
1847	}
1848
1849	end = (unsigned long)ei + item_size;
1850	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
1851	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
1852						     BTRFS_REF_TYPE_ANY);
1853	if (*out_type == BTRFS_REF_TYPE_INVALID)
1854		return -EUCLEAN;
1855
1856	*ptr += btrfs_extent_inline_ref_size(*out_type);
1857	WARN_ON(*ptr > end);
1858	if (*ptr == end)
1859		return 1; /* last */
1860
1861	return 0;
1862}
1863
1864/*
1865 * Reads the tree block backref for an extent. The tree level and root are
1866 * returned through out_level and out_root. ptr must point to a 0 value for
1867 * the first call and may be modified (see the get_extent_inline_ref comment).
1868 * Returns 0 if data was provided, 1 if there was no more data to provide or
1869 * <0 on error.
1870 */
1871int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1872			    struct btrfs_key *key, struct btrfs_extent_item *ei,
1873			    u32 item_size, u64 *out_root, u8 *out_level)
1874{
1875	int ret;
1876	int type;
1877	struct btrfs_extent_inline_ref *eiref;
1878
1879	if (*ptr == (unsigned long)-1)
1880		return 1;
1881
1882	while (1) {
1883		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
1884					      &eiref, &type);
1885		if (ret < 0)
1886			return ret;
1887
1888		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1889		    type == BTRFS_SHARED_BLOCK_REF_KEY)
1890			break;
1891
1892		if (ret == 1)
1893			return 1;
1894	}
1895
1896	/* we can treat both ref types equally here */
1897	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
1898
1899	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
1900		struct btrfs_tree_block_info *info;
1901
1902		info = (struct btrfs_tree_block_info *)(ei + 1);
1903		*out_level = btrfs_tree_block_level(eb, info);
1904	} else {
1905		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
1906		*out_level = (u8)key->offset;
1907	}
1908
1909	if (ret == 1)
1910		*ptr = (unsigned long)-1;
1911
1912	return 0;
1913}
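
/*
 * Sketch of the intended call pattern (scrub-style callers do this),
 * assuming 'eb', 'key', 'ei' and 'item_size' describe an already
 * located extent item:
 *
 *	unsigned long ptr = 0;
 *	u64 root;
 *	u8 level;
 *
 *	while (tree_backref_for_extent(&ptr, eb, &key, ei, item_size,
 *				       &root, &level) == 0) {
 *		... one tree backref resolved to (root, level) ...
 *	}
 */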
1914
1915static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
1916			     struct extent_inode_elem *inode_list,
1917			     u64 root, u64 extent_item_objectid,
1918			     iterate_extent_inodes_t *iterate, void *ctx)
1919{
1920	struct extent_inode_elem *eie;
1921	int ret = 0;
1922
1923	for (eie = inode_list; eie; eie = eie->next) {
1924		btrfs_debug(fs_info,
1925			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
1926			    extent_item_objectid, eie->inum,
1927			    eie->offset, root);
1928		ret = iterate(eie->inum, eie->offset, root, ctx);
1929		if (ret) {
1930			btrfs_debug(fs_info,
1931				    "stopping iteration for %llu due to ret=%d",
1932				    extent_item_objectid, ret);
1933			break;
1934		}
1935	}
1936
1937	return ret;
1938}
1939
1940/*
1941 * Calls iterate() for every inode that references the extent identified by
1942 * the given parameters.
1943 * When the iterator function returns a non-zero value, iteration stops.
1944 */
1945int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1946				u64 extent_item_objectid, u64 extent_item_pos,
1947				int search_commit_root,
1948				iterate_extent_inodes_t *iterate, void *ctx,
1949				bool ignore_offset)
1950{
1951	int ret;
1952	struct btrfs_trans_handle *trans = NULL;
1953	struct ulist *refs = NULL;
1954	struct ulist *roots = NULL;
1955	struct ulist_node *ref_node = NULL;
1956	struct ulist_node *root_node = NULL;
1957	struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem);
1958	struct ulist_iterator ref_uiter;
1959	struct ulist_iterator root_uiter;
1960
1961	btrfs_debug(fs_info, "resolving all inodes for extent %llu",
1962			extent_item_objectid);
1963
1964	if (!search_commit_root) {
1965		trans = btrfs_attach_transaction(fs_info->extent_root);
1966		if (IS_ERR(trans)) {
1967			if (PTR_ERR(trans) != -ENOENT &&
1968			    PTR_ERR(trans) != -EROFS)
1969				return PTR_ERR(trans);
1970			trans = NULL;
1971		}
1972	}
1973
1974	if (trans)
1975		btrfs_get_tree_mod_seq(fs_info, &seq_elem);
1976	else
1977		down_read(&fs_info->commit_root_sem);
1978
1979	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
1980				   seq_elem.seq, &refs,
1981				   &extent_item_pos, ignore_offset);
1982	if (ret)
1983		goto out;
1984
1985	ULIST_ITER_INIT(&ref_uiter);
1986	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
1987		ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
1988						seq_elem.seq, &roots,
1989						ignore_offset);
1990		if (ret)
1991			break;
1992		ULIST_ITER_INIT(&root_uiter);
1993		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
1994			btrfs_debug(fs_info,
1995				    "root %llu references leaf %llu, data list %#llx",
1996				    root_node->val, ref_node->val,
1997				    ref_node->aux);
1998			ret = iterate_leaf_refs(fs_info,
1999						(struct extent_inode_elem *)
2000						(uintptr_t)ref_node->aux,
2001						root_node->val,
2002						extent_item_objectid,
2003						iterate, ctx);
2004		}
2005		ulist_free(roots);
2006	}
2007
2008	free_leaf_list(refs);
2009out:
2010	if (trans) {
2011		btrfs_put_tree_mod_seq(fs_info, &seq_elem);
2012		btrfs_end_transaction(trans);
2013	} else {
2014		up_read(&fs_info->commit_root_sem);
2015	}
2016
2017	return ret;
2018}
2019
2020int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2021				struct btrfs_path *path,
2022				iterate_extent_inodes_t *iterate, void *ctx,
2023				bool ignore_offset)
2024{
2025	int ret;
2026	u64 extent_item_pos;
2027	u64 flags = 0;
2028	struct btrfs_key found_key;
2029	int search_commit_root = path->search_commit_root;
2030
2031	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
2032	btrfs_release_path(path);
2033	if (ret < 0)
2034		return ret;
2035	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2036		return -EINVAL;
2037
2038	extent_item_pos = logical - found_key.objectid;
2039	ret = iterate_extent_inodes(fs_info, found_key.objectid,
2040					extent_item_pos, search_commit_root,
2041					iterate, ctx, ignore_offset);
2042
2043	return ret;
2044}
2045
2046typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
2047			      struct extent_buffer *eb, void *ctx);
2048
2049static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
2050			      struct btrfs_path *path,
2051			      iterate_irefs_t *iterate, void *ctx)
2052{
2053	int ret = 0;
2054	int slot;
2055	u32 cur;
2056	u32 len;
2057	u32 name_len;
2058	u64 parent = 0;
2059	int found = 0;
2060	struct extent_buffer *eb;
2061	struct btrfs_item *item;
2062	struct btrfs_inode_ref *iref;
2063	struct btrfs_key found_key;
2064
2065	while (!ret) {
2066		ret = btrfs_find_item(fs_root, path, inum,
2067				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2068				&found_key);
2069
2070		if (ret < 0)
2071			break;
2072		if (ret) {
2073			ret = found ? 0 : -ENOENT;
2074			break;
2075		}
2076		++found;
2077
2078		parent = found_key.offset;
2079		slot = path->slots[0];
2080		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2081		if (!eb) {
2082			ret = -ENOMEM;
2083			break;
2084		}
2085		btrfs_release_path(path);
2086
2087		item = btrfs_item_nr(slot);
2088		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2089
2090		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
2091			name_len = btrfs_inode_ref_name_len(eb, iref);
2092			/* path must be released before calling iterate()! */
2093			btrfs_debug(fs_root->fs_info,
2094				"following ref at offset %u for inode %llu in tree %llu",
2095				cur, found_key.objectid,
2096				fs_root->root_key.objectid);
2097			ret = iterate(parent, name_len,
2098				      (unsigned long)(iref + 1), eb, ctx);
2099			if (ret)
2100				break;
2101			len = sizeof(*iref) + name_len;
2102			iref = (struct btrfs_inode_ref *)((char *)iref + len);
2103		}
2104		free_extent_buffer(eb);
2105	}
2106
2107	btrfs_release_path(path);
2108
2109	return ret;
2110}
2111
2112static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
2113				 struct btrfs_path *path,
2114				 iterate_irefs_t *iterate, void *ctx)
2115{
2116	int ret;
2117	int slot;
2118	u64 offset = 0;
2119	u64 parent;
2120	int found = 0;
2121	struct extent_buffer *eb;
2122	struct btrfs_inode_extref *extref;
2123	u32 item_size;
2124	u32 cur_offset;
2125	unsigned long ptr;
2126
2127	while (1) {
2128		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2129					    &offset);
2130		if (ret < 0)
2131			break;
2132		if (ret) {
2133			ret = found ? 0 : -ENOENT;
2134			break;
2135		}
2136		++found;
2137
2138		slot = path->slots[0];
2139		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2140		if (!eb) {
2141			ret = -ENOMEM;
2142			break;
2143		}
2144		btrfs_release_path(path);
2145
2146		item_size = btrfs_item_size_nr(eb, slot);
2147		ptr = btrfs_item_ptr_offset(eb, slot);
2148		cur_offset = 0;
2149
2150		while (cur_offset < item_size) {
2151			u32 name_len;
2152
2153			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2154			parent = btrfs_inode_extref_parent(eb, extref);
2155			name_len = btrfs_inode_extref_name_len(eb, extref);
2156			ret = iterate(parent, name_len,
2157				      (unsigned long)&extref->name, eb, ctx);
2158			if (ret)
2159				break;
2160
2161			cur_offset += btrfs_inode_extref_name_len(eb, extref);
2162			cur_offset += sizeof(*extref);
2163		}
2164		free_extent_buffer(eb);
2165
2166		offset++;
2167	}
2168
2169	btrfs_release_path(path);
2170
2171	return ret;
2172}
2173
2174static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
2175			 struct btrfs_path *path, iterate_irefs_t *iterate,
2176			 void *ctx)
2177{
2178	int ret;
2179	int found_refs = 0;
2180
2181	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
2182	if (!ret)
2183		++found_refs;
2184	else if (ret != -ENOENT)
2185		return ret;
2186
2187	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
2188	if (ret == -ENOENT && found_refs)
2189		return 0;
2190
2191	return ret;
2192}
2193
2194/*
2195 * Returns 0 if the path could be dumped (possibly truncated).
2196 * Returns <0 in case of an error.
2197 */
2198static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2199			 struct extent_buffer *eb, void *ctx)
2200{
2201	struct inode_fs_paths *ipath = ctx;
2202	char *fspath;
2203	char *fspath_min;
2204	int i = ipath->fspath->elem_cnt;
2205	const int s_ptr = sizeof(char *);
2206	u32 bytes_left;
2207
2208	bytes_left = ipath->fspath->bytes_left > s_ptr ?
2209					ipath->fspath->bytes_left - s_ptr : 0;
2210
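	/*
	 * The fspath buffer holds one pointer slot per path (val[]) at the
	 * front while the path strings are filled in backwards from the
	 * end; fspath_min reserves room for the pointer slots up to and
	 * including this element.
	 */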
2211	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2212	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2213				   name_off, eb, inum, fspath_min, bytes_left);
2214	if (IS_ERR(fspath))
2215		return PTR_ERR(fspath);
2216
2217	if (fspath > fspath_min) {
2218		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2219		++ipath->fspath->elem_cnt;
2220		ipath->fspath->bytes_left = fspath - fspath_min;
2221	} else {
2222		++ipath->fspath->elem_missed;
2223		ipath->fspath->bytes_missing += fspath_min - fspath;
2224		ipath->fspath->bytes_left = 0;
2225	}
2226
2227	return 0;
2228}
2229
2230/*
2231 * This dumps all file system paths to the inode into the ipath struct,
2232 * provided it has been created large enough. Each path is zero-terminated
2233 * and accessed from ipath->fspath->val[i].
2234 * When it returns, there are ipath->fspath->elem_cnt paths available in
2235 * ipath->fspath->val[]. When the allocated space wasn't sufficient, the
2236 * number of missed paths is recorded in ipath->fspath->elem_missed,
2237 * otherwise it's zero. ipath->fspath->bytes_missing holds the number of
2238 * bytes that would have been needed to return all paths.
2239 */
2240int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2241{
2242	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
2243			     inode_to_path, ipath);
2244}
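
/*
 * Typical call sequence (sketch, error handling elided; 4096 is an
 * arbitrary buffer size):
 *
 *	ipath = init_ipath(4096, fs_root, path);
 *	ret = paths_from_inode(inum, ipath);
 *	for (i = 0; i < ipath->fspath->elem_cnt; i++)
 *		pr_debug("%s\n",
 *			 (char *)(unsigned long)ipath->fspath->val[i]);
 *	free_ipath(ipath);
 */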
2245
2246struct btrfs_data_container *init_data_container(u32 total_bytes)
2247{
2248	struct btrfs_data_container *data;
2249	size_t alloc_bytes;
2250
2251	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2252	data = kvmalloc(alloc_bytes, GFP_KERNEL);
2253	if (!data)
2254		return ERR_PTR(-ENOMEM);
2255
2256	if (total_bytes >= sizeof(*data)) {
2257		data->bytes_left = total_bytes - sizeof(*data);
2258		data->bytes_missing = 0;
2259	} else {
2260		data->bytes_missing = sizeof(*data) - total_bytes;
2261		data->bytes_left = 0;
2262	}
2263
2264	data->elem_cnt = 0;
2265	data->elem_missed = 0;
2266
2267	return data;
2268}
2269
2270/*
2271 * Allocates space to return multiple file system paths for an inode.
2272 * total_bytes to allocate are passed; note that space usable for actual path
2273 * information will be total_bytes - sizeof(struct btrfs_data_container).
2274 * The returned pointer must be freed with free_ipath() in the end.
2275 */
2276struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2277					struct btrfs_path *path)
2278{
2279	struct inode_fs_paths *ifp;
2280	struct btrfs_data_container *fspath;
2281
2282	fspath = init_data_container(total_bytes);
2283	if (IS_ERR(fspath))
2284		return ERR_CAST(fspath);
2285
2286	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2287	if (!ifp) {
2288		kvfree(fspath);
2289		return ERR_PTR(-ENOMEM);
2290	}
2291
2292	ifp->btrfs_path = path;
2293	ifp->fspath = fspath;
2294	ifp->fs_root = fs_root;
2295
2296	return ifp;
2297}
2298
2299void free_ipath(struct inode_fs_paths *ipath)
2300{
2301	if (!ipath)
2302		return;
2303	kvfree(ipath->fspath);
2304	kfree(ipath);
2305}
2306
2307struct btrfs_backref_iter *btrfs_backref_iter_alloc(
2308		struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
2309{
2310	struct btrfs_backref_iter *ret;
2311
2312	ret = kzalloc(sizeof(*ret), gfp_flag);
2313	if (!ret)
2314		return NULL;
2315
2316	ret->path = btrfs_alloc_path();
2317	if (!ret->path) {
2318		kfree(ret);
2319		return NULL;
2320	}
2321
2322	/* Current backref iterator only supports iteration in commit root */
2323	ret->path->search_commit_root = 1;
2324	ret->path->skip_locking = 1;
2325	ret->fs_info = fs_info;
2326
2327	return ret;
2328}
2329
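/*
 * Position @iter at the backrefs of the extent at @bytenr: look up the
 * EXTENT_ITEM/METADATA_ITEM and set the cursor to its first inline or,
 * failing that, first keyed tree backref.
 *
 * Return 0 on success, -ENOENT if there is no extent item or backref at
 * @bytenr, or another negative errno on error.
 */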
2330int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
2331{
2332	struct btrfs_fs_info *fs_info = iter->fs_info;
2333	struct btrfs_path *path = iter->path;
2334	struct btrfs_extent_item *ei;
2335	struct btrfs_key key;
2336	int ret;
2337
2338	key.objectid = bytenr;
2339	key.type = BTRFS_METADATA_ITEM_KEY;
2340	key.offset = (u64)-1;
2341	iter->bytenr = bytenr;
2342
2343	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
2344	if (ret < 0)
2345		return ret;
2346	if (ret == 0) {
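		/*
		 * Key offset (u64)-1 can never exist on disk, so an exact
		 * match means the extent tree is corrupted.
		 */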
2347		ret = -EUCLEAN;
2348		goto release;
2349	}
2350	if (path->slots[0] == 0) {
2351		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
2352		ret = -EUCLEAN;
2353		goto release;
2354	}
2355	path->slots[0]--;
2356
2357	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2358	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
2359	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
2360		ret = -ENOENT;
2361		goto release;
2362	}
2363	memcpy(&iter->cur_key, &key, sizeof(key));
2364	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2365						    path->slots[0]);
2366	iter->end_ptr = (u32)(iter->item_ptr +
2367			btrfs_item_size_nr(path->nodes[0], path->slots[0]));
2368	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2369			    struct btrfs_extent_item);
2370
2371	/*
2372	 * Only iteration of tree backrefs is supported for now.
2373	 *
2374	 * This is an extra precaution for non skinny-metadata, where
2375	 * EXTENT_ITEM is also used for tree blocks and we can only rely on
2376	 * the extent flags to determine if it's a tree block.
2377	 */
2378	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2379		ret = -ENOTSUPP;
2380		goto release;
2381	}
2382	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2383
2384	/* If there is no inline backref, go search for keyed backref */
2385	if (iter->cur_ptr >= iter->end_ptr) {
2386		ret = btrfs_next_item(fs_info->extent_root, path);
2387
2388		/* No inline nor keyed ref */
2389		if (ret > 0) {
2390			ret = -ENOENT;
2391			goto release;
2392		}
2393		if (ret < 0)
2394			goto release;
2395
2396		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2397				path->slots[0]);
2398		if (iter->cur_key.objectid != bytenr ||
2399		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2400		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2401			ret = -ENOENT;
2402			goto release;
2403		}
2404		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2405							   path->slots[0]);
2406		iter->item_ptr = iter->cur_ptr;
2407		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
2408				      path->nodes[0], path->slots[0]));
2409	}
2410
2411	return 0;
2412release:
2413	btrfs_backref_iter_release(iter);
2414	return ret;
2415}
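
/*
 * Typical iteration pattern (sketch, error handling elided):
 *
 *	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
 *	ret = btrfs_backref_iter_start(iter, bytenr);
 *	while (ret == 0) {
 *		... examine iter->cur_key / iter->cur_ptr ...
 *		ret = btrfs_backref_iter_next(iter);
 *	}
 *	btrfs_backref_iter_release(iter);
 */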
2416
2417/*
2418 * Go to the next backref item of the current bytenr; it can be either
2419 * inlined or keyed.
2420 *
2421 * The caller needs to check whether it's an inline ref or not via iter->cur_key.
2422 *
2423 * Return 0 if we get the next backref without problem.
2424 * Return >0 if there is no extra backref for this bytenr.
2425 * Return <0 if something went wrong.
2426 */
2427int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
2428{
2429	struct extent_buffer *eb = btrfs_backref_get_eb(iter);
2430	struct btrfs_path *path = iter->path;
2431	struct btrfs_extent_inline_ref *iref;
2432	int ret;
2433	u32 size;
2434
2435	if (btrfs_backref_iter_is_inline_ref(iter)) {
2436		/* We're still inside the inline refs */
2437		ASSERT(iter->cur_ptr < iter->end_ptr);
2438
2439		if (btrfs_backref_has_tree_block_info(iter)) {
2440			/* First tree block info */
2441			size = sizeof(struct btrfs_tree_block_info);
2442		} else {
2443			/* Use inline ref type to determine the size */
2444			int type;
2445
2446			iref = (struct btrfs_extent_inline_ref *)
2447				((unsigned long)iter->cur_ptr);
2448			type = btrfs_extent_inline_ref_type(eb, iref);
2449
2450			size = btrfs_extent_inline_ref_size(type);
2451		}
2452		iter->cur_ptr += size;
2453		if (iter->cur_ptr < iter->end_ptr)
2454			return 0;
2455
2456		/* All inline items iterated, fall through */
2457	}
2458
2459	/* We're at keyed items, there is no inline item, go to the next one */
2460	ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
2461	if (ret)
2462		return ret;
2463
2464	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
2465	if (iter->cur_key.objectid != iter->bytenr ||
2466	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
2467	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
2468		return 1;
2469	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2470					path->slots[0]);
2471	iter->cur_ptr = iter->item_ptr;
2472	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
2473						path->slots[0]);
2474	return 0;
2475}
2476
2477void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
2478			      struct btrfs_backref_cache *cache, int is_reloc)
2479{
2480	int i;
2481
2482	cache->rb_root = RB_ROOT;
2483	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2484		INIT_LIST_HEAD(&cache->pending[i]);
2485	INIT_LIST_HEAD(&cache->changed);
2486	INIT_LIST_HEAD(&cache->detached);
2487	INIT_LIST_HEAD(&cache->leaves);
2488	INIT_LIST_HEAD(&cache->pending_edge);
2489	INIT_LIST_HEAD(&cache->useless_node);
2490	cache->fs_info = fs_info;
2491	cache->is_reloc = is_reloc;
2492}
2493
2494struct btrfs_backref_node *btrfs_backref_alloc_node(
2495		struct btrfs_backref_cache *cache, u64 bytenr, int level)
2496{
2497	struct btrfs_backref_node *node;
2498
2499	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
2500	node = kzalloc(sizeof(*node), GFP_NOFS);
2501	if (!node)
2502		return node;
2503
2504	INIT_LIST_HEAD(&node->list);
2505	INIT_LIST_HEAD(&node->upper);
2506	INIT_LIST_HEAD(&node->lower);
2507	RB_CLEAR_NODE(&node->rb_node);
2508	cache->nr_nodes++;
2509	node->level = level;
2510	node->bytenr = bytenr;
2511
2512	return node;
2513}
2514
2515struct btrfs_backref_edge *btrfs_backref_alloc_edge(
2516		struct btrfs_backref_cache *cache)
2517{
2518	struct btrfs_backref_edge *edge;
2519
2520	edge = kzalloc(sizeof(*edge), GFP_NOFS);
2521	if (edge)
2522		cache->nr_edges++;
2523	return edge;
2524}
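
/*
 * The matching btrfs_backref_free_node()/btrfs_backref_free_edge()
 * helpers (static inlines in backref.h) decrement nr_nodes/nr_edges
 * accordingly.
 */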
2525
2526/*
2527 * Drop the backref node from cache, also cleaning up all its
2528 * upper edges and any uncached nodes in the path.
2529 *
2530 * This cleanup happens bottom up, thus the node should either
2531 * be the lowest node in the cache or a detached node.
2532 */
2533void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
2534				struct btrfs_backref_node *node)
2535{
2536	struct btrfs_backref_node *upper;
2537	struct btrfs_backref_edge *edge;
2538
2539	if (!node)
2540		return;
2541
2542	BUG_ON(!node->lowest && !node->detached);
2543	while (!list_empty(&node->upper)) {
2544		edge = list_entry(node->upper.next, struct btrfs_backref_edge,
2545				  list[LOWER]);
2546		upper = edge->node[UPPER];
2547		list_del(&edge->list[LOWER]);
2548		list_del(&edge->list[UPPER]);
2549		btrfs_backref_free_edge(cache, edge);
2550
2551		/*
2552		 * Add the node to the leaf node list if no other child block
2553		 * is cached.
2554		 */
2555		if (list_empty(&upper->lower)) {
2556			list_add_tail(&upper->lower, &cache->leaves);
2557			upper->lowest = 1;
2558		}
2559	}
2560
2561	btrfs_backref_drop_node(cache, node);
2562}
2563
2564/*
2565 * Release all nodes/edges from current cache
2566 */
2567void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
2568{
2569	struct btrfs_backref_node *node;
2570	int i;
2571
2572	while (!list_empty(&cache->detached)) {
2573		node = list_entry(cache->detached.next,
2574				  struct btrfs_backref_node, list);
2575		btrfs_backref_cleanup_node(cache, node);
2576	}
2577
2578	while (!list_empty(&cache->leaves)) {
2579		node = list_entry(cache->leaves.next,
2580				  struct btrfs_backref_node, lower);
2581		btrfs_backref_cleanup_node(cache, node);
2582	}
2583
2584	cache->last_trans = 0;
2585
2586	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2587		ASSERT(list_empty(&cache->pending[i]));
2588	ASSERT(list_empty(&cache->pending_edge));
2589	ASSERT(list_empty(&cache->useless_node));
2590	ASSERT(list_empty(&cache->changed));
2591	ASSERT(list_empty(&cache->detached));
2592	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
2593	ASSERT(!cache->nr_nodes);
2594	ASSERT(!cache->nr_edges);
2595}
2596
2597/*
2598 * Handle direct tree backref
2599 *
2600 * Direct tree backref means the backref item records its parent bytenr
2601 * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
2602 *
2603 * @ref_key:	The converted backref key.
2604 *		For keyed backref, it's the item key.
2605 *		For inlined backref, objectid is the bytenr,
2606 *		type is btrfs_inline_ref_type, offset is
2607 *		btrfs_inline_ref_offset.
2608 */
2609static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
2610				      struct btrfs_key *ref_key,
2611				      struct btrfs_backref_node *cur)
2612{
2613	struct btrfs_backref_edge *edge;
2614	struct btrfs_backref_node *upper;
2615	struct rb_node *rb_node;
2616
2617	ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
2618
2619	/* Only reloc root uses backref pointing to itself */
2620	if (ref_key->objectid == ref_key->offset) {
2621		struct btrfs_root *root;
2622
2623		cur->is_reloc_root = 1;
2624		/* Only reloc backref cache cares about a specific root */
2625		if (cache->is_reloc) {
2626			root = find_reloc_root(cache->fs_info, cur->bytenr);
2627			if (!root)
2628				return -ENOENT;
2629			cur->root = root;
2630		} else {
2631			/*
2632			 * For generic purpose backref cache, reloc root node
2633			 * is useless.
2634			 */
2635			list_add(&cur->list, &cache->useless_node);
2636		}
2637		return 0;
2638	}
2639
2640	edge = btrfs_backref_alloc_edge(cache);
2641	if (!edge)
2642		return -ENOMEM;
2643
2644	rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
2645	if (!rb_node) {
2646		/* Parent node not yet cached */
2647		upper = btrfs_backref_alloc_node(cache, ref_key->offset,
2648					   cur->level + 1);
2649		if (!upper) {
2650			btrfs_backref_free_edge(cache, edge);
2651			return -ENOMEM;
2652		}
2653
2654		/*
2655		 *  Backrefs for the upper level block aren't cached, add the
2656		 *  block to the pending list
2657		 */
2658		list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2659	} else {
2660		/* Parent node already cached */
2661		upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
2662		ASSERT(upper->checked);
2663		INIT_LIST_HEAD(&edge->list[UPPER]);
2664	}
2665	btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
2666	return 0;
2667}
2668
2669/*
2670 * Handle indirect tree backref
2671 *
2672 * Indirect tree backref means we only know which tree the node belongs to.
2673 * We still need to do a tree search to find out the parents. This is for
2674 * TREE_BLOCK_REF backref (keyed or inlined).
2675 *
2676 * @ref_key:	The same as @ref_key in handle_direct_tree_backref()
2677 * @tree_key:	The first key of this tree block.
2678 * @path:	A clean (released) path, to avoid allocating a path every time
2679 *		the function gets called.
2680 */
2681static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
2682					struct btrfs_path *path,
2683					struct btrfs_key *ref_key,
2684					struct btrfs_key *tree_key,
2685					struct btrfs_backref_node *cur)
2686{
2687	struct btrfs_fs_info *fs_info = cache->fs_info;
2688	struct btrfs_backref_node *upper;
2689	struct btrfs_backref_node *lower;
2690	struct btrfs_backref_edge *edge;
2691	struct extent_buffer *eb;
2692	struct btrfs_root *root;
2693	struct rb_node *rb_node;
2694	int level;
2695	bool need_check = true;
2696	int ret;
2697
2698	root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
2699	if (IS_ERR(root))
2700		return PTR_ERR(root);
2701	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2702		cur->cowonly = 1;
2703
2704	if (btrfs_root_level(&root->root_item) == cur->level) {
2705		/* Tree root */
2706		ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
2707		/*
2708		 * For reloc backref cache, we may ignore reloc root.  But for
2709		 * general purpose backref cache, we can't rely on
2710		 * btrfs_should_ignore_reloc_root() as it may conflict with
2711		 * current running relocation and lead to missing root.
2712		 *
2713		 * For general purpose backref cache, reloc root detection is
2714		 * completely relying on direct backref (key->offset is parent
2715		 * bytenr), thus only do such check for reloc cache.
2716		 */
2717		if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
2718			btrfs_put_root(root);
2719			list_add(&cur->list, &cache->useless_node);
2720		} else {
2721			cur->root = root;
2722		}
2723		return 0;
2724	}
2725
2726	level = cur->level + 1;
2727
2728	/* Search the tree to find parent blocks referring to the block */
2729	path->search_commit_root = 1;
2730	path->skip_locking = 1;
2731	path->lowest_level = level;
2732	ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
2733	path->lowest_level = 0;
2734	if (ret < 0) {
2735		btrfs_put_root(root);
2736		return ret;
2737	}
2738	if (ret > 0 && path->slots[level] > 0)
2739		path->slots[level]--;
2740
2741	eb = path->nodes[level];
2742	if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
2743		btrfs_err(fs_info,
2744"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
2745			  cur->bytenr, level - 1, root->root_key.objectid,
2746			  tree_key->objectid, tree_key->type, tree_key->offset);
2747		btrfs_put_root(root);
2748		ret = -ENOENT;
2749		goto out;
2750	}
2751	lower = cur;
2752
2753	/* Add all nodes and edges in the path */
2754	for (; level < BTRFS_MAX_LEVEL; level++) {
2755		if (!path->nodes[level]) {
2756			ASSERT(btrfs_root_bytenr(&root->root_item) ==
2757			       lower->bytenr);
2758			/* Same as previous should_ignore_reloc_root() call */
2759			if (btrfs_should_ignore_reloc_root(root) &&
2760			    cache->is_reloc) {
2761				btrfs_put_root(root);
2762				list_add(&lower->list, &cache->useless_node);
2763			} else {
2764				lower->root = root;
2765			}
2766			break;
2767		}
2768
2769		edge = btrfs_backref_alloc_edge(cache);
2770		if (!edge) {
2771			btrfs_put_root(root);
2772			ret = -ENOMEM;
2773			goto out;
2774		}
2775
2776		eb = path->nodes[level];
2777		rb_node = rb_simple_search(&cache->rb_root, eb->start);
2778		if (!rb_node) {
2779			upper = btrfs_backref_alloc_node(cache, eb->start,
2780							 lower->level + 1);
2781			if (!upper) {
2782				btrfs_put_root(root);
2783				btrfs_backref_free_edge(cache, edge);
2784				ret = -ENOMEM;
2785				goto out;
2786			}
2787			upper->owner = btrfs_header_owner(eb);
2788			if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2789				upper->cowonly = 1;
2790
2791			/*
2792			 * If we know the block isn't shared we can avoid
2793			 * checking its backrefs.
2794			 */
2795			if (btrfs_block_can_be_shared(root, eb))
2796				upper->checked = 0;
2797			else
2798				upper->checked = 1;
2799
2800			/*
2801			 * Add the block to the pending list if we need to check its
2802			 * backrefs; we only do this once while walking up a
2803			 * tree as we will catch anything else later on.
2804			 */
2805			if (!upper->checked && need_check) {
2806				need_check = false;
2807				list_add_tail(&edge->list[UPPER],
2808					      &cache->pending_edge);
2809			} else {
2810				if (upper->checked)
2811					need_check = true;
2812				INIT_LIST_HEAD(&edge->list[UPPER]);
2813			}
2814		} else {
2815			upper = rb_entry(rb_node, struct btrfs_backref_node,
2816					 rb_node);
2817			ASSERT(upper->checked);
2818			INIT_LIST_HEAD(&edge->list[UPPER]);
2819			if (!upper->owner)
2820				upper->owner = btrfs_header_owner(eb);
2821		}
2822		btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
2823
2824		if (rb_node) {
2825			btrfs_put_root(root);
2826			break;
2827		}
2828		lower = upper;
2829		upper = NULL;
2830	}
2831out:
2832	btrfs_release_path(path);
2833	return ret;
2834}
2835
2836/*
2837 * Add backref node @cur into @cache.
2838 *
2839 * NOTE: Even if the function returns 0, @cur is not yet cached as its upper
2840 *	 links aren't yet bi-directional; use
2841 *	 btrfs_backref_finish_upper_links() to finish such linkage.
2842 *
2843 * @path:	Released path for indirect tree backref lookup
2844 * @iter:	Released backref iter for extent tree search
2845 * @node_key:	The first key of the tree block
2846 */
2847int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
2848				struct btrfs_path *path,
2849				struct btrfs_backref_iter *iter,
2850				struct btrfs_key *node_key,
2851				struct btrfs_backref_node *cur)
2852{
2853	struct btrfs_fs_info *fs_info = cache->fs_info;
2854	struct btrfs_backref_edge *edge;
2855	struct btrfs_backref_node *exist;
2856	int ret;
2857
2858	ret = btrfs_backref_iter_start(iter, cur->bytenr);
2859	if (ret < 0)
2860		return ret;
2861	/*
2862	 * We skip the first btrfs_tree_block_info, as we don't use the key
2863	 * stored in it, but fetch it from the tree block
2864	 */
2865	if (btrfs_backref_has_tree_block_info(iter)) {
2866		ret = btrfs_backref_iter_next(iter);
2867		if (ret < 0)
2868			goto out;
2869		/* No extra backref? This means the tree block is corrupted */
2870		if (ret > 0) {
2871			ret = -EUCLEAN;
2872			goto out;
2873		}
2874	}
2875	WARN_ON(cur->checked);
2876	if (!list_empty(&cur->upper)) {
2877		/*
2878		 * The backref was added previously when processing backref of
2879		 * type BTRFS_TREE_BLOCK_REF_KEY
2880		 */
2881		ASSERT(list_is_singular(&cur->upper));
2882		edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
2883				  list[LOWER]);
2884		ASSERT(list_empty(&edge->list[UPPER]));
2885		exist = edge->node[UPPER];
2886		/*
2887		 * Add the upper level block to the pending list if we need to
2888		 * check its backrefs
2889		 */
2890		if (!exist->checked)
2891			list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2892	} else {
2893		exist = NULL;
2894	}
2895
2896	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
2897		struct extent_buffer *eb;
2898		struct btrfs_key key;
2899		int type;
2900
2901		cond_resched();
2902		eb = btrfs_backref_get_eb(iter);
2903
2904		key.objectid = iter->bytenr;
2905		if (btrfs_backref_iter_is_inline_ref(iter)) {
2906			struct btrfs_extent_inline_ref *iref;
2907
2908			/* Update key for inline backref */
2909			iref = (struct btrfs_extent_inline_ref *)
2910				((unsigned long)iter->cur_ptr);
2911			type = btrfs_get_extent_inline_ref_type(eb, iref,
2912							BTRFS_REF_TYPE_BLOCK);
2913			if (type == BTRFS_REF_TYPE_INVALID) {
2914				ret = -EUCLEAN;
2915				goto out;
2916			}
2917			key.type = type;
2918			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
2919		} else {
2920			key.type = iter->cur_key.type;
2921			key.offset = iter->cur_key.offset;
2922		}
2923
2924		/*
2925		 * Parent node found and matches current inline ref, no need to
2926		 * rebuild this node for this inline ref
2927		 */
2928		if (exist &&
2929		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
2930		      exist->owner == key.offset) ||
2931		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
2932		      exist->bytenr == key.offset))) {
2933			exist = NULL;
2934			continue;
2935		}
2936
2937		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
2938		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
2939			ret = handle_direct_tree_backref(cache, &key, cur);
2940			if (ret < 0)
2941				goto out;
2942			continue;
2943		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
2944			ret = -EINVAL;
2945			btrfs_print_v0_err(fs_info);
2946			btrfs_handle_fs_error(fs_info, ret, NULL);
2947			goto out;
2948		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
2949			continue;
2950		}
2951
2952		/*
2953		 * key.type == BTRFS_TREE_BLOCK_REF_KEY, the inline ref offset
2954		 * is the root objectid. We need to search the tree to get
2955		 * its parent bytenr.
2956		 */
2957		ret = handle_indirect_tree_backref(cache, path, &key, node_key,
2958						   cur);
2959		if (ret < 0)
2960			goto out;
2961	}
2962	ret = 0;
2963	cur->checked = 1;
2964	WARN_ON(exist);
2965out:
2966	btrfs_backref_iter_release(iter);
2967	return ret;
2968}
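
/*
 * Simplified sketch of how the two stages pair up (relocation's
 * build_backref_tree() additionally drains cache->pending_edge between
 * the calls):
 *
 *	ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, cur);
 *	if (!ret)
 *		ret = btrfs_backref_finish_upper_links(cache, cur);
 *	if (ret)
 *		btrfs_backref_error_cleanup(cache, cur);
 */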
2969
2970/*
2971 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
2972 */
2973int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
2974				     struct btrfs_backref_node *start)
2975{
2976	struct list_head *useless_node = &cache->useless_node;
2977	struct btrfs_backref_edge *edge;
2978	struct rb_node *rb_node;
2979	LIST_HEAD(pending_edge);
2980
2981	ASSERT(start->checked);
2982
2983	/* Insert this node to cache if it's not COW-only */
2984	if (!start->cowonly) {
2985		rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
2986					   &start->rb_node);
2987		if (rb_node)
2988			btrfs_backref_panic(cache->fs_info, start->bytenr,
2989					    -EEXIST);
2990		list_add_tail(&start->lower, &cache->leaves);
2991	}
2992
2993	/*
2994	 * Use breadth first search to iterate all related edges.
2995	 *
2996	 * The starting points are all the edges of this node
2997	 */
2998	list_for_each_entry(edge, &start->upper, list[LOWER])
2999		list_add_tail(&edge->list[UPPER], &pending_edge);
3000
3001	while (!list_empty(&pending_edge)) {
3002		struct btrfs_backref_node *upper;
3003		struct btrfs_backref_node *lower;
3004
3005		edge = list_first_entry(&pending_edge,
3006				struct btrfs_backref_edge, list[UPPER]);
3007		list_del_init(&edge->list[UPPER]);
3008		upper = edge->node[UPPER];
3009		lower = edge->node[LOWER];
3010
3011		/* Parent is detached, no need to keep any edges */
3012		if (upper->detached) {
3013			list_del(&edge->list[LOWER]);
3014			btrfs_backref_free_edge(cache, edge);
3015
3016			/* Lower node is orphan, queue for cleanup */
3017			if (list_empty(&lower->upper))
3018				list_add(&lower->list, useless_node);
3019			continue;
3020		}
3021
3022		/*
3023		 * All new nodes added in current build_backref_tree() haven't
3024		 * been linked to the cache rb tree.
3025		 * So if we have upper->rb_node populated, this means a cache
3026		 * hit. We only need to link the edge, as @upper and all its
3027		 * parents have already been linked.
3028		 */
3029		if (!RB_EMPTY_NODE(&upper->rb_node)) {
3030			if (upper->lowest) {
3031				list_del_init(&upper->lower);
3032				upper->lowest = 0;
3033			}
3034
3035			list_add_tail(&edge->list[UPPER], &upper->lower);
3036			continue;
3037		}
3038
3039		/* Sanity check, we shouldn't have any unchecked nodes */
3040		if (!upper->checked) {
3041			ASSERT(0);
3042			return -EUCLEAN;
3043		}
3044
3045		/* Sanity check, COW-only node has non-COW-only parent */
3046		if (start->cowonly != upper->cowonly) {
3047			ASSERT(0);
3048			return -EUCLEAN;
3049		}
3050
3051		/* Only cache non-COW-only (subvolume trees) tree blocks */
3052		if (!upper->cowonly) {
3053			rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3054						   &upper->rb_node);
3055			if (rb_node) {
3056				btrfs_backref_panic(cache->fs_info,
3057						upper->bytenr, -EEXIST);
3058				return -EUCLEAN;
3059			}
3060		}
3061
3062		list_add_tail(&edge->list[UPPER], &upper->lower);
3063
3064		/*
3065		 * Also queue all the parent edges of this uncached node
3066		 * to finish the upper linkage
3067		 */
3068		list_for_each_entry(edge, &upper->upper, list[LOWER])
3069			list_add_tail(&edge->list[UPPER], &pending_edge);
3070	}
3071	return 0;
3072}
3073
3074void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
3075				 struct btrfs_backref_node *node)
3076{
3077	struct btrfs_backref_node *lower;
3078	struct btrfs_backref_node *upper;
3079	struct btrfs_backref_edge *edge;
3080
3081	while (!list_empty(&cache->useless_node)) {
3082		lower = list_first_entry(&cache->useless_node,
3083				   struct btrfs_backref_node, list);
3084		list_del_init(&lower->list);
3085	}
3086	while (!list_empty(&cache->pending_edge)) {
3087		edge = list_first_entry(&cache->pending_edge,
3088				struct btrfs_backref_edge, list[UPPER]);
3089		list_del(&edge->list[UPPER]);
3090		list_del(&edge->list[LOWER]);
3091		lower = edge->node[LOWER];
3092		upper = edge->node[UPPER];
3093		btrfs_backref_free_edge(cache, edge);
3094
3095		/*
3096		 * Lower is no longer linked to any upper backref nodes and
3097		 * isn't in the cache; we can free it ourselves.
3098		 */
3099		if (list_empty(&lower->upper) &&
3100		    RB_EMPTY_NODE(&lower->rb_node))
3101			list_add(&lower->list, &cache->useless_node);
3102
3103		if (!RB_EMPTY_NODE(&upper->rb_node))
3104			continue;
3105
3106		/* Add this guy's upper edges to the list to process */
3107		list_for_each_entry(edge, &upper->upper, list[LOWER])
3108			list_add_tail(&edge->list[UPPER],
3109				      &cache->pending_edge);
3110		if (list_empty(&upper->upper))
3111			list_add(&upper->list, &cache->useless_node);
3112	}
3113
3114	while (!list_empty(&cache->useless_node)) {
3115		lower = list_first_entry(&cache->useless_node,
3116				   struct btrfs_backref_node, list);
3117		list_del_init(&lower->list);
3118		if (lower == node)
3119			node = NULL;
3120		btrfs_backref_drop_node(cache, lower);
3121	}
3122
3123	btrfs_backref_cleanup_node(cache, node);
3124	ASSERT(list_empty(&cache->useless_node) &&
3125	       list_empty(&cache->pending_edge));
3126}