// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 */

#include <linux/mm.h>
#include <linux/rbtree.h>
#include <trace/events/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "backref.h"
#include "ulist.h"
#include "transaction.h"
#include "delayed-ref.h"
#include "locking.h"
#include "misc.h"
#include "tree-mod-log.h"
#include "fs.h"
#include "accessors.h"
#include "extent-tree.h"
#include "relocation.h"
#include "tree-checker.h"

/* Just arbitrary numbers so we can be sure one of these happened. */
#define BACKREF_FOUND_SHARED     6
#define BACKREF_FOUND_NOT_SHARED 7

struct extent_inode_elem {
	u64 inum;
	u64 offset;
	u64 num_bytes;
	struct extent_inode_elem *next;
};

static int check_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
			      const struct btrfs_key *key,
			      const struct extent_buffer *eb,
			      const struct btrfs_file_extent_item *fi,
			      struct extent_inode_elem **eie)
{
	const u64 data_len = btrfs_file_extent_num_bytes(eb, fi);
	u64 offset = key->offset;
	struct extent_inode_elem *e;
	const u64 *root_ids;
	int root_count;
	bool cached;

	if (!ctx->ignore_extent_item_pos &&
	    !btrfs_file_extent_compression(eb, fi) &&
	    !btrfs_file_extent_encryption(eb, fi) &&
	    !btrfs_file_extent_other_encoding(eb, fi)) {
		u64 data_offset;

		data_offset = btrfs_file_extent_offset(eb, fi);

		if (ctx->extent_item_pos < data_offset ||
		    ctx->extent_item_pos >= data_offset + data_len)
			return 1;
		offset += ctx->extent_item_pos - data_offset;
	}

	if (!ctx->indirect_ref_iterator || !ctx->cache_lookup)
		goto add_inode_elem;

	cached = ctx->cache_lookup(eb->start, ctx->user_ctx, &root_ids,
				   &root_count);
	if (!cached)
		goto add_inode_elem;

	for (int i = 0; i < root_count; i++) {
		int ret;

		ret = ctx->indirect_ref_iterator(key->objectid, offset,
						 data_len, root_ids[i],
						 ctx->user_ctx);
		if (ret)
			return ret;
	}

add_inode_elem:
	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = offset;
	e->num_bytes = data_len;
	*eie = e;

	return 0;
}
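
/*
 * Worked example for the offset math in check_extent_in_eb() above
 * (illustrative numbers only): a file extent item with key->offset == 64K
 * referencing an on-disk extent at data_offset == 8K with data_len == 32K
 * covers extent_item_pos values in [8K, 40K).  For ctx->extent_item_pos ==
 * 12K the reported file offset is 64K + (12K - 8K) == 68K; for 4K or 48K
 * the item is outside the referenced range, is skipped, and 1 is returned.
 */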

static void free_inode_elem_list(struct extent_inode_elem *eie)
{
	struct extent_inode_elem *eie_next;

	for (; eie; eie = eie_next) {
		eie_next = eie->next;
		kfree(eie);
	}
}

static int find_extent_in_eb(struct btrfs_backref_walk_ctx *ctx,
			     const struct extent_buffer *eb,
			     struct extent_inode_elem **eie)
{
	u64 disk_byte;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int slot;
	int nritems;
	int extent_type;
	int ret;

	/*
	 * From the shared data ref, we only have the leaf but we need
	 * the key. Thus, we must look into all items and find one (or
	 * more) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != ctx->bytenr)
			continue;

		ret = check_extent_in_eb(ctx, &key, eb, fi, eie);
		if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
			return ret;
	}

	return 0;
}

struct preftree {
	struct rb_root_cached root;
	unsigned int count;
};

#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }

struct preftrees {
	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
	struct preftree indirect_missing_keys;
};
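
/*
 * A set of preftrees is typically stack-allocated and initialized with
 * PREFTREE_INIT, as find_parent_nodes() does further below:
 *
 *	struct preftrees preftrees = {
 *		.direct = PREFTREE_INIT,
 *		.indirect = PREFTREE_INIT,
 *		.indirect_missing_keys = PREFTREE_INIT
 *	};
 *
 * Each tree starts out as an empty cached rbtree with count == 0.
 */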

/*
 * Checks for a shared extent during backref search.
 *
 * The share_count tracks prelim_refs (direct and indirect) having a
 * ref->count >0:
 *  - incremented when a ref->count transitions to >0
 *  - decremented when a ref->count transitions to <1
 */
struct share_check {
	struct btrfs_backref_share_check_ctx *ctx;
	struct btrfs_root *root;
	u64 inum;
	u64 data_bytenr;
	u64 data_extent_gen;
	/*
	 * Counts number of inodes that refer to an extent (different inodes in
	 * the same root or different roots) that we could find. The sharedness
	 * check typically stops once this counter gets greater than 1, so it
	 * may not reflect the total number of inodes.
	 */
	int share_count;
	/*
	 * The number of times we found our inode refers to the data extent we
	 * are determining the sharedness. In other words, how many file extent
	 * items we could find for our inode that point to our target data
	 * extent. The value we get here after finishing the extent sharedness
	 * check may be smaller than reality, but if it ends up being greater
	 * than 1, then we know for sure the inode has multiple file extent
	 * items that point to our data extent, and we can safely assume it's
	 * useful to cache the sharedness check result.
	 */
	int self_ref_count;
	bool have_delayed_delete_refs;
};

static inline int extent_is_shared(struct share_check *sc)
{
	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
}

static struct kmem_cache *btrfs_prelim_ref_cache;

int __init btrfs_prelim_ref_init(void)
{
	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
					sizeof(struct prelim_ref), 0, 0, NULL);
	if (!btrfs_prelim_ref_cache)
		return -ENOMEM;
	return 0;
}

void __cold btrfs_prelim_ref_exit(void)
{
	kmem_cache_destroy(btrfs_prelim_ref_cache);
}

static void free_pref(struct prelim_ref *ref)
{
	kmem_cache_free(btrfs_prelim_ref_cache, ref);
}

/*
 * Return 0 when both refs are for the same block (and can be merged).
 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 * indicates a 'higher' block.
 */
static int prelim_ref_compare(struct prelim_ref *ref1,
			      struct prelim_ref *ref2)
{
	if (ref1->level < ref2->level)
		return -1;
	if (ref1->level > ref2->level)
		return 1;
	if (ref1->root_id < ref2->root_id)
		return -1;
	if (ref1->root_id > ref2->root_id)
		return 1;
	if (ref1->key_for_search.type < ref2->key_for_search.type)
		return -1;
	if (ref1->key_for_search.type > ref2->key_for_search.type)
		return 1;
	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
		return -1;
	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
		return 1;
	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
		return -1;
	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;

	return 0;
}

static void update_share_count(struct share_check *sc, int oldcount,
			       int newcount, struct prelim_ref *newref)
{
	if ((!sc) || (oldcount == 0 && newcount < 1))
		return;

	if (oldcount > 0 && newcount < 1)
		sc->share_count--;
	else if (oldcount < 1 && newcount > 0)
		sc->share_count++;

	if (newref->root_id == sc->root->root_key.objectid &&
	    newref->wanted_disk_byte == sc->data_bytenr &&
	    newref->key_for_search.objectid == sc->inum)
		sc->self_ref_count += newref->count;
}
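
/*
 * Example of the transition rules above (hypothetical sequence): merging a
 * delayed BTRFS_ADD_DELAYED_REF (count +1) into a fresh ref moves ref->count
 * from 0 to 1 and increments sc->share_count; merging a later
 * BTRFS_DROP_DELAYED_REF (count -1) into the same ref moves ref->count from
 * 1 back to 0 and decrements sc->share_count again, so a ref that is fully
 * cancelled out does not make the extent look shared.
 */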

/*
 * Add @newref to the @root rbtree, merging identical refs.
 *
 * Callers should assume that newref has been freed after calling.
 */
static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
			      struct preftree *preftree,
			      struct prelim_ref *newref,
			      struct share_check *sc)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref;
	int result;
	bool leftmost = true;

	root = &preftree->root;
	p = &root->rb_root.rb_node;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, newref);
		if (result < 0) {
			p = &(*p)->rb_left;
		} else if (result > 0) {
			p = &(*p)->rb_right;
			leftmost = false;
		} else {
			/* Identical refs, merge them and free @newref */
			struct extent_inode_elem *eie = ref->inode_list;

			while (eie && eie->next)
				eie = eie->next;

			if (!eie)
				ref->inode_list = newref->inode_list;
			else
				eie->next = newref->inode_list;
			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
						     preftree->count);
			/*
			 * A delayed ref can have newref->count < 0.
			 * The ref->count is updated to follow any
			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
			 */
			update_share_count(sc, ref->count,
					   ref->count + newref->count, newref);
			ref->count += newref->count;
			free_pref(newref);
			return;
		}
	}

	update_share_count(sc, 0, newref->count, newref);
	preftree->count++;
	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
	rb_link_node(&newref->rbnode, parent, p);
	rb_insert_color_cached(&newref->rbnode, root, leftmost);
}
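
/*
 * Sketch of the merge case above, with made-up numbers: two refs are
 * "identical" when prelim_ref_compare() returns 0, i.e. level, root_id,
 * key_for_search and parent all match.  Inserting a ref for root 257 with
 * key (258 EXTENT_DATA 0) and count 1 twice therefore leaves a single node
 * with count == 2 and the two inode lists concatenated.
 */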

/*
 * Release the entire tree.  We don't care about internal consistency so
 * just free everything and then reset the tree root.
 */
static void prelim_release(struct preftree *preftree)
{
	struct prelim_ref *ref, *next_ref;

	rbtree_postorder_for_each_entry_safe(ref, next_ref,
					     &preftree->root.rb_root, rbnode) {
		free_inode_elem_list(ref->inode_list);
		free_pref(ref);
	}

	preftree->root = RB_ROOT_CACHED;
	preftree->count = 0;
}

/*
 * The rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we have the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we have the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
			  struct preftree *preftree, u64 root_id,
			  const struct btrfs_key *key, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	struct prelim_ref *ref;

	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return 0;

	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->inode_list = NULL;
	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	prelim_ref_insert(fs_info, preftree, ref, sc);
	return extent_is_shared(sc);
}

/* direct refs use root == 0, key == NULL */
static int add_direct_ref(const struct btrfs_fs_info *fs_info,
			  struct preftrees *preftrees, int level, u64 parent,
			  u64 wanted_disk_byte, int count,
			  struct share_check *sc, gfp_t gfp_mask)
{
	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
			      parent, wanted_disk_byte, count, sc, gfp_mask);
}

/* indirect refs use parent == 0 */
static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, u64 root_id,
			    const struct btrfs_key *key, int level,
			    u64 wanted_disk_byte, int count,
			    struct share_check *sc, gfp_t gfp_mask)
{
	struct preftree *tree = &preftrees->indirect;

	if (!key)
		tree = &preftrees->indirect_missing_keys;
	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
			      wanted_disk_byte, count, sc, gfp_mask);
}
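
/*
 * Illustrative calls for the two helpers above, using made-up argument
 * names: a BTRFS_SHARED_BLOCK_REF_KEY backref that names its parent block
 * goes to the direct tree, while a BTRFS_TREE_BLOCK_REF_KEY backref that
 * only names a root still has to be resolved:
 *
 *	add_direct_ref(fs_info, preftrees, level + 1, parent_bytenr,
 *		       bytenr, 1, NULL, GFP_NOFS);
 *	add_indirect_ref(fs_info, preftrees, root_objectid, NULL,
 *			 level + 1, bytenr, 1, NULL, GFP_NOFS);
 *
 * Passing a NULL key routes the indirect ref to the missing-keys tree, to
 * be filled in later by add_missing_keys().
 */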

static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
{
	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct prelim_ref *ref = NULL;
	struct prelim_ref target = {};
	int result;

	target.parent = bytenr;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct prelim_ref, rbnode);
		result = prelim_ref_compare(ref, &target);

		if (result < 0)
			p = &(*p)->rb_left;
		else if (result > 0)
			p = &(*p)->rb_right;
		else
			return 1;
	}
	return 0;
}

static int add_all_parents(struct btrfs_backref_walk_ctx *ctx,
			   struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents,
			   struct preftrees *preftrees, struct prelim_ref *ref,
			   int level)
{
	int ret = 0;
	int slot;
	struct extent_buffer *eb;
	struct btrfs_key key;
	struct btrfs_key *key_for_search = &ref->key_for_search;
	struct btrfs_file_extent_item *fi;
	struct extent_inode_elem *eie = NULL, *old = NULL;
	u64 disk_byte;
	u64 wanted_disk_byte = ref->wanted_disk_byte;
	u64 count = 0;
	u64 data_offset;
	u8 type;

	if (level != 0) {
		eb = path->nodes[level];
		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
		if (ret < 0)
			return ret;
		return 0;
	}

	/*
	 * 1. We normally enter this function with the path already pointing to
	 *    the first item to check. But sometimes, we may enter it with
	 *    slot == nritems.
	 * 2. We are searching for normal backref but bytenr of this leaf
	 *    matches shared data backref
	 * 3. The leaf owner is not equal to the root we are searching
	 *
	 * For these cases, go to the next leaf before we continue.
	 */
	eb = path->nodes[0];
	if (path->slots[0] >= btrfs_header_nritems(eb) ||
	    is_shared_data_backref(preftrees, eb->start) ||
	    ref->root_id != btrfs_header_owner(eb)) {
		if (ctx->time_seq == BTRFS_SEQ_LAST)
			ret = btrfs_next_leaf(root, path);
		else
			ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
	}

	while (!ret && count < ref->count) {
		eb = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(eb, &key, slot);

		if (key.objectid != key_for_search->objectid ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		/*
		 * We are searching for normal backref but bytenr of this leaf
		 * matches shared data backref, OR
		 * the leaf owner is not equal to the root we are searching for
		 */
		if (slot == 0 &&
		    (is_shared_data_backref(preftrees, eb->start) ||
		     ref->root_id != btrfs_header_owner(eb))) {
			if (ctx->time_seq == BTRFS_SEQ_LAST)
				ret = btrfs_next_leaf(root, path);
			else
				ret = btrfs_next_old_leaf(root, path, ctx->time_seq);
			continue;
		}
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(eb, fi);
		if (type == BTRFS_FILE_EXTENT_INLINE)
			goto next;
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		data_offset = btrfs_file_extent_offset(eb, fi);

		if (disk_byte == wanted_disk_byte) {
			eie = NULL;
			old = NULL;
			if (ref->key_for_search.offset == key.offset - data_offset)
				count++;
			else
				goto next;
			if (!ctx->skip_inode_ref_list) {
				ret = check_extent_in_eb(ctx, &key, eb, fi, &eie);
				if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
				    ret < 0)
					break;
			}
			if (ret > 0)
				goto next;
			ret = ulist_add_merge_ptr(parents, eb->start,
						  eie, (void **)&old, GFP_NOFS);
			if (ret < 0)
				break;
			if (!ret && !ctx->skip_inode_ref_list) {
				while (old->next)
					old = old->next;
				old->next = eie;
			}
			eie = NULL;
		}
next:
		if (ctx->time_seq == BTRFS_SEQ_LAST)
			ret = btrfs_next_item(root, path);
		else
			ret = btrfs_next_old_item(root, path, ctx->time_seq);
	}

	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
		free_inode_elem_list(eie);
	else if (ret > 0)
		ret = 0;

	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int resolve_indirect_ref(struct btrfs_backref_walk_ctx *ctx,
				struct btrfs_path *path,
				struct preftrees *preftrees,
				struct prelim_ref *ref, struct ulist *parents)
{
	struct btrfs_root *root;
	struct extent_buffer *eb;
	int ret = 0;
	int root_level;
	int level = ref->level;
	struct btrfs_key search_key = ref->key_for_search;

	/*
	 * If we're search_commit_root we could possibly be holding locks on
	 * other tree nodes.  This happens when qgroups does backref walks when
	 * adding new delayed refs.  To deal with this we need to look in cache
	 * for the root, and if we don't find it then we need to search the
	 * tree_root's commit root, thus the btrfs_get_fs_root_commit_root usage
	 * here.
	 */
	if (path->search_commit_root)
		root = btrfs_get_fs_root_commit_root(ctx->fs_info, path, ref->root_id);
	else
		root = btrfs_get_fs_root(ctx->fs_info, ref->root_id, false);
	if (IS_ERR(root)) {
		ret = PTR_ERR(root);
		goto out_free;
	}

	if (!path->search_commit_root &&
	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
		ret = -ENOENT;
		goto out;
	}

	if (btrfs_is_testing(ctx->fs_info)) {
		ret = -ENOENT;
		goto out;
	}

	if (path->search_commit_root)
		root_level = btrfs_header_level(root->commit_root);
	else if (ctx->time_seq == BTRFS_SEQ_LAST)
		root_level = btrfs_header_level(root->node);
	else
		root_level = btrfs_old_root_level(root, ctx->time_seq);

	if (root_level + 1 == level)
		goto out;

	/*
	 * We can often find data backrefs with an offset that is too large
	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
	 * subtracting a file's offset with the data offset of its
	 * corresponding extent data item. This can happen for example in the
	 * clone ioctl.
	 *
	 * So if we detect such case we set the search key's offset to zero to
	 * make sure we will find the matching file extent item at
	 * add_all_parents(), otherwise we will miss it because the offset
	 * taken from the backref is much larger than the offset of the file
	 * extent item. This can make us scan a very large number of file
	 * extent items, but at least it will not make us miss any.
	 *
	 * This is an ugly workaround for a behaviour that should have never
	 * existed, but it does and a fix for the clone ioctl would touch a lot
	 * of places, cause backwards incompatibility and would not fix the
	 * problem for extents cloned with older kernels.
	 */
	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
	    search_key.offset >= LLONG_MAX)
		search_key.offset = 0;
	path->lowest_level = level;
	if (ctx->time_seq == BTRFS_SEQ_LAST)
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	else
		ret = btrfs_search_old_slot(root, &search_key, path, ctx->time_seq);

	btrfs_debug(ctx->fs_info,
		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
		 ref->root_id, level, ref->count, ret,
		 ref->key_for_search.objectid, ref->key_for_search.type,
		 ref->key_for_search.offset);
	if (ret < 0)
		goto out;

	eb = path->nodes[level];
	while (!eb) {
		if (WARN_ON(!level)) {
			ret = 1;
			goto out;
		}
		level--;
		eb = path->nodes[level];
	}

	ret = add_all_parents(ctx, root, path, parents, preftrees, ref, level);
out:
	btrfs_put_root(root);
out_free:
	path->lowest_level = 0;
	btrfs_release_path(path);
	return ret;
}

static struct extent_inode_elem *
unode_aux_to_inode_list(struct ulist_node *node)
{
	if (!node)
		return NULL;
	return (struct extent_inode_elem *)(uintptr_t)node->aux;
}
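
/*
 * The ulist's aux field is (ab)used to carry the inode list for each parent,
 * stored as a pointer cast to u64.  A minimal sketch of the round trip,
 * matching how add_all_parents() and find_parent_nodes() use it:
 *
 *	struct extent_inode_elem *eie = ...;
 *	struct extent_inode_elem *old = NULL;
 *
 *	ret = ulist_add_merge_ptr(parents, eb->start, eie,
 *				  (void **)&old, GFP_NOFS);
 *	... later, when iterating:
 *	eie = unode_aux_to_inode_list(node);
 *
 * ulist_add_merge_ptr() returns 0 and hands back the existing aux value when
 * the bytenr was already present, which is why callers append to the old
 * list instead of overwriting it.
 */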

static void free_leaf_list(struct ulist *ulist)
{
	struct ulist_node *node;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(ulist, &uiter)))
		free_inode_elem_list(unode_aux_to_inode_list(node));

	ulist_free(ulist);
}

/*
 * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
 * Once all of the references are located, we iterate over the tree of
 * indirect refs with missing keys. An appropriate key is located and
 * the ref is moved onto the tree for indirect refs. After all missing
 * keys are thus located, we iterate over the indirect ref tree, resolve
 * each reference, and then insert the resolved reference onto the
 * direct tree (merging there too).
 *
 * New backrefs (i.e., for parent nodes) are added to the appropriate
 * rbtree as they are encountered. The new backrefs are subsequently
 * resolved as above.
 */
static int resolve_indirect_refs(struct btrfs_backref_walk_ctx *ctx,
				 struct btrfs_path *path,
				 struct preftrees *preftrees,
				 struct share_check *sc)
{
	int err;
	int ret = 0;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;
	struct rb_node *rnode;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * We could trade memory usage for performance here by iterating
	 * the tree, allocating new refs for each insertion, and then
	 * freeing the entire indirect tree when we're done.  In some test
	 * cases, the tree can grow quite large (~200k objects).
	 */
	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
		struct prelim_ref *ref;

		ref = rb_entry(rnode, struct prelim_ref, rbnode);
		if (WARN(ref->parent,
			 "BUG: direct ref found in indirect tree")) {
			ret = -EINVAL;
			goto out;
		}

		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
		preftrees->indirect.count--;

		if (ref->count == 0) {
			free_pref(ref);
			continue;
		}

		if (sc && ref->root_id != sc->root->root_key.objectid) {
			free_pref(ref);
			ret = BACKREF_FOUND_SHARED;
			goto out;
		}
		err = resolve_indirect_ref(ctx, path, preftrees, ref, parents);
		/*
		 * We can only tolerate ENOENT; otherwise, we should catch the
		 * error and return directly.
		 */
		if (err == -ENOENT) {
			prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref,
					  NULL);
			continue;
		} else if (err) {
			free_pref(ref);
			ret = err;
			goto out;
		}

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;
		ref->inode_list = unode_aux_to_inode_list(node);

		/* Add a prelim_ref(s) for any other parent(s). */
		while ((node = ulist_next(parents, &uiter))) {
			struct prelim_ref *new_ref;

			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
						   GFP_NOFS);
			if (!new_ref) {
				free_pref(ref);
				ret = -ENOMEM;
				goto out;
			}
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			new_ref->inode_list = unode_aux_to_inode_list(node);
			prelim_ref_insert(ctx->fs_info, &preftrees->direct,
					  new_ref, NULL);
		}

		/*
		 * Now it's a direct ref, put it in the direct tree. We must
		 * do this last because the ref could be merged/freed here.
		 */
		prelim_ref_insert(ctx->fs_info, &preftrees->direct, ref, NULL);

		ulist_reinit(parents);
		cond_resched();
	}
out:
	/*
	 * We may have inode lists attached to refs in the parents ulist, so we
	 * must free them before freeing the ulist and its refs.
	 */
	free_leaf_list(parents);
	return ret;
}

/*
 * read tree blocks and add keys where required.
 */
static int add_missing_keys(struct btrfs_fs_info *fs_info,
			    struct preftrees *preftrees, bool lock)
{
	struct prelim_ref *ref;
	struct extent_buffer *eb;
	struct preftree *tree = &preftrees->indirect_missing_keys;
	struct rb_node *node;

	while ((node = rb_first_cached(&tree->root))) {
		struct btrfs_tree_parent_check check = { 0 };

		ref = rb_entry(node, struct prelim_ref, rbnode);
		rb_erase_cached(node, &tree->root);

		BUG_ON(ref->parent);	/* should not be a direct ref */
		BUG_ON(ref->key_for_search.type);
		BUG_ON(!ref->wanted_disk_byte);

		check.level = ref->level - 1;
		check.owner_root = ref->root_id;

		eb = read_tree_block(fs_info, ref->wanted_disk_byte, &check);
		if (IS_ERR(eb)) {
			free_pref(ref);
			return PTR_ERR(eb);
		}
		if (!extent_buffer_uptodate(eb)) {
			free_pref(ref);
			free_extent_buffer(eb);
			return -EIO;
		}

		if (lock)
			btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		if (lock)
			btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
		cond_resched();
	}
	return 0;
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller than or equal to @seq to the list
 */
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_head *head, u64 seq,
			    struct preftrees *preftrees, struct share_check *sc)
{
	struct btrfs_delayed_ref_node *node;
	struct btrfs_key key;
	struct rb_node *n;
	int count;
	int ret = 0;

	spin_lock(&head->lock);
	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				ref_node);
		if (node->seq > seq)
			continue;

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			WARN_ON(1);
			continue;
		case BTRFS_ADD_DELAYED_REF:
			count = node->ref_mod;
			break;
		case BTRFS_DROP_DELAYED_REF:
			count = node->ref_mod * -1;
			break;
		default:
			BUG();
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			/* NORMAL INDIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;
			struct btrfs_key *key_ptr = NULL;

			if (head->extent_op && head->extent_op->update_key) {
				btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
				key_ptr = &key;
			}

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       key_ptr, ref->level + 1,
					       node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			/* SHARED DIRECT METADATA backref */
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);

			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
					     ref->parent, node->bytenr, count,
					     sc, GFP_ATOMIC);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;

			/*
			 * If we have a share check context and a reference for
			 * another inode, we can't exit immediately. This is
			 * because even if this is a BTRFS_ADD_DELAYED_REF
			 * reference we may find next a BTRFS_DROP_DELAYED_REF
			 * which cancels out this ADD reference.
			 *
			 * If this is a DROP reference and there was no previous
			 * ADD reference, then we need to signal that when we
			 * process references from the extent tree (through
			 * add_inline_refs() and add_keyed_refs()), we should
			 * not exit early if we find a reference for another
			 * inode, because one of the delayed DROP references
			 * may cancel that reference in the extent tree.
			 */
			if (sc && count < 0)
				sc->have_delayed_delete_refs = true;

			ret = add_indirect_ref(fs_info, preftrees, ref->root,
					       &key, 0, node->bytenr, count, sc,
					       GFP_ATOMIC);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
					     node->bytenr, count, sc,
					     GFP_ATOMIC);
			break;
		}
		default:
			WARN_ON(1);
		}
		/*
		 * We must ignore BACKREF_FOUND_SHARED until all delayed
		 * refs have been checked.
		 */
		if (ret && (ret != BACKREF_FOUND_SHARED))
			break;
	}
	if (!ret)
		ret = extent_is_shared(sc);

	spin_unlock(&head->lock);
	return ret;
}
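
/*
 * Example of the count bookkeeping above (hypothetical history): queued
 * delayed refs for the same tree block ref with ref_mod values +1 (ADD),
 * +1 (ADD) and -2 (DROP) merge in the preftree to a net ref->count of 0,
 * so resolve_indirect_refs() later discards the ref instead of reporting
 * a stale reference.
 */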

/*
 * add all inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_inline_refs(struct btrfs_backref_walk_ctx *ctx,
			   struct btrfs_path *path,
			   int *info_level, struct preftrees *preftrees,
			   struct share_check *sc)
{
	int ret = 0;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	unsigned long ptr;
	unsigned long end;
	struct btrfs_extent_item *ei;
	u64 flags;
	u64 item_size;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size(leaf, slot);
	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);

	if (ctx->check_extent_item) {
		ret = ctx->check_extent_item(ctx->bytenr, ei, leaf, ctx->user_ctx);
		if (ret)
			return ret;
	}

	flags = btrfs_extent_flags(leaf, ei);
	btrfs_item_key_to_cpu(leaf, &found_key, slot);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
		*info_level = found_key.offset;
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;
		u64 offset;
		int type;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_get_extent_inline_ref_type(leaf, iref,
							BTRFS_REF_TYPE_ANY);
		if (type == BTRFS_REF_TYPE_INVALID)
			return -EUCLEAN;

		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = add_direct_ref(ctx->fs_info, preftrees,
					     *info_level + 1, offset,
					     ctx->bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);

			ret = add_direct_ref(ctx->fs_info, preftrees, 0, offset,
					     ctx->bytenr, count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = add_indirect_ref(ctx->fs_info, preftrees, offset,
					       NULL, *info_level + 1,
					       ctx->bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			if (!ctx->skip_data_ref ||
			    !ctx->skip_data_ref(root, key.objectid, key.offset,
						ctx->user_ctx))
				ret = add_indirect_ref(ctx->fs_info, preftrees,
						       root, &key, 0, ctx->bytenr,
						       count, sc, GFP_NOFS);
			break;
		}
		case BTRFS_EXTENT_OWNER_REF_KEY:
			ASSERT(btrfs_fs_incompat(ctx->fs_info, SIMPLE_QUOTA));
			break;
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}
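
/*
 * Layout walked by the loop above, for a (hypothetical) data extent item
 * holding two inline backrefs:
 *
 *	[ btrfs_extent_item | inline ref 1 | inline ref 2 ]
 *	^ei                 ^ptr                          ^end
 *
 * Each iteration advances ptr by btrfs_extent_inline_ref_size(type), which
 * accounts for the trailing btrfs_extent_data_ref or btrfs_shared_data_ref
 * payload where present.  For tree blocks without SKINNY_METADATA, a
 * btrfs_tree_block_info header sits between the extent item and the first
 * inline ref, as handled before the loop.
 */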

/*
 * add all non-inline backrefs for bytenr to the list
 *
 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 */
static int add_keyed_refs(struct btrfs_backref_walk_ctx *ctx,
			  struct btrfs_root *extent_root,
			  struct btrfs_path *path,
			  int info_level, struct preftrees *preftrees,
			  struct share_check *sc)
{
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	int ret;
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);
		if (ret < 0)
			break;
		if (ret) {
			ret = 0;
			break;
		}

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != ctx->bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			/* SHARED DIRECT METADATA backref */
			ret = add_direct_ref(fs_info, preftrees,
					     info_level + 1, key.offset,
					     ctx->bytenr, 1, NULL, GFP_NOFS);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			/* SHARED DIRECT FULL backref */
			struct btrfs_shared_data_ref *sdref;
			int count;

			sdref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = add_direct_ref(fs_info, preftrees, 0,
					     key.offset, ctx->bytenr, count,
					     sc, GFP_NOFS);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			/* NORMAL INDIRECT METADATA backref */
			ret = add_indirect_ref(fs_info, preftrees, key.offset,
					       NULL, info_level + 1, ctx->bytenr,
					       1, NULL, GFP_NOFS);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			/* NORMAL INDIRECT DATA backref */
			struct btrfs_extent_data_ref *dref;
			int count;
			u64 root;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);

			if (sc && key.objectid != sc->inum &&
			    !sc->have_delayed_delete_refs) {
				ret = BACKREF_FOUND_SHARED;
				break;
			}

			root = btrfs_extent_data_ref_root(leaf, dref);

			if (!ctx->skip_data_ref ||
			    !ctx->skip_data_ref(root, key.objectid, key.offset,
						ctx->user_ctx))
				ret = add_indirect_ref(fs_info, preftrees, root,
						       &key, 0, ctx->bytenr,
						       count, sc, GFP_NOFS);
			break;
		}
		default:
			WARN_ON(1);
		}
		if (ret)
			return ret;

	}

	return ret;
}

/*
 * The caller has joined a transaction or is holding a read lock on the
 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
 * snapshot field changing while updating or checking the cache.
 */
static bool lookup_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
					struct btrfs_root *root,
					u64 bytenr, int level, bool *is_shared)
{
	const struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_backref_shared_cache_entry *entry;

	if (!current->journal_info)
		lockdep_assert_held(&fs_info->commit_root_sem);

	if (!ctx->use_path_cache)
		return false;

	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
		return false;

	/*
	 * Level -1 is used for the data extent, which is not reliable to cache
	 * because its reference count can increase or decrease without us
	 * realizing. We cache results only for extent buffers that lead from
	 * the root node down to the leaf with the file extent item.
	 */
	ASSERT(level >= 0);

	entry = &ctx->path_cache_entries[level];

	/* Unused cache entry or being used for some other extent buffer. */
	if (entry->bytenr != bytenr)
		return false;

	/*
	 * We cached a false result, but the last snapshot generation of the
	 * root changed, so we now have a snapshot. Don't trust the result.
	 */
	if (!entry->is_shared &&
	    entry->gen != btrfs_root_last_snapshot(&root->root_item))
		return false;

	/*
	 * If we cached a true result and the last generation used for dropping
	 * a root changed, we can not trust the result, because the dropped root
	 * could be a snapshot sharing this extent buffer.
	 */
	if (entry->is_shared &&
	    entry->gen != btrfs_get_last_root_drop_gen(fs_info))
		return false;

	*is_shared = entry->is_shared;
	/*
	 * If the node at this level is shared, then all nodes below are also
	 * shared. Currently some of the nodes below may be marked as not shared
	 * because we have just switched from one leaf to another, and switched
	 * also other nodes above the leaf and below the current level, so mark
	 * them as shared.
	 */
	if (*is_shared) {
		for (int i = 0; i < level; i++) {
			ctx->path_cache_entries[i].is_shared = true;
			ctx->path_cache_entries[i].gen = entry->gen;
		}
	}

	return true;
}
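
/*
 * Example of the generation checks above (made-up numbers): a "not shared"
 * result cached with gen == 100 is ignored if btrfs_root_last_snapshot()
 * later reports 101, since the new snapshot may share the buffer; a
 * "shared" result cached with gen == 100 is ignored if
 * btrfs_get_last_root_drop_gen() reports 101, since the dropped root may
 * have been the only other holder of the buffer.
 */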

/*
 * The caller has joined a transaction or is holding a read lock on the
 * fs_info->commit_root_sem semaphore, so no need to worry about the root's last
 * snapshot field changing while updating or checking the cache.
 */
static void store_backref_shared_cache(struct btrfs_backref_share_check_ctx *ctx,
				       struct btrfs_root *root,
				       u64 bytenr, int level, bool is_shared)
{
	const struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_backref_shared_cache_entry *entry;
	u64 gen;

	if (!current->journal_info)
		lockdep_assert_held(&fs_info->commit_root_sem);

	if (!ctx->use_path_cache)
		return;

	if (WARN_ON_ONCE(level >= BTRFS_MAX_LEVEL))
		return;

	/*
	 * Level -1 is used for the data extent, which is not reliable to cache
	 * because its reference count can increase or decrease without us
	 * realizing. We cache results only for extent buffers that lead from
	 * the root node down to the leaf with the file extent item.
	 */
	ASSERT(level >= 0);

	if (is_shared)
		gen = btrfs_get_last_root_drop_gen(fs_info);
	else
		gen = btrfs_root_last_snapshot(&root->root_item);

	entry = &ctx->path_cache_entries[level];
	entry->bytenr = bytenr;
	entry->is_shared = is_shared;
	entry->gen = gen;

	/*
	 * If we found an extent buffer is shared, set the cache result for all
	 * extent buffers below it to true. As nodes in the path are COWed,
	 * their sharedness is moved to their children, and if a leaf is COWed,
	 * then the sharedness of a data extent becomes direct: the refcount of
	 * the data extent is increased in the extent item in the extent tree.
	 */
	if (is_shared) {
		for (int i = 0; i < level; i++) {
			entry = &ctx->path_cache_entries[i];
			entry->is_shared = is_shared;
			entry->gen = gen;
		}
	}
}

/*
 * This adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list.
 *
 * @ctx:     Backref walking context object, must not be NULL.
 * @sc:      If !NULL, then immediately return BACKREF_FOUND_SHARED when a
 *           shared extent is detected.
 *
 * Otherwise this returns 0 for success and <0 for an error.
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_backref_walk_ctx *ctx,
			     struct share_check *sc)
{
	struct btrfs_root *root = btrfs_extent_root(ctx->fs_info, ctx->bytenr);
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	struct prelim_ref *ref;
	struct rb_node *node;
	struct extent_inode_elem *eie = NULL;
	struct preftrees preftrees = {
		.direct = PREFTREE_INIT,
		.indirect = PREFTREE_INIT,
		.indirect_missing_keys = PREFTREE_INIT
	};

	/* Roots ulist is not needed when using a sharedness check context. */
	if (sc)
		ASSERT(ctx->roots == NULL);

	key.objectid = ctx->bytenr;
	key.offset = (u64)-1;
	if (btrfs_fs_incompat(ctx->fs_info, SKINNY_METADATA))
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	if (!ctx->trans) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	if (ctx->time_seq == BTRFS_SEQ_LAST)
		path->skip_locking = 1;

again:
	head = NULL;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret == 0) {
		/*
		 * Key with offset -1 found, there would have to exist an extent
		 * item with such offset, but this is out of the valid range.
		 */
		ret = -EUCLEAN;
		goto out;
	}

	if (ctx->trans && likely(ctx->trans->type != __TRANS_DUMMY) &&
	    ctx->time_seq != BTRFS_SEQ_LAST) {
		/*
		 * We have a specific time_seq we care about and trans which
		 * means we have the path lock, we need to grab the ref head and
		 * lock it so we have a consistent view of the refs at the given
		 * time.
		 */
		delayed_refs = &ctx->trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(delayed_refs, ctx->bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				refcount_inc(&head->refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref_head(head);
				goto again;
			}
			spin_unlock(&delayed_refs->lock);
			ret = add_delayed_refs(ctx->fs_info, head, ctx->time_seq,
					       &preftrees, sc);
			mutex_unlock(&head->mutex);
			if (ret)
				goto out;
		} else {
			spin_unlock(&delayed_refs->lock);
		}
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		path->slots[0]--;
		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == ctx->bytenr &&
		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
		     key.type == BTRFS_METADATA_ITEM_KEY)) {
			ret = add_inline_refs(ctx, path, &info_level,
					      &preftrees, sc);
			if (ret)
				goto out;
			ret = add_keyed_refs(ctx, root, path, info_level,
					     &preftrees, sc);
			if (ret)
				goto out;
		}
	}

	/*
	 * If we have a share context and we reached here, it means the extent
	 * is not directly shared (no multiple reference items for it),
	 * otherwise we would have exited earlier with a return value of
	 * BACKREF_FOUND_SHARED after processing delayed references or while
	 * processing inline or keyed references from the extent tree.
	 * The extent may however be indirectly shared through shared subtrees
	 * as a result from creating snapshots, so we determine below what is
	 * its parent node, in case we are dealing with a metadata extent, or
	 * what's the leaf (or leaves), from a fs tree, that has a file extent
	 * item pointing to it in case we are dealing with a data extent.
	 */
	ASSERT(extent_is_shared(sc) == 0);

	/*
	 * If we are here for a data extent and we have a share_check structure
	 * it means the data extent is not directly shared (does not have
	 * multiple reference items), so we have to check if a path in the fs
	 * tree (going from the root node down to the leaf that has the file
	 * extent item pointing to the data extent) is shared, that is, if any
	 * of the extent buffers in the path is referenced by other trees.
	 */
	if (sc && ctx->bytenr == sc->data_bytenr) {
		/*
		 * If our data extent is from a generation more recent than the
		 * last generation used to snapshot the root, then we know that
		 * it can not be shared through subtrees, so we can skip
		 * resolving indirect references, there's no point in
		 * determining the extent buffers for the path from the fs tree
		 * root node down to the leaf that has the file extent item that
		 * points to the data extent.
		 */
		if (sc->data_extent_gen >
		    btrfs_root_last_snapshot(&sc->root->root_item)) {
			ret = BACKREF_FOUND_NOT_SHARED;
			goto out;
		}

		/*
		 * If we are only determining if a data extent is shared or not
		 * and the corresponding file extent item is located in the same
		 * leaf as the previous file extent item, we can skip resolving
		 * indirect references for a data extent, since the fs tree path
		 * is the same (same leaf, so same path). We skip as long as the
		 * cached result for the leaf is valid and only if there's only
		 * one file extent item pointing to the data extent, because in
		 * the case of multiple file extent items, they may be located
		 * in different leaves and therefore we have multiple paths.
		 */
		if (sc->ctx->curr_leaf_bytenr == sc->ctx->prev_leaf_bytenr &&
		    sc->self_ref_count == 1) {
			bool cached;
			bool is_shared;

			cached = lookup_backref_shared_cache(sc->ctx, sc->root,
						     sc->ctx->curr_leaf_bytenr,
						     0, &is_shared);
			if (cached) {
				if (is_shared)
					ret = BACKREF_FOUND_SHARED;
				else
					ret = BACKREF_FOUND_NOT_SHARED;
				goto out;
			}
		}
	}

	btrfs_release_path(path);

	ret = add_missing_keys(ctx->fs_info, &preftrees, path->skip_locking == 0);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

	ret = resolve_indirect_refs(ctx, path, &preftrees, sc);
	if (ret)
		goto out;

	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));

	/*
	 * This walks the tree of merged and resolved refs. Tree blocks are
	 * read in as needed. Unique entries are added to the ulist, and
	 * the list of found roots is updated.
	 *
	 * We release the entire tree in one go before returning.
	 */
	node = rb_first_cached(&preftrees.direct.root);
	while (node) {
		ref = rb_entry(node, struct prelim_ref, rbnode);
		node = rb_next(&ref->rbnode);
		/*
		 * ref->count < 0 can happen here if there are delayed
		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
		 * prelim_ref_insert() relies on this when merging
		 * identical refs to keep the overall count correct.
		 * prelim_ref_insert() will merge only those refs
		 * which compare identically.  Any refs having
		 * e.g. different offsets would not be merged,
		 * and would retain their original ref->count < 0.
		 */
		if (ctx->roots && ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(ctx->roots, ref->root_id, 0, GFP_NOFS);
			if (ret < 0)
				goto out;
		}
		if (ref->count && ref->parent) {
			if (!ctx->skip_inode_ref_list && !ref->inode_list &&
			    ref->level == 0) {
				struct btrfs_tree_parent_check check = { 0 };
				struct extent_buffer *eb;

				check.level = ref->level;

				eb = read_tree_block(ctx->fs_info, ref->parent,
						     &check);
				if (IS_ERR(eb)) {
					ret = PTR_ERR(eb);
					goto out;
				}
				if (!extent_buffer_uptodate(eb)) {
					free_extent_buffer(eb);
					ret = -EIO;
					goto out;
				}

				if (!path->skip_locking)
					btrfs_tree_read_lock(eb);
				ret = find_extent_in_eb(ctx, eb, &eie);
				if (!path->skip_locking)
					btrfs_tree_read_unlock(eb);
				free_extent_buffer(eb);
				if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
				    ret < 0)
					goto out;
				ref->inode_list = eie;
				/*
				 * We transferred the list ownership to the ref,
				 * so set to NULL to avoid a double free in case
				 * an error happens after this.
				 */
				eie = NULL;
			}
			ret = ulist_add_merge_ptr(ctx->refs, ref->parent,
						  ref->inode_list,
						  (void **)&eie, GFP_NOFS);
			if (ret < 0)
				goto out;
			if (!ret && !ctx->skip_inode_ref_list) {
				/*
				 * We've recorded that parent, so we must extend
				 * its inode list here.
				 *
				 * However if there was corruption we may not
				 * have found an eie, return an error in this
				 * case.
				 */
				ASSERT(eie);
				if (!eie) {
					ret = -EUCLEAN;
					goto out;
				}
				while (eie->next)
					eie = eie->next;
				eie->next = ref->inode_list;
			}
			eie = NULL;
			/*
			 * We have transferred the inode list ownership from
			 * this ref to the ref we added to the 'refs' ulist.
			 * So set this ref's inode list to NULL to avoid
			 * use-after-free when our caller uses it or double
			 * frees in case an error happens before we return.
			 */
			ref->inode_list = NULL;
		}
		cond_resched();
	}

out:
	btrfs_free_path(path);

	prelim_release(&preftrees.direct);
	prelim_release(&preftrees.indirect);
	prelim_release(&preftrees.indirect_missing_keys);

	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP || ret < 0)
		free_inode_elem_list(eie);
	return ret;
}

/*
 * Finds all leaves with a reference to the specified combination of
 * @ctx->bytenr and @ctx->extent_item_pos. The bytenrs of the found leaves are
 * added to the ulist at @ctx->refs, and that ulist is allocated by this
 * function. The caller should free the ulist with free_leaf_list() if
 * @ctx->ignore_extent_item_pos is false, otherwise a simple ulist_free() is
 * enough.
 *
 * Returns 0 on success and < 0 on error. On error @ctx->refs is not allocated.
 */
int btrfs_find_all_leafs(struct btrfs_backref_walk_ctx *ctx)
{
	int ret;

	ASSERT(ctx->refs == NULL);

	ctx->refs = ulist_alloc(GFP_NOFS);
	if (!ctx->refs)
		return -ENOMEM;

	ret = find_parent_nodes(ctx, NULL);
	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP ||
	    (ret < 0 && ret != -ENOENT)) {
		free_leaf_list(ctx->refs);
		ctx->refs = NULL;
		return ret;
	}

	return 0;
}
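
/*
 * Usage sketch for btrfs_find_all_leafs() (illustrative only; error handling
 * trimmed, process_leaf() stands in for caller code):
 *
 *	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
 *	struct ulist_iterator uiter;
 *	struct ulist_node *node;
 *
 *	walk_ctx.bytenr = extent_bytenr;
 *	walk_ctx.fs_info = fs_info;
 *	walk_ctx.ignore_extent_item_pos = true;
 *
 *	ret = btrfs_find_all_leafs(&walk_ctx);
 *	ULIST_ITER_INIT(&uiter);
 *	while ((node = ulist_next(walk_ctx.refs, &uiter)))
 *		process_leaf(node->val);
 *	ulist_free(walk_ctx.refs);
 *
 * With ignore_extent_item_pos set, no inode lists are attached and the plain
 * ulist_free() shown here is enough, per the comment above.
 */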
1724
1725/*
1726 * Walk all backrefs for a given extent to find all roots that reference this
1727 * extent. Walking a backref means finding all extents that reference this
1728 * extent and in turn walk the backrefs of those, too. Naturally this is a
1729 * recursive process, but here it is implemented in an iterative fashion: We
1730 * find all referencing extents for the extent in question and put them on a
1731 * list. In turn, we find all referencing extents for those, further appending
1732 * to the list. The way we iterate the list allows adding more elements after
1733 * the current one while iterating. The process stops when we reach the end
1734 * of the list.
1735 *
1736 * Found roots are added to @ctx->roots, which is allocated by this function if
1737 * it points to NULL, in which case the caller is responsible for freeing it
1738 * once it's no longer needed.
1739 * This function requires @ctx->refs to be NULL, as it uses it for allocating a
1740 * ulist to do temporary work, and frees it before returning.
1741 *
1742 * Returns 0 on success, < 0 on error.
1743 */
1744static int btrfs_find_all_roots_safe(struct btrfs_backref_walk_ctx *ctx)
1745{
1746	const u64 orig_bytenr = ctx->bytenr;
1747	const bool orig_skip_inode_ref_list = ctx->skip_inode_ref_list;
1748	bool roots_ulist_allocated = false;
1749	struct ulist_iterator uiter;
1750	int ret = 0;
1751
1752	ASSERT(ctx->refs == NULL);
1753
1754	ctx->refs = ulist_alloc(GFP_NOFS);
1755	if (!ctx->refs)
1756		return -ENOMEM;
1757
1758	if (!ctx->roots) {
1759		ctx->roots = ulist_alloc(GFP_NOFS);
1760		if (!ctx->roots) {
1761			ulist_free(ctx->refs);
1762			ctx->refs = NULL;
1763			return -ENOMEM;
1764		}
1765		roots_ulist_allocated = true;
1766	}
1767
1768	ctx->skip_inode_ref_list = true;
1769
1770	ULIST_ITER_INIT(&uiter);
1771	while (1) {
1772		struct ulist_node *node;
1773
1774		ret = find_parent_nodes(ctx, NULL);
1775		if (ret < 0 && ret != -ENOENT) {
1776			if (roots_ulist_allocated) {
1777				ulist_free(ctx->roots);
1778				ctx->roots = NULL;
1779			}
1780			break;
1781		}
1782		ret = 0;
1783		node = ulist_next(ctx->refs, &uiter);
1784		if (!node)
1785			break;
1786		ctx->bytenr = node->val;
1787		cond_resched();
1788	}
1789
1790	ulist_free(ctx->refs);
1791	ctx->refs = NULL;
1792	ctx->bytenr = orig_bytenr;
1793	ctx->skip_inode_ref_list = orig_skip_inode_ref_list;
1794
1795	return ret;
1796}
1797
1798int btrfs_find_all_roots(struct btrfs_backref_walk_ctx *ctx,
1799			 bool skip_commit_root_sem)
1800{
1801	int ret;
1802
1803	if (!ctx->trans && !skip_commit_root_sem)
1804		down_read(&ctx->fs_info->commit_root_sem);
1805	ret = btrfs_find_all_roots_safe(ctx);
1806	if (!ctx->trans && !skip_commit_root_sem)
1807		up_read(&ctx->fs_info->commit_root_sem);
1808	return ret;
1809}
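
/*
 * Illustrative sketch (not part of this file): resolving all roots that
 * reference a given extent, similar to how the qgroup code drives this
 * interface. fs_info and extent_bytenr are hypothetical placeholders; with
 * no transaction in @ctx->trans the helper takes commit_root_sem itself.
 *
 *	struct btrfs_backref_walk_ctx ctx = { 0 };
 *
 *	ctx.fs_info = fs_info;
 *	ctx.bytenr = extent_bytenr;
 *	if (btrfs_find_all_roots(&ctx, false) == 0) {
 *		... every node->val in ctx.roots is a root objectid ...
 *		ulist_free(ctx.roots);
 *	}
 */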
1810
1811struct btrfs_backref_share_check_ctx *btrfs_alloc_backref_share_check_ctx(void)
1812{
1813	struct btrfs_backref_share_check_ctx *ctx;
1814
1815	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1816	if (!ctx)
1817		return NULL;
1818
1819	ulist_init(&ctx->refs);
1820
1821	return ctx;
1822}
1823
1824void btrfs_free_backref_share_ctx(struct btrfs_backref_share_check_ctx *ctx)
1825{
1826	if (!ctx)
1827		return;
1828
1829	ulist_release(&ctx->refs);
1830	kfree(ctx);
1831}
1832
1833/*
1834 * Check if a data extent is shared or not.
1835 *
1836 * @inode:       The inode whose extent we are checking.
1837 * @bytenr:      Logical bytenr of the extent we are checking.
1838 * @extent_gen:  Generation of the extent (file extent item) or 0 if it is
1839 *               not known.
1840 * @ctx:         A backref sharedness check context.
1841 *
1842 * btrfs_is_data_extent_shared uses the backref walking code but will short
1843 * circuit as soon as it finds a root or inode that doesn't match the
1844 * one passed in. This provides a significant performance benefit for
1845 * callers (such as fiemap) which want to know whether the extent is
1846 * shared but do not need a ref count.
1847 *
1848 * This attempts to attach to the running transaction in order to account for
1849 * delayed refs, but continues on even when no running transaction exists.
1850 *
1851 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
1852 */
1853int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
1854				u64 extent_gen,
1855				struct btrfs_backref_share_check_ctx *ctx)
1856{
1857	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
1858	struct btrfs_root *root = inode->root;
1859	struct btrfs_fs_info *fs_info = root->fs_info;
1860	struct btrfs_trans_handle *trans;
1861	struct ulist_iterator uiter;
1862	struct ulist_node *node;
1863	struct btrfs_seq_list elem = BTRFS_SEQ_LIST_INIT(elem);
1864	int ret = 0;
1865	struct share_check shared = {
1866		.ctx = ctx,
1867		.root = root,
1868		.inum = btrfs_ino(inode),
1869		.data_bytenr = bytenr,
1870		.data_extent_gen = extent_gen,
1871		.share_count = 0,
1872		.self_ref_count = 0,
1873		.have_delayed_delete_refs = false,
1874	};
1875	int level;
1876	bool leaf_cached;
1877	bool leaf_is_shared;
1878
1879	for (int i = 0; i < BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE; i++) {
1880		if (ctx->prev_extents_cache[i].bytenr == bytenr)
1881			return ctx->prev_extents_cache[i].is_shared;
1882	}
1883
1884	ulist_init(&ctx->refs);
1885
1886	trans = btrfs_join_transaction_nostart(root);
1887	if (IS_ERR(trans)) {
1888		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1889			ret = PTR_ERR(trans);
1890			goto out;
1891		}
1892		trans = NULL;
1893		down_read(&fs_info->commit_root_sem);
1894	} else {
1895		btrfs_get_tree_mod_seq(fs_info, &elem);
1896		walk_ctx.time_seq = elem.seq;
1897	}
1898
1899	ctx->use_path_cache = true;
1900
1901	/*
1902	 * We may have previously determined that the current leaf is shared.
1903	 * If it is, then we have a data extent that is shared due to a shared
1904	 * subtree (caused by snapshotting) and we don't need to check for data
1905	 * backrefs. If the leaf is not shared, then we must do backref walking
1906	 * to determine if the data extent is shared through reflinks.
1907	 */
1908	leaf_cached = lookup_backref_shared_cache(ctx, root,
1909						  ctx->curr_leaf_bytenr, 0,
1910						  &leaf_is_shared);
1911	if (leaf_cached && leaf_is_shared) {
1912		ret = 1;
1913		goto out_trans;
1914	}
1915
1916	walk_ctx.skip_inode_ref_list = true;
1917	walk_ctx.trans = trans;
1918	walk_ctx.fs_info = fs_info;
1919	walk_ctx.refs = &ctx->refs;
1920
1921	/* -1 means we are at the bytenr of the data extent. */
1922	level = -1;
1923	ULIST_ITER_INIT(&uiter);
1924	while (1) {
1925		const unsigned long prev_ref_count = ctx->refs.nnodes;
1926
1927		walk_ctx.bytenr = bytenr;
1928		ret = find_parent_nodes(&walk_ctx, &shared);
1929		if (ret == BACKREF_FOUND_SHARED ||
1930		    ret == BACKREF_FOUND_NOT_SHARED) {
1931			/* If shared must return 1, otherwise return 0. */
1932			ret = (ret == BACKREF_FOUND_SHARED) ? 1 : 0;
1933			if (level >= 0)
1934				store_backref_shared_cache(ctx, root, bytenr,
1935							   level, ret == 1);
1936			break;
1937		}
1938		if (ret < 0 && ret != -ENOENT)
1939			break;
1940		ret = 0;
1941
1942		/*
1943		 * More than one extent buffer (bytenr) may have been added to
1944		 * the ctx->refs ulist, in which case we have to check multiple
1945		 * tree paths in case the first one is not shared, so we can not
1946		 * use the path cache which is made for a single path. Multiple
1947		 * extent buffers at the current level happen when:
1948		 *
1949		 * 1) level -1, the data extent: If our data extent was not
1950		 *    directly shared (without multiple reference items), then
1951		 *    it might have a single reference item with a count > 1 for
1952		 *    the same offset, which means there are 2 (or more) file
1953		 *    extent items that point to the data extent - this happens
1954		 *    when a file extent item needs to be split and then one
1955		 *    item gets moved to another leaf due to a b+tree leaf split
1956		 *    when inserting some item. In this case the file extent
1957		 *    items may be located in different leaves and therefore
1958		 *    some of the leaves may be referenced through shared
1959		 *    subtrees while others are not. Since our extent buffer
1960		 *    cache only works for a single path (by far the most common
1961		 *    case and simpler to deal with), we can not use it if we
1962		 *    have multiple leaves (which implies multiple paths).
1963		 *
1964		 * 2) level >= 0, a tree node/leaf: We can have a mix of direct
1965		 *    and indirect references on a b+tree node/leaf, so we have
1966		 *    to check multiple paths, and the extent buffer (the
1967		 *    current bytenr) may be shared or not. One example is
1968		 *    during relocation as we may get a shared tree block ref
1969		 *    (direct ref) and a non-shared tree block ref (indirect
1970		 *    ref) for the same node/leaf.
1971		 */
1972		if ((ctx->refs.nnodes - prev_ref_count) > 1)
1973			ctx->use_path_cache = false;
1974
1975		if (level >= 0)
1976			store_backref_shared_cache(ctx, root, bytenr,
1977						   level, false);
1978		node = ulist_next(&ctx->refs, &uiter);
1979		if (!node)
1980			break;
1981		bytenr = node->val;
1982		if (ctx->use_path_cache) {
1983			bool is_shared;
1984			bool cached;
1985
1986			level++;
1987			cached = lookup_backref_shared_cache(ctx, root, bytenr,
1988							     level, &is_shared);
1989			if (cached) {
1990				ret = (is_shared ? 1 : 0);
1991				break;
1992			}
1993		}
1994		shared.share_count = 0;
1995		shared.have_delayed_delete_refs = false;
1996		cond_resched();
1997	}
1998
1999	/*
2000	 * If the path cache is disabled, then it means at some tree level we
2001	 * got multiple parents due to a mix of direct and indirect backrefs or
2002	 * multiple leaves with file extent items pointing to the same data
2003	 * extent. We have to invalidate the cache and cache only the sharedness
2004	 * result for the levels where we got only one node/reference.
2005	 */
2006	if (!ctx->use_path_cache) {
2007		int i = 0;
2008
2009		level--;
2010		if (ret >= 0 && level >= 0) {
2011			bytenr = ctx->path_cache_entries[level].bytenr;
2012			ctx->use_path_cache = true;
2013			store_backref_shared_cache(ctx, root, bytenr, level, ret);
2014			i = level + 1;
2015		}
2016
2017		for ( ; i < BTRFS_MAX_LEVEL; i++)
2018			ctx->path_cache_entries[i].bytenr = 0;
2019	}
2020
2021	/*
2022	 * Cache the sharedness result for the data extent if we know our inode
2023	 * has more than 1 file extent item that refers to the data extent.
2024	 */
2025	if (ret >= 0 && shared.self_ref_count > 1) {
2026		int slot = ctx->prev_extents_cache_slot;
2027
2028		ctx->prev_extents_cache[slot].bytenr = shared.data_bytenr;
2029		ctx->prev_extents_cache[slot].is_shared = (ret == 1);
2030
2031		slot = (slot + 1) % BTRFS_BACKREF_CTX_PREV_EXTENTS_SIZE;
2032		ctx->prev_extents_cache_slot = slot;
2033	}
2034
2035out_trans:
2036	if (trans) {
2037		btrfs_put_tree_mod_seq(fs_info, &elem);
2038		btrfs_end_transaction(trans);
2039	} else {
2040		up_read(&fs_info->commit_root_sem);
2041	}
2042out:
2043	ulist_release(&ctx->refs);
2044	ctx->prev_leaf_bytenr = ctx->curr_leaf_bytenr;
2045
2046	return ret;
2047}
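
/*
 * Illustrative sketch (not part of this file): how a fiemap-like caller
 * might flag shared extents. The inode, disk_bytenr and extent_gen values
 * are hypothetical placeholders taken from a file extent item.
 *
 *	ret = btrfs_is_data_extent_shared(inode, disk_bytenr, extent_gen, ctx);
 *	if (ret < 0)
 *		goto out;
 *	if (ret == 1)
 *		flags |= FIEMAP_EXTENT_SHARED;
 */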
2048
2049int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
2050			  u64 start_off, struct btrfs_path *path,
2051			  struct btrfs_inode_extref **ret_extref,
2052			  u64 *found_off)
2053{
2054	int ret, slot;
2055	struct btrfs_key key;
2056	struct btrfs_key found_key;
2057	struct btrfs_inode_extref *extref;
2058	const struct extent_buffer *leaf;
2059	unsigned long ptr;
2060
2061	key.objectid = inode_objectid;
2062	key.type = BTRFS_INODE_EXTREF_KEY;
2063	key.offset = start_off;
2064
2065	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2066	if (ret < 0)
2067		return ret;
2068
2069	while (1) {
2070		leaf = path->nodes[0];
2071		slot = path->slots[0];
2072		if (slot >= btrfs_header_nritems(leaf)) {
2073			/*
2074			 * If the item at offset is not found,
2075			 * btrfs_search_slot will point us to the slot
2076			 * where it should be inserted. In our case
2077			 * that will be the slot directly before the
2078			 * next BTRFS_INODE_EXTREF_KEY item. In the case
2079			 * that we're pointing to the last slot in a
2080			 * leaf, we must move one leaf over.
2081			 */
2082			ret = btrfs_next_leaf(root, path);
2083			if (ret) {
2084				if (ret >= 1)
2085					ret = -ENOENT;
2086				break;
2087			}
2088			continue;
2089		}
2090
2091		btrfs_item_key_to_cpu(leaf, &found_key, slot);
2092
2093		/*
2094		 * Check that we're still looking at an extended ref key for
2095		 * this particular objectid. If we have a different
2096		 * objectid or type then there are no more to be found
2097		 * in the tree and we can exit.
2098		 */
2099		ret = -ENOENT;
2100		if (found_key.objectid != inode_objectid)
2101			break;
2102		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
2103			break;
2104
2105		ret = 0;
2106		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
2107		extref = (struct btrfs_inode_extref *)ptr;
2108		*ret_extref = extref;
2109		if (found_off)
2110			*found_off = found_key.offset;
2111		break;
2112	}
2113
2114	return ret;
2115}
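
/*
 * Illustrative sketch (not part of this file): walking every extref of an
 * inode by restarting the search at the last found offset + 1, which is
 * exactly what iterate_inode_extrefs() below does. root, ino and path are
 * hypothetical placeholders.
 *
 *	u64 off = 0;
 *	struct btrfs_inode_extref *extref;
 *
 *	while (btrfs_find_one_extref(root, ino, off, path, &extref, &off) == 0) {
 *		... extref points into path->nodes[0] ...
 *		btrfs_release_path(path);
 *		off++;
 *	}
 */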
2116
2117/*
2118 * This iterates to turn a name (from an iref/extref) into a full filesystem
2119 * path. Elements of the path are separated by '/' and the path is guaranteed
2120 * to be 0-terminated. The path is only given within the current file system,
2121 * therefore it never starts with a '/'. The caller is responsible for
2122 * providing "size" bytes in "dest". The dest buffer will be filled backwards
2123 * and finally the start point of the resulting string is returned; this
2124 * pointer is normally within dest.
2125 * In case the path buffer would overflow, the pointer is decremented further
2126 * as if output was written to the buffer, though no more output is actually
2127 * generated. That way, the caller can determine how much space would be
2128 * required for the path to fit into the buffer. In that case, the returned
2129 * value will be smaller than dest. Callers must check this!
2130 */
2131char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
2132			u32 name_len, unsigned long name_off,
2133			struct extent_buffer *eb_in, u64 parent,
2134			char *dest, u32 size)
2135{
2136	int slot;
2137	u64 next_inum;
2138	int ret;
2139	s64 bytes_left = ((s64)size) - 1;
2140	struct extent_buffer *eb = eb_in;
2141	struct btrfs_key found_key;
2142	struct btrfs_inode_ref *iref;
2143
2144	if (bytes_left >= 0)
2145		dest[bytes_left] = '\0';
2146
2147	while (1) {
2148		bytes_left -= name_len;
2149		if (bytes_left >= 0)
2150			read_extent_buffer(eb, dest + bytes_left,
2151					   name_off, name_len);
2152		if (eb != eb_in) {
2153			if (!path->skip_locking)
2154				btrfs_tree_read_unlock(eb);
2155			free_extent_buffer(eb);
2156		}
2157		ret = btrfs_find_item(fs_root, path, parent, 0,
2158				BTRFS_INODE_REF_KEY, &found_key);
2159		if (ret > 0)
2160			ret = -ENOENT;
2161		if (ret)
2162			break;
2163
2164		next_inum = found_key.offset;
2165
2166		/* regular exit ahead */
2167		if (parent == next_inum)
2168			break;
2169
2170		slot = path->slots[0];
2171		eb = path->nodes[0];
2172		/* make sure we can use eb after releasing the path */
2173		if (eb != eb_in) {
2174			path->nodes[0] = NULL;
2175			path->locks[0] = 0;
2176		}
2177		btrfs_release_path(path);
2178		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2179
2180		name_len = btrfs_inode_ref_name_len(eb, iref);
2181		name_off = (unsigned long)(iref + 1);
2182
2183		parent = next_inum;
2184		--bytes_left;
2185		if (bytes_left >= 0)
2186			dest[bytes_left] = '/';
2187	}
2188
2189	btrfs_release_path(path);
2190
2191	if (ret)
2192		return ERR_PTR(ret);
2193
2194	return dest + bytes_left;
2195}
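
/*
 * Illustrative sketch (not part of this file): checking for truncation as
 * the comment above requires. "buf" and "sz" are hypothetical;
 * inode_to_path() below shows the in-tree pattern.
 *
 *	char *p = btrfs_ref_to_path(fs_root, path, name_len, name_off,
 *				    eb, parent_ino, buf, sz);
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);
 *	if (p < buf)
 *		extra_needed = buf - p;	/* buffer was too small */
 *	else
 *		... p is the start of the 0-terminated path inside buf ...
 */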
2196
2197/*
2198 * This makes the path point to the extent item (logical EXTENT_ITEM *) and
2199 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
2200 * tree blocks and <0 on error.
2201 */
2202int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
2203			struct btrfs_path *path, struct btrfs_key *found_key,
2204			u64 *flags_ret)
2205{
2206	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, logical);
2207	int ret;
2208	u64 flags;
2209	u64 size = 0;
2210	u32 item_size;
2211	const struct extent_buffer *eb;
2212	struct btrfs_extent_item *ei;
2213	struct btrfs_key key;
2214
2215	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2216		key.type = BTRFS_METADATA_ITEM_KEY;
2217	else
2218		key.type = BTRFS_EXTENT_ITEM_KEY;
2219	key.objectid = logical;
2220	key.offset = (u64)-1;
2221
2222	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2223	if (ret < 0)
2224		return ret;
2225	if (ret == 0) {
2226		/*
2227		 * Key with offset -1 found: there would have to exist an extent
2228		 * item with such an offset, but this is out of the valid range.
2229		 */
2230		return -EUCLEAN;
2231	}
2232
2233	ret = btrfs_previous_extent_item(extent_root, path, 0);
2234	if (ret) {
2235		if (ret > 0)
2236			ret = -ENOENT;
2237		return ret;
2238	}
2239	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
2240	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
2241		size = fs_info->nodesize;
2242	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
2243		size = found_key->offset;
2244
2245	if (found_key->objectid > logical ||
2246	    found_key->objectid + size <= logical) {
2247		btrfs_debug(fs_info,
2248			"logical %llu is not within any extent", logical);
2249		return -ENOENT;
2250	}
2251
2252	eb = path->nodes[0];
2253	item_size = btrfs_item_size(eb, path->slots[0]);
2254
2255	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
2256	flags = btrfs_extent_flags(eb, ei);
2257
2258	btrfs_debug(fs_info,
2259		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
2260		 logical, logical - found_key->objectid, found_key->objectid,
2261		 found_key->offset, flags, item_size);
2262
2263	WARN_ON(!flags_ret);
2264	if (flags_ret) {
2265		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2266			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
2267		else if (flags & BTRFS_EXTENT_FLAG_DATA)
2268			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
2269		else
2270			BUG();
2271		return 0;
2272	}
2273
2274	return -EIO;
2275}
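
/*
 * Illustrative sketch (not part of this file): resolving a logical address
 * to its extent item, the first step of logical-to-inode resolution
 * (compare iterate_inodes_from_logical() below). fs_info, path and logical
 * are hypothetical placeholders.
 *
 *	struct btrfs_key found_key;
 *	u64 flags = 0;
 *
 *	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
 *	if (ret < 0)
 *		return ret;
 *	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
 *		... metadata: use tree_backref_for_extent() ...
 *	else
 *		... data: offset into extent is logical - found_key.objectid ...
 */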
2276
2277/*
2278 * helper function to iterate extent inline refs. ptr must point to a 0 value
2279 * for the first call and may be modified. it is used to track state.
2280 * if more refs exist, 0 is returned and the next call to
2281 * get_extent_inline_ref must pass the modified ptr parameter to get the
2282 * next ref. after the last ref was processed, 1 is returned.
2283 * returns <0 on error
2284 */
2285static int get_extent_inline_ref(unsigned long *ptr,
2286				 const struct extent_buffer *eb,
2287				 const struct btrfs_key *key,
2288				 const struct btrfs_extent_item *ei,
2289				 u32 item_size,
2290				 struct btrfs_extent_inline_ref **out_eiref,
2291				 int *out_type)
2292{
2293	unsigned long end;
2294	u64 flags;
2295	struct btrfs_tree_block_info *info;
2296
2297	if (!*ptr) {
2298		/* first call */
2299		flags = btrfs_extent_flags(eb, ei);
2300		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2301			if (key->type == BTRFS_METADATA_ITEM_KEY) {
2302				/* a skinny metadata extent */
2303				*out_eiref =
2304				     (struct btrfs_extent_inline_ref *)(ei + 1);
2305			} else {
2306				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
2307				info = (struct btrfs_tree_block_info *)(ei + 1);
2308				*out_eiref =
2309				   (struct btrfs_extent_inline_ref *)(info + 1);
2310			}
2311		} else {
2312			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
2313		}
2314		*ptr = (unsigned long)*out_eiref;
2315		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
2316			return -ENOENT;
2317	}
2318
2319	end = (unsigned long)ei + item_size;
2320	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
2321	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
2322						     BTRFS_REF_TYPE_ANY);
2323	if (*out_type == BTRFS_REF_TYPE_INVALID)
2324		return -EUCLEAN;
2325
2326	*ptr += btrfs_extent_inline_ref_size(*out_type);
2327	WARN_ON(*ptr > end);
2328	if (*ptr == end)
2329		return 1; /* last */
2330
2331	return 0;
2332}
2333
2334/*
2335 * reads the tree block backref for an extent. tree level and root are returned
2336 * through out_level and out_root. ptr must point to a 0 value for the first
2337 * call and may be modified (see get_extent_inline_ref comment).
2338 * returns 0 if data was provided, 1 if there was no more data to provide or
2339 * <0 on error.
2340 */
2341int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
2342			    struct btrfs_key *key, struct btrfs_extent_item *ei,
2343			    u32 item_size, u64 *out_root, u8 *out_level)
2344{
2345	int ret;
2346	int type;
2347	struct btrfs_extent_inline_ref *eiref;
2348
2349	if (*ptr == (unsigned long)-1)
2350		return 1;
2351
2352	while (1) {
2353		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
2354					      &eiref, &type);
2355		if (ret < 0)
2356			return ret;
2357
2358		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
2359		    type == BTRFS_SHARED_BLOCK_REF_KEY)
2360			break;
2361
2362		if (ret == 1)
2363			return 1;
2364	}
2365
2366	/* we can treat both ref types equally here */
2367	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
2368
2369	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
2370		struct btrfs_tree_block_info *info;
2371
2372		info = (struct btrfs_tree_block_info *)(ei + 1);
2373		*out_level = btrfs_tree_block_level(eb, info);
2374	} else {
2375		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
2376		*out_level = (u8)key->offset;
2377	}
2378
2379	if (ret == 1)
2380		*ptr = (unsigned long)-1;
2381
2382	return 0;
2383}
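
/*
 * Illustrative sketch (not part of this file): draining all tree block
 * backrefs with the ptr-based iteration protocol described above. eb, key,
 * ei and item_size are assumed to come from a prior extent_from_logical()
 * plus btrfs_item_ptr()/btrfs_item_size() on the resulting path.
 *
 *	unsigned long ptr = 0;
 *	u64 root;
 *	u8 level;
 *
 *	while (tree_backref_for_extent(&ptr, eb, &key, ei, item_size,
 *				       &root, &level) == 0) {
 *		... one backref: tree @root references this block at @level ...
 *	}
 */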
2384
2385static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
2386			     struct extent_inode_elem *inode_list,
2387			     u64 root, u64 extent_item_objectid,
2388			     iterate_extent_inodes_t *iterate, void *ctx)
2389{
2390	struct extent_inode_elem *eie;
2391	int ret = 0;
2392
2393	for (eie = inode_list; eie; eie = eie->next) {
2394		btrfs_debug(fs_info,
2395			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
2396			    extent_item_objectid, eie->inum,
2397			    eie->offset, root);
2398		ret = iterate(eie->inum, eie->offset, eie->num_bytes, root, ctx);
2399		if (ret) {
2400			btrfs_debug(fs_info,
2401				    "stopping iteration for %llu due to ret=%d",
2402				    extent_item_objectid, ret);
2403			break;
2404		}
2405	}
2406
2407	return ret;
2408}
2409
2410/*
2411 * calls iterate() for every inode that references the extent identified by
2412 * the given parameters.
2413 * when the iterator function returns a non-zero value, iteration stops.
2414 */
2415int iterate_extent_inodes(struct btrfs_backref_walk_ctx *ctx,
2416			  bool search_commit_root,
2417			  iterate_extent_inodes_t *iterate, void *user_ctx)
2418{
2419	int ret;
2420	struct ulist *refs;
2421	struct ulist_node *ref_node;
2422	struct btrfs_seq_list seq_elem = BTRFS_SEQ_LIST_INIT(seq_elem);
2423	struct ulist_iterator ref_uiter;
2424
2425	btrfs_debug(ctx->fs_info, "resolving all inodes for extent %llu",
2426		    ctx->bytenr);
2427
2428	ASSERT(ctx->trans == NULL);
2429	ASSERT(ctx->roots == NULL);
2430
2431	if (!search_commit_root) {
2432		struct btrfs_trans_handle *trans;
2433
2434		trans = btrfs_attach_transaction(ctx->fs_info->tree_root);
2435		if (IS_ERR(trans)) {
2436			if (PTR_ERR(trans) != -ENOENT &&
2437			    PTR_ERR(trans) != -EROFS)
2438				return PTR_ERR(trans);
2439			trans = NULL;
2440		}
2441		ctx->trans = trans;
2442	}
2443
2444	if (ctx->trans) {
2445		btrfs_get_tree_mod_seq(ctx->fs_info, &seq_elem);
2446		ctx->time_seq = seq_elem.seq;
2447	} else {
2448		down_read(&ctx->fs_info->commit_root_sem);
2449	}
2450
2451	ret = btrfs_find_all_leafs(ctx);
2452	if (ret)
2453		goto out;
2454	refs = ctx->refs;
2455	ctx->refs = NULL;
2456
2457	ULIST_ITER_INIT(&ref_uiter);
2458	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
2459		const u64 leaf_bytenr = ref_node->val;
2460		struct ulist_node *root_node;
2461		struct ulist_iterator root_uiter;
2462		struct extent_inode_elem *inode_list;
2463
2464		inode_list = (struct extent_inode_elem *)(uintptr_t)ref_node->aux;
2465
2466		if (ctx->cache_lookup) {
2467			const u64 *root_ids;
2468			int root_count;
2469			bool cached;
2470
2471			cached = ctx->cache_lookup(leaf_bytenr, ctx->user_ctx,
2472						   &root_ids, &root_count);
2473			if (cached) {
2474				for (int i = 0; i < root_count; i++) {
2475					ret = iterate_leaf_refs(ctx->fs_info,
2476								inode_list,
2477								root_ids[i],
2478								leaf_bytenr,
2479								iterate,
2480								user_ctx);
2481					if (ret)
2482						break;
2483				}
2484				continue;
2485			}
2486		}
2487
2488		if (!ctx->roots) {
2489			ctx->roots = ulist_alloc(GFP_NOFS);
2490			if (!ctx->roots) {
2491				ret = -ENOMEM;
2492				break;
2493			}
2494		}
2495
2496		ctx->bytenr = leaf_bytenr;
2497		ret = btrfs_find_all_roots_safe(ctx);
2498		if (ret)
2499			break;
2500
2501		if (ctx->cache_store)
2502			ctx->cache_store(leaf_bytenr, ctx->roots, ctx->user_ctx);
2503
2504		ULIST_ITER_INIT(&root_uiter);
2505		while (!ret && (root_node = ulist_next(ctx->roots, &root_uiter))) {
2506			btrfs_debug(ctx->fs_info,
2507				    "root %llu references leaf %llu, data list %#llx",
2508				    root_node->val, ref_node->val,
2509				    ref_node->aux);
2510			ret = iterate_leaf_refs(ctx->fs_info, inode_list,
2511						root_node->val, ctx->bytenr,
2512						iterate, user_ctx);
2513		}
2514		ulist_reinit(ctx->roots);
2515	}
2516
2517	free_leaf_list(refs);
2518out:
2519	if (ctx->trans) {
2520		btrfs_put_tree_mod_seq(ctx->fs_info, &seq_elem);
2521		btrfs_end_transaction(ctx->trans);
2522		ctx->trans = NULL;
2523	} else {
2524		up_read(&ctx->fs_info->commit_root_sem);
2525	}
2526
2527	ulist_free(ctx->roots);
2528	ctx->roots = NULL;
2529
2530	if (ret == BTRFS_ITERATE_EXTENT_INODES_STOP)
2531		ret = 0;
2532
2533	return ret;
2534}
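
/*
 * Illustrative sketch (not part of this file): a minimal iterate callback,
 * here just counting references. Returning a non-zero value (e.g.
 * BTRFS_ITERATE_EXTENT_INODES_STOP) ends the iteration early;
 * build_ino_list() below is the in-tree callback used for the
 * logical-to-ino resolution.
 *
 *	static int count_refs(u64 inum, u64 offset, u64 num_bytes, u64 root,
 *			      void *ctx)
 *	{
 *		(*(u64 *)ctx)++;
 *		return 0;
 *	}
 */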
2535
2536static int build_ino_list(u64 inum, u64 offset, u64 num_bytes, u64 root, void *ctx)
2537{
2538	struct btrfs_data_container *inodes = ctx;
2539	const size_t c = 3 * sizeof(u64);
2540
2541	if (inodes->bytes_left >= c) {
2542		inodes->bytes_left -= c;
2543		inodes->val[inodes->elem_cnt] = inum;
2544		inodes->val[inodes->elem_cnt + 1] = offset;
2545		inodes->val[inodes->elem_cnt + 2] = root;
2546		inodes->elem_cnt += 3;
2547	} else {
2548		inodes->bytes_missing += c - inodes->bytes_left;
2549		inodes->bytes_left = 0;
2550		inodes->elem_missed += 3;
2551	}
2552
2553	return 0;
2554}
2555
2556int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2557				struct btrfs_path *path,
2558				void *ctx, bool ignore_offset)
2559{
2560	struct btrfs_backref_walk_ctx walk_ctx = { 0 };
2561	int ret;
2562	u64 flags = 0;
2563	struct btrfs_key found_key;
2564	int search_commit_root = path->search_commit_root;
2565
2566	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
2567	btrfs_release_path(path);
2568	if (ret < 0)
2569		return ret;
2570	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2571		return -EINVAL;
2572
2573	walk_ctx.bytenr = found_key.objectid;
2574	if (ignore_offset)
2575		walk_ctx.ignore_extent_item_pos = true;
2576	else
2577		walk_ctx.extent_item_pos = logical - found_key.objectid;
2578	walk_ctx.fs_info = fs_info;
2579
2580	return iterate_extent_inodes(&walk_ctx, search_commit_root,
2581				     build_ino_list, ctx);
2582}
2583
2584static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2585			 struct extent_buffer *eb, struct inode_fs_paths *ipath);
2586
2587static int iterate_inode_refs(u64 inum, struct inode_fs_paths *ipath)
2588{
2589	int ret = 0;
2590	int slot;
2591	u32 cur;
2592	u32 len;
2593	u32 name_len;
2594	u64 parent = 0;
2595	int found = 0;
2596	struct btrfs_root *fs_root = ipath->fs_root;
2597	struct btrfs_path *path = ipath->btrfs_path;
2598	struct extent_buffer *eb;
2599	struct btrfs_inode_ref *iref;
2600	struct btrfs_key found_key;
2601
2602	while (!ret) {
2603		ret = btrfs_find_item(fs_root, path, inum,
2604				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2605				&found_key);
2606
2607		if (ret < 0)
2608			break;
2609		if (ret) {
2610			ret = found ? 0 : -ENOENT;
2611			break;
2612		}
2613		++found;
2614
2615		parent = found_key.offset;
2616		slot = path->slots[0];
2617		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2618		if (!eb) {
2619			ret = -ENOMEM;
2620			break;
2621		}
2622		btrfs_release_path(path);
2623
2624		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2625
2626		for (cur = 0; cur < btrfs_item_size(eb, slot); cur += len) {
2627			name_len = btrfs_inode_ref_name_len(eb, iref);
2628			/* path must be released before calling iterate()! */
2629			btrfs_debug(fs_root->fs_info,
2630				"following ref at offset %u for inode %llu in tree %llu",
2631				cur, found_key.objectid,
2632				fs_root->root_key.objectid);
2633			ret = inode_to_path(parent, name_len,
2634				      (unsigned long)(iref + 1), eb, ipath);
2635			if (ret)
2636				break;
2637			len = sizeof(*iref) + name_len;
2638			iref = (struct btrfs_inode_ref *)((char *)iref + len);
2639		}
2640		free_extent_buffer(eb);
2641	}
2642
2643	btrfs_release_path(path);
2644
2645	return ret;
2646}
2647
2648static int iterate_inode_extrefs(u64 inum, struct inode_fs_paths *ipath)
2649{
2650	int ret;
2651	int slot;
2652	u64 offset = 0;
2653	u64 parent;
2654	int found = 0;
2655	struct btrfs_root *fs_root = ipath->fs_root;
2656	struct btrfs_path *path = ipath->btrfs_path;
2657	struct extent_buffer *eb;
2658	struct btrfs_inode_extref *extref;
2659	u32 item_size;
2660	u32 cur_offset;
2661	unsigned long ptr;
2662
2663	while (1) {
2664		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2665					    &offset);
2666		if (ret < 0)
2667			break;
2668		if (ret) {
2669			ret = found ? 0 : -ENOENT;
2670			break;
2671		}
2672		++found;
2673
2674		slot = path->slots[0];
2675		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2676		if (!eb) {
2677			ret = -ENOMEM;
2678			break;
2679		}
2680		btrfs_release_path(path);
2681
2682		item_size = btrfs_item_size(eb, slot);
2683		ptr = btrfs_item_ptr_offset(eb, slot);
2684		cur_offset = 0;
2685
2686		while (cur_offset < item_size) {
2687			u32 name_len;
2688
2689			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2690			parent = btrfs_inode_extref_parent(eb, extref);
2691			name_len = btrfs_inode_extref_name_len(eb, extref);
2692			ret = inode_to_path(parent, name_len,
2693				      (unsigned long)&extref->name, eb, ipath);
2694			if (ret)
2695				break;
2696
2697			cur_offset += btrfs_inode_extref_name_len(eb, extref);
2698			cur_offset += sizeof(*extref);
2699		}
2700		free_extent_buffer(eb);
2701
2702		offset++;
2703	}
2704
2705	btrfs_release_path(path);
2706
2707	return ret;
2708}
2709
2710/*
2711 * returns 0 if the path could be dumped (possibly truncated)
2712 * returns <0 in case of an error
2713 */
2714static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2715			 struct extent_buffer *eb, struct inode_fs_paths *ipath)
2716{
2717	char *fspath;
2718	char *fspath_min;
2719	int i = ipath->fspath->elem_cnt;
2720	const int s_ptr = sizeof(char *);
2721	u32 bytes_left;
2722
2723	bytes_left = ipath->fspath->bytes_left > s_ptr ?
2724					ipath->fspath->bytes_left - s_ptr : 0;
2725
2726	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2727	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2728				   name_off, eb, inum, fspath_min, bytes_left);
2729	if (IS_ERR(fspath))
2730		return PTR_ERR(fspath);
2731
2732	if (fspath > fspath_min) {
2733		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2734		++ipath->fspath->elem_cnt;
2735		ipath->fspath->bytes_left = fspath - fspath_min;
2736	} else {
2737		++ipath->fspath->elem_missed;
2738		ipath->fspath->bytes_missing += fspath_min - fspath;
2739		ipath->fspath->bytes_left = 0;
2740	}
2741
2742	return 0;
2743}
2744
2745/*
2746 * This dumps all file system paths to the inode into the ipath struct, provided
2747 * it has been created large enough. Each path is zero-terminated and accessed
2748 * from ipath->fspath->val[i].
2749 * When it returns, ipath->fspath->elem_cnt paths are available in
2750 * ipath->fspath->val[]. When the allocated space wasn't sufficient, the
2751 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise
2752 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
2753 * have been needed to return all paths.
2754 */
2755int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2756{
2757	int ret;
2758	int found_refs = 0;
2759
2760	ret = iterate_inode_refs(inum, ipath);
2761	if (!ret)
2762		++found_refs;
2763	else if (ret != -ENOENT)
2764		return ret;
2765
2766	ret = iterate_inode_extrefs(inum, ipath);
2767	if (ret == -ENOENT && found_refs)
2768		return 0;
2769
2770	return ret;
2771}
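
/*
 * Illustrative sketch (not part of this file): the full ipath life cycle
 * around paths_from_inode(). The 4096 byte size is an arbitrary example
 * value; fs_root, path and inum are hypothetical placeholders.
 *
 *	struct inode_fs_paths *ipath;
 *	int i;
 *
 *	ipath = init_ipath(4096, fs_root, path);
 *	if (IS_ERR(ipath))
 *		return PTR_ERR(ipath);
 *	ret = paths_from_inode(inum, ipath);
 *	for (i = 0; !ret && i < ipath->fspath->elem_cnt; i++)
 *		... ipath->fspath->val[i] holds the (u64-encoded) address of
 *		    one 0-terminated path ...
 *	free_ipath(ipath);
 */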
2772
2773struct btrfs_data_container *init_data_container(u32 total_bytes)
2774{
2775	struct btrfs_data_container *data;
2776	size_t alloc_bytes;
2777
2778	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2779	data = kvzalloc(alloc_bytes, GFP_KERNEL);
2780	if (!data)
2781		return ERR_PTR(-ENOMEM);
2782
2783	if (total_bytes >= sizeof(*data))
2784		data->bytes_left = total_bytes - sizeof(*data);
2785	else
2786		data->bytes_missing = sizeof(*data) - total_bytes;
2787
2788	return data;
2789}
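
/*
 * Illustrative sketch (not part of this file): consuming a data container
 * filled by build_ino_list() above, where each record is an
 * (inum, offset, root) triple of u64s. "inodes" is assumed to be the
 * struct btrfs_data_container that was passed as the iteration context.
 * If bytes_missing is non-zero the container was too small and elem_missed
 * values were dropped.
 *
 *	for (i = 0; i < inodes->elem_cnt; i += 3) {
 *		u64 inum   = inodes->val[i];
 *		u64 offset = inodes->val[i + 1];
 *		u64 root   = inodes->val[i + 2];
 *		...
 *	}
 */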
2790
2791/*
2792 * allocates space to return multiple file system paths for an inode.
2793 * total_bytes to allocate are passed, note that space usable for actual path
2794 * information will be total_bytes - sizeof(struct inode_fs_paths).
2795 * the returned pointer must be freed with free_ipath() in the end.
2796 */
2797struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2798					struct btrfs_path *path)
2799{
2800	struct inode_fs_paths *ifp;
2801	struct btrfs_data_container *fspath;
2802
2803	fspath = init_data_container(total_bytes);
2804	if (IS_ERR(fspath))
2805		return ERR_CAST(fspath);
2806
2807	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2808	if (!ifp) {
2809		kvfree(fspath);
2810		return ERR_PTR(-ENOMEM);
2811	}
2812
2813	ifp->btrfs_path = path;
2814	ifp->fspath = fspath;
2815	ifp->fs_root = fs_root;
2816
2817	return ifp;
2818}
2819
2820void free_ipath(struct inode_fs_paths *ipath)
2821{
2822	if (!ipath)
2823		return;
2824	kvfree(ipath->fspath);
2825	kfree(ipath);
2826}
2827
2828struct btrfs_backref_iter *btrfs_backref_iter_alloc(struct btrfs_fs_info *fs_info)
2829{
2830	struct btrfs_backref_iter *ret;
2831
2832	ret = kzalloc(sizeof(*ret), GFP_NOFS);
2833	if (!ret)
2834		return NULL;
2835
2836	ret->path = btrfs_alloc_path();
2837	if (!ret->path) {
2838		kfree(ret);
2839		return NULL;
2840	}
2841
2842	/* Current backref iterator only supports iteration in commit root */
2843	ret->path->search_commit_root = 1;
2844	ret->path->skip_locking = 1;
2845	ret->fs_info = fs_info;
2846
2847	return ret;
2848}
2849
2850static void btrfs_backref_iter_release(struct btrfs_backref_iter *iter)
2851{
2852	iter->bytenr = 0;
2853	iter->item_ptr = 0;
2854	iter->cur_ptr = 0;
2855	iter->end_ptr = 0;
2856	btrfs_release_path(iter->path);
2857	memset(&iter->cur_key, 0, sizeof(iter->cur_key));
2858}
2859
2860int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
2861{
2862	struct btrfs_fs_info *fs_info = iter->fs_info;
2863	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bytenr);
2864	struct btrfs_path *path = iter->path;
2865	struct btrfs_extent_item *ei;
2866	struct btrfs_key key;
2867	int ret;
2868
2869	key.objectid = bytenr;
2870	key.type = BTRFS_METADATA_ITEM_KEY;
2871	key.offset = (u64)-1;
2872	iter->bytenr = bytenr;
2873
2874	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2875	if (ret < 0)
2876		return ret;
2877	if (ret == 0) {
2878		/*
2879		 * Key with offset -1 found: there would have to exist an extent
2880		 * item with such an offset, but this is out of the valid range.
2881		 */
2882		ret = -EUCLEAN;
2883		goto release;
2884	}
2885	if (path->slots[0] == 0) {
2886		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
2887		ret = -EUCLEAN;
2888		goto release;
2889	}
2890	path->slots[0]--;
2891
2892	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2893	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
2894	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
2895		ret = -ENOENT;
2896		goto release;
2897	}
2898	memcpy(&iter->cur_key, &key, sizeof(key));
2899	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2900						    path->slots[0]);
2901	iter->end_ptr = (u32)(iter->item_ptr +
2902			btrfs_item_size(path->nodes[0], path->slots[0]));
2903	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2904			    struct btrfs_extent_item);
2905
2906	/*
2907	 * Only iteration over tree backrefs is supported yet.
2908	 *
2909	 * This is an extra precaution for non skinny-metadata, where
2910	 * EXTENT_ITEM is also used for tree blocks, so we can only use the
2911	 * extent flags to determine if it's a tree block.
2912	 */
2913	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2914		ret = -ENOTSUPP;
2915		goto release;
2916	}
2917	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2918
2919	/* If there is no inline backref, go search for keyed backref */
2920	if (iter->cur_ptr >= iter->end_ptr) {
2921		ret = btrfs_next_item(extent_root, path);
2922
2923		/* No inline nor keyed ref */
2924		if (ret > 0) {
2925			ret = -ENOENT;
2926			goto release;
2927		}
2928		if (ret < 0)
2929			goto release;
2930
2931		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2932				path->slots[0]);
2933		if (iter->cur_key.objectid != bytenr ||
2934		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2935		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2936			ret = -ENOENT;
2937			goto release;
2938		}
2939		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2940							   path->slots[0]);
2941		iter->item_ptr = iter->cur_ptr;
2942		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size(
2943				      path->nodes[0], path->slots[0]));
2944	}
2945
2946	return 0;
2947release:
2948	btrfs_backref_iter_release(iter);
2949	return ret;
2950}
2951
2952static bool btrfs_backref_iter_is_inline_ref(struct btrfs_backref_iter *iter)
2953{
2954	if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY ||
2955	    iter->cur_key.type == BTRFS_METADATA_ITEM_KEY)
2956		return true;
2957	return false;
2958}
2959
2960/*
2961 * Go to the next backref item of the current bytenr; it can be either
2962 * inlined or keyed.
2963 *
2964 * The caller needs to check whether it's an inline ref or not via iter->cur_key.
2965 *
2966 * Return 0 if we got the next backref without problem.
2967 * Return >0 if there is no extra backref for this bytenr.
2968 * Return <0 if something went wrong.
2969 */
2970int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
2971{
2972	struct extent_buffer *eb = iter->path->nodes[0];
2973	struct btrfs_root *extent_root;
2974	struct btrfs_path *path = iter->path;
2975	struct btrfs_extent_inline_ref *iref;
2976	int ret;
2977	u32 size;
2978
2979	if (btrfs_backref_iter_is_inline_ref(iter)) {
2980		/* We're still inside the inline refs */
2981		ASSERT(iter->cur_ptr < iter->end_ptr);
2982
2983		if (btrfs_backref_has_tree_block_info(iter)) {
2984			/* First tree block info */
2985			size = sizeof(struct btrfs_tree_block_info);
2986		} else {
2987			/* Use inline ref type to determine the size */
2988			int type;
2989
2990			iref = (struct btrfs_extent_inline_ref *)
2991				((unsigned long)iter->cur_ptr);
2992			type = btrfs_extent_inline_ref_type(eb, iref);
2993
2994			size = btrfs_extent_inline_ref_size(type);
2995		}
2996		iter->cur_ptr += size;
2997		if (iter->cur_ptr < iter->end_ptr)
2998			return 0;
2999
3000		/* All inline items iterated, fall through */
3001	}
3002
3003	/* We're at keyed items, there is no inline item, go to the next one */
3004	extent_root = btrfs_extent_root(iter->fs_info, iter->bytenr);
3005	ret = btrfs_next_item(extent_root, iter->path);
3006	if (ret)
3007		return ret;
3008
3009	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
3010	if (iter->cur_key.objectid != iter->bytenr ||
3011	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
3012	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
3013		return 1;
3014	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
3015					path->slots[0]);
3016	iter->cur_ptr = iter->item_ptr;
3017	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size(path->nodes[0],
3018						path->slots[0]);
3019	return 0;
3020}
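
/*
 * Illustrative sketch (not part of this file): the start/next protocol for
 * the backref iterator, covering both inline and keyed tree backrefs; the
 * same for-loop shape is used by btrfs_backref_add_tree_node() below.
 * That btrfs_backref_iter_free() is the matching cleanup helper is an
 * assumption based on the allocator above.
 *
 *	struct btrfs_backref_iter *iter;
 *	int ret;
 *
 *	iter = btrfs_backref_iter_alloc(fs_info);
 *	if (!iter)
 *		return -ENOMEM;
 *	for (ret = btrfs_backref_iter_start(iter, bytenr);
 *	     ret == 0; ret = btrfs_backref_iter_next(iter)) {
 *		... inspect iter->cur_key (and iter->cur_ptr for inline refs) ...
 *	}
 *	btrfs_backref_iter_free(iter);
 */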
3021
3022void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
3023			      struct btrfs_backref_cache *cache, bool is_reloc)
3024{
3025	int i;
3026
3027	cache->rb_root = RB_ROOT;
3028	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
3029		INIT_LIST_HEAD(&cache->pending[i]);
3030	INIT_LIST_HEAD(&cache->changed);
3031	INIT_LIST_HEAD(&cache->detached);
3032	INIT_LIST_HEAD(&cache->leaves);
3033	INIT_LIST_HEAD(&cache->pending_edge);
3034	INIT_LIST_HEAD(&cache->useless_node);
3035	cache->fs_info = fs_info;
3036	cache->is_reloc = is_reloc;
3037}
3038
3039struct btrfs_backref_node *btrfs_backref_alloc_node(
3040		struct btrfs_backref_cache *cache, u64 bytenr, int level)
3041{
3042	struct btrfs_backref_node *node;
3043
3044	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
3045	node = kzalloc(sizeof(*node), GFP_NOFS);
3046	if (!node)
3047		return node;
3048
3049	INIT_LIST_HEAD(&node->list);
3050	INIT_LIST_HEAD(&node->upper);
3051	INIT_LIST_HEAD(&node->lower);
3052	RB_CLEAR_NODE(&node->rb_node);
3053	cache->nr_nodes++;
3054	node->level = level;
3055	node->bytenr = bytenr;
3056
3057	return node;
3058}
3059
3060void btrfs_backref_free_node(struct btrfs_backref_cache *cache,
3061			     struct btrfs_backref_node *node)
3062{
3063	if (node) {
3064		ASSERT(list_empty(&node->list));
3065		ASSERT(list_empty(&node->lower));
3066		ASSERT(node->eb == NULL);
3067		cache->nr_nodes--;
3068		btrfs_put_root(node->root);
3069		kfree(node);
3070	}
3071}
3072
3073struct btrfs_backref_edge *btrfs_backref_alloc_edge(
3074		struct btrfs_backref_cache *cache)
3075{
3076	struct btrfs_backref_edge *edge;
3077
3078	edge = kzalloc(sizeof(*edge), GFP_NOFS);
3079	if (edge)
3080		cache->nr_edges++;
3081	return edge;
3082}
3083
3084void btrfs_backref_free_edge(struct btrfs_backref_cache *cache,
3085			     struct btrfs_backref_edge *edge)
3086{
3087	if (edge) {
3088		cache->nr_edges--;
3089		kfree(edge);
3090	}
3091}
3092
3093void btrfs_backref_unlock_node_buffer(struct btrfs_backref_node *node)
3094{
3095	if (node->locked) {
3096		btrfs_tree_unlock(node->eb);
3097		node->locked = 0;
3098	}
3099}
3100
3101void btrfs_backref_drop_node_buffer(struct btrfs_backref_node *node)
3102{
3103	if (node->eb) {
3104		btrfs_backref_unlock_node_buffer(node);
3105		free_extent_buffer(node->eb);
3106		node->eb = NULL;
3107	}
3108}
3109
3110/*
3111 * Drop the backref node from cache without cleaning up its children
3112 * edges.
3113 *
3114 * This can only be called on a node without parent edges.
3115 * The children edges are still kept as is.
3116 */
3117void btrfs_backref_drop_node(struct btrfs_backref_cache *tree,
3118			     struct btrfs_backref_node *node)
3119{
3120	ASSERT(list_empty(&node->upper));
3121
3122	btrfs_backref_drop_node_buffer(node);
3123	list_del_init(&node->list);
3124	list_del_init(&node->lower);
3125	if (!RB_EMPTY_NODE(&node->rb_node))
3126		rb_erase(&node->rb_node, &tree->rb_root);
3127	btrfs_backref_free_node(tree, node);
3128}
3129
3130/*
3131 * Drop the backref node from cache, also cleaning up all its
3132 * upper edges and any uncached nodes in the path.
3133 *
3134 * This cleanup happens bottom up, thus the node should either
3135 * be the lowest node in the cache or a detached node.
3136 */
3137void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
3138				struct btrfs_backref_node *node)
3139{
3140	struct btrfs_backref_node *upper;
3141	struct btrfs_backref_edge *edge;
3142
3143	if (!node)
3144		return;
3145
3146	BUG_ON(!node->lowest && !node->detached);
3147	while (!list_empty(&node->upper)) {
3148		edge = list_entry(node->upper.next, struct btrfs_backref_edge,
3149				  list[LOWER]);
3150		upper = edge->node[UPPER];
3151		list_del(&edge->list[LOWER]);
3152		list_del(&edge->list[UPPER]);
3153		btrfs_backref_free_edge(cache, edge);
3154
3155		/*
3156		 * Add the node to leaf node list if no other child block
3157		 * cached.
3158		 */
3159		if (list_empty(&upper->lower)) {
3160			list_add_tail(&upper->lower, &cache->leaves);
3161			upper->lowest = 1;
3162		}
3163	}
3164
3165	btrfs_backref_drop_node(cache, node);
3166}
3167
3168/*
3169 * Release all nodes/edges from current cache
3170 */
3171void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
3172{
3173	struct btrfs_backref_node *node;
3174	int i;
3175
3176	while (!list_empty(&cache->detached)) {
3177		node = list_entry(cache->detached.next,
3178				  struct btrfs_backref_node, list);
3179		btrfs_backref_cleanup_node(cache, node);
3180	}
3181
3182	while (!list_empty(&cache->leaves)) {
3183		node = list_entry(cache->leaves.next,
3184				  struct btrfs_backref_node, lower);
3185		btrfs_backref_cleanup_node(cache, node);
3186	}
3187
3188	cache->last_trans = 0;
3189
3190	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
3191		ASSERT(list_empty(&cache->pending[i]));
3192	ASSERT(list_empty(&cache->pending_edge));
3193	ASSERT(list_empty(&cache->useless_node));
3194	ASSERT(list_empty(&cache->changed));
3195	ASSERT(list_empty(&cache->detached));
3196	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
3197	ASSERT(!cache->nr_nodes);
3198	ASSERT(!cache->nr_edges);
3199}
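
/*
 * Illustrative sketch (not part of this file): the init/release pairing for
 * a backref cache, in the spirit of relocation's embedded cache. The cache
 * memory is assumed to start out zeroed (nr_nodes/nr_edges are not set by
 * btrfs_backref_init_cache()).
 *
 *	struct btrfs_backref_cache cache = { 0 };
 *
 *	btrfs_backref_init_cache(fs_info, &cache, true);
 *	... build nodes/edges with the helpers below ...
 *	btrfs_backref_release_cache(&cache);
 */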
3200
3201void btrfs_backref_link_edge(struct btrfs_backref_edge *edge,
3202			     struct btrfs_backref_node *lower,
3203			     struct btrfs_backref_node *upper,
3204			     int link_which)
3205{
3206	ASSERT(upper && lower && upper->level == lower->level + 1);
3207	edge->node[LOWER] = lower;
3208	edge->node[UPPER] = upper;
3209	if (link_which & LINK_LOWER)
3210		list_add_tail(&edge->list[LOWER], &lower->upper);
3211	if (link_which & LINK_UPPER)
3212		list_add_tail(&edge->list[UPPER], &upper->lower);
3213}
3214/*
3215 * Handle direct tree backref
3216 *
3217 * Direct tree backref means the backref item shows its parent bytenr
3218 * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
3219 *
3220 * @ref_key:	The converted backref key.
3221 *		For keyed backref, it's the item key.
3222 *		For inlined backref, objectid is the bytenr,
3223 *		type is btrfs_inline_ref_type, offset is
3224 *		btrfs_inline_ref_offset.
3225 */
3226static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
3227				      struct btrfs_key *ref_key,
3228				      struct btrfs_backref_node *cur)
3229{
3230	struct btrfs_backref_edge *edge;
3231	struct btrfs_backref_node *upper;
3232	struct rb_node *rb_node;
3233
3234	ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
3235
3236	/* Only reloc root uses backref pointing to itself */
3237	if (ref_key->objectid == ref_key->offset) {
3238		struct btrfs_root *root;
3239
3240		cur->is_reloc_root = 1;
3241		/* Only reloc backref cache cares about a specific root */
3242		if (cache->is_reloc) {
3243			root = find_reloc_root(cache->fs_info, cur->bytenr);
3244			if (!root)
3245				return -ENOENT;
3246			cur->root = root;
3247		} else {
3248			/*
3249			 * For generic purpose backref cache, reloc root node
3250			 * is useless.
3251			 */
3252			list_add(&cur->list, &cache->useless_node);
3253		}
3254		return 0;
3255	}
3256
3257	edge = btrfs_backref_alloc_edge(cache);
3258	if (!edge)
3259		return -ENOMEM;
3260
3261	rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
3262	if (!rb_node) {
3263		/* Parent node not yet cached */
3264		upper = btrfs_backref_alloc_node(cache, ref_key->offset,
3265					   cur->level + 1);
3266		if (!upper) {
3267			btrfs_backref_free_edge(cache, edge);
3268			return -ENOMEM;
3269		}
3270
3271		/*
3272		 * Backrefs for the upper level block aren't cached, add the
3273		 * block to the pending list
3274		 */
3275		list_add_tail(&edge->list[UPPER], &cache->pending_edge);
3276	} else {
3277		/* Parent node already cached */
3278		upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
3279		ASSERT(upper->checked);
3280		INIT_LIST_HEAD(&edge->list[UPPER]);
3281	}
3282	btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
3283	return 0;
3284}
3285
3286/*
3287 * Handle indirect tree backref
3288 *
3289 * Indirect tree backref means we only know which tree the node belongs to.
3290 * We still need to do a tree search to find out the parents. This is for
3291 * TREE_BLOCK_REF backref (keyed or inlined).
3292 *
3293 * @trans:	Transaction handle.
3294 * @ref_key:	The same as @ref_key in handle_direct_tree_backref()
3295 * @tree_key:	The first key of this tree block.
3296 * @path:	A clean (released) path, to avoid allocating a path every
3297 *		time the function gets called.
3298 */
3299static int handle_indirect_tree_backref(struct btrfs_trans_handle *trans,
3300					struct btrfs_backref_cache *cache,
3301					struct btrfs_path *path,
3302					struct btrfs_key *ref_key,
3303					struct btrfs_key *tree_key,
3304					struct btrfs_backref_node *cur)
3305{
3306	struct btrfs_fs_info *fs_info = cache->fs_info;
3307	struct btrfs_backref_node *upper;
3308	struct btrfs_backref_node *lower;
3309	struct btrfs_backref_edge *edge;
3310	struct extent_buffer *eb;
3311	struct btrfs_root *root;
3312	struct rb_node *rb_node;
3313	int level;
3314	bool need_check = true;
3315	int ret;
3316
3317	root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
3318	if (IS_ERR(root))
3319		return PTR_ERR(root);
3320	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
3321		cur->cowonly = 1;
3322
3323	if (btrfs_root_level(&root->root_item) == cur->level) {
3324		/* Tree root */
3325		ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
3326		/*
3327		 * For reloc backref cache, we may ignore reloc root.  But for
3328		 * general purpose backref cache, we can't rely on
3329		 * btrfs_should_ignore_reloc_root() as it may conflict with
3330		 * current running relocation and lead to missing root.
3331		 *
3332		 * For general purpose backref cache, reloc root detection is
3333		 * completely relying on direct backref (key->offset is parent
3334		 * bytenr), thus only do such check for reloc cache.
3335		 */
3336		if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
3337			btrfs_put_root(root);
3338			list_add(&cur->list, &cache->useless_node);
3339		} else {
3340			cur->root = root;
3341		}
3342		return 0;
3343	}
3344
3345	level = cur->level + 1;
3346
3347	/* Search the tree to find parent blocks referring to the block */
3348	path->search_commit_root = 1;
3349	path->skip_locking = 1;
3350	path->lowest_level = level;
3351	ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
3352	path->lowest_level = 0;
3353	if (ret < 0) {
3354		btrfs_put_root(root);
3355		return ret;
3356	}
3357	if (ret > 0 && path->slots[level] > 0)
3358		path->slots[level]--;
3359
3360	eb = path->nodes[level];
3361	if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
3362		btrfs_err(fs_info,
3363"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
3364			  cur->bytenr, level - 1, root->root_key.objectid,
3365			  tree_key->objectid, tree_key->type, tree_key->offset);
3366		btrfs_put_root(root);
3367		ret = -ENOENT;
3368		goto out;
3369	}
3370	lower = cur;
3371
3372	/* Add all nodes and edges in the path */
3373	for (; level < BTRFS_MAX_LEVEL; level++) {
3374		if (!path->nodes[level]) {
3375			ASSERT(btrfs_root_bytenr(&root->root_item) ==
3376			       lower->bytenr);
3377			/* Same as previous should_ignore_reloc_root() call */
3378			if (btrfs_should_ignore_reloc_root(root) &&
3379			    cache->is_reloc) {
3380				btrfs_put_root(root);
3381				list_add(&lower->list, &cache->useless_node);
3382			} else {
3383				lower->root = root;
3384			}
3385			break;
3386		}
3387
3388		edge = btrfs_backref_alloc_edge(cache);
3389		if (!edge) {
3390			btrfs_put_root(root);
3391			ret = -ENOMEM;
3392			goto out;
3393		}
3394
3395		eb = path->nodes[level];
3396		rb_node = rb_simple_search(&cache->rb_root, eb->start);
3397		if (!rb_node) {
3398			upper = btrfs_backref_alloc_node(cache, eb->start,
3399							 lower->level + 1);
3400			if (!upper) {
3401				btrfs_put_root(root);
3402				btrfs_backref_free_edge(cache, edge);
3403				ret = -ENOMEM;
3404				goto out;
3405			}
3406			upper->owner = btrfs_header_owner(eb);
3407			if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
3408				upper->cowonly = 1;
3409
3410			/*
3411			 * If we know the block isn't shared we can avoid
3412			 * checking its backrefs.
3413			 */
3414			if (btrfs_block_can_be_shared(trans, root, eb))
3415				upper->checked = 0;
3416			else
3417				upper->checked = 1;
3418
3419			/*
3420			 * Add the block to pending list if we need to check its
3421			 * backrefs, we only do this once while walking up a
3422			 * tree as we will catch anything else later on.
3423			 */
3424			if (!upper->checked && need_check) {
3425				need_check = false;
3426				list_add_tail(&edge->list[UPPER],
3427					      &cache->pending_edge);
3428			} else {
3429				if (upper->checked)
3430					need_check = true;
3431				INIT_LIST_HEAD(&edge->list[UPPER]);
3432			}
3433		} else {
3434			upper = rb_entry(rb_node, struct btrfs_backref_node,
3435					 rb_node);
3436			ASSERT(upper->checked);
3437			INIT_LIST_HEAD(&edge->list[UPPER]);
3438			if (!upper->owner)
3439				upper->owner = btrfs_header_owner(eb);
3440		}
3441		btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
3442
3443		if (rb_node) {
3444			btrfs_put_root(root);
3445			break;
3446		}
3447		lower = upper;
3448		upper = NULL;
3449	}
3450out:
3451	btrfs_release_path(path);
3452	return ret;
3453}
3454
3455/*
3456 * Add backref node @cur into @cache.
3457 *
3458 * NOTE: Even if the function returned 0, @cur is not yet cached as its upper
3459 *	 links aren't yet bi-directional; use
3460 *	 btrfs_backref_finish_upper_links() to finish the linkage.
3461 *
3462 * @trans:	Transaction handle.
3463 * @path:	Released path for indirect tree backref lookup
3464 * @iter:	Released backref iter for extent tree search
3465 * @node_key:	The first key of the tree block
3466 */
3467int btrfs_backref_add_tree_node(struct btrfs_trans_handle *trans,
3468				struct btrfs_backref_cache *cache,
3469				struct btrfs_path *path,
3470				struct btrfs_backref_iter *iter,
3471				struct btrfs_key *node_key,
3472				struct btrfs_backref_node *cur)
3473{
3474	struct btrfs_backref_edge *edge;
3475	struct btrfs_backref_node *exist;
3476	int ret;
3477
3478	ret = btrfs_backref_iter_start(iter, cur->bytenr);
3479	if (ret < 0)
3480		return ret;
3481	/*
3482	 * We skip the first btrfs_tree_block_info, as we don't use the key
3483	 * stored in it, but fetch it from the tree block
3484	 */
3485	if (btrfs_backref_has_tree_block_info(iter)) {
3486		ret = btrfs_backref_iter_next(iter);
3487		if (ret < 0)
3488			goto out;
3489		/* No extra backref? This means the tree block is corrupted */
3490		if (ret > 0) {
3491			ret = -EUCLEAN;
3492			goto out;
3493		}
3494	}
3495	WARN_ON(cur->checked);
3496	if (!list_empty(&cur->upper)) {
3497		/*
3498		 * The backref was added previously when processing backref of
3499		 * type BTRFS_TREE_BLOCK_REF_KEY
3500		 */
3501		ASSERT(list_is_singular(&cur->upper));
3502		edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
3503				  list[LOWER]);
3504		ASSERT(list_empty(&edge->list[UPPER]));
3505		exist = edge->node[UPPER];
3506		/*
3507		 * Add the upper level block to pending list if we need check
3508		 * its backrefs
3509		 */
3510		if (!exist->checked)
3511			list_add_tail(&edge->list[UPPER], &cache->pending_edge);
3512	} else {
3513		exist = NULL;
3514	}
3515
3516	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
3517		struct extent_buffer *eb;
3518		struct btrfs_key key;
3519		int type;
3520
3521		cond_resched();
3522		eb = iter->path->nodes[0];
3523
3524		key.objectid = iter->bytenr;
3525		if (btrfs_backref_iter_is_inline_ref(iter)) {
3526			struct btrfs_extent_inline_ref *iref;
3527
3528			/* Update key for inline backref */
3529			iref = (struct btrfs_extent_inline_ref *)
3530				((unsigned long)iter->cur_ptr);
3531			type = btrfs_get_extent_inline_ref_type(eb, iref,
3532							BTRFS_REF_TYPE_BLOCK);
3533			if (type == BTRFS_REF_TYPE_INVALID) {
3534				ret = -EUCLEAN;
3535				goto out;
3536			}
3537			key.type = type;
3538			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
3539		} else {
3540			key.type = iter->cur_key.type;
3541			key.offset = iter->cur_key.offset;
3542		}
3543
3544		/*
3545		 * Parent node found and matches current inline ref, no need to
3546		 * rebuild this node for this inline ref
3547		 */
3548		if (exist &&
3549		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
3550		      exist->owner == key.offset) ||
3551		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
3552		      exist->bytenr == key.offset))) {
3553			exist = NULL;
3554			continue;
3555		}
3556
3557		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
3558		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
3559			ret = handle_direct_tree_backref(cache, &key, cur);
3560			if (ret < 0)
3561				goto out;
3562		} else if (key.type == BTRFS_TREE_BLOCK_REF_KEY) {
3563			/*
3564			 * key.type == BTRFS_TREE_BLOCK_REF_KEY, the inline ref
3565			 * offset is the root objectid. We need to search
3566			 * the tree to get its parent bytenr.
3567			 */
3568			ret = handle_indirect_tree_backref(trans, cache, path,
3569							   &key, node_key, cur);
3570			if (ret < 0)
3571				goto out;
3572		}
3573		/*
3574		 * Unrecognized tree backref items (if they pass the
3575		 * tree-checker) are ignored.
3576		 */
3577	}
3578	ret = 0;
3579	cur->checked = 1;
3580	WARN_ON(exist);
3581out:
3582	btrfs_backref_iter_release(iter);
3583	return ret;
3584}
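
/*
 * Example: a minimal sketch of the intended calling sequence (the caller-side
 * variable names are hypothetical, not from this file). @cur is only fully
 * linked once btrfs_backref_finish_upper_links() has run; on error the
 * partially built tree must be torn down.
 */
#if 0
	ret = btrfs_backref_add_tree_node(trans, cache, path, iter,
					  &node_key, cur);
	if (ret == 0)
		ret = btrfs_backref_finish_upper_links(cache, cur);
	if (ret < 0)
		btrfs_backref_error_cleanup(cache, cur);
#endif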
3585
3586/*
3587 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
3588 */
3589int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
3590				     struct btrfs_backref_node *start)
3591{
3592	struct list_head *useless_node = &cache->useless_node;
3593	struct btrfs_backref_edge *edge;
3594	struct rb_node *rb_node;
3595	LIST_HEAD(pending_edge);
3596
3597	ASSERT(start->checked);
3598
3599	/* Insert this node to cache if it's not COW-only */
3600	if (!start->cowonly) {
3601		rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
3602					   &start->rb_node);
3603		if (rb_node)
3604			btrfs_backref_panic(cache->fs_info, start->bytenr,
3605					    -EEXIST);
3606		list_add_tail(&start->lower, &cache->leaves);
3607	}
3608
3609	/*
3610	 * Use breadth first search to iterate all related edges.
3611	 *
3612	 * The starting points are all the edges of this node
3613	 */
3614	list_for_each_entry(edge, &start->upper, list[LOWER])
3615		list_add_tail(&edge->list[UPPER], &pending_edge);
3616
3617	while (!list_empty(&pending_edge)) {
3618		struct btrfs_backref_node *upper;
3619		struct btrfs_backref_node *lower;
3620
3621		edge = list_first_entry(&pending_edge,
3622				struct btrfs_backref_edge, list[UPPER]);
3623		list_del_init(&edge->list[UPPER]);
3624		upper = edge->node[UPPER];
3625		lower = edge->node[LOWER];
3626
3627		/* Parent is detached, no need to keep any edges */
3628		if (upper->detached) {
3629			list_del(&edge->list[LOWER]);
3630			btrfs_backref_free_edge(cache, edge);
3631
3632			/* Lower node is orphan, queue for cleanup */
3633			if (list_empty(&lower->upper))
3634				list_add(&lower->list, useless_node);
3635			continue;
3636		}
3637
3638		/*
3639		 * All new nodes added in current build_backref_tree() haven't
3640		 * been linked to the cache rb tree.
3641		 * So if we have upper->rb_node populated, this means a cache
3642		 * hit. We only need to link the edge, as @upper and all its
3643		 * parents have already been linked.
3644		 */
3645		if (!RB_EMPTY_NODE(&upper->rb_node)) {
3646			if (upper->lowest) {
3647				list_del_init(&upper->lower);
3648				upper->lowest = 0;
3649			}
3650
3651			list_add_tail(&edge->list[UPPER], &upper->lower);
3652			continue;
3653		}
3654
3655		/* Sanity check, we shouldn't have any unchecked nodes */
3656		if (!upper->checked) {
3657			ASSERT(0);
3658			return -EUCLEAN;
3659		}
3660
3661		/* Sanity check, COW-only node has non-COW-only parent */
3662		if (start->cowonly != upper->cowonly) {
3663			ASSERT(0);
3664			return -EUCLEAN;
3665		}
3666
3667		/* Only cache non-COW-only (subvolume trees) tree blocks */
3668		if (!upper->cowonly) {
3669			rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3670						   &upper->rb_node);
3671			if (rb_node) {
3672				btrfs_backref_panic(cache->fs_info,
3673						upper->bytenr, -EEXIST);
3674				return -EUCLEAN;
3675			}
3676		}
3677
3678		list_add_tail(&edge->list[UPPER], &upper->lower);
3679
3680		/*
3681		 * Also queue all the parent edges of this uncached node
3682		 * to finish the upper linkage
3683		 */
3684		list_for_each_entry(edge, &upper->upper, list[LOWER])
3685			list_add_tail(&edge->list[UPPER], &pending_edge);
3686	}
3687	return 0;
3688}
3689
3690void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
3691				 struct btrfs_backref_node *node)
3692{
3693	struct btrfs_backref_node *lower;
3694	struct btrfs_backref_node *upper;
3695	struct btrfs_backref_edge *edge;
3696
3697	while (!list_empty(&cache->useless_node)) {
3698		lower = list_first_entry(&cache->useless_node,
3699				   struct btrfs_backref_node, list);
3700		list_del_init(&lower->list);
3701	}
3702	while (!list_empty(&cache->pending_edge)) {
3703		edge = list_first_entry(&cache->pending_edge,
3704				struct btrfs_backref_edge, list[UPPER]);
3705		list_del(&edge->list[UPPER]);
3706		list_del(&edge->list[LOWER]);
3707		lower = edge->node[LOWER];
3708		upper = edge->node[UPPER];
3709		btrfs_backref_free_edge(cache, edge);
3710
3711		/*
3712		 * Lower is no longer linked to any upper backref nodes and
3713		 * isn't in the cache, so we can free it ourselves.
3714		 */
3715		if (list_empty(&lower->upper) &&
3716		    RB_EMPTY_NODE(&lower->rb_node))
3717			list_add(&lower->list, &cache->useless_node);
3718
3719		if (!RB_EMPTY_NODE(&upper->rb_node))
3720			continue;
3721
3722		/* Add this guy's upper edges to the list to process */
3723		list_for_each_entry(edge, &upper->upper, list[LOWER])
3724			list_add_tail(&edge->list[UPPER],
3725				      &cache->pending_edge);
3726		if (list_empty(&upper->upper))
3727			list_add(&upper->list, &cache->useless_node);
3728	}
3729
3730	while (!list_empty(&cache->useless_node)) {
3731		lower = list_first_entry(&cache->useless_node,
3732				   struct btrfs_backref_node, list);
3733		list_del_init(&lower->list);
3734		if (lower == node)
3735			node = NULL;
3736		btrfs_backref_drop_node(cache, lower);
3737	}
3738
3739	btrfs_backref_cleanup_node(cache, node);
3740	ASSERT(list_empty(&cache->useless_node) &&
3741	       list_empty(&cache->pending_edge));
3742}
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2011 STRATO.  All rights reserved.
   4 */
   5
   6#include <linux/mm.h>
   7#include <linux/rbtree.h>
   8#include <trace/events/btrfs.h>
   9#include "ctree.h"
  10#include "disk-io.h"
  11#include "backref.h"
  12#include "ulist.h"
  13#include "transaction.h"
  14#include "delayed-ref.h"
  15#include "locking.h"
  16#include "misc.h"
  17
  18/* Just an arbitrary number so we can be sure this happened */
  19#define BACKREF_FOUND_SHARED 6
  20
  21struct extent_inode_elem {
  22	u64 inum;
  23	u64 offset;
  24	struct extent_inode_elem *next;
  25};
  26
  27static int check_extent_in_eb(const struct btrfs_key *key,
  28			      const struct extent_buffer *eb,
  29			      const struct btrfs_file_extent_item *fi,
  30			      u64 extent_item_pos,
  31			      struct extent_inode_elem **eie,
  32			      bool ignore_offset)
  33{
  34	u64 offset = 0;
  35	struct extent_inode_elem *e;
  36
  37	if (!ignore_offset &&
  38	    !btrfs_file_extent_compression(eb, fi) &&
  39	    !btrfs_file_extent_encryption(eb, fi) &&
  40	    !btrfs_file_extent_other_encoding(eb, fi)) {
  41		u64 data_offset;
  42		u64 data_len;
  43
  44		data_offset = btrfs_file_extent_offset(eb, fi);
  45		data_len = btrfs_file_extent_num_bytes(eb, fi);
  46
  47		if (extent_item_pos < data_offset ||
  48		    extent_item_pos >= data_offset + data_len)
  49			return 1;
  50		offset = extent_item_pos - data_offset;
  51	}
  52
  53	e = kmalloc(sizeof(*e), GFP_NOFS);
  54	if (!e)
  55		return -ENOMEM;
  56
  57	e->next = *eie;
  58	e->inum = key->objectid;
  59	e->offset = key->offset + offset;
  60	*eie = e;
  61
  62	return 0;
  63}
  64
  65static void free_inode_elem_list(struct extent_inode_elem *eie)
  66{
  67	struct extent_inode_elem *eie_next;
  68
  69	for (; eie; eie = eie_next) {
  70		eie_next = eie->next;
  71		kfree(eie);
  72	}
  73}
  74
  75static int find_extent_in_eb(const struct extent_buffer *eb,
  76			     u64 wanted_disk_byte, u64 extent_item_pos,
  77			     struct extent_inode_elem **eie,
  78			     bool ignore_offset)
  79{
  80	u64 disk_byte;
  81	struct btrfs_key key;
  82	struct btrfs_file_extent_item *fi;
  83	int slot;
  84	int nritems;
  85	int extent_type;
  86	int ret;
  87
  88	/*
  89	 * from the shared data ref, we only have the leaf but we need
  90	 * the key. thus, we must look into all items and see that we
  91	 * find one (some) with a reference to our extent item.
  92	 */
  93	nritems = btrfs_header_nritems(eb);
  94	for (slot = 0; slot < nritems; ++slot) {
  95		btrfs_item_key_to_cpu(eb, &key, slot);
  96		if (key.type != BTRFS_EXTENT_DATA_KEY)
  97			continue;
  98		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
  99		extent_type = btrfs_file_extent_type(eb, fi);
 100		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
 101			continue;
 102		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
 103		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
 104		if (disk_byte != wanted_disk_byte)
 105			continue;
 106
 107		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie, ignore_offset);
 108		if (ret < 0)
 109			return ret;
 110	}
 111
 112	return 0;
 113}
 114
 115struct preftree {
 116	struct rb_root_cached root;
 117	unsigned int count;
 118};
 119
 120#define PREFTREE_INIT	{ .root = RB_ROOT_CACHED, .count = 0 }
 121
 122struct preftrees {
 123	struct preftree direct;    /* BTRFS_SHARED_[DATA|BLOCK]_REF_KEY */
 124	struct preftree indirect;  /* BTRFS_[TREE_BLOCK|EXTENT_DATA]_REF_KEY */
 125	struct preftree indirect_missing_keys;
 126};
 127
 128/*
 129 * Checks for a shared extent during backref search.
 130 *
 131 * The share_count tracks prelim_refs (direct and indirect) having a
 132 * ref->count >0:
 133 *  - incremented when a ref->count transitions to >0
 134 *  - decremented when a ref->count transitions to <1
 135 */
 136struct share_check {
 137	u64 root_objectid;
 138	u64 inum;
 139	int share_count;
 140};
 141
 142static inline int extent_is_shared(struct share_check *sc)
 143{
 144	return (sc && sc->share_count > 1) ? BACKREF_FOUND_SHARED : 0;
 145}
 146
 147static struct kmem_cache *btrfs_prelim_ref_cache;
 148
 149int __init btrfs_prelim_ref_init(void)
 150{
 151	btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
 152					sizeof(struct prelim_ref),
 153					0,
 154					SLAB_MEM_SPREAD,
 155					NULL);
 156	if (!btrfs_prelim_ref_cache)
 157		return -ENOMEM;
 158	return 0;
 159}
 160
 161void __cold btrfs_prelim_ref_exit(void)
 162{
 163	kmem_cache_destroy(btrfs_prelim_ref_cache);
 164}
 165
 166static void free_pref(struct prelim_ref *ref)
 167{
 168	kmem_cache_free(btrfs_prelim_ref_cache, ref);
 169}
 170
 171/*
 172 * Return 0 when both refs are for the same block (and can be merged).
 173 * A -1 return indicates ref1 is a 'lower' block than ref2, while 1
 174 * indicates a 'higher' block.
 175 */
 176static int prelim_ref_compare(struct prelim_ref *ref1,
 177			      struct prelim_ref *ref2)
 178{
 179	if (ref1->level < ref2->level)
 180		return -1;
 181	if (ref1->level > ref2->level)
 182		return 1;
 183	if (ref1->root_id < ref2->root_id)
 184		return -1;
 185	if (ref1->root_id > ref2->root_id)
 186		return 1;
 187	if (ref1->key_for_search.type < ref2->key_for_search.type)
 188		return -1;
 189	if (ref1->key_for_search.type > ref2->key_for_search.type)
 190		return 1;
 191	if (ref1->key_for_search.objectid < ref2->key_for_search.objectid)
 192		return -1;
 193	if (ref1->key_for_search.objectid > ref2->key_for_search.objectid)
 194		return 1;
 195	if (ref1->key_for_search.offset < ref2->key_for_search.offset)
 196		return -1;
 197	if (ref1->key_for_search.offset > ref2->key_for_search.offset)
 198		return 1;
 199	if (ref1->parent < ref2->parent)
 200		return -1;
 201	if (ref1->parent > ref2->parent)
 202		return 1;
 203
 204	return 0;
 205}
 206
 207static void update_share_count(struct share_check *sc, int oldcount,
 208			       int newcount)
 209{
 210	if ((!sc) || (oldcount == 0 && newcount < 1))
 211		return;
 212
 213	if (oldcount > 0 && newcount < 1)
 214		sc->share_count--;
 215	else if (oldcount < 1 && newcount > 0)
 216		sc->share_count++;
 217}
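
/*
 * Example: how share_count evolves, a minimal sketch assuming a
 * zero-initialized struct share_check sc. Two live refs push the count
 * above 1, which is what extent_is_shared() reports as shared:
 */
#if 0
	update_share_count(&sc, 0, 1);	/* first ref:  sc.share_count == 1 */
	update_share_count(&sc, 0, 1);	/* second ref: sc.share_count == 2 */
	WARN_ON(extent_is_shared(&sc) != BACKREF_FOUND_SHARED);
	update_share_count(&sc, 1, 0);	/* one ref dropped: back to 1 */
	WARN_ON(extent_is_shared(&sc) != 0);
#endif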
 218
 219/*
 220 * Add @newref to the @root rbtree, merging identical refs.
 221 *
 222 * Callers should assume that newref has been freed after calling.
 223 */
 224static void prelim_ref_insert(const struct btrfs_fs_info *fs_info,
 225			      struct preftree *preftree,
 226			      struct prelim_ref *newref,
 227			      struct share_check *sc)
 228{
 229	struct rb_root_cached *root;
 230	struct rb_node **p;
 231	struct rb_node *parent = NULL;
 232	struct prelim_ref *ref;
 233	int result;
 234	bool leftmost = true;
 235
 236	root = &preftree->root;
 237	p = &root->rb_root.rb_node;
 238
 239	while (*p) {
 240		parent = *p;
 241		ref = rb_entry(parent, struct prelim_ref, rbnode);
 242		result = prelim_ref_compare(ref, newref);
 243		if (result < 0) {
 244			p = &(*p)->rb_left;
 245		} else if (result > 0) {
 246			p = &(*p)->rb_right;
 247			leftmost = false;
 248		} else {
 249			/* Identical refs, merge them and free @newref */
 250			struct extent_inode_elem *eie = ref->inode_list;
 251
 252			while (eie && eie->next)
 253				eie = eie->next;
 254
 255			if (!eie)
 256				ref->inode_list = newref->inode_list;
 257			else
 258				eie->next = newref->inode_list;
 259			trace_btrfs_prelim_ref_merge(fs_info, ref, newref,
 260						     preftree->count);
 261			/*
 262			 * A delayed ref can have newref->count < 0.
 263			 * The ref->count is updated to follow any
 264			 * BTRFS_[ADD|DROP]_DELAYED_REF actions.
 265			 */
 266			update_share_count(sc, ref->count,
 267					   ref->count + newref->count);
 268			ref->count += newref->count;
 269			free_pref(newref);
 270			return;
 271		}
 272	}
 273
 274	update_share_count(sc, 0, newref->count);
 275	preftree->count++;
 276	trace_btrfs_prelim_ref_insert(fs_info, newref, NULL, preftree->count);
 277	rb_link_node(&newref->rbnode, parent, p);
 278	rb_insert_color_cached(&newref->rbnode, root, leftmost);
 279}
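
/*
 * Example: merge semantics, a minimal sketch with hypothetical refs. Two
 * refs that compare equal via prelim_ref_compare() collapse into a single
 * node whose counts are summed, so a delayed +1/-1 pair nets out to a
 * count of 0 that resolve_indirect_refs() later discards:
 */
#if 0
	prelim_ref_insert(fs_info, &preftrees.indirect, ref_add, sc);	/* count +1 */
	prelim_ref_insert(fs_info, &preftrees.indirect, ref_drop, sc);	/* merged, count 0 */
#endif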
 280
 281/*
 282 * Release the entire tree.  We don't care about internal consistency so
 283 * just free everything and then reset the tree root.
 284 */
 285static void prelim_release(struct preftree *preftree)
 286{
 287	struct prelim_ref *ref, *next_ref;
 288
 289	rbtree_postorder_for_each_entry_safe(ref, next_ref,
 290					     &preftree->root.rb_root, rbnode)
 291		free_pref(ref);
 292
 293	preftree->root = RB_ROOT_CACHED;
 294	preftree->count = 0;
 295}
 296
 297/*
 298 * the rules for all callers of this function are:
 299 * - obtaining the parent is the goal
 300 * - if you add a key, you must know that it is a correct key
 301 * - if you cannot add the parent or a correct key, then we will look into the
 302 *   block later to set a correct key
 303 *
 304 * delayed refs
 305 * ============
 306 *        backref type | shared | indirect | shared | indirect
 307 * information         |   tree |     tree |   data |     data
 308 * --------------------+--------+----------+--------+----------
 309 *      parent logical |    y   |     -    |    -   |     -
 310 *      key to resolve |    -   |     y    |    y   |     y
 311 *  tree block logical |    -   |     -    |    -   |     -
 312 *  root for resolving |    y   |     y    |    y   |     y
 313 *
 314	 * - column 1:       we have the parent -> done
 315	 * - column 2, 3, 4: we use the key to find the parent
 316 *
 317 * on disk refs (inline or keyed)
 318 * ==============================
 319 *        backref type | shared | indirect | shared | indirect
 320 * information         |   tree |     tree |   data |     data
 321 * --------------------+--------+----------+--------+----------
 322 *      parent logical |    y   |     -    |    y   |     -
 323 *      key to resolve |    -   |     -    |    -   |     y
 324 *  tree block logical |    y   |     y    |    y   |     y
 325 *  root for resolving |    -   |     y    |    y   |     y
 326 *
 327	 * - column 1, 3: we have the parent -> done
 328 * - column 2:    we take the first key from the block to find the parent
 329 *                (see add_missing_keys)
 330 * - column 4:    we use the key to find the parent
 331 *
 332 * additional information that's available but not required to find the parent
 333 * block might help in merging entries to gain some speed.
 334 */
 335static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
 336			  struct preftree *preftree, u64 root_id,
 337			  const struct btrfs_key *key, int level, u64 parent,
 338			  u64 wanted_disk_byte, int count,
 339			  struct share_check *sc, gfp_t gfp_mask)
 340{
 341	struct prelim_ref *ref;
 342
 343	if (root_id == BTRFS_DATA_RELOC_TREE_OBJECTID)
 344		return 0;
 345
 346	ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask);
 347	if (!ref)
 348		return -ENOMEM;
 349
 350	ref->root_id = root_id;
 351	if (key)
 352		ref->key_for_search = *key;
 353	else
 354		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
 355
 356	ref->inode_list = NULL;
 357	ref->level = level;
 358	ref->count = count;
 359	ref->parent = parent;
 360	ref->wanted_disk_byte = wanted_disk_byte;
 361	prelim_ref_insert(fs_info, preftree, ref, sc);
 362	return extent_is_shared(sc);
 363}
 364
 365/* direct refs use root == 0, key == NULL */
 366static int add_direct_ref(const struct btrfs_fs_info *fs_info,
 367			  struct preftrees *preftrees, int level, u64 parent,
 368			  u64 wanted_disk_byte, int count,
 369			  struct share_check *sc, gfp_t gfp_mask)
 370{
 371	return add_prelim_ref(fs_info, &preftrees->direct, 0, NULL, level,
 372			      parent, wanted_disk_byte, count, sc, gfp_mask);
 373}
 374
 375/* indirect refs use parent == 0 */
 376static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
 377			    struct preftrees *preftrees, u64 root_id,
 378			    const struct btrfs_key *key, int level,
 379			    u64 wanted_disk_byte, int count,
 380			    struct share_check *sc, gfp_t gfp_mask)
 381{
 382	struct preftree *tree = &preftrees->indirect;
 383
 384	if (!key)
 385		tree = &preftrees->indirect_missing_keys;
 386	return add_prelim_ref(fs_info, tree, root_id, key, level, 0,
 387			      wanted_disk_byte, count, sc, gfp_mask);
 388}
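
/*
 * A quick map of which helper each backref type ends up in, as used by
 * add_inline_refs(), add_keyed_refs() and add_delayed_refs() below
 * (a summary of those callers, not an exhaustive list):
 *
 *	BTRFS_SHARED_BLOCK_REF_KEY -> add_direct_ref(), level + 1, parent known
 *	BTRFS_SHARED_DATA_REF_KEY  -> add_direct_ref(), level 0, parent known
 *	BTRFS_TREE_BLOCK_REF_KEY   -> add_indirect_ref(), key may be missing
 *	BTRFS_EXTENT_DATA_REF_KEY  -> add_indirect_ref(), EXTENT_DATA key
 */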
 389
 390static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
 391{
 392	struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
 393	struct rb_node *parent = NULL;
 394	struct prelim_ref *ref = NULL;
 395	struct prelim_ref target = {};
 396	int result;
 397
 398	target.parent = bytenr;
 399
 400	while (*p) {
 401		parent = *p;
 402		ref = rb_entry(parent, struct prelim_ref, rbnode);
 403		result = prelim_ref_compare(ref, &target);
 404
 405		if (result < 0)
 406			p = &(*p)->rb_left;
 407		else if (result > 0)
 408			p = &(*p)->rb_right;
 409		else
 410			return 1;
 411	}
 412	return 0;
 413}
 414
 415static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
 416			   struct ulist *parents,
 417			   struct preftrees *preftrees, struct prelim_ref *ref,
 418			   int level, u64 time_seq, const u64 *extent_item_pos,
 419			   bool ignore_offset)
 420{
 421	int ret = 0;
 422	int slot;
 423	struct extent_buffer *eb;
 424	struct btrfs_key key;
 425	struct btrfs_key *key_for_search = &ref->key_for_search;
 426	struct btrfs_file_extent_item *fi;
 427	struct extent_inode_elem *eie = NULL, *old = NULL;
 428	u64 disk_byte;
 429	u64 wanted_disk_byte = ref->wanted_disk_byte;
 430	u64 count = 0;
 431	u64 data_offset;
 432
 433	if (level != 0) {
 434		eb = path->nodes[level];
 435		ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
 436		if (ret < 0)
 437			return ret;
 438		return 0;
 439	}
 440
 441	/*
 442	 * 1. We normally enter this function with the path already pointing to
 443	 *    the first item to check. But sometimes, we may enter it with
 444	 *    slot == nritems.
 445	 * 2. We are searching for a normal backref but the bytenr of this
 446	 *    leaf matches a shared data backref.
 447	 * 3. The leaf owner is not equal to the root we are searching for.
 448	 *
 449	 * For these cases, go to the next leaf before we continue.
 450	 */
 451	eb = path->nodes[0];
 452	if (path->slots[0] >= btrfs_header_nritems(eb) ||
 453	    is_shared_data_backref(preftrees, eb->start) ||
 454	    ref->root_id != btrfs_header_owner(eb)) {
 455		if (time_seq == SEQ_LAST)
 456			ret = btrfs_next_leaf(root, path);
 457		else
 458			ret = btrfs_next_old_leaf(root, path, time_seq);
 459	}
 460
 461	while (!ret && count < ref->count) {
 462		eb = path->nodes[0];
 463		slot = path->slots[0];
 464
 465		btrfs_item_key_to_cpu(eb, &key, slot);
 466
 467		if (key.objectid != key_for_search->objectid ||
 468		    key.type != BTRFS_EXTENT_DATA_KEY)
 469			break;
 470
 471		/*
 472		 * We are searching for a normal backref but the bytenr of this
 473		 * leaf matches a shared data backref, OR
 474		 * the leaf owner is not equal to the root we are searching for.
 475		 */
 476		if (slot == 0 &&
 477		    (is_shared_data_backref(preftrees, eb->start) ||
 478		     ref->root_id != btrfs_header_owner(eb))) {
 479			if (time_seq == SEQ_LAST)
 480				ret = btrfs_next_leaf(root, path);
 481			else
 482				ret = btrfs_next_old_leaf(root, path, time_seq);
 483			continue;
 484		}
 485		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
 486		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
 487		data_offset = btrfs_file_extent_offset(eb, fi);
 488
 489		if (disk_byte == wanted_disk_byte) {
 490			eie = NULL;
 491			old = NULL;
 492			if (ref->key_for_search.offset == key.offset - data_offset)
 493				count++;
 494			else
 495				goto next;
 496			if (extent_item_pos) {
 497				ret = check_extent_in_eb(&key, eb, fi,
 498						*extent_item_pos,
 499						&eie, ignore_offset);
 500				if (ret < 0)
 501					break;
 502			}
 503			if (ret > 0)
 504				goto next;
 505			ret = ulist_add_merge_ptr(parents, eb->start,
 506						  eie, (void **)&old, GFP_NOFS);
 507			if (ret < 0)
 508				break;
 509			if (!ret && extent_item_pos) {
 510				while (old->next)
 511					old = old->next;
 512				old->next = eie;
 513			}
 514			eie = NULL;
 515		}
 516next:
 517		if (time_seq == SEQ_LAST)
 518			ret = btrfs_next_item(root, path);
 519		else
 520			ret = btrfs_next_old_item(root, path, time_seq);
 521	}
 522
 523	if (ret > 0)
 524		ret = 0;
 525	else if (ret < 0)
 526		free_inode_elem_list(eie);
 527	return ret;
 528}
 529
 530/*
 531 * resolve an indirect backref in the form (root_id, key, level)
 532 * to a logical address
 533 */
 534static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
 535				struct btrfs_path *path, u64 time_seq,
 536				struct preftrees *preftrees,
 537				struct prelim_ref *ref, struct ulist *parents,
 538				const u64 *extent_item_pos, bool ignore_offset)
 539{
 540	struct btrfs_root *root;
 541	struct extent_buffer *eb;
 542	int ret = 0;
 543	int root_level;
 544	int level = ref->level;
 545	struct btrfs_key search_key = ref->key_for_search;
 546
 547	root = btrfs_get_fs_root(fs_info, ref->root_id, false);
 548	if (IS_ERR(root)) {
 549		ret = PTR_ERR(root);
 550		goto out_free;
 551	}
 552
 553	if (!path->search_commit_root &&
 554	    test_bit(BTRFS_ROOT_DELETING, &root->state)) {
 555		ret = -ENOENT;
 556		goto out;
 557	}
 558
 559	if (btrfs_is_testing(fs_info)) {
 560		ret = -ENOENT;
 561		goto out;
 562	}
 563
 564	if (path->search_commit_root)
 565		root_level = btrfs_header_level(root->commit_root);
 566	else if (time_seq == SEQ_LAST)
 567		root_level = btrfs_header_level(root->node);
 568	else
 569		root_level = btrfs_old_root_level(root, time_seq);
 570
 571	if (root_level + 1 == level)
 572		goto out;
 573
 574	/*
 575	 * We can often find data backrefs with an offset that is too large
 576	 * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
 577	 * subtracting a file's offset with the data offset of its
 578	 * corresponding extent data item. This can happen for example in the
 579	 * clone ioctl.
 580	 *
 581	 * So if we detect such a case we set the search key's offset to zero to
 582	 * make sure we will find the matching file extent item at
 583	 * add_all_parents(), otherwise we will miss it because the offset
 584	 * taken from the backref is much larger than the offset of the file
 585	 * extent item. This can make us scan a very large number of file
 586	 * extent items, but at least it will not make us miss any.
 587	 *
 588	 * This is an ugly workaround for a behaviour that should have never
 589	 * existed, but it does and a fix for the clone ioctl would touch a lot
 590	 * of places, cause backwards incompatibility and would not fix the
 591	 * problem for extents cloned with older kernels.
 592	 */
 593	if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
 594	    search_key.offset >= LLONG_MAX)
 595		search_key.offset = 0;
 596	path->lowest_level = level;
 597	if (time_seq == SEQ_LAST)
 598		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
 599	else
 600		ret = btrfs_search_old_slot(root, &search_key, path, time_seq);
 601
 602	btrfs_debug(fs_info,
 603		"search slot in root %llu (level %d, ref count %d) returned %d for key (%llu %u %llu)",
 604		 ref->root_id, level, ref->count, ret,
 605		 ref->key_for_search.objectid, ref->key_for_search.type,
 606		 ref->key_for_search.offset);
 607	if (ret < 0)
 608		goto out;
 609
 610	eb = path->nodes[level];
 611	while (!eb) {
 612		if (WARN_ON(!level)) {
 613			ret = 1;
 614			goto out;
 615		}
 616		level--;
 617		eb = path->nodes[level];
 618	}
 619
 620	ret = add_all_parents(root, path, parents, preftrees, ref, level,
 621			      time_seq, extent_item_pos, ignore_offset);
 622out:
 623	btrfs_put_root(root);
 624out_free:
 625	path->lowest_level = 0;
 626	btrfs_release_path(path);
 627	return ret;
 628}
 629
 630static struct extent_inode_elem *
 631unode_aux_to_inode_list(struct ulist_node *node)
 632{
 633	if (!node)
 634		return NULL;
 635	return (struct extent_inode_elem *)(uintptr_t)node->aux;
 636}
 637
 638/*
 639 * We maintain three separate rbtrees: one for direct refs, one for
 640 * indirect refs which have a key, and one for indirect refs which do not
 641 * have a key. Each tree does merge on insertion.
 642 *
 643 * Once all of the references are located, we iterate over the tree of
 644 * indirect refs with missing keys. An appropriate key is located and
 645 * the ref is moved onto the tree for indirect refs. After all missing
 646 * keys are thus located, we iterate over the indirect ref tree, resolve
 647 * each reference, and then insert the resolved reference onto the
 648 * direct tree (merging there too).
 649 *
 650 * New backrefs (i.e., for parent nodes) are added to the appropriate
 651 * rbtree as they are encountered. The new backrefs are subsequently
 652 * resolved as above.
 653 */
 654static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
 655				 struct btrfs_path *path, u64 time_seq,
 656				 struct preftrees *preftrees,
 657				 const u64 *extent_item_pos,
 658				 struct share_check *sc, bool ignore_offset)
 659{
 660	int err;
 661	int ret = 0;
 662	struct ulist *parents;
 663	struct ulist_node *node;
 664	struct ulist_iterator uiter;
 665	struct rb_node *rnode;
 666
 667	parents = ulist_alloc(GFP_NOFS);
 668	if (!parents)
 669		return -ENOMEM;
 670
 671	/*
 672	 * We could trade memory usage for performance here by iterating
 673	 * the tree, allocating new refs for each insertion, and then
 674	 * freeing the entire indirect tree when we're done.  In some test
 675	 * cases, the tree can grow quite large (~200k objects).
 676	 */
 677	while ((rnode = rb_first_cached(&preftrees->indirect.root))) {
 678		struct prelim_ref *ref;
 679
 680		ref = rb_entry(rnode, struct prelim_ref, rbnode);
 681		if (WARN(ref->parent,
 682			 "BUG: direct ref found in indirect tree")) {
 683			ret = -EINVAL;
 684			goto out;
 685		}
 686
 687		rb_erase_cached(&ref->rbnode, &preftrees->indirect.root);
 688		preftrees->indirect.count--;
 689
 690		if (ref->count == 0) {
 691			free_pref(ref);
 692			continue;
 693		}
 694
 695		if (sc && sc->root_objectid &&
 696		    ref->root_id != sc->root_objectid) {
 697			free_pref(ref);
 698			ret = BACKREF_FOUND_SHARED;
 699			goto out;
 700		}
 701		err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
 702					   ref, parents, extent_item_pos,
 703					   ignore_offset);
 704		/*
 705		 * We can only tolerate -ENOENT; any other error must be caught
 706		 * and returned directly.
 707		 */
 708		if (err == -ENOENT) {
 709			prelim_ref_insert(fs_info, &preftrees->direct, ref,
 710					  NULL);
 711			continue;
 712		} else if (err) {
 713			free_pref(ref);
 714			ret = err;
 715			goto out;
 716		}
 717
 718		/* we put the first parent into the ref at hand */
 719		ULIST_ITER_INIT(&uiter);
 720		node = ulist_next(parents, &uiter);
 721		ref->parent = node ? node->val : 0;
 722		ref->inode_list = unode_aux_to_inode_list(node);
 723
 724		/* Add a prelim_ref(s) for any other parent(s). */
 725		while ((node = ulist_next(parents, &uiter))) {
 726			struct prelim_ref *new_ref;
 727
 728			new_ref = kmem_cache_alloc(btrfs_prelim_ref_cache,
 729						   GFP_NOFS);
 730			if (!new_ref) {
 731				free_pref(ref);
 732				ret = -ENOMEM;
 733				goto out;
 734			}
 735			memcpy(new_ref, ref, sizeof(*ref));
 736			new_ref->parent = node->val;
 737			new_ref->inode_list = unode_aux_to_inode_list(node);
 738			prelim_ref_insert(fs_info, &preftrees->direct,
 739					  new_ref, NULL);
 740		}
 741
 742		/*
 743		 * Now it's a direct ref, put it in the direct tree. We must
 744		 * do this last because the ref could be merged/freed here.
 745		 */
 746		prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);
 747
 748		ulist_reinit(parents);
 749		cond_resched();
 750	}
 751out:
 752	ulist_free(parents);
 753	return ret;
 754}
 755
 756/*
 757 * read tree blocks and add keys where required.
 758 */
 759static int add_missing_keys(struct btrfs_fs_info *fs_info,
 760			    struct preftrees *preftrees, bool lock)
 761{
 762	struct prelim_ref *ref;
 763	struct extent_buffer *eb;
 764	struct preftree *tree = &preftrees->indirect_missing_keys;
 765	struct rb_node *node;
 766
 767	while ((node = rb_first_cached(&tree->root))) {
 768		ref = rb_entry(node, struct prelim_ref, rbnode);
 769		rb_erase_cached(node, &tree->root);
 770
 771		BUG_ON(ref->parent);	/* should not be a direct ref */
 772		BUG_ON(ref->key_for_search.type);
 773		BUG_ON(!ref->wanted_disk_byte);
 774
 775		eb = read_tree_block(fs_info, ref->wanted_disk_byte, 0,
 776				     ref->level - 1, NULL);
 777		if (IS_ERR(eb)) {
 778			free_pref(ref);
 779			return PTR_ERR(eb);
 780		} else if (!extent_buffer_uptodate(eb)) {
 781			free_pref(ref);
 782			free_extent_buffer(eb);
 783			return -EIO;
 784		}
 785		if (lock)
 786			btrfs_tree_read_lock(eb);
 787		if (btrfs_header_level(eb) == 0)
 788			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
 789		else
 790			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
 791		if (lock)
 792			btrfs_tree_read_unlock(eb);
 793		free_extent_buffer(eb);
 794		prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
 795		cond_resched();
 796	}
 797	return 0;
 798}
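
/*
 * Putting the pieces together: find_parent_nodes() below drives the three
 * preftrees in this order (a sketch of the flow, not a verbatim quote):
 *
 *  1) add_inline_refs() / add_keyed_refs() / add_delayed_refs() fill the
 *     direct, indirect and indirect_missing_keys trees;
 *  2) add_missing_keys() reads each referenced tree block to supply the
 *     missing key and moves the ref into the indirect tree;
 *  3) resolve_indirect_refs() turns (root, key, level) into parent bytenrs
 *     and merges the results into the direct tree.
 */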
 799
 800/*
 801	 * Add all currently queued delayed refs from this head whose seq nr is
 802	 * smaller than or equal to @seq to the preftrees.
 803 */
 804static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
 805			    struct btrfs_delayed_ref_head *head, u64 seq,
 806			    struct preftrees *preftrees, struct share_check *sc)
 807{
 808	struct btrfs_delayed_ref_node *node;
 809	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
 810	struct btrfs_key key;
 811	struct btrfs_key tmp_op_key;
 812	struct rb_node *n;
 813	int count;
 814	int ret = 0;
 815
 816	if (extent_op && extent_op->update_key)
 817		btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
 818
 819	spin_lock(&head->lock);
 820	for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
 821		node = rb_entry(n, struct btrfs_delayed_ref_node,
 822				ref_node);
 823		if (node->seq > seq)
 824			continue;
 825
 826		switch (node->action) {
 827		case BTRFS_ADD_DELAYED_EXTENT:
 828		case BTRFS_UPDATE_DELAYED_HEAD:
 829			WARN_ON(1);
 830			continue;
 831		case BTRFS_ADD_DELAYED_REF:
 832			count = node->ref_mod;
 833			break;
 834		case BTRFS_DROP_DELAYED_REF:
 835			count = node->ref_mod * -1;
 836			break;
 837		default:
 838			BUG();
 839		}
 840		switch (node->type) {
 841		case BTRFS_TREE_BLOCK_REF_KEY: {
 842			/* NORMAL INDIRECT METADATA backref */
 843			struct btrfs_delayed_tree_ref *ref;
 844
 845			ref = btrfs_delayed_node_to_tree_ref(node);
 846			ret = add_indirect_ref(fs_info, preftrees, ref->root,
 847					       &tmp_op_key, ref->level + 1,
 848					       node->bytenr, count, sc,
 849					       GFP_ATOMIC);
 850			break;
 851		}
 852		case BTRFS_SHARED_BLOCK_REF_KEY: {
 853			/* SHARED DIRECT METADATA backref */
 854			struct btrfs_delayed_tree_ref *ref;
 855
 856			ref = btrfs_delayed_node_to_tree_ref(node);
 857
 858			ret = add_direct_ref(fs_info, preftrees, ref->level + 1,
 859					     ref->parent, node->bytenr, count,
 860					     sc, GFP_ATOMIC);
 861			break;
 862		}
 863		case BTRFS_EXTENT_DATA_REF_KEY: {
 864			/* NORMAL INDIRECT DATA backref */
 865			struct btrfs_delayed_data_ref *ref;
 866			ref = btrfs_delayed_node_to_data_ref(node);
 867
 868			key.objectid = ref->objectid;
 869			key.type = BTRFS_EXTENT_DATA_KEY;
 870			key.offset = ref->offset;
 871
 872			/*
 873			 * Found an inum that doesn't match our known inum, so
 874			 * we know it's shared.
 875			 */
 876			if (sc && sc->inum && ref->objectid != sc->inum) {
 877				ret = BACKREF_FOUND_SHARED;
 878				goto out;
 879			}
 880
 881			ret = add_indirect_ref(fs_info, preftrees, ref->root,
 882					       &key, 0, node->bytenr, count, sc,
 883					       GFP_ATOMIC);
 884			break;
 885		}
 886		case BTRFS_SHARED_DATA_REF_KEY: {
 887			/* SHARED DIRECT FULL backref */
 888			struct btrfs_delayed_data_ref *ref;
 889
 890			ref = btrfs_delayed_node_to_data_ref(node);
 891
 892			ret = add_direct_ref(fs_info, preftrees, 0, ref->parent,
 893					     node->bytenr, count, sc,
 894					     GFP_ATOMIC);
 895			break;
 896		}
 897		default:
 898			WARN_ON(1);
 899		}
 900		/*
 901		 * We must ignore BACKREF_FOUND_SHARED until all delayed
 902		 * refs have been checked.
 903		 */
 904		if (ret && (ret != BACKREF_FOUND_SHARED))
 905			break;
 906	}
 907	if (!ret)
 908		ret = extent_is_shared(sc);
 909out:
 910	spin_unlock(&head->lock);
 911	return ret;
 912}
 913
 914/*
 915 * add all inline backrefs for bytenr to the list
 916 *
 917 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
 918 */
 919static int add_inline_refs(const struct btrfs_fs_info *fs_info,
 920			   struct btrfs_path *path, u64 bytenr,
 921			   int *info_level, struct preftrees *preftrees,
 922			   struct share_check *sc)
 923{
 924	int ret = 0;
 925	int slot;
 926	struct extent_buffer *leaf;
 927	struct btrfs_key key;
 928	struct btrfs_key found_key;
 929	unsigned long ptr;
 930	unsigned long end;
 931	struct btrfs_extent_item *ei;
 932	u64 flags;
 933	u64 item_size;
 934
 935	/*
 936	 * enumerate all inline refs
 937	 */
 938	leaf = path->nodes[0];
 939	slot = path->slots[0];
 940
 941	item_size = btrfs_item_size_nr(leaf, slot);
 942	BUG_ON(item_size < sizeof(*ei));
 943
 944	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
 945	flags = btrfs_extent_flags(leaf, ei);
 946	btrfs_item_key_to_cpu(leaf, &found_key, slot);
 947
 948	ptr = (unsigned long)(ei + 1);
 949	end = (unsigned long)ei + item_size;
 950
 951	if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
 952	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
 953		struct btrfs_tree_block_info *info;
 954
 955		info = (struct btrfs_tree_block_info *)ptr;
 956		*info_level = btrfs_tree_block_level(leaf, info);
 957		ptr += sizeof(struct btrfs_tree_block_info);
 958		BUG_ON(ptr > end);
 959	} else if (found_key.type == BTRFS_METADATA_ITEM_KEY) {
 960		*info_level = found_key.offset;
 961	} else {
 962		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
 963	}
 964
 965	while (ptr < end) {
 966		struct btrfs_extent_inline_ref *iref;
 967		u64 offset;
 968		int type;
 969
 970		iref = (struct btrfs_extent_inline_ref *)ptr;
 971		type = btrfs_get_extent_inline_ref_type(leaf, iref,
 972							BTRFS_REF_TYPE_ANY);
 973		if (type == BTRFS_REF_TYPE_INVALID)
 974			return -EUCLEAN;
 975
 976		offset = btrfs_extent_inline_ref_offset(leaf, iref);
 977
 978		switch (type) {
 979		case BTRFS_SHARED_BLOCK_REF_KEY:
 980			ret = add_direct_ref(fs_info, preftrees,
 981					     *info_level + 1, offset,
 982					     bytenr, 1, NULL, GFP_NOFS);
 983			break;
 984		case BTRFS_SHARED_DATA_REF_KEY: {
 985			struct btrfs_shared_data_ref *sdref;
 986			int count;
 987
 988			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
 989			count = btrfs_shared_data_ref_count(leaf, sdref);
 990
 991			ret = add_direct_ref(fs_info, preftrees, 0, offset,
 992					     bytenr, count, sc, GFP_NOFS);
 993			break;
 994		}
 995		case BTRFS_TREE_BLOCK_REF_KEY:
 996			ret = add_indirect_ref(fs_info, preftrees, offset,
 997					       NULL, *info_level + 1,
 998					       bytenr, 1, NULL, GFP_NOFS);
 999			break;
1000		case BTRFS_EXTENT_DATA_REF_KEY: {
1001			struct btrfs_extent_data_ref *dref;
1002			int count;
1003			u64 root;
1004
1005			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1006			count = btrfs_extent_data_ref_count(leaf, dref);
1007			key.objectid = btrfs_extent_data_ref_objectid(leaf,
1008								      dref);
1009			key.type = BTRFS_EXTENT_DATA_KEY;
1010			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
1011
1012			if (sc && sc->inum && key.objectid != sc->inum) {
1013				ret = BACKREF_FOUND_SHARED;
1014				break;
1015			}
1016
1017			root = btrfs_extent_data_ref_root(leaf, dref);
1018
1019			ret = add_indirect_ref(fs_info, preftrees, root,
1020					       &key, 0, bytenr, count,
1021					       sc, GFP_NOFS);
1022			break;
1023		}
1024		default:
1025			WARN_ON(1);
1026		}
1027		if (ret)
1028			return ret;
1029		ptr += btrfs_extent_inline_ref_size(type);
1030	}
1031
1032	return 0;
1033}
1034
1035/*
1036 * add all non-inline backrefs for bytenr to the list
1037 *
1038 * Returns 0 on success, <0 on error, or BACKREF_FOUND_SHARED.
1039 */
1040static int add_keyed_refs(struct btrfs_fs_info *fs_info,
1041			  struct btrfs_path *path, u64 bytenr,
1042			  int info_level, struct preftrees *preftrees,
1043			  struct share_check *sc)
1044{
1045	struct btrfs_root *extent_root = fs_info->extent_root;
1046	int ret;
1047	int slot;
1048	struct extent_buffer *leaf;
1049	struct btrfs_key key;
1050
1051	while (1) {
1052		ret = btrfs_next_item(extent_root, path);
1053		if (ret < 0)
1054			break;
1055		if (ret) {
1056			ret = 0;
1057			break;
1058		}
1059
1060		slot = path->slots[0];
1061		leaf = path->nodes[0];
1062		btrfs_item_key_to_cpu(leaf, &key, slot);
1063
1064		if (key.objectid != bytenr)
1065			break;
1066		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
1067			continue;
1068		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
1069			break;
1070
1071		switch (key.type) {
1072		case BTRFS_SHARED_BLOCK_REF_KEY:
1073			/* SHARED DIRECT METADATA backref */
1074			ret = add_direct_ref(fs_info, preftrees,
1075					     info_level + 1, key.offset,
1076					     bytenr, 1, NULL, GFP_NOFS);
1077			break;
1078		case BTRFS_SHARED_DATA_REF_KEY: {
1079			/* SHARED DIRECT FULL backref */
1080			struct btrfs_shared_data_ref *sdref;
1081			int count;
1082
1083			sdref = btrfs_item_ptr(leaf, slot,
1084					      struct btrfs_shared_data_ref);
1085			count = btrfs_shared_data_ref_count(leaf, sdref);
1086			ret = add_direct_ref(fs_info, preftrees, 0,
1087					     key.offset, bytenr, count,
1088					     sc, GFP_NOFS);
1089			break;
1090		}
1091		case BTRFS_TREE_BLOCK_REF_KEY:
1092			/* NORMAL INDIRECT METADATA backref */
1093			ret = add_indirect_ref(fs_info, preftrees, key.offset,
1094					       NULL, info_level + 1, bytenr,
1095					       1, NULL, GFP_NOFS);
1096			break;
1097		case BTRFS_EXTENT_DATA_REF_KEY: {
1098			/* NORMAL INDIRECT DATA backref */
1099			struct btrfs_extent_data_ref *dref;
1100			int count;
1101			u64 root;
1102
1103			dref = btrfs_item_ptr(leaf, slot,
1104					      struct btrfs_extent_data_ref);
1105			count = btrfs_extent_data_ref_count(leaf, dref);
1106			key.objectid = btrfs_extent_data_ref_objectid(leaf,
1107								      dref);
1108			key.type = BTRFS_EXTENT_DATA_KEY;
1109			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
1110
1111			if (sc && sc->inum && key.objectid != sc->inum) {
1112				ret = BACKREF_FOUND_SHARED;
1113				break;
1114			}
1115
1116			root = btrfs_extent_data_ref_root(leaf, dref);
1117			ret = add_indirect_ref(fs_info, preftrees, root,
1118					       &key, 0, bytenr, count,
1119					       sc, GFP_NOFS);
1120			break;
1121		}
1122		default:
1123			WARN_ON(1);
1124		}
1125		if (ret)
1126			return ret;
1127
1128	}
1129
1130	return ret;
1131}
1132
1133/*
1134 * this adds all existing backrefs (inline backrefs, backrefs and delayed
1135 * refs) for the given bytenr to the refs list, merges duplicates and resolves
1136 * indirect refs to their parent bytenr.
1137 * When roots are found, they're added to the roots list.
1138 *
1139 * If time_seq is set to SEQ_LAST, it will not search delayed_refs and behaves
1140 * much like the trans == NULL case; the only difference is that it will not
1141 * search the commit root.
1142 * This special case exists for qgroup to search roots in commit_transaction().
1143 *
1144 * @sc - if !NULL, then immediately return BACKREF_FOUND_SHARED when a
1145 * shared extent is detected.
1146 *
1147 * Otherwise this returns 0 for success and <0 for an error.
1148 *
1149 * If ignore_offset is set to false, only extent refs whose offsets match
1150 * extent_item_pos are returned.  If true, every extent ref is returned
1151 * and extent_item_pos is ignored.
1152 *
1153 * FIXME some caching might speed things up
1154 */
1155static int find_parent_nodes(struct btrfs_trans_handle *trans,
1156			     struct btrfs_fs_info *fs_info, u64 bytenr,
1157			     u64 time_seq, struct ulist *refs,
1158			     struct ulist *roots, const u64 *extent_item_pos,
1159			     struct share_check *sc, bool ignore_offset)
1160{
1161	struct btrfs_key key;
1162	struct btrfs_path *path;
1163	struct btrfs_delayed_ref_root *delayed_refs = NULL;
1164	struct btrfs_delayed_ref_head *head;
1165	int info_level = 0;
1166	int ret;
1167	struct prelim_ref *ref;
1168	struct rb_node *node;
1169	struct extent_inode_elem *eie = NULL;
1170	struct preftrees preftrees = {
1171		.direct = PREFTREE_INIT,
1172		.indirect = PREFTREE_INIT,
1173		.indirect_missing_keys = PREFTREE_INIT
1174	};
1175
1176	key.objectid = bytenr;
1177	key.offset = (u64)-1;
1178	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1179		key.type = BTRFS_METADATA_ITEM_KEY;
1180	else
1181		key.type = BTRFS_EXTENT_ITEM_KEY;
1182
1183	path = btrfs_alloc_path();
1184	if (!path)
1185		return -ENOMEM;
1186	if (!trans) {
1187		path->search_commit_root = 1;
1188		path->skip_locking = 1;
1189	}
1190
1191	if (time_seq == SEQ_LAST)
1192		path->skip_locking = 1;
1193
1194	/*
1195	 * grab both a lock on the path and a lock on the delayed ref head.
1196	 * We need both to get a consistent picture of how the refs look
1197	 * at a specified point in time
1198	 */
1199again:
1200	head = NULL;
1201
1202	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
1203	if (ret < 0)
1204		goto out;
1205	BUG_ON(ret == 0);
1206
1207#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1208	if (trans && likely(trans->type != __TRANS_DUMMY) &&
1209	    time_seq != SEQ_LAST) {
1210#else
1211	if (trans && time_seq != SEQ_LAST) {
1212#endif
1213		/*
1214		 * Look if there are updates for this ref queued and lock the
1215		 * head.
1216		 */
1217		delayed_refs = &trans->transaction->delayed_refs;
1218		spin_lock(&delayed_refs->lock);
1219		head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
1220		if (head) {
1221			if (!mutex_trylock(&head->mutex)) {
1222				refcount_inc(&head->refs);
1223				spin_unlock(&delayed_refs->lock);
1224
1225				btrfs_release_path(path);
1226
1227				/*
1228				 * Mutex was contended, block until it's
1229				 * released and try again
1230				 */
1231				mutex_lock(&head->mutex);
1232				mutex_unlock(&head->mutex);
1233				btrfs_put_delayed_ref_head(head);
1234				goto again;
1235			}
1236			spin_unlock(&delayed_refs->lock);
1237			ret = add_delayed_refs(fs_info, head, time_seq,
1238					       &preftrees, sc);
1239			mutex_unlock(&head->mutex);
1240			if (ret)
1241				goto out;
1242		} else {
1243			spin_unlock(&delayed_refs->lock);
1244		}
1245	}
1246
1247	if (path->slots[0]) {
1248		struct extent_buffer *leaf;
1249		int slot;
1250
1251		path->slots[0]--;
1252		leaf = path->nodes[0];
1253		slot = path->slots[0];
1254		btrfs_item_key_to_cpu(leaf, &key, slot);
1255		if (key.objectid == bytenr &&
1256		    (key.type == BTRFS_EXTENT_ITEM_KEY ||
1257		     key.type == BTRFS_METADATA_ITEM_KEY)) {
1258			ret = add_inline_refs(fs_info, path, bytenr,
1259					      &info_level, &preftrees, sc);
1260			if (ret)
1261				goto out;
1262			ret = add_keyed_refs(fs_info, path, bytenr, info_level,
1263					     &preftrees, sc);
1264			if (ret)
1265				goto out;
1266		}
1267	}
1268
1269	btrfs_release_path(path);
1270
1271	ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
1272	if (ret)
1273		goto out;
1274
1275	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));
1276
1277	ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
1278				    extent_item_pos, sc, ignore_offset);
1279	if (ret)
1280		goto out;
1281
1282	WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect.root.rb_root));
1283
1284	/*
1285	 * This walks the tree of merged and resolved refs. Tree blocks are
1286	 * read in as needed. Unique entries are added to the ulist, and
1287	 * the list of found roots is updated.
1288	 *
1289	 * We release the entire tree in one go before returning.
1290	 */
1291	node = rb_first_cached(&preftrees.direct.root);
1292	while (node) {
1293		ref = rb_entry(node, struct prelim_ref, rbnode);
1294		node = rb_next(&ref->rbnode);
1295		/*
1296		 * ref->count < 0 can happen here if there are delayed
1297		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
1298		 * prelim_ref_insert() relies on this when merging
1299		 * identical refs to keep the overall count correct.
1300		 * prelim_ref_insert() will merge only those refs
1301		 * which compare identically.  Any refs having
1302		 * e.g. different offsets would not be merged,
1303		 * and would retain their original ref->count < 0.
1304		 */
1305		if (roots && ref->count && ref->root_id && ref->parent == 0) {
1306			if (sc && sc->root_objectid &&
1307			    ref->root_id != sc->root_objectid) {
1308				ret = BACKREF_FOUND_SHARED;
1309				goto out;
1310			}
1311
1312			/* no parent == root of tree */
1313			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
1314			if (ret < 0)
1315				goto out;
1316		}
1317		if (ref->count && ref->parent) {
1318			if (extent_item_pos && !ref->inode_list &&
1319			    ref->level == 0) {
1320				struct extent_buffer *eb;
1321
1322				eb = read_tree_block(fs_info, ref->parent, 0,
1323						     ref->level, NULL);
1324				if (IS_ERR(eb)) {
1325					ret = PTR_ERR(eb);
1326					goto out;
1327				} else if (!extent_buffer_uptodate(eb)) {
1328					free_extent_buffer(eb);
1329					ret = -EIO;
1330					goto out;
1331				}
1332
1333				if (!path->skip_locking) {
1334					btrfs_tree_read_lock(eb);
1335					btrfs_set_lock_blocking_read(eb);
1336				}
1337				ret = find_extent_in_eb(eb, bytenr,
1338							*extent_item_pos, &eie, ignore_offset);
1339				if (!path->skip_locking)
1340					btrfs_tree_read_unlock_blocking(eb);
1341				free_extent_buffer(eb);
1342				if (ret < 0)
1343					goto out;
1344				ref->inode_list = eie;
1345			}
1346			ret = ulist_add_merge_ptr(refs, ref->parent,
1347						  ref->inode_list,
1348						  (void **)&eie, GFP_NOFS);
1349			if (ret < 0)
1350				goto out;
1351			if (!ret && extent_item_pos) {
1352				/*
1353				 * We've recorded that parent, so we must extend
1354				 * its inode list here.
1355				 */
1356				BUG_ON(!eie);
1357				while (eie->next)
1358					eie = eie->next;
1359				eie->next = ref->inode_list;
1360			}
1361			eie = NULL;
1362		}
1363		cond_resched();
1364	}
1365
1366out:
1367	btrfs_free_path(path);
1368
1369	prelim_release(&preftrees.direct);
1370	prelim_release(&preftrees.indirect);
1371	prelim_release(&preftrees.indirect_missing_keys);
1372
1373	if (ret < 0)
1374		free_inode_elem_list(eie);
1375	return ret;
1376}
1377
1378static void free_leaf_list(struct ulist *blocks)
1379{
1380	struct ulist_node *node = NULL;
1381	struct extent_inode_elem *eie;
1382	struct ulist_iterator uiter;
1383
1384	ULIST_ITER_INIT(&uiter);
1385	while ((node = ulist_next(blocks, &uiter))) {
1386		if (!node->aux)
1387			continue;
1388		eie = unode_aux_to_inode_list(node);
1389		free_inode_elem_list(eie);
1390		node->aux = 0;
1391	}
1392
1393	ulist_free(blocks);
1394}
1395
1396/*
1397 * Finds all leaves with a reference to the specified combination of bytenr and
1398 * offset. key_list_head will point to a list of corresponding keys (caller must
1399 * free each list element). The leaves will be stored in the @leafs ulist, which
1400 * must be freed with ulist_free.
1401 *
1402 * returns 0 on success, <0 on error
1403 */
1404int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
1405			 struct btrfs_fs_info *fs_info, u64 bytenr,
1406			 u64 time_seq, struct ulist **leafs,
1407			 const u64 *extent_item_pos, bool ignore_offset)
1408{
1409	int ret;
1410
1411	*leafs = ulist_alloc(GFP_NOFS);
1412	if (!*leafs)
1413		return -ENOMEM;
1414
1415	ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1416				*leafs, NULL, extent_item_pos, NULL, ignore_offset);
1417	if (ret < 0 && ret != -ENOENT) {
1418		free_leaf_list(*leafs);
1419		return ret;
1420	}
1421
1422	return 0;
1423}
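
/*
 * Example: walking the leaves that reference an extent, a minimal sketch
 * assuming the caller has @fs_info and the extent's logical @bytenr
 * (hypothetical variables, error handling trimmed):
 */
#if 0
	struct ulist *leafs;
	struct ulist_iterator uiter;
	struct ulist_node *node;

	ret = btrfs_find_all_leafs(NULL, fs_info, bytenr, 0, &leafs,
				   NULL, false);
	if (ret < 0)
		return ret;
	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(leafs, &uiter)))
		btrfs_debug(fs_info, "leaf at %llu", node->val);
	ulist_free(leafs);
#endif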
1424
1425/*
1426 * walk all backrefs for a given extent to find all roots that reference this
1427 * extent. Walking a backref means finding all extents that reference this
1428 * extent and in turn walk the backrefs of those, too. Naturally this is a
1429 * recursive process, but here it is implemented in an iterative fashion: We
1430 * find all referencing extents for the extent in question and put them on a
1431 * list. In turn, we find all referencing extents for those, further appending
1432 * to the list. The way we iterate the list allows adding more elements after
1433 * the current while iterating. The process stops when we reach the end of the
1434 * list. Found roots are added to the roots list.
1435 *
1436 * returns 0 on success, < 0 on error.
1437 */
1438static int btrfs_find_all_roots_safe(struct btrfs_trans_handle *trans,
1439				     struct btrfs_fs_info *fs_info, u64 bytenr,
1440				     u64 time_seq, struct ulist **roots,
1441				     bool ignore_offset)
1442{
1443	struct ulist *tmp;
1444	struct ulist_node *node = NULL;
1445	struct ulist_iterator uiter;
1446	int ret;
1447
1448	tmp = ulist_alloc(GFP_NOFS);
1449	if (!tmp)
1450		return -ENOMEM;
1451	*roots = ulist_alloc(GFP_NOFS);
1452	if (!*roots) {
1453		ulist_free(tmp);
1454		return -ENOMEM;
1455	}
1456
1457	ULIST_ITER_INIT(&uiter);
1458	while (1) {
1459		ret = find_parent_nodes(trans, fs_info, bytenr, time_seq,
1460					tmp, *roots, NULL, NULL, ignore_offset);
1461		if (ret < 0 && ret != -ENOENT) {
1462			ulist_free(tmp);
1463			ulist_free(*roots);
1464			*roots = NULL;
1465			return ret;
1466		}
1467		node = ulist_next(tmp, &uiter);
1468		if (!node)
1469			break;
1470		bytenr = node->val;
1471		cond_resched();
1472	}
1473
1474	ulist_free(tmp);
1475	return 0;
1476}
1477
1478int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
1479			 struct btrfs_fs_info *fs_info, u64 bytenr,
1480			 u64 time_seq, struct ulist **roots,
1481			 bool ignore_offset)
1482{
1483	int ret;
1484
1485	if (!trans)
1486		down_read(&fs_info->commit_root_sem);
1487	ret = btrfs_find_all_roots_safe(trans, fs_info, bytenr,
1488					time_seq, roots, ignore_offset);
1489	if (!trans)
1490		up_read(&fs_info->commit_root_sem);
1491	return ret;
1492}
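
/*
 * Example: listing every root that references an extent, a minimal sketch
 * with hypothetical caller variables. Passing trans == NULL makes the walk
 * use the commit roots, as the wrapper above arranges:
 */
#if 0
	struct ulist *roots;
	struct ulist_iterator uiter;
	struct ulist_node *node;

	ret = btrfs_find_all_roots(NULL, fs_info, bytenr, 0, &roots, false);
	if (ret)
		return ret;
	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(roots, &uiter)))
		btrfs_debug(fs_info, "referenced by root %llu", node->val);
	ulist_free(roots);
#endif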
1493
1494/**
1495 * btrfs_check_shared - tell us whether an extent is shared
1496 *
1497 * btrfs_check_shared uses the backref walking code but will short
1498 * circuit as soon as it finds a root or inode that doesn't match the
1499 * one passed in. This provides a significant performance benefit for
1500 * callers (such as fiemap) which want to know whether the extent is
1501 * shared but do not need a ref count.
1502 *
1503 * This attempts to attach to the running transaction in order to account for
1504 * delayed refs, but continues on even when no running transaction exists.
1505 *
1506 * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
1507 */
1508int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
1509		struct ulist *roots, struct ulist *tmp)
1510{
1511	struct btrfs_fs_info *fs_info = root->fs_info;
1512	struct btrfs_trans_handle *trans;
1513	struct ulist_iterator uiter;
1514	struct ulist_node *node;
1515	struct seq_list elem = SEQ_LIST_INIT(elem);
1516	int ret = 0;
1517	struct share_check shared = {
1518		.root_objectid = root->root_key.objectid,
1519		.inum = inum,
1520		.share_count = 0,
1521	};
1522
1523	ulist_init(roots);
1524	ulist_init(tmp);
1525
1526	trans = btrfs_join_transaction_nostart(root);
1527	if (IS_ERR(trans)) {
1528		if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
1529			ret = PTR_ERR(trans);
1530			goto out;
1531		}
1532		trans = NULL;
1533		down_read(&fs_info->commit_root_sem);
1534	} else {
1535		btrfs_get_tree_mod_seq(fs_info, &elem);
1536	}
1537
1538	ULIST_ITER_INIT(&uiter);
1539	while (1) {
1540		ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
1541					roots, NULL, &shared, false);
1542		if (ret == BACKREF_FOUND_SHARED) {
1543			/* this is the only condition under which we return 1 */
1544			ret = 1;
1545			break;
1546		}
1547		if (ret < 0 && ret != -ENOENT)
1548			break;
1549		ret = 0;
1550		node = ulist_next(tmp, &uiter);
1551		if (!node)
1552			break;
1553		bytenr = node->val;
1554		shared.share_count = 0;
1555		cond_resched();
1556	}
1557
1558	if (trans) {
1559		btrfs_put_tree_mod_seq(fs_info, &elem);
1560		btrfs_end_transaction(trans);
1561	} else {
1562		up_read(&fs_info->commit_root_sem);
1563	}
1564out:
1565	ulist_release(roots);
1566	ulist_release(tmp);
1567	return ret;
1568}
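
/*
 * Example: how a fiemap-style caller might use btrfs_check_shared(), a
 * minimal sketch with hypothetical variables. The scratch ulists only need
 * to be allocated; the function (re)initializes and releases them itself:
 */
#if 0
	int shared;

	shared = btrfs_check_shared(root, btrfs_ino(inode), disk_bytenr,
				    roots, tmp_ulist);
	if (shared < 0)
		return shared;
	if (shared)
		flags |= FIEMAP_EXTENT_SHARED;
#endif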
1569
1570int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
1571			  u64 start_off, struct btrfs_path *path,
1572			  struct btrfs_inode_extref **ret_extref,
1573			  u64 *found_off)
1574{
1575	int ret, slot;
1576	struct btrfs_key key;
1577	struct btrfs_key found_key;
1578	struct btrfs_inode_extref *extref;
1579	const struct extent_buffer *leaf;
1580	unsigned long ptr;
1581
1582	key.objectid = inode_objectid;
1583	key.type = BTRFS_INODE_EXTREF_KEY;
1584	key.offset = start_off;
1585
1586	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1587	if (ret < 0)
1588		return ret;
1589
1590	while (1) {
1591		leaf = path->nodes[0];
1592		slot = path->slots[0];
1593		if (slot >= btrfs_header_nritems(leaf)) {
1594			/*
1595			 * If the item at offset is not found,
1596			 * btrfs_search_slot will point us to the slot
1597			 * where it should be inserted. In our case
1598			 * that will be the slot directly before the
1599			 * next BTRFS_INODE_EXTREF_KEY item. In the case
1600			 * that we're pointing to the last slot in a
1601			 * leaf, we must move one leaf over.
1602			 */
1603			ret = btrfs_next_leaf(root, path);
1604			if (ret) {
1605				if (ret >= 1)
1606					ret = -ENOENT;
1607				break;
1608			}
1609			continue;
1610		}
1611
1612		btrfs_item_key_to_cpu(leaf, &found_key, slot);
1613
1614		/*
1615		 * Check that we're still looking at an extended ref key for
1616		 * this particular objectid. If we have a different objectid
1617		 * or type then there are no more to be found in the tree and
1618		 * we can exit.
1619		 */
1620		ret = -ENOENT;
1621		if (found_key.objectid != inode_objectid)
1622			break;
1623		if (found_key.type != BTRFS_INODE_EXTREF_KEY)
1624			break;
1625
1626		ret = 0;
1627		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1628		extref = (struct btrfs_inode_extref *)ptr;
1629		*ret_extref = extref;
1630		if (found_off)
1631			*found_off = found_key.offset;
1632		break;
1633	}
1634
1635	return ret;
1636}
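
/*
 * Illustrative sketch (editorial addition): the search-loop pattern for
 * btrfs_find_one_extref(). @offset is advanced past each returned item so
 * the next call continues behind it; see iterate_inode_extrefs() further
 * below for the full-featured in-tree user. The example_* name is
 * hypothetical.
 */
static int __maybe_unused example_walk_extrefs(struct btrfs_root *root,
					       u64 inum)
{
	struct btrfs_path *path;
	struct btrfs_inode_extref *extref;
	u64 offset = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	while (1) {
		ret = btrfs_find_one_extref(root, inum, offset, path, &extref,
					    &offset);
		if (ret) {
			if (ret == -ENOENT)
				ret = 0;
			break;
		}
		/* extref points into path->nodes[0]; inspect it here */
		btrfs_release_path(path);
		offset++;
	}
	btrfs_free_path(path);
	return ret;
}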
1637
1638/*
1639 * This iterates to turn a name (from an iref/extref) into a full filesystem
1640 * path. Elements of the path are separated by '/' and the path is guaranteed
1641 * to be 0-terminated. The path is only given within the current file system,
1642 * therefore it never starts with a '/'. The caller is responsible for
1643 * providing "size" bytes in "dest". The dest buffer is filled backwards and,
1644 * finally, the start point of the resulting string is returned. This pointer
1645 * is normally within dest.
1646 * In case the path buffer would overflow, the pointer is decremented further
1647 * as if output was written to the buffer, though no more output is actually
1648 * generated. That way, the caller can determine how much space would be
1649 * required for the path to fit into the buffer. In that case, the returned
1650 * value will be smaller than dest. Callers must check for this!
1651 */
1652char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
1653			u32 name_len, unsigned long name_off,
1654			struct extent_buffer *eb_in, u64 parent,
1655			char *dest, u32 size)
1656{
1657	int slot;
1658	u64 next_inum;
1659	int ret;
1660	s64 bytes_left = ((s64)size) - 1;
1661	struct extent_buffer *eb = eb_in;
1662	struct btrfs_key found_key;
1663	int leave_spinning = path->leave_spinning;
1664	struct btrfs_inode_ref *iref;
1665
1666	if (bytes_left >= 0)
1667		dest[bytes_left] = '\0';
1668
1669	path->leave_spinning = 1;
1670	while (1) {
1671		bytes_left -= name_len;
1672		if (bytes_left >= 0)
1673			read_extent_buffer(eb, dest + bytes_left,
1674					   name_off, name_len);
1675		if (eb != eb_in) {
1676			if (!path->skip_locking)
1677				btrfs_tree_read_unlock_blocking(eb);
1678			free_extent_buffer(eb);
1679		}
1680		ret = btrfs_find_item(fs_root, path, parent, 0,
1681				BTRFS_INODE_REF_KEY, &found_key);
1682		if (ret > 0)
1683			ret = -ENOENT;
1684		if (ret)
1685			break;
1686
1687		next_inum = found_key.offset;
1688
1689		/* regular exit ahead */
1690		if (parent == next_inum)
1691			break;
1692
1693		slot = path->slots[0];
1694		eb = path->nodes[0];
1695		/* make sure we can use eb after releasing the path */
1696		if (eb != eb_in) {
1697			if (!path->skip_locking)
1698				btrfs_set_lock_blocking_read(eb);
1699			path->nodes[0] = NULL;
1700			path->locks[0] = 0;
1701		}
1702		btrfs_release_path(path);
1703		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
1704
1705		name_len = btrfs_inode_ref_name_len(eb, iref);
1706		name_off = (unsigned long)(iref + 1);
1707
1708		parent = next_inum;
1709		--bytes_left;
1710		if (bytes_left >= 0)
1711			dest[bytes_left] = '/';
1712	}
1713
1714	btrfs_release_path(path);
1715	path->leave_spinning = leave_spinning;
1716
1717	if (ret)
1718		return ERR_PTR(ret);
1719
1720	return dest + bytes_left;
1721}
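
/*
 * Illustrative sketch (editorial addition): using the return convention of
 * btrfs_ref_to_path() to detect truncation. If the returned pointer lies
 * below @buf, the buffer was too small and (buf - start) more bytes would
 * have been required; compare inode_to_path() below. The example_* name is
 * hypothetical.
 */
static char * __maybe_unused example_ref_path(struct btrfs_root *fs_root,
					      struct btrfs_path *path,
					      struct btrfs_inode_ref *iref,
					      struct extent_buffer *eb,
					      u64 parent, char *buf, u32 size)
{
	char *start;

	start = btrfs_ref_to_path(fs_root, path,
				  btrfs_inode_ref_name_len(eb, iref),
				  (unsigned long)(iref + 1), eb, parent,
				  buf, size);
	if (IS_ERR(start))
		return start;
	if (start < buf)
		/* truncated: buf - start extra bytes would be needed */
		return ERR_PTR(-ENAMETOOLONG);
	return start;	/* 0-terminated path inside buf */
}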
1722
1723/*
1724 * This makes the path point to (logical EXTENT_ITEM *).
1725 * Returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
1726 * tree blocks and <0 on error.
1727 */
1728int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
1729			struct btrfs_path *path, struct btrfs_key *found_key,
1730			u64 *flags_ret)
1731{
1732	int ret;
1733	u64 flags;
1734	u64 size = 0;
1735	u32 item_size;
1736	const struct extent_buffer *eb;
1737	struct btrfs_extent_item *ei;
1738	struct btrfs_key key;
1739
1740	if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1741		key.type = BTRFS_METADATA_ITEM_KEY;
1742	else
1743		key.type = BTRFS_EXTENT_ITEM_KEY;
1744	key.objectid = logical;
1745	key.offset = (u64)-1;
1746
1747	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
1748	if (ret < 0)
1749		return ret;
1750
1751	ret = btrfs_previous_extent_item(fs_info->extent_root, path, 0);
1752	if (ret) {
1753		if (ret > 0)
1754			ret = -ENOENT;
1755		return ret;
1756	}
1757	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
1758	if (found_key->type == BTRFS_METADATA_ITEM_KEY)
1759		size = fs_info->nodesize;
1760	else if (found_key->type == BTRFS_EXTENT_ITEM_KEY)
1761		size = found_key->offset;
1762
1763	if (found_key->objectid > logical ||
1764	    found_key->objectid + size <= logical) {
1765		btrfs_debug(fs_info,
1766			"logical %llu is not within any extent", logical);
1767		return -ENOENT;
1768	}
1769
1770	eb = path->nodes[0];
1771	item_size = btrfs_item_size_nr(eb, path->slots[0]);
1772	BUG_ON(item_size < sizeof(*ei));
1773
1774	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
1775	flags = btrfs_extent_flags(eb, ei);
1776
1777	btrfs_debug(fs_info,
1778		"logical %llu is at position %llu within the extent (%llu EXTENT_ITEM %llu) flags %#llx size %u",
1779		 logical, logical - found_key->objectid, found_key->objectid,
1780		 found_key->offset, flags, item_size);
1781
1782	WARN_ON(!flags_ret);
1783	if (flags_ret) {
1784		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1785			*flags_ret = BTRFS_EXTENT_FLAG_TREE_BLOCK;
1786		else if (flags & BTRFS_EXTENT_FLAG_DATA)
1787			*flags_ret = BTRFS_EXTENT_FLAG_DATA;
1788		else
1789			BUG();
1790		return 0;
1791	}
1792
1793	return -EIO;
1794}
1795
1796/*
1797 * Helper function to iterate extent inline refs. ptr must point to a 0 value
1798 * for the first call and may be modified. It is used to track state.
1799 * If more refs exist, 0 is returned and the next call to
1800 * get_extent_inline_ref must pass the modified ptr parameter to get the
1801 * next ref. After the last ref was processed, 1 is returned.
1802 * Returns <0 on error.
1803 */
1804static int get_extent_inline_ref(unsigned long *ptr,
1805				 const struct extent_buffer *eb,
1806				 const struct btrfs_key *key,
1807				 const struct btrfs_extent_item *ei,
1808				 u32 item_size,
1809				 struct btrfs_extent_inline_ref **out_eiref,
1810				 int *out_type)
1811{
1812	unsigned long end;
1813	u64 flags;
1814	struct btrfs_tree_block_info *info;
1815
1816	if (!*ptr) {
1817		/* first call */
1818		flags = btrfs_extent_flags(eb, ei);
1819		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1820			if (key->type == BTRFS_METADATA_ITEM_KEY) {
1821				/* a skinny metadata extent */
1822				*out_eiref =
1823				     (struct btrfs_extent_inline_ref *)(ei + 1);
1824			} else {
1825				WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
1826				info = (struct btrfs_tree_block_info *)(ei + 1);
1827				*out_eiref =
1828				   (struct btrfs_extent_inline_ref *)(info + 1);
1829			}
1830		} else {
1831			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
1832		}
1833		*ptr = (unsigned long)*out_eiref;
1834		if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
1835			return -ENOENT;
1836	}
1837
1838	end = (unsigned long)ei + item_size;
1839	*out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
1840	*out_type = btrfs_get_extent_inline_ref_type(eb, *out_eiref,
1841						     BTRFS_REF_TYPE_ANY);
1842	if (*out_type == BTRFS_REF_TYPE_INVALID)
1843		return -EUCLEAN;
1844
1845	*ptr += btrfs_extent_inline_ref_size(*out_type);
1846	WARN_ON(*ptr > end);
1847	if (*ptr == end)
1848		return 1; /* last */
1849
1850	return 0;
1851}
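
/*
 * Illustrative sketch (editorial addition): the raw calling pattern for
 * get_extent_inline_ref(). Unlike tree_backref_for_extent() below, this
 * yields every inline ref, including data refs. The example_* name is
 * hypothetical.
 */
static void __maybe_unused example_walk_inline_refs(const struct extent_buffer *eb,
						    const struct btrfs_key *key,
						    const struct btrfs_extent_item *ei,
						    u32 item_size)
{
	struct btrfs_extent_inline_ref *eiref;
	unsigned long ptr = 0;
	int type;
	int ret;

	do {
		ret = get_extent_inline_ref(&ptr, eb, key, ei, item_size,
					    &eiref, &type);
		if (ret < 0)
			return;	/* e.g. -ENOENT or -EUCLEAN */
		/* eiref/type describe one inline ref, even when ret == 1 */
	} while (ret == 0);	/* ret == 1 after the last ref */
}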
1852
1853/*
1854 * Reads the tree block backref for an extent. Tree level and root are
1855 * returned through out_level and out_root. ptr must point to a 0 value for
1856 * the first call and may be modified (see the get_extent_inline_ref comment).
1857 * Returns 0 if data was provided, 1 if there was no more data to provide or
1858 * <0 on error.
1859 */
1860int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1861			    struct btrfs_key *key, struct btrfs_extent_item *ei,
1862			    u32 item_size, u64 *out_root, u8 *out_level)
1863{
1864	int ret;
1865	int type;
1866	struct btrfs_extent_inline_ref *eiref;
1867
1868	if (*ptr == (unsigned long)-1)
1869		return 1;
1870
1871	while (1) {
1872		ret = get_extent_inline_ref(ptr, eb, key, ei, item_size,
1873					      &eiref, &type);
1874		if (ret < 0)
1875			return ret;
1876
1877		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
1878		    type == BTRFS_SHARED_BLOCK_REF_KEY)
1879			break;
1880
1881		if (ret == 1)
1882			return 1;
1883	}
1884
1885	/* we can treat both ref types equally here */
1886	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
1887
1888	if (key->type == BTRFS_EXTENT_ITEM_KEY) {
1889		struct btrfs_tree_block_info *info;
1890
1891		info = (struct btrfs_tree_block_info *)(ei + 1);
1892		*out_level = btrfs_tree_block_level(eb, info);
1893	} else {
1894		ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
1895		*out_level = (u8)key->offset;
1896	}
1897
1898	if (ret == 1)
1899		*ptr = (unsigned long)-1;
1900
1901	return 0;
1902}
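
/*
 * Illustrative sketch (editorial addition): the intended calling pattern for
 * tree_backref_for_extent(), as used e.g. by scrub to report the tree roots
 * affected by a bad block. @ptr starts at 0 and carries the iteration state
 * between calls. The example_* name is hypothetical.
 */
static void __maybe_unused example_walk_tree_backrefs(struct extent_buffer *eb,
						      struct btrfs_key *key,
						      struct btrfs_extent_item *ei,
						      u32 item_size)
{
	unsigned long ptr = 0;
	u64 root;
	u8 level;
	int ret;

	while ((ret = tree_backref_for_extent(&ptr, eb, key, ei, item_size,
					      &root, &level)) == 0) {
		/* One tree backref resolved: tree @root at @level */
	}
	/* ret > 0: iteration complete, ret < 0: error */
}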
1903
1904static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
1905			     struct extent_inode_elem *inode_list,
1906			     u64 root, u64 extent_item_objectid,
1907			     iterate_extent_inodes_t *iterate, void *ctx)
1908{
1909	struct extent_inode_elem *eie;
1910	int ret = 0;
1911
1912	for (eie = inode_list; eie; eie = eie->next) {
1913		btrfs_debug(fs_info,
1914			    "ref for %llu resolved, key (%llu EXTENT_DATA %llu), root %llu",
1915			    extent_item_objectid, eie->inum,
1916			    eie->offset, root);
1917		ret = iterate(eie->inum, eie->offset, root, ctx);
1918		if (ret) {
1919			btrfs_debug(fs_info,
1920				    "stopping iteration for %llu due to ret=%d",
1921				    extent_item_objectid, ret);
1922			break;
1923		}
1924	}
1925
1926	return ret;
1927}
1928
1929/*
1930 * Calls iterate() for every inode that references the extent identified by
1931 * the given parameters.
1932 * When the iterator function returns a non-zero value, iteration stops.
1933 */
1934int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1935				u64 extent_item_objectid, u64 extent_item_pos,
1936				int search_commit_root,
1937				iterate_extent_inodes_t *iterate, void *ctx,
1938				bool ignore_offset)
1939{
1940	int ret;
1941	struct btrfs_trans_handle *trans = NULL;
1942	struct ulist *refs = NULL;
1943	struct ulist *roots = NULL;
1944	struct ulist_node *ref_node = NULL;
1945	struct ulist_node *root_node = NULL;
1946	struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem);
1947	struct ulist_iterator ref_uiter;
1948	struct ulist_iterator root_uiter;
1949
1950	btrfs_debug(fs_info, "resolving all inodes for extent %llu",
1951			extent_item_objectid);
1952
1953	if (!search_commit_root) {
1954		trans = btrfs_attach_transaction(fs_info->extent_root);
1955		if (IS_ERR(trans)) {
1956			if (PTR_ERR(trans) != -ENOENT &&
1957			    PTR_ERR(trans) != -EROFS)
1958				return PTR_ERR(trans);
1959			trans = NULL;
1960		}
1961	}
1962
1963	if (trans)
1964		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1965	else
1966		down_read(&fs_info->commit_root_sem);
1967
1968	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
1969				   tree_mod_seq_elem.seq, &refs,
1970				   &extent_item_pos, ignore_offset);
1971	if (ret)
1972		goto out;
1973
1974	ULIST_ITER_INIT(&ref_uiter);
1975	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
1976		ret = btrfs_find_all_roots_safe(trans, fs_info, ref_node->val,
1977						tree_mod_seq_elem.seq, &roots,
1978						ignore_offset);
1979		if (ret)
1980			break;
1981		ULIST_ITER_INIT(&root_uiter);
1982		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
1983			btrfs_debug(fs_info,
1984				    "root %llu references leaf %llu, data list %#llx",
1985				    root_node->val, ref_node->val,
1986				    ref_node->aux);
1987			ret = iterate_leaf_refs(fs_info,
1988						(struct extent_inode_elem *)
1989						(uintptr_t)ref_node->aux,
1990						root_node->val,
1991						extent_item_objectid,
1992						iterate, ctx);
1993		}
1994		ulist_free(roots);
1995	}
1996
1997	free_leaf_list(refs);
1998out:
1999	if (trans) {
2000		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
2001		btrfs_end_transaction(trans);
2002	} else {
2003		up_read(&fs_info->commit_root_sem);
2004	}
2005
2006	return ret;
2007}
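
/*
 * Illustrative sketch (editorial addition): a minimal iterate_extent_inodes_t
 * callback and how it might be wired up. The callback sees every
 * (inode, offset, root) triple that references the extent; returning
 * non-zero stops the iteration. The example_* names are hypothetical.
 */
static int __maybe_unused example_inode_cb(u64 inum, u64 offset, u64 root,
					   void *ctx)
{
	pr_debug("extent referenced by ino %llu offset %llu root %llu\n",
		 inum, offset, root);
	return 0;	/* continue iterating */
}

static int __maybe_unused example_resolve(struct btrfs_fs_info *fs_info,
					  u64 extent_bytenr, u64 pos)
{
	/* search_commit_root == 0: also consider the running transaction */
	return iterate_extent_inodes(fs_info, extent_bytenr, pos, 0,
				     example_inode_cb, NULL, false);
}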
2008
2009int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
2010				struct btrfs_path *path,
2011				iterate_extent_inodes_t *iterate, void *ctx,
2012				bool ignore_offset)
2013{
2014	int ret;
2015	u64 extent_item_pos;
2016	u64 flags = 0;
2017	struct btrfs_key found_key;
2018	int search_commit_root = path->search_commit_root;
2019
2020	ret = extent_from_logical(fs_info, logical, path, &found_key, &flags);
2021	btrfs_release_path(path);
2022	if (ret < 0)
2023		return ret;
2024	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
2025		return -EINVAL;
2026
2027	extent_item_pos = logical - found_key.objectid;
2028	ret = iterate_extent_inodes(fs_info, found_key.objectid,
2029					extent_item_pos, search_commit_root,
2030					iterate, ctx, ignore_offset);
2031
2032	return ret;
2033}
2034
2035typedef int (iterate_irefs_t)(u64 parent, u32 name_len, unsigned long name_off,
2036			      struct extent_buffer *eb, void *ctx);
2037
2038static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
2039			      struct btrfs_path *path,
2040			      iterate_irefs_t *iterate, void *ctx)
2041{
2042	int ret = 0;
2043	int slot;
2044	u32 cur;
2045	u32 len;
2046	u32 name_len;
2047	u64 parent = 0;
2048	int found = 0;
2049	struct extent_buffer *eb;
2050	struct btrfs_item *item;
2051	struct btrfs_inode_ref *iref;
2052	struct btrfs_key found_key;
2053
2054	while (!ret) {
2055		ret = btrfs_find_item(fs_root, path, inum,
2056				parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
2057				&found_key);
2058
2059		if (ret < 0)
2060			break;
2061		if (ret) {
2062			ret = found ? 0 : -ENOENT;
2063			break;
2064		}
2065		++found;
2066
2067		parent = found_key.offset;
2068		slot = path->slots[0];
2069		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2070		if (!eb) {
2071			ret = -ENOMEM;
2072			break;
2073		}
2074		btrfs_release_path(path);
2075
2076		item = btrfs_item_nr(slot);
2077		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
2078
2079		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
2080			name_len = btrfs_inode_ref_name_len(eb, iref);
2081			/* path must be released before calling iterate()! */
2082			btrfs_debug(fs_root->fs_info,
2083				"following ref at offset %u for inode %llu in tree %llu",
2084				cur, found_key.objectid,
2085				fs_root->root_key.objectid);
2086			ret = iterate(parent, name_len,
2087				      (unsigned long)(iref + 1), eb, ctx);
2088			if (ret)
2089				break;
2090			len = sizeof(*iref) + name_len;
2091			iref = (struct btrfs_inode_ref *)((char *)iref + len);
2092		}
2093		free_extent_buffer(eb);
2094	}
2095
2096	btrfs_release_path(path);
2097
2098	return ret;
2099}
2100
2101static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
2102				 struct btrfs_path *path,
2103				 iterate_irefs_t *iterate, void *ctx)
2104{
2105	int ret;
2106	int slot;
2107	u64 offset = 0;
2108	u64 parent;
2109	int found = 0;
2110	struct extent_buffer *eb;
2111	struct btrfs_inode_extref *extref;
2112	u32 item_size;
2113	u32 cur_offset;
2114	unsigned long ptr;
2115
2116	while (1) {
2117		ret = btrfs_find_one_extref(fs_root, inum, offset, path, &extref,
2118					    &offset);
2119		if (ret < 0)
2120			break;
2121		if (ret) {
2122			ret = found ? 0 : -ENOENT;
2123			break;
2124		}
2125		++found;
2126
2127		slot = path->slots[0];
2128		eb = btrfs_clone_extent_buffer(path->nodes[0]);
2129		if (!eb) {
2130			ret = -ENOMEM;
2131			break;
2132		}
2133		btrfs_release_path(path);
2134
2135		item_size = btrfs_item_size_nr(eb, slot);
2136		ptr = btrfs_item_ptr_offset(eb, slot);
2137		cur_offset = 0;
2138
2139		while (cur_offset < item_size) {
2140			u32 name_len;
2141
2142			extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
2143			parent = btrfs_inode_extref_parent(eb, extref);
2144			name_len = btrfs_inode_extref_name_len(eb, extref);
2145			ret = iterate(parent, name_len,
2146				      (unsigned long)&extref->name, eb, ctx);
2147			if (ret)
2148				break;
2149
2150			cur_offset += btrfs_inode_extref_name_len(eb, extref);
2151			cur_offset += sizeof(*extref);
2152		}
2153		free_extent_buffer(eb);
2154
2155		offset++;
2156	}
2157
2158	btrfs_release_path(path);
2159
2160	return ret;
2161}
2162
2163static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
2164			 struct btrfs_path *path, iterate_irefs_t *iterate,
2165			 void *ctx)
2166{
2167	int ret;
2168	int found_refs = 0;
2169
2170	ret = iterate_inode_refs(inum, fs_root, path, iterate, ctx);
2171	if (!ret)
2172		++found_refs;
2173	else if (ret != -ENOENT)
2174		return ret;
2175
2176	ret = iterate_inode_extrefs(inum, fs_root, path, iterate, ctx);
2177	if (ret == -ENOENT && found_refs)
2178		return 0;
2179
2180	return ret;
2181}
2182
2183/*
2184 * Returns 0 if the path could be dumped (possibly truncated).
2185 * Returns <0 in case of an error.
2186 */
2187static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
2188			 struct extent_buffer *eb, void *ctx)
2189{
2190	struct inode_fs_paths *ipath = ctx;
2191	char *fspath;
2192	char *fspath_min;
2193	int i = ipath->fspath->elem_cnt;
2194	const int s_ptr = sizeof(char *);
2195	u32 bytes_left;
2196
2197	bytes_left = ipath->fspath->bytes_left > s_ptr ?
2198					ipath->fspath->bytes_left - s_ptr : 0;
2199
2200	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
2201	fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
2202				   name_off, eb, inum, fspath_min, bytes_left);
2203	if (IS_ERR(fspath))
2204		return PTR_ERR(fspath);
2205
2206	if (fspath > fspath_min) {
2207		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
2208		++ipath->fspath->elem_cnt;
2209		ipath->fspath->bytes_left = fspath - fspath_min;
2210	} else {
2211		++ipath->fspath->elem_missed;
2212		ipath->fspath->bytes_missing += fspath_min - fspath;
2213		ipath->fspath->bytes_left = 0;
2214	}
2215
2216	return 0;
2217}
2218
2219/*
2220 * This dumps all file system paths to the inode into the ipath struct,
2221 * provided it has been created large enough. Each path is zero-terminated
2222 * and accessed from ipath->fspath->val[i].
2223 * When it returns, ipath->fspath->elem_cnt paths are available in
2224 * ipath->fspath->val[]. When the allocated space wasn't sufficient, the
2225 * number of missed paths is recorded in ipath->fspath->elem_missed,
2226 * otherwise it's zero. ipath->fspath->bytes_missing holds the number of
2227 * bytes that would have been needed to return all paths.
2228 */
2229int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
2230{
2231	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
2232			     inode_to_path, ipath);
2233}
2234
2235struct btrfs_data_container *init_data_container(u32 total_bytes)
2236{
2237	struct btrfs_data_container *data;
2238	size_t alloc_bytes;
2239
2240	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
2241	data = kvmalloc(alloc_bytes, GFP_KERNEL);
2242	if (!data)
2243		return ERR_PTR(-ENOMEM);
2244
2245	if (total_bytes >= sizeof(*data)) {
2246		data->bytes_left = total_bytes - sizeof(*data);
2247		data->bytes_missing = 0;
2248	} else {
2249		data->bytes_missing = sizeof(*data) - total_bytes;
2250		data->bytes_left = 0;
2251	}
2252
2253	data->elem_cnt = 0;
2254	data->elem_missed = 0;
2255
2256	return data;
2257}
2258
2259/*
2260 * Allocates space to return multiple file system paths for an inode.
2261 * total_bytes to allocate are passed; note that the space usable for actual
2262 * path information will be total_bytes - sizeof(struct btrfs_data_container).
2263 * The returned pointer must be freed with free_ipath() in the end.
2264 */
2265struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
2266					struct btrfs_path *path)
2267{
2268	struct inode_fs_paths *ifp;
2269	struct btrfs_data_container *fspath;
2270
2271	fspath = init_data_container(total_bytes);
2272	if (IS_ERR(fspath))
2273		return ERR_CAST(fspath);
2274
2275	ifp = kmalloc(sizeof(*ifp), GFP_KERNEL);
2276	if (!ifp) {
2277		kvfree(fspath);
2278		return ERR_PTR(-ENOMEM);
2279	}
2280
2281	ifp->btrfs_path = path;
2282	ifp->fspath = fspath;
2283	ifp->fs_root = fs_root;
2284
2285	return ifp;
2286}
2287
2288void free_ipath(struct inode_fs_paths *ipath)
2289{
2290	if (!ipath)
2291		return;
2292	kvfree(ipath->fspath);
2293	kfree(ipath);
2294}
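
/*
 * Illustrative sketch (editorial addition): the init_ipath() /
 * paths_from_inode() / free_ipath() lifecycle. The 4096-byte container size
 * is an arbitrary example value; on overflow, elem_missed/bytes_missing tell
 * the caller what was lacking. The example_* name is hypothetical.
 */
static int __maybe_unused example_print_paths(struct btrfs_root *fs_root,
					      u64 inum)
{
	struct btrfs_path *path;
	struct inode_fs_paths *ipath;
	int ret;
	u32 i;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ipath = init_ipath(4096, fs_root, path);
	if (IS_ERR(ipath)) {
		btrfs_free_path(path);
		return PTR_ERR(ipath);
	}
	ret = paths_from_inode(inum, ipath);
	for (i = 0; !ret && i < ipath->fspath->elem_cnt; i++)
		pr_debug("path %u: %s\n", i,
			 (char *)(unsigned long)ipath->fspath->val[i]);
	free_ipath(ipath);
	btrfs_free_path(path);
	return ret;
}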
2295
2296struct btrfs_backref_iter *btrfs_backref_iter_alloc(
2297		struct btrfs_fs_info *fs_info, gfp_t gfp_flag)
2298{
2299	struct btrfs_backref_iter *ret;
2300
2301	ret = kzalloc(sizeof(*ret), gfp_flag);
2302	if (!ret)
2303		return NULL;
2304
2305	ret->path = btrfs_alloc_path();
2306	if (!ret->path) {
2307		kfree(ret);
2308		return NULL;
2309	}
2310
2311	/* Current backref iterator only supports iteration in commit root */
2312	ret->path->search_commit_root = 1;
2313	ret->path->skip_locking = 1;
2314	ret->fs_info = fs_info;
2315
2316	return ret;
2317}
2318
2319int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)
2320{
2321	struct btrfs_fs_info *fs_info = iter->fs_info;
2322	struct btrfs_path *path = iter->path;
2323	struct btrfs_extent_item *ei;
2324	struct btrfs_key key;
2325	int ret;
2326
2327	key.objectid = bytenr;
2328	key.type = BTRFS_METADATA_ITEM_KEY;
2329	key.offset = (u64)-1;
2330	iter->bytenr = bytenr;
2331
2332	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
2333	if (ret < 0)
2334		return ret;
2335	if (ret == 0) {
		/*
		 * Key with offset -1 found: there would have to exist an
		 * extent item with such an offset, but that is outside the
		 * valid range, so an exact match means corruption.
		 */
2336		ret = -EUCLEAN;
2337		goto release;
2338	}
2339	if (path->slots[0] == 0) {
2340		WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
2341		ret = -EUCLEAN;
2342		goto release;
2343	}
2344	path->slots[0]--;
2345
2346	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2347	if ((key.type != BTRFS_EXTENT_ITEM_KEY &&
2348	     key.type != BTRFS_METADATA_ITEM_KEY) || key.objectid != bytenr) {
2349		ret = -ENOENT;
2350		goto release;
2351	}
2352	memcpy(&iter->cur_key, &key, sizeof(key));
2353	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2354						    path->slots[0]);
2355	iter->end_ptr = (u32)(iter->item_ptr +
2356			btrfs_item_size_nr(path->nodes[0], path->slots[0]));
2357	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
2358			    struct btrfs_extent_item);
2359
2360	/*
2361	 * Only support iteration on tree backref yet.
2362	 *
2363	 * This is an extra precaution for non skinny-metadata, where
2364	 * EXTENT_ITEM is also used for tree blocks, that we can only use
2365	 * extent flags to determine if it's a tree block.
2366	 */
2367	if (btrfs_extent_flags(path->nodes[0], ei) & BTRFS_EXTENT_FLAG_DATA) {
2368		ret = -ENOTSUPP;
2369		goto release;
2370	}
2371	iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
2372
2373	/* If there is no inline backref, go search for keyed backref */
2374	if (iter->cur_ptr >= iter->end_ptr) {
2375		ret = btrfs_next_item(fs_info->extent_root, path);
2376
2377		/* No inline nor keyed ref */
2378		if (ret > 0) {
2379			ret = -ENOENT;
2380			goto release;
2381		}
2382		if (ret < 0)
2383			goto release;
2384
2385		btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
2386				path->slots[0]);
2387		if (iter->cur_key.objectid != bytenr ||
2388		    (iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY &&
2389		     iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY)) {
2390			ret = -ENOENT;
2391			goto release;
2392		}
2393		iter->cur_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2394							   path->slots[0]);
2395		iter->item_ptr = iter->cur_ptr;
2396		iter->end_ptr = (u32)(iter->item_ptr + btrfs_item_size_nr(
2397				      path->nodes[0], path->slots[0]));
2398	}
2399
2400	return 0;
2401release:
2402	btrfs_backref_iter_release(iter);
2403	return ret;
2404}
2405
2406/*
2407 * Go to the next backref item of current bytenr, can be either inlined or
2408 * keyed.
2409 *
2410 * Caller needs to check whether it's inline ref or not by iter->cur_key.
2411 *
2412 * Return 0 if we get the next backref without problem.
2413 * Return >0 if there is no extra backref for this bytenr.
2414 * Return <0 if something went wrong.
2415 */
2416int btrfs_backref_iter_next(struct btrfs_backref_iter *iter)
2417{
2418	struct extent_buffer *eb = btrfs_backref_get_eb(iter);
2419	struct btrfs_path *path = iter->path;
2420	struct btrfs_extent_inline_ref *iref;
2421	int ret;
2422	u32 size;
2423
2424	if (btrfs_backref_iter_is_inline_ref(iter)) {
2425		/* We're still inside the inline refs */
2426		ASSERT(iter->cur_ptr < iter->end_ptr);
2427
2428		if (btrfs_backref_has_tree_block_info(iter)) {
2429			/* First tree block info */
2430			size = sizeof(struct btrfs_tree_block_info);
2431		} else {
2432			/* Use inline ref type to determine the size */
2433			int type;
2434
2435			iref = (struct btrfs_extent_inline_ref *)
2436				((unsigned long)iter->cur_ptr);
2437			type = btrfs_extent_inline_ref_type(eb, iref);
2438
2439			size = btrfs_extent_inline_ref_size(type);
2440		}
2441		iter->cur_ptr += size;
2442		if (iter->cur_ptr < iter->end_ptr)
2443			return 0;
2444
2445		/* All inline items iterated, fall through */
2446	}
2447
2448	/* We're at keyed items, there is no inline item, go to the next one */
2449	ret = btrfs_next_item(iter->fs_info->extent_root, iter->path);
2450	if (ret)
2451		return ret;
2452
2453	btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key, path->slots[0]);
2454	if (iter->cur_key.objectid != iter->bytenr ||
2455	    (iter->cur_key.type != BTRFS_TREE_BLOCK_REF_KEY &&
2456	     iter->cur_key.type != BTRFS_SHARED_BLOCK_REF_KEY))
2457		return 1;
2458	iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
2459					path->slots[0]);
2460	iter->cur_ptr = iter->item_ptr;
2461	iter->end_ptr = iter->item_ptr + (u32)btrfs_item_size_nr(path->nodes[0],
2462						path->slots[0]);
2463	return 0;
2464}
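
/*
 * Illustrative sketch (editorial addition): the intended iteration pattern
 * for the backref iterator, assuming the btrfs_backref_iter_release()/_free()
 * helpers from backref.h. The example_* name is hypothetical.
 */
static int __maybe_unused example_walk_backrefs(struct btrfs_fs_info *fs_info,
						u64 bytenr)
{
	struct btrfs_backref_iter *iter;
	int ret;

	iter = btrfs_backref_iter_alloc(fs_info, GFP_NOFS);
	if (!iter)
		return -ENOMEM;
	for (ret = btrfs_backref_iter_start(iter, bytenr);
	     ret == 0; ret = btrfs_backref_iter_next(iter)) {
		/*
		 * Inspect iter->cur_key here; for inline refs (see
		 * btrfs_backref_iter_is_inline_ref()) the ref body sits at
		 * iter->cur_ptr inside the extent item.
		 */
	}
	btrfs_backref_iter_release(iter);
	btrfs_backref_iter_free(iter);
	return ret < 0 ? ret : 0;
}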
2465
2466void btrfs_backref_init_cache(struct btrfs_fs_info *fs_info,
2467			      struct btrfs_backref_cache *cache, int is_reloc)
2468{
2469	int i;
2470
2471	cache->rb_root = RB_ROOT;
2472	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2473		INIT_LIST_HEAD(&cache->pending[i]);
2474	INIT_LIST_HEAD(&cache->changed);
2475	INIT_LIST_HEAD(&cache->detached);
2476	INIT_LIST_HEAD(&cache->leaves);
2477	INIT_LIST_HEAD(&cache->pending_edge);
2478	INIT_LIST_HEAD(&cache->useless_node);
2479	cache->fs_info = fs_info;
2480	cache->is_reloc = is_reloc;
2481}
2482
2483struct btrfs_backref_node *btrfs_backref_alloc_node(
2484		struct btrfs_backref_cache *cache, u64 bytenr, int level)
2485{
2486	struct btrfs_backref_node *node;
2487
2488	ASSERT(level >= 0 && level < BTRFS_MAX_LEVEL);
2489	node = kzalloc(sizeof(*node), GFP_NOFS);
2490	if (!node)
2491		return node;
2492
2493	INIT_LIST_HEAD(&node->list);
2494	INIT_LIST_HEAD(&node->upper);
2495	INIT_LIST_HEAD(&node->lower);
2496	RB_CLEAR_NODE(&node->rb_node);
2497	cache->nr_nodes++;
2498	node->level = level;
2499	node->bytenr = bytenr;
2500
2501	return node;
2502}
2503
2504struct btrfs_backref_edge *btrfs_backref_alloc_edge(
2505		struct btrfs_backref_cache *cache)
2506{
2507	struct btrfs_backref_edge *edge;
2508
2509	edge = kzalloc(sizeof(*edge), GFP_NOFS);
2510	if (edge)
2511		cache->nr_edges++;
2512	return edge;
2513}
2514
2515/*
2516 * Drop the backref node from cache, also cleaning up all its
2517 * upper edges and any uncached nodes in the path.
2518 *
2519 * This cleanup happens bottom up, thus the node should either
2520 * be the lowest node in the cache or a detached node.
2521 */
2522void btrfs_backref_cleanup_node(struct btrfs_backref_cache *cache,
2523				struct btrfs_backref_node *node)
2524{
2525	struct btrfs_backref_node *upper;
2526	struct btrfs_backref_edge *edge;
2527
2528	if (!node)
2529		return;
2530
2531	BUG_ON(!node->lowest && !node->detached);
2532	while (!list_empty(&node->upper)) {
2533		edge = list_entry(node->upper.next, struct btrfs_backref_edge,
2534				  list[LOWER]);
2535		upper = edge->node[UPPER];
2536		list_del(&edge->list[LOWER]);
2537		list_del(&edge->list[UPPER]);
2538		btrfs_backref_free_edge(cache, edge);
2539
2540		if (RB_EMPTY_NODE(&upper->rb_node)) {
2541			BUG_ON(!list_empty(&node->upper));
2542			btrfs_backref_drop_node(cache, node);
2543			node = upper;
2544			node->lowest = 1;
2545			continue;
2546		}
2547		/*
2548		 * Add the node to the leaf node list if no other child block
2549		 * is cached.
2550		 */
2551		if (list_empty(&upper->lower)) {
2552			list_add_tail(&upper->lower, &cache->leaves);
2553			upper->lowest = 1;
2554		}
2555	}
2556
2557	btrfs_backref_drop_node(cache, node);
2558}
2559
2560/*
2561 * Release all nodes/edges from current cache
2562 */
2563void btrfs_backref_release_cache(struct btrfs_backref_cache *cache)
2564{
2565	struct btrfs_backref_node *node;
2566	int i;
2567
2568	while (!list_empty(&cache->detached)) {
2569		node = list_entry(cache->detached.next,
2570				  struct btrfs_backref_node, list);
2571		btrfs_backref_cleanup_node(cache, node);
2572	}
2573
2574	while (!list_empty(&cache->leaves)) {
2575		node = list_entry(cache->leaves.next,
2576				  struct btrfs_backref_node, lower);
2577		btrfs_backref_cleanup_node(cache, node);
2578	}
2579
2580	cache->last_trans = 0;
2581
2582	for (i = 0; i < BTRFS_MAX_LEVEL; i++)
2583		ASSERT(list_empty(&cache->pending[i]));
2584	ASSERT(list_empty(&cache->pending_edge));
2585	ASSERT(list_empty(&cache->useless_node));
2586	ASSERT(list_empty(&cache->changed));
2587	ASSERT(list_empty(&cache->detached));
2588	ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
2589	ASSERT(!cache->nr_nodes);
2590	ASSERT(!cache->nr_edges);
2591}
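
/*
 * Illustrative sketch (editorial addition): the overall cache lifecycle. A
 * generic-purpose user passes is_reloc == 0; relocation passes 1 so that
 * reloc roots are resolved via find_reloc_root(). The example_* name is
 * hypothetical.
 */
static void __maybe_unused example_cache_lifecycle(struct btrfs_fs_info *fs_info)
{
	struct btrfs_backref_cache cache;

	btrfs_backref_init_cache(fs_info, &cache, 0);
	/* ... build nodes with btrfs_backref_add_tree_node() etc. ... */
	btrfs_backref_release_cache(&cache);
}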
2592
2593/*
2594 * Handle direct tree backref
2595 *
2596 * Direct tree backref means the backref item shows its parent bytenr
2597 * directly. This is for SHARED_BLOCK_REF backref (keyed or inlined).
2598 *
2599 * @ref_key:	The converted backref key.
2600 *		For keyed backref, it's the item key.
2601 *		For inlined backref, objectid is the bytenr,
2602 *		type is btrfs_inline_ref_type, offset is
2603 *		btrfs_inline_ref_offset.
2604 */
2605static int handle_direct_tree_backref(struct btrfs_backref_cache *cache,
2606				      struct btrfs_key *ref_key,
2607				      struct btrfs_backref_node *cur)
2608{
2609	struct btrfs_backref_edge *edge;
2610	struct btrfs_backref_node *upper;
2611	struct rb_node *rb_node;
2612
2613	ASSERT(ref_key->type == BTRFS_SHARED_BLOCK_REF_KEY);
2614
2615	/* Only reloc root uses backref pointing to itself */
2616	if (ref_key->objectid == ref_key->offset) {
2617		struct btrfs_root *root;
2618
2619		cur->is_reloc_root = 1;
2620		/* Only reloc backref cache cares about a specific root */
2621		if (cache->is_reloc) {
2622			root = find_reloc_root(cache->fs_info, cur->bytenr);
2623			if (WARN_ON(!root))
2624				return -ENOENT;
2625			cur->root = root;
2626		} else {
2627			/*
2628			 * For generic purpose backref cache, reloc root node
2629			 * is useless.
2630			 */
2631			list_add(&cur->list, &cache->useless_node);
2632		}
2633		return 0;
2634	}
2635
2636	edge = btrfs_backref_alloc_edge(cache);
2637	if (!edge)
2638		return -ENOMEM;
2639
2640	rb_node = rb_simple_search(&cache->rb_root, ref_key->offset);
2641	if (!rb_node) {
2642		/* Parent node not yet cached */
2643		upper = btrfs_backref_alloc_node(cache, ref_key->offset,
2644					   cur->level + 1);
2645		if (!upper) {
2646			btrfs_backref_free_edge(cache, edge);
2647			return -ENOMEM;
2648		}
2649
2650		/*
2651		 * Backrefs for the upper level block aren't cached, add the
2652		 * block to the pending list
2653		 */
2654		list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2655	} else {
2656		/* Parent node already cached */
2657		upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
2658		ASSERT(upper->checked);
2659		INIT_LIST_HEAD(&edge->list[UPPER]);
2660	}
2661	btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
2662	return 0;
2663}
2664
2665/*
2666 * Handle indirect tree backref
2667 *
2668 * Indirect tree backref means we only know which tree the node belongs to.
2669 * We still need to do a tree search to find out the parents. This is for
2670 * TREE_BLOCK_REF backref (keyed or inlined).
2671 *
 *
2672 * @ref_key:	The same as @ref_key in handle_direct_tree_backref()
2673 * @tree_key:	The first key of this tree block.
2674 * @path:	A clean (released) path, to avoid allocating a path every
2675 *		time the function gets called.
2676 */
2677static int handle_indirect_tree_backref(struct btrfs_backref_cache *cache,
2678					struct btrfs_path *path,
2679					struct btrfs_key *ref_key,
2680					struct btrfs_key *tree_key,
2681					struct btrfs_backref_node *cur)
2682{
2683	struct btrfs_fs_info *fs_info = cache->fs_info;
2684	struct btrfs_backref_node *upper;
2685	struct btrfs_backref_node *lower;
2686	struct btrfs_backref_edge *edge;
2687	struct extent_buffer *eb;
2688	struct btrfs_root *root;
2689	struct rb_node *rb_node;
2690	int level;
2691	bool need_check = true;
2692	int ret;
2693
2694	root = btrfs_get_fs_root(fs_info, ref_key->offset, false);
2695	if (IS_ERR(root))
2696		return PTR_ERR(root);
2697	if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2698		cur->cowonly = 1;
2699
2700	if (btrfs_root_level(&root->root_item) == cur->level) {
2701		/* Tree root */
2702		ASSERT(btrfs_root_bytenr(&root->root_item) == cur->bytenr);
2703		/*
2704		 * For reloc backref cache, we may ignore reloc root.  But for
2705		 * general purpose backref cache, we can't rely on
2706		 * btrfs_should_ignore_reloc_root() as it may conflict with
2707		 * current running relocation and lead to missing root.
2708		 *
2709		 * For general purpose backref cache, reloc root detection is
2710		 * completely relying on direct backref (key->offset is parent
2711		 * bytenr), thus only do such check for reloc cache.
2712		 */
2713		if (btrfs_should_ignore_reloc_root(root) && cache->is_reloc) {
2714			btrfs_put_root(root);
2715			list_add(&cur->list, &cache->useless_node);
2716		} else {
2717			cur->root = root;
2718		}
2719		return 0;
2720	}
2721
2722	level = cur->level + 1;
2723
2724	/* Search the tree to find parent blocks referring to the block */
2725	path->search_commit_root = 1;
2726	path->skip_locking = 1;
2727	path->lowest_level = level;
2728	ret = btrfs_search_slot(NULL, root, tree_key, path, 0, 0);
2729	path->lowest_level = 0;
2730	if (ret < 0) {
2731		btrfs_put_root(root);
2732		return ret;
2733	}
2734	if (ret > 0 && path->slots[level] > 0)
2735		path->slots[level]--;
2736
2737	eb = path->nodes[level];
2738	if (btrfs_node_blockptr(eb, path->slots[level]) != cur->bytenr) {
2739		btrfs_err(fs_info,
2740"couldn't find block (%llu) (level %d) in tree (%llu) with key (%llu %u %llu)",
2741			  cur->bytenr, level - 1, root->root_key.objectid,
2742			  tree_key->objectid, tree_key->type, tree_key->offset);
2743		btrfs_put_root(root);
2744		ret = -ENOENT;
2745		goto out;
2746	}
2747	lower = cur;
2748
2749	/* Add all nodes and edges in the path */
2750	for (; level < BTRFS_MAX_LEVEL; level++) {
2751		if (!path->nodes[level]) {
2752			ASSERT(btrfs_root_bytenr(&root->root_item) ==
2753			       lower->bytenr);
2754			/* Same as previous should_ignore_reloc_root() call */
2755			if (btrfs_should_ignore_reloc_root(root) &&
2756			    cache->is_reloc) {
2757				btrfs_put_root(root);
2758				list_add(&lower->list, &cache->useless_node);
2759			} else {
2760				lower->root = root;
2761			}
2762			break;
2763		}
2764
2765		edge = btrfs_backref_alloc_edge(cache);
2766		if (!edge) {
2767			btrfs_put_root(root);
2768			ret = -ENOMEM;
2769			goto out;
2770		}
2771
2772		eb = path->nodes[level];
2773		rb_node = rb_simple_search(&cache->rb_root, eb->start);
2774		if (!rb_node) {
2775			upper = btrfs_backref_alloc_node(cache, eb->start,
2776							 lower->level + 1);
2777			if (!upper) {
2778				btrfs_put_root(root);
2779				btrfs_backref_free_edge(cache, edge);
2780				ret = -ENOMEM;
2781				goto out;
2782			}
2783			upper->owner = btrfs_header_owner(eb);
2784			if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
2785				upper->cowonly = 1;
2786
2787			/*
2788			 * If we know the block isn't shared we can avoid
2789			 * checking its backrefs.
2790			 */
2791			if (btrfs_block_can_be_shared(root, eb))
2792				upper->checked = 0;
2793			else
2794				upper->checked = 1;
2795
2796			/*
2797			 * Add the block to pending list if we need to check its
2798			 * backrefs, we only do this once while walking up a
2799			 * tree as we will catch anything else later on.
2800			 */
2801			if (!upper->checked && need_check) {
2802				need_check = false;
2803				list_add_tail(&edge->list[UPPER],
2804					      &cache->pending_edge);
2805			} else {
2806				if (upper->checked)
2807					need_check = true;
2808				INIT_LIST_HEAD(&edge->list[UPPER]);
2809			}
2810		} else {
2811			upper = rb_entry(rb_node, struct btrfs_backref_node,
2812					 rb_node);
2813			ASSERT(upper->checked);
2814			INIT_LIST_HEAD(&edge->list[UPPER]);
2815			if (!upper->owner)
2816				upper->owner = btrfs_header_owner(eb);
2817		}
2818		btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
2819
2820		if (rb_node) {
2821			btrfs_put_root(root);
2822			break;
2823		}
2824		lower = upper;
2825		upper = NULL;
2826	}
2827out:
2828	btrfs_release_path(path);
2829	return ret;
2830}
2831
2832/*
2833 * Add backref node @cur into @cache.
2834 *
2835 * NOTE: Even if the function returned 0, @cur is not yet cached as its upper
2836 *	 links aren't yet bi-directional; use
2837 *	 btrfs_backref_finish_upper_links() to finish the linkage.
2838 *
 *
2839 * @path:	Released path for indirect tree backref lookup
2840 * @iter:	Released backref iter for extent tree search
2841 * @node_key:	The first key of the tree block
2842 */
2843int btrfs_backref_add_tree_node(struct btrfs_backref_cache *cache,
2844				struct btrfs_path *path,
2845				struct btrfs_backref_iter *iter,
2846				struct btrfs_key *node_key,
2847				struct btrfs_backref_node *cur)
2848{
2849	struct btrfs_fs_info *fs_info = cache->fs_info;
2850	struct btrfs_backref_edge *edge;
2851	struct btrfs_backref_node *exist;
2852	int ret;
2853
2854	ret = btrfs_backref_iter_start(iter, cur->bytenr);
2855	if (ret < 0)
2856		return ret;
2857	/*
2858	 * We skip the first btrfs_tree_block_info, as we don't use the key
2859	 * stored in it, but fetch it from the tree block
2860	 */
2861	if (btrfs_backref_has_tree_block_info(iter)) {
2862		ret = btrfs_backref_iter_next(iter);
2863		if (ret < 0)
2864			goto out;
2865		/* No extra backref? This means the tree block is corrupted */
2866		if (ret > 0) {
2867			ret = -EUCLEAN;
2868			goto out;
2869		}
2870	}
2871	WARN_ON(cur->checked);
2872	if (!list_empty(&cur->upper)) {
2873		/*
2874		 * The backref was added previously when processing backref of
2875		 * type BTRFS_TREE_BLOCK_REF_KEY
2876		 */
2877		ASSERT(list_is_singular(&cur->upper));
2878		edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
2879				  list[LOWER]);
2880		ASSERT(list_empty(&edge->list[UPPER]));
2881		exist = edge->node[UPPER];
2882		/*
2883		 * Add the upper level block to the pending list if we need to
2884		 * check its backrefs
2885		 */
2886		if (!exist->checked)
2887			list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2888	} else {
2889		exist = NULL;
2890	}
2891
2892	for (; ret == 0; ret = btrfs_backref_iter_next(iter)) {
2893		struct extent_buffer *eb;
2894		struct btrfs_key key;
2895		int type;
2896
2897		cond_resched();
2898		eb = btrfs_backref_get_eb(iter);
2899
2900		key.objectid = iter->bytenr;
2901		if (btrfs_backref_iter_is_inline_ref(iter)) {
2902			struct btrfs_extent_inline_ref *iref;
2903
2904			/* Update key for inline backref */
2905			iref = (struct btrfs_extent_inline_ref *)
2906				((unsigned long)iter->cur_ptr);
2907			type = btrfs_get_extent_inline_ref_type(eb, iref,
2908							BTRFS_REF_TYPE_BLOCK);
2909			if (type == BTRFS_REF_TYPE_INVALID) {
2910				ret = -EUCLEAN;
2911				goto out;
2912			}
2913			key.type = type;
2914			key.offset = btrfs_extent_inline_ref_offset(eb, iref);
2915		} else {
2916			key.type = iter->cur_key.type;
2917			key.offset = iter->cur_key.offset;
2918		}
2919
2920		/*
2921		 * Parent node found and matches current inline ref, no need to
2922		 * rebuild this node for this inline ref
2923		 */
2924		if (exist &&
2925		    ((key.type == BTRFS_TREE_BLOCK_REF_KEY &&
2926		      exist->owner == key.offset) ||
2927		     (key.type == BTRFS_SHARED_BLOCK_REF_KEY &&
2928		      exist->bytenr == key.offset))) {
2929			exist = NULL;
2930			continue;
2931		}
2932
2933		/* SHARED_BLOCK_REF means key.offset is the parent bytenr */
2934		if (key.type == BTRFS_SHARED_BLOCK_REF_KEY) {
2935			ret = handle_direct_tree_backref(cache, &key, cur);
2936			if (ret < 0)
2937				goto out;
2938			continue;
2939		} else if (unlikely(key.type == BTRFS_EXTENT_REF_V0_KEY)) {
2940			ret = -EINVAL;
2941			btrfs_print_v0_err(fs_info);
2942			btrfs_handle_fs_error(fs_info, ret, NULL);
2943			goto out;
2944		} else if (key.type != BTRFS_TREE_BLOCK_REF_KEY) {
2945			continue;
2946		}
2947
2948		/*
2949		 * key.type == BTRFS_TREE_BLOCK_REF_KEY, inline ref offset
2950		 * means the root objectid. We need to search the tree to get
2951		 * its parent bytenr.
2952		 */
2953		ret = handle_indirect_tree_backref(cache, path, &key, node_key,
2954						   cur);
2955		if (ret < 0)
2956			goto out;
2957	}
2958	ret = 0;
2959	cur->checked = 1;
2960	WARN_ON(exist);
2961out:
2962	btrfs_backref_iter_release(iter);
2963	return ret;
2964}
2965
2966/*
2967 * Finish the upwards linkage created by btrfs_backref_add_tree_node()
2968 */
2969int btrfs_backref_finish_upper_links(struct btrfs_backref_cache *cache,
2970				     struct btrfs_backref_node *start)
2971{
2972	struct list_head *useless_node = &cache->useless_node;
2973	struct btrfs_backref_edge *edge;
2974	struct rb_node *rb_node;
2975	LIST_HEAD(pending_edge);
2976
2977	ASSERT(start->checked);
2978
2979	/* Insert this node to cache if it's not COW-only */
2980	if (!start->cowonly) {
2981		rb_node = rb_simple_insert(&cache->rb_root, start->bytenr,
2982					   &start->rb_node);
2983		if (rb_node)
2984			btrfs_backref_panic(cache->fs_info, start->bytenr,
2985					    -EEXIST);
2986		list_add_tail(&start->lower, &cache->leaves);
2987	}
2988
2989	/*
2990	 * Use breadth first search to iterate all related edges.
2991	 *
2992	 * The starting points are all the edges of this node
2993	 */
2994	list_for_each_entry(edge, &start->upper, list[LOWER])
2995		list_add_tail(&edge->list[UPPER], &pending_edge);
2996
2997	while (!list_empty(&pending_edge)) {
2998		struct btrfs_backref_node *upper;
2999		struct btrfs_backref_node *lower;
3000		struct rb_node *rb_node;
3001
3002		edge = list_first_entry(&pending_edge,
3003				struct btrfs_backref_edge, list[UPPER]);
3004		list_del_init(&edge->list[UPPER]);
3005		upper = edge->node[UPPER];
3006		lower = edge->node[LOWER];
3007
3008		/* Parent is detached, no need to keep any edges */
3009		if (upper->detached) {
3010			list_del(&edge->list[LOWER]);
3011			btrfs_backref_free_edge(cache, edge);
3012
3013			/* Lower node is orphan, queue for cleanup */
3014			if (list_empty(&lower->upper))
3015				list_add(&lower->list, useless_node);
3016			continue;
3017		}
3018
3019		/*
3020		 * All new nodes added in current build_backref_tree() haven't
3021		 * been linked to the cache rb tree.
3022		 * So if we have upper->rb_node populated, this means a cache
3023		 * hit. We only need to link the edge, as @upper and all its
3024		 * parents have already been linked.
3025		 */
3026		if (!RB_EMPTY_NODE(&upper->rb_node)) {
3027			if (upper->lowest) {
3028				list_del_init(&upper->lower);
3029				upper->lowest = 0;
3030			}
3031
3032			list_add_tail(&edge->list[UPPER], &upper->lower);
3033			continue;
3034		}
3035
3036		/* Sanity check, we shouldn't have any unchecked nodes */
3037		if (!upper->checked) {
3038			ASSERT(0);
3039			return -EUCLEAN;
3040		}
3041
3042		/* Sanity check, COW-only node has non-COW-only parent */
3043		if (start->cowonly != upper->cowonly) {
3044			ASSERT(0);
3045			return -EUCLEAN;
3046		}
3047
3048		/* Only cache non-COW-only (subvolume trees) tree blocks */
3049		if (!upper->cowonly) {
3050			rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3051						   &upper->rb_node);
3052			if (rb_node) {
3053				btrfs_backref_panic(cache->fs_info,
3054						upper->bytenr, -EEXIST);
3055				return -EUCLEAN;
3056			}
3057		}
3058
3059		list_add_tail(&edge->list[UPPER], &upper->lower);
3060
3061		/*
3062		 * Also queue all the parent edges of this uncached node
3063		 * to finish the upper linkage
3064		 */
3065		list_for_each_entry(edge, &upper->upper, list[LOWER])
3066			list_add_tail(&edge->list[UPPER], &pending_edge);
3067	}
3068	return 0;
3069}
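
/*
 * Illustrative sketch (editorial addition): the two-phase pattern described
 * in the NOTE above btrfs_backref_add_tree_node(), mirroring how
 * build_backref_tree() in relocation.c drives these two helpers. The
 * example_* name is hypothetical.
 */
static int __maybe_unused example_build_node(struct btrfs_backref_cache *cache,
					     struct btrfs_path *path,
					     struct btrfs_backref_iter *iter,
					     struct btrfs_key *node_key,
					     struct btrfs_backref_node *cur)
{
	int ret;

	/* Phase one: collect backrefs, creating upper nodes and edges */
	ret = btrfs_backref_add_tree_node(cache, path, iter, node_key, cur);
	if (ret < 0)
		return ret;
	/* Phase two: make the collected upper links bi-directional */
	return btrfs_backref_finish_upper_links(cache, cur);
}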
3070
3071void btrfs_backref_error_cleanup(struct btrfs_backref_cache *cache,
3072				 struct btrfs_backref_node *node)
3073{
3074	struct btrfs_backref_node *lower;
3075	struct btrfs_backref_node *upper;
3076	struct btrfs_backref_edge *edge;
3077
3078	while (!list_empty(&cache->useless_node)) {
3079		lower = list_first_entry(&cache->useless_node,
3080				   struct btrfs_backref_node, list);
3081		list_del_init(&lower->list);
3082	}
3083	while (!list_empty(&cache->pending_edge)) {
3084		edge = list_first_entry(&cache->pending_edge,
3085				struct btrfs_backref_edge, list[UPPER]);
3086		list_del(&edge->list[UPPER]);
3087		list_del(&edge->list[LOWER]);
3088		lower = edge->node[LOWER];
3089		upper = edge->node[UPPER];
3090		btrfs_backref_free_edge(cache, edge);
3091
3092		/*
3093		 * Lower is no longer linked to any upper backref nodes and
3094		 * isn't in the cache, we can free it ourselves.
3095		 */
3096		if (list_empty(&lower->upper) &&
3097		    RB_EMPTY_NODE(&lower->rb_node))
3098			list_add(&lower->list, &cache->useless_node);
3099
3100		if (!RB_EMPTY_NODE(&upper->rb_node))
3101			continue;
3102
3103		/* Add this guy's upper edges to the list to process */
3104		list_for_each_entry(edge, &upper->upper, list[LOWER])
3105			list_add_tail(&edge->list[UPPER],
3106				      &cache->pending_edge);
3107		if (list_empty(&upper->upper))
3108			list_add(&upper->list, &cache->useless_node);
3109	}
3110
3111	while (!list_empty(&cache->useless_node)) {
3112		lower = list_first_entry(&cache->useless_node,
3113				   struct btrfs_backref_node, list);
3114		list_del_init(&lower->list);
3115		if (lower == node)
3116			node = NULL;
3117		btrfs_backref_free_node(cache, lower);
3118	}
3119
3120	btrfs_backref_cleanup_node(cache, node);
3121	ASSERT(list_empty(&cache->useless_node) &&
3122	       list_empty(&cache->pending_edge));
3123}