Linux Audio

Check our new training course

Loading...
Note: File does not exist in v3.1.
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 2012 Alexander Block.  All rights reserved.
   4 */
   5
   6#include <linux/bsearch.h>
   7#include <linux/fs.h>
   8#include <linux/file.h>
   9#include <linux/sort.h>
  10#include <linux/mount.h>
  11#include <linux/xattr.h>
  12#include <linux/posix_acl_xattr.h>
  13#include <linux/radix-tree.h>
  14#include <linux/vmalloc.h>
  15#include <linux/string.h>
  16#include <linux/compat.h>
  17#include <linux/crc32c.h>
  18
  19#include "send.h"
  20#include "backref.h"
  21#include "locking.h"
  22#include "disk-io.h"
  23#include "btrfs_inode.h"
  24#include "transaction.h"
  25#include "compression.h"
  26#include "xattr.h"
  27
  28/*
  29 * Maximum number of references an extent can have in order for us to attempt to
  30 * issue clone operations instead of write operations. This currently exists to
  31 * avoid hitting limitations of the backreference walking code (taking a lot of
  32 * time and using too much memory for extents with large number of references).
  33 */
  34#define SEND_MAX_EXTENT_REFS	64
  35
  36/*
  37 * A fs_path is a helper to dynamically build path names with unknown size.
  38 * It reallocates the internal buffer on demand.
  39 * It allows fast adding of path elements on the right side (normal path) and
  40 * fast adding to the left side (reversed path). A reversed path can also be
  41 * unreversed if needed.
  42 */
  43struct fs_path {
  44	union {
  45		struct {
  46			char *start;
  47			char *end;
  48
  49			char *buf;
  50			unsigned short buf_len:15;
  51			unsigned short reversed:1;
  52			char inline_buf[];
  53		};
  54		/*
  55		 * Average path length does not exceed 200 bytes, we'll have
  56		 * better packing in the slab and higher chance to satisfy
  57		 * a allocation later during send.
  58		 */
  59		char pad[256];
  60	};
  61};
  62#define FS_PATH_INLINE_SIZE \
  63	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
  64
  65
  66/* reused for each extent */
  67struct clone_root {
  68	struct btrfs_root *root;
  69	u64 ino;
  70	u64 offset;
  71
  72	u64 found_refs;
  73};
  74
  75#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
  76#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
  77
  78struct send_ctx {
  79	struct file *send_filp;
  80	loff_t send_off;
  81	char *send_buf;
  82	u32 send_size;
  83	u32 send_max_size;
  84	u64 total_send_size;
  85	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
  86	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */
  87
  88	struct btrfs_root *send_root;
  89	struct btrfs_root *parent_root;
  90	struct clone_root *clone_roots;
  91	int clone_roots_cnt;
  92
  93	/* current state of the compare_tree call */
  94	struct btrfs_path *left_path;
  95	struct btrfs_path *right_path;
  96	struct btrfs_key *cmp_key;
  97
  98	/*
  99	 * infos of the currently processed inode. In case of deleted inodes,
 100	 * these are the values from the deleted inode.
 101	 */
 102	u64 cur_ino;
 103	u64 cur_inode_gen;
 104	int cur_inode_new;
 105	int cur_inode_new_gen;
 106	int cur_inode_deleted;
 107	u64 cur_inode_size;
 108	u64 cur_inode_mode;
 109	u64 cur_inode_rdev;
 110	u64 cur_inode_last_extent;
 111	u64 cur_inode_next_write_offset;
 112	bool ignore_cur_inode;
 113
 114	u64 send_progress;
 115
 116	struct list_head new_refs;
 117	struct list_head deleted_refs;
 118
 119	struct radix_tree_root name_cache;
 120	struct list_head name_cache_list;
 121	int name_cache_size;
 122
 123	struct file_ra_state ra;
 124
 125	/*
 126	 * We process inodes by their increasing order, so if before an
 127	 * incremental send we reverse the parent/child relationship of
 128	 * directories such that a directory with a lower inode number was
 129	 * the parent of a directory with a higher inode number, and the one
 130	 * becoming the new parent got renamed too, we can't rename/move the
 131	 * directory with lower inode number when we finish processing it - we
 132	 * must process the directory with higher inode number first, then
 133	 * rename/move it and then rename/move the directory with lower inode
 134	 * number. Example follows.
 135	 *
 136	 * Tree state when the first send was performed:
 137	 *
 138	 * .
 139	 * |-- a                   (ino 257)
 140	 *     |-- b               (ino 258)
 141	 *         |
 142	 *         |
 143	 *         |-- c           (ino 259)
 144	 *         |   |-- d       (ino 260)
 145	 *         |
 146	 *         |-- c2          (ino 261)
 147	 *
 148	 * Tree state when the second (incremental) send is performed:
 149	 *
 150	 * .
 151	 * |-- a                   (ino 257)
 152	 *     |-- b               (ino 258)
 153	 *         |-- c2          (ino 261)
 154	 *             |-- d2      (ino 260)
 155	 *                 |-- cc  (ino 259)
 156	 *
 157	 * The sequence of steps that lead to the second state was:
 158	 *
 159	 * mv /a/b/c/d /a/b/c2/d2
 160	 * mv /a/b/c /a/b/c2/d2/cc
 161	 *
 162	 * "c" has lower inode number, but we can't move it (2nd mv operation)
 163	 * before we move "d", which has higher inode number.
 164	 *
 165	 * So we just memorize which move/rename operations must be performed
 166	 * later when their respective parent is processed and moved/renamed.
 167	 */
 168
 169	/* Indexed by parent directory inode number. */
 170	struct rb_root pending_dir_moves;
 171
 172	/*
 173	 * Reverse index, indexed by the inode number of a directory that
 174	 * is waiting for the move/rename of its immediate parent before its
 175	 * own move/rename can be performed.
 176	 */
 177	struct rb_root waiting_dir_moves;
 178
 179	/*
 180	 * A directory that is going to be rm'ed might have a child directory
 181	 * which is in the pending directory moves index above. In this case,
 182	 * the directory can only be removed after the move/rename of its child
 183	 * is performed. Example:
 184	 *
 185	 * Parent snapshot:
 186	 *
 187	 * .                        (ino 256)
 188	 * |-- a/                   (ino 257)
 189	 *     |-- b/               (ino 258)
 190	 *         |-- c/           (ino 259)
 191	 *         |   |-- x/       (ino 260)
 192	 *         |
 193	 *         |-- y/           (ino 261)
 194	 *
 195	 * Send snapshot:
 196	 *
 197	 * .                        (ino 256)
 198	 * |-- a/                   (ino 257)
 199	 *     |-- b/               (ino 258)
 200	 *         |-- YY/          (ino 261)
 201	 *              |-- x/      (ino 260)
 202	 *
 203	 * Sequence of steps that lead to the send snapshot:
 204	 * rm -f /a/b/c/foo.txt
 205	 * mv /a/b/y /a/b/YY
 206	 * mv /a/b/c/x /a/b/YY
 207	 * rmdir /a/b/c
 208	 *
 209	 * When the child is processed, its move/rename is delayed until its
 210	 * parent is processed (as explained above), but all other operations
 211	 * like update utimes, chown, chgrp, etc, are performed and the paths
 212	 * that it uses for those operations must use the orphanized name of
 213	 * its parent (the directory we're going to rm later), so we need to
 214	 * memorize that name.
 215	 *
 216	 * Indexed by the inode number of the directory to be deleted.
 217	 */
 218	struct rb_root orphan_dirs;
 219};
 220
 221struct pending_dir_move {
 222	struct rb_node node;
 223	struct list_head list;
 224	u64 parent_ino;
 225	u64 ino;
 226	u64 gen;
 227	struct list_head update_refs;
 228};
 229
 230struct waiting_dir_move {
 231	struct rb_node node;
 232	u64 ino;
 233	/*
 234	 * There might be some directory that could not be removed because it
 235	 * was waiting for this directory inode to be moved first. Therefore
 236	 * after this directory is moved, we can try to rmdir the ino rmdir_ino.
 237	 */
 238	u64 rmdir_ino;
 239	u64 rmdir_gen;
 240	bool orphanized;
 241};
 242
 243struct orphan_dir_info {
 244	struct rb_node node;
 245	u64 ino;
 246	u64 gen;
 247	u64 last_dir_index_offset;
 248};
 249
 250struct name_cache_entry {
 251	struct list_head list;
 252	/*
 253	 * radix_tree has only 32bit entries but we need to handle 64bit inums.
 254	 * We use the lower 32bit of the 64bit inum to store it in the tree. If
 255	 * more then one inum would fall into the same entry, we use radix_list
 256	 * to store the additional entries. radix_list is also used to store
 257	 * entries where two entries have the same inum but different
 258	 * generations.
 259	 */
 260	struct list_head radix_list;
 261	u64 ino;
 262	u64 gen;
 263	u64 parent_ino;
 264	u64 parent_gen;
 265	int ret;
 266	int need_later_update;
 267	int name_len;
 268	char name[];
 269};
 270
 271#define ADVANCE							1
 272#define ADVANCE_ONLY_NEXT					-1
 273
 274enum btrfs_compare_tree_result {
 275	BTRFS_COMPARE_TREE_NEW,
 276	BTRFS_COMPARE_TREE_DELETED,
 277	BTRFS_COMPARE_TREE_CHANGED,
 278	BTRFS_COMPARE_TREE_SAME,
 279};
 280
 281__cold
 282static void inconsistent_snapshot_error(struct send_ctx *sctx,
 283					enum btrfs_compare_tree_result result,
 284					const char *what)
 285{
 286	const char *result_string;
 287
 288	switch (result) {
 289	case BTRFS_COMPARE_TREE_NEW:
 290		result_string = "new";
 291		break;
 292	case BTRFS_COMPARE_TREE_DELETED:
 293		result_string = "deleted";
 294		break;
 295	case BTRFS_COMPARE_TREE_CHANGED:
 296		result_string = "updated";
 297		break;
 298	case BTRFS_COMPARE_TREE_SAME:
 299		ASSERT(0);
 300		result_string = "unchanged";
 301		break;
 302	default:
 303		ASSERT(0);
 304		result_string = "unexpected";
 305	}
 306
 307	btrfs_err(sctx->send_root->fs_info,
 308		  "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
 309		  result_string, what, sctx->cmp_key->objectid,
 310		  sctx->send_root->root_key.objectid,
 311		  (sctx->parent_root ?
 312		   sctx->parent_root->root_key.objectid : 0));
 313}
 314
 315static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
 316
 317static struct waiting_dir_move *
 318get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
 319
 320static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen);
 321
 322static int need_send_hole(struct send_ctx *sctx)
 323{
 324	return (sctx->parent_root && !sctx->cur_inode_new &&
 325		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
 326		S_ISREG(sctx->cur_inode_mode));
 327}
 328
 329static void fs_path_reset(struct fs_path *p)
 330{
 331	if (p->reversed) {
 332		p->start = p->buf + p->buf_len - 1;
 333		p->end = p->start;
 334		*p->start = 0;
 335	} else {
 336		p->start = p->buf;
 337		p->end = p->start;
 338		*p->start = 0;
 339	}
 340}
 341
 342static struct fs_path *fs_path_alloc(void)
 343{
 344	struct fs_path *p;
 345
 346	p = kmalloc(sizeof(*p), GFP_KERNEL);
 347	if (!p)
 348		return NULL;
 349	p->reversed = 0;
 350	p->buf = p->inline_buf;
 351	p->buf_len = FS_PATH_INLINE_SIZE;
 352	fs_path_reset(p);
 353	return p;
 354}
 355
 356static struct fs_path *fs_path_alloc_reversed(void)
 357{
 358	struct fs_path *p;
 359
 360	p = fs_path_alloc();
 361	if (!p)
 362		return NULL;
 363	p->reversed = 1;
 364	fs_path_reset(p);
 365	return p;
 366}
 367
 368static void fs_path_free(struct fs_path *p)
 369{
 370	if (!p)
 371		return;
 372	if (p->buf != p->inline_buf)
 373		kfree(p->buf);
 374	kfree(p);
 375}
 376
 377static int fs_path_len(struct fs_path *p)
 378{
 379	return p->end - p->start;
 380}
 381
 382static int fs_path_ensure_buf(struct fs_path *p, int len)
 383{
 384	char *tmp_buf;
 385	int path_len;
 386	int old_buf_len;
 387
 388	len++;
 389
 390	if (p->buf_len >= len)
 391		return 0;
 392
 393	if (len > PATH_MAX) {
 394		WARN_ON(1);
 395		return -ENOMEM;
 396	}
 397
 398	path_len = p->end - p->start;
 399	old_buf_len = p->buf_len;
 400
 401	/*
 402	 * First time the inline_buf does not suffice
 403	 */
 404	if (p->buf == p->inline_buf) {
 405		tmp_buf = kmalloc(len, GFP_KERNEL);
 406		if (tmp_buf)
 407			memcpy(tmp_buf, p->buf, old_buf_len);
 408	} else {
 409		tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
 410	}
 411	if (!tmp_buf)
 412		return -ENOMEM;
 413	p->buf = tmp_buf;
 414	/*
 415	 * The real size of the buffer is bigger, this will let the fast path
 416	 * happen most of the time
 417	 */
 418	p->buf_len = ksize(p->buf);
 419
 420	if (p->reversed) {
 421		tmp_buf = p->buf + old_buf_len - path_len - 1;
 422		p->end = p->buf + p->buf_len - 1;
 423		p->start = p->end - path_len;
 424		memmove(p->start, tmp_buf, path_len + 1);
 425	} else {
 426		p->start = p->buf;
 427		p->end = p->start + path_len;
 428	}
 429	return 0;
 430}
 431
 432static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
 433				   char **prepared)
 434{
 435	int ret;
 436	int new_len;
 437
 438	new_len = p->end - p->start + name_len;
 439	if (p->start != p->end)
 440		new_len++;
 441	ret = fs_path_ensure_buf(p, new_len);
 442	if (ret < 0)
 443		goto out;
 444
 445	if (p->reversed) {
 446		if (p->start != p->end)
 447			*--p->start = '/';
 448		p->start -= name_len;
 449		*prepared = p->start;
 450	} else {
 451		if (p->start != p->end)
 452			*p->end++ = '/';
 453		*prepared = p->end;
 454		p->end += name_len;
 455		*p->end = 0;
 456	}
 457
 458out:
 459	return ret;
 460}
 461
 462static int fs_path_add(struct fs_path *p, const char *name, int name_len)
 463{
 464	int ret;
 465	char *prepared;
 466
 467	ret = fs_path_prepare_for_add(p, name_len, &prepared);
 468	if (ret < 0)
 469		goto out;
 470	memcpy(prepared, name, name_len);
 471
 472out:
 473	return ret;
 474}
 475
 476static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
 477{
 478	int ret;
 479	char *prepared;
 480
 481	ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
 482	if (ret < 0)
 483		goto out;
 484	memcpy(prepared, p2->start, p2->end - p2->start);
 485
 486out:
 487	return ret;
 488}
 489
 490static int fs_path_add_from_extent_buffer(struct fs_path *p,
 491					  struct extent_buffer *eb,
 492					  unsigned long off, int len)
 493{
 494	int ret;
 495	char *prepared;
 496
 497	ret = fs_path_prepare_for_add(p, len, &prepared);
 498	if (ret < 0)
 499		goto out;
 500
 501	read_extent_buffer(eb, prepared, off, len);
 502
 503out:
 504	return ret;
 505}
 506
 507static int fs_path_copy(struct fs_path *p, struct fs_path *from)
 508{
 509	int ret;
 510
 511	p->reversed = from->reversed;
 512	fs_path_reset(p);
 513
 514	ret = fs_path_add_path(p, from);
 515
 516	return ret;
 517}
 518
 519
 520static void fs_path_unreverse(struct fs_path *p)
 521{
 522	char *tmp;
 523	int len;
 524
 525	if (!p->reversed)
 526		return;
 527
 528	tmp = p->start;
 529	len = p->end - p->start;
 530	p->start = p->buf;
 531	p->end = p->start + len;
 532	memmove(p->start, tmp, len + 1);
 533	p->reversed = 0;
 534}
 535
 536static struct btrfs_path *alloc_path_for_send(void)
 537{
 538	struct btrfs_path *path;
 539
 540	path = btrfs_alloc_path();
 541	if (!path)
 542		return NULL;
 543	path->search_commit_root = 1;
 544	path->skip_locking = 1;
 545	path->need_commit_sem = 1;
 546	return path;
 547}
 548
 549static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
 550{
 551	int ret;
 552	u32 pos = 0;
 553
 554	while (pos < len) {
 555		ret = kernel_write(filp, buf + pos, len - pos, off);
 556		/* TODO handle that correctly */
 557		/*if (ret == -ERESTARTSYS) {
 558			continue;
 559		}*/
 560		if (ret < 0)
 561			return ret;
 562		if (ret == 0) {
 563			return -EIO;
 564		}
 565		pos += ret;
 566	}
 567
 568	return 0;
 569}
 570
 571static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
 572{
 573	struct btrfs_tlv_header *hdr;
 574	int total_len = sizeof(*hdr) + len;
 575	int left = sctx->send_max_size - sctx->send_size;
 576
 577	if (unlikely(left < total_len))
 578		return -EOVERFLOW;
 579
 580	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
 581	put_unaligned_le16(attr, &hdr->tlv_type);
 582	put_unaligned_le16(len, &hdr->tlv_len);
 583	memcpy(hdr + 1, data, len);
 584	sctx->send_size += total_len;
 585
 586	return 0;
 587}
 588
 589#define TLV_PUT_DEFINE_INT(bits) \
 590	static int tlv_put_u##bits(struct send_ctx *sctx,	 	\
 591			u##bits attr, u##bits value)			\
 592	{								\
 593		__le##bits __tmp = cpu_to_le##bits(value);		\
 594		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
 595	}
 596
 597TLV_PUT_DEFINE_INT(64)
 598
 599static int tlv_put_string(struct send_ctx *sctx, u16 attr,
 600			  const char *str, int len)
 601{
 602	if (len == -1)
 603		len = strlen(str);
 604	return tlv_put(sctx, attr, str, len);
 605}
 606
 607static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
 608			const u8 *uuid)
 609{
 610	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
 611}
 612
 613static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
 614				  struct extent_buffer *eb,
 615				  struct btrfs_timespec *ts)
 616{
 617	struct btrfs_timespec bts;
 618	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
 619	return tlv_put(sctx, attr, &bts, sizeof(bts));
 620}
 621
 622
 623#define TLV_PUT(sctx, attrtype, data, attrlen) \
 624	do { \
 625		ret = tlv_put(sctx, attrtype, data, attrlen); \
 626		if (ret < 0) \
 627			goto tlv_put_failure; \
 628	} while (0)
 629
 630#define TLV_PUT_INT(sctx, attrtype, bits, value) \
 631	do { \
 632		ret = tlv_put_u##bits(sctx, attrtype, value); \
 633		if (ret < 0) \
 634			goto tlv_put_failure; \
 635	} while (0)
 636
 637#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
 638#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
 639#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
 640#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
 641#define TLV_PUT_STRING(sctx, attrtype, str, len) \
 642	do { \
 643		ret = tlv_put_string(sctx, attrtype, str, len); \
 644		if (ret < 0) \
 645			goto tlv_put_failure; \
 646	} while (0)
 647#define TLV_PUT_PATH(sctx, attrtype, p) \
 648	do { \
 649		ret = tlv_put_string(sctx, attrtype, p->start, \
 650			p->end - p->start); \
 651		if (ret < 0) \
 652			goto tlv_put_failure; \
 653	} while(0)
 654#define TLV_PUT_UUID(sctx, attrtype, uuid) \
 655	do { \
 656		ret = tlv_put_uuid(sctx, attrtype, uuid); \
 657		if (ret < 0) \
 658			goto tlv_put_failure; \
 659	} while (0)
 660#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
 661	do { \
 662		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
 663		if (ret < 0) \
 664			goto tlv_put_failure; \
 665	} while (0)
 666
 667static int send_header(struct send_ctx *sctx)
 668{
 669	struct btrfs_stream_header hdr;
 670
 671	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
 672	hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);
 673
 674	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
 675					&sctx->send_off);
 676}
 677
 678/*
 679 * For each command/item we want to send to userspace, we call this function.
 680 */
 681static int begin_cmd(struct send_ctx *sctx, int cmd)
 682{
 683	struct btrfs_cmd_header *hdr;
 684
 685	if (WARN_ON(!sctx->send_buf))
 686		return -EINVAL;
 687
 688	BUG_ON(sctx->send_size);
 689
 690	sctx->send_size += sizeof(*hdr);
 691	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
 692	put_unaligned_le16(cmd, &hdr->cmd);
 693
 694	return 0;
 695}
 696
 697static int send_cmd(struct send_ctx *sctx)
 698{
 699	int ret;
 700	struct btrfs_cmd_header *hdr;
 701	u32 crc;
 702
 703	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
 704	put_unaligned_le32(sctx->send_size - sizeof(*hdr), &hdr->len);
 705	put_unaligned_le32(0, &hdr->crc);
 706
 707	crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
 708	put_unaligned_le32(crc, &hdr->crc);
 709
 710	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
 711					&sctx->send_off);
 712
 713	sctx->total_send_size += sctx->send_size;
 714	sctx->cmd_send_size[get_unaligned_le16(&hdr->cmd)] += sctx->send_size;
 715	sctx->send_size = 0;
 716
 717	return ret;
 718}
 719
 720/*
 721 * Sends a move instruction to user space
 722 */
 723static int send_rename(struct send_ctx *sctx,
 724		     struct fs_path *from, struct fs_path *to)
 725{
 726	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
 727	int ret;
 728
 729	btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);
 730
 731	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
 732	if (ret < 0)
 733		goto out;
 734
 735	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
 736	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
 737
 738	ret = send_cmd(sctx);
 739
 740tlv_put_failure:
 741out:
 742	return ret;
 743}
 744
 745/*
 746 * Sends a link instruction to user space
 747 */
 748static int send_link(struct send_ctx *sctx,
 749		     struct fs_path *path, struct fs_path *lnk)
 750{
 751	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
 752	int ret;
 753
 754	btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);
 755
 756	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
 757	if (ret < 0)
 758		goto out;
 759
 760	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
 761	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);
 762
 763	ret = send_cmd(sctx);
 764
 765tlv_put_failure:
 766out:
 767	return ret;
 768}
 769
 770/*
 771 * Sends an unlink instruction to user space
 772 */
 773static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
 774{
 775	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
 776	int ret;
 777
 778	btrfs_debug(fs_info, "send_unlink %s", path->start);
 779
 780	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
 781	if (ret < 0)
 782		goto out;
 783
 784	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
 785
 786	ret = send_cmd(sctx);
 787
 788tlv_put_failure:
 789out:
 790	return ret;
 791}
 792
 793/*
 794 * Sends a rmdir instruction to user space
 795 */
 796static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
 797{
 798	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
 799	int ret;
 800
 801	btrfs_debug(fs_info, "send_rmdir %s", path->start);
 802
 803	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
 804	if (ret < 0)
 805		goto out;
 806
 807	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
 808
 809	ret = send_cmd(sctx);
 810
 811tlv_put_failure:
 812out:
 813	return ret;
 814}
 815
 816/*
 817 * Helper function to retrieve some fields from an inode item.
 818 */
 819static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
 820			  u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
 821			  u64 *gid, u64 *rdev)
 822{
 823	int ret;
 824	struct btrfs_inode_item *ii;
 825	struct btrfs_key key;
 826
 827	key.objectid = ino;
 828	key.type = BTRFS_INODE_ITEM_KEY;
 829	key.offset = 0;
 830	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 831	if (ret) {
 832		if (ret > 0)
 833			ret = -ENOENT;
 834		return ret;
 835	}
 836
 837	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
 838			struct btrfs_inode_item);
 839	if (size)
 840		*size = btrfs_inode_size(path->nodes[0], ii);
 841	if (gen)
 842		*gen = btrfs_inode_generation(path->nodes[0], ii);
 843	if (mode)
 844		*mode = btrfs_inode_mode(path->nodes[0], ii);
 845	if (uid)
 846		*uid = btrfs_inode_uid(path->nodes[0], ii);
 847	if (gid)
 848		*gid = btrfs_inode_gid(path->nodes[0], ii);
 849	if (rdev)
 850		*rdev = btrfs_inode_rdev(path->nodes[0], ii);
 851
 852	return ret;
 853}
 854
 855static int get_inode_info(struct btrfs_root *root,
 856			  u64 ino, u64 *size, u64 *gen,
 857			  u64 *mode, u64 *uid, u64 *gid,
 858			  u64 *rdev)
 859{
 860	struct btrfs_path *path;
 861	int ret;
 862
 863	path = alloc_path_for_send();
 864	if (!path)
 865		return -ENOMEM;
 866	ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
 867			       rdev);
 868	btrfs_free_path(path);
 869	return ret;
 870}
 871
 872typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
 873				   struct fs_path *p,
 874				   void *ctx);
 875
 876/*
 877 * Helper function to iterate the entries in ONE btrfs_inode_ref or
 878 * btrfs_inode_extref.
 879 * The iterate callback may return a non zero value to stop iteration. This can
 880 * be a negative value for error codes or 1 to simply stop it.
 881 *
 882 * path must point to the INODE_REF or INODE_EXTREF when called.
 883 */
 884static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
 885			     struct btrfs_key *found_key, int resolve,
 886			     iterate_inode_ref_t iterate, void *ctx)
 887{
 888	struct extent_buffer *eb = path->nodes[0];
 889	struct btrfs_item *item;
 890	struct btrfs_inode_ref *iref;
 891	struct btrfs_inode_extref *extref;
 892	struct btrfs_path *tmp_path;
 893	struct fs_path *p;
 894	u32 cur = 0;
 895	u32 total;
 896	int slot = path->slots[0];
 897	u32 name_len;
 898	char *start;
 899	int ret = 0;
 900	int num = 0;
 901	int index;
 902	u64 dir;
 903	unsigned long name_off;
 904	unsigned long elem_size;
 905	unsigned long ptr;
 906
 907	p = fs_path_alloc_reversed();
 908	if (!p)
 909		return -ENOMEM;
 910
 911	tmp_path = alloc_path_for_send();
 912	if (!tmp_path) {
 913		fs_path_free(p);
 914		return -ENOMEM;
 915	}
 916
 917
 918	if (found_key->type == BTRFS_INODE_REF_KEY) {
 919		ptr = (unsigned long)btrfs_item_ptr(eb, slot,
 920						    struct btrfs_inode_ref);
 921		item = btrfs_item_nr(slot);
 922		total = btrfs_item_size(eb, item);
 923		elem_size = sizeof(*iref);
 924	} else {
 925		ptr = btrfs_item_ptr_offset(eb, slot);
 926		total = btrfs_item_size_nr(eb, slot);
 927		elem_size = sizeof(*extref);
 928	}
 929
 930	while (cur < total) {
 931		fs_path_reset(p);
 932
 933		if (found_key->type == BTRFS_INODE_REF_KEY) {
 934			iref = (struct btrfs_inode_ref *)(ptr + cur);
 935			name_len = btrfs_inode_ref_name_len(eb, iref);
 936			name_off = (unsigned long)(iref + 1);
 937			index = btrfs_inode_ref_index(eb, iref);
 938			dir = found_key->offset;
 939		} else {
 940			extref = (struct btrfs_inode_extref *)(ptr + cur);
 941			name_len = btrfs_inode_extref_name_len(eb, extref);
 942			name_off = (unsigned long)&extref->name;
 943			index = btrfs_inode_extref_index(eb, extref);
 944			dir = btrfs_inode_extref_parent(eb, extref);
 945		}
 946
 947		if (resolve) {
 948			start = btrfs_ref_to_path(root, tmp_path, name_len,
 949						  name_off, eb, dir,
 950						  p->buf, p->buf_len);
 951			if (IS_ERR(start)) {
 952				ret = PTR_ERR(start);
 953				goto out;
 954			}
 955			if (start < p->buf) {
 956				/* overflow , try again with larger buffer */
 957				ret = fs_path_ensure_buf(p,
 958						p->buf_len + p->buf - start);
 959				if (ret < 0)
 960					goto out;
 961				start = btrfs_ref_to_path(root, tmp_path,
 962							  name_len, name_off,
 963							  eb, dir,
 964							  p->buf, p->buf_len);
 965				if (IS_ERR(start)) {
 966					ret = PTR_ERR(start);
 967					goto out;
 968				}
 969				BUG_ON(start < p->buf);
 970			}
 971			p->start = start;
 972		} else {
 973			ret = fs_path_add_from_extent_buffer(p, eb, name_off,
 974							     name_len);
 975			if (ret < 0)
 976				goto out;
 977		}
 978
 979		cur += elem_size + name_len;
 980		ret = iterate(num, dir, index, p, ctx);
 981		if (ret)
 982			goto out;
 983		num++;
 984	}
 985
 986out:
 987	btrfs_free_path(tmp_path);
 988	fs_path_free(p);
 989	return ret;
 990}
 991
 992typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
 993				  const char *name, int name_len,
 994				  const char *data, int data_len,
 995				  u8 type, void *ctx);
 996
 997/*
 998 * Helper function to iterate the entries in ONE btrfs_dir_item.
 999 * The iterate callback may return a non zero value to stop iteration. This can
1000 * be a negative value for error codes or 1 to simply stop it.
1001 *
1002 * path must point to the dir item when called.
1003 */
1004static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
1005			    iterate_dir_item_t iterate, void *ctx)
1006{
1007	int ret = 0;
1008	struct extent_buffer *eb;
1009	struct btrfs_item *item;
1010	struct btrfs_dir_item *di;
1011	struct btrfs_key di_key;
1012	char *buf = NULL;
1013	int buf_len;
1014	u32 name_len;
1015	u32 data_len;
1016	u32 cur;
1017	u32 len;
1018	u32 total;
1019	int slot;
1020	int num;
1021	u8 type;
1022
1023	/*
1024	 * Start with a small buffer (1 page). If later we end up needing more
1025	 * space, which can happen for xattrs on a fs with a leaf size greater
1026	 * then the page size, attempt to increase the buffer. Typically xattr
1027	 * values are small.
1028	 */
1029	buf_len = PATH_MAX;
1030	buf = kmalloc(buf_len, GFP_KERNEL);
1031	if (!buf) {
1032		ret = -ENOMEM;
1033		goto out;
1034	}
1035
1036	eb = path->nodes[0];
1037	slot = path->slots[0];
1038	item = btrfs_item_nr(slot);
1039	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
1040	cur = 0;
1041	len = 0;
1042	total = btrfs_item_size(eb, item);
1043
1044	num = 0;
1045	while (cur < total) {
1046		name_len = btrfs_dir_name_len(eb, di);
1047		data_len = btrfs_dir_data_len(eb, di);
1048		type = btrfs_dir_type(eb, di);
1049		btrfs_dir_item_key_to_cpu(eb, di, &di_key);
1050
1051		if (type == BTRFS_FT_XATTR) {
1052			if (name_len > XATTR_NAME_MAX) {
1053				ret = -ENAMETOOLONG;
1054				goto out;
1055			}
1056			if (name_len + data_len >
1057					BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
1058				ret = -E2BIG;
1059				goto out;
1060			}
1061		} else {
1062			/*
1063			 * Path too long
1064			 */
1065			if (name_len + data_len > PATH_MAX) {
1066				ret = -ENAMETOOLONG;
1067				goto out;
1068			}
1069		}
1070
1071		if (name_len + data_len > buf_len) {
1072			buf_len = name_len + data_len;
1073			if (is_vmalloc_addr(buf)) {
1074				vfree(buf);
1075				buf = NULL;
1076			} else {
1077				char *tmp = krealloc(buf, buf_len,
1078						GFP_KERNEL | __GFP_NOWARN);
1079
1080				if (!tmp)
1081					kfree(buf);
1082				buf = tmp;
1083			}
1084			if (!buf) {
1085				buf = kvmalloc(buf_len, GFP_KERNEL);
1086				if (!buf) {
1087					ret = -ENOMEM;
1088					goto out;
1089				}
1090			}
1091		}
1092
1093		read_extent_buffer(eb, buf, (unsigned long)(di + 1),
1094				name_len + data_len);
1095
1096		len = sizeof(*di) + name_len + data_len;
1097		di = (struct btrfs_dir_item *)((char *)di + len);
1098		cur += len;
1099
1100		ret = iterate(num, &di_key, buf, name_len, buf + name_len,
1101				data_len, type, ctx);
1102		if (ret < 0)
1103			goto out;
1104		if (ret) {
1105			ret = 0;
1106			goto out;
1107		}
1108
1109		num++;
1110	}
1111
1112out:
1113	kvfree(buf);
1114	return ret;
1115}
1116
1117static int __copy_first_ref(int num, u64 dir, int index,
1118			    struct fs_path *p, void *ctx)
1119{
1120	int ret;
1121	struct fs_path *pt = ctx;
1122
1123	ret = fs_path_copy(pt, p);
1124	if (ret < 0)
1125		return ret;
1126
1127	/* we want the first only */
1128	return 1;
1129}
1130
1131/*
1132 * Retrieve the first path of an inode. If an inode has more then one
1133 * ref/hardlink, this is ignored.
1134 */
1135static int get_inode_path(struct btrfs_root *root,
1136			  u64 ino, struct fs_path *path)
1137{
1138	int ret;
1139	struct btrfs_key key, found_key;
1140	struct btrfs_path *p;
1141
1142	p = alloc_path_for_send();
1143	if (!p)
1144		return -ENOMEM;
1145
1146	fs_path_reset(path);
1147
1148	key.objectid = ino;
1149	key.type = BTRFS_INODE_REF_KEY;
1150	key.offset = 0;
1151
1152	ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
1153	if (ret < 0)
1154		goto out;
1155	if (ret) {
1156		ret = 1;
1157		goto out;
1158	}
1159	btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
1160	if (found_key.objectid != ino ||
1161	    (found_key.type != BTRFS_INODE_REF_KEY &&
1162	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1163		ret = -ENOENT;
1164		goto out;
1165	}
1166
1167	ret = iterate_inode_ref(root, p, &found_key, 1,
1168				__copy_first_ref, path);
1169	if (ret < 0)
1170		goto out;
1171	ret = 0;
1172
1173out:
1174	btrfs_free_path(p);
1175	return ret;
1176}
1177
1178struct backref_ctx {
1179	struct send_ctx *sctx;
1180
1181	/* number of total found references */
1182	u64 found;
1183
1184	/*
1185	 * used for clones found in send_root. clones found behind cur_objectid
1186	 * and cur_offset are not considered as allowed clones.
1187	 */
1188	u64 cur_objectid;
1189	u64 cur_offset;
1190
1191	/* may be truncated in case it's the last extent in a file */
1192	u64 extent_len;
1193
1194	/* Just to check for bugs in backref resolving */
1195	int found_itself;
1196};
1197
1198static int __clone_root_cmp_bsearch(const void *key, const void *elt)
1199{
1200	u64 root = (u64)(uintptr_t)key;
1201	struct clone_root *cr = (struct clone_root *)elt;
1202
1203	if (root < cr->root->root_key.objectid)
1204		return -1;
1205	if (root > cr->root->root_key.objectid)
1206		return 1;
1207	return 0;
1208}
1209
1210static int __clone_root_cmp_sort(const void *e1, const void *e2)
1211{
1212	struct clone_root *cr1 = (struct clone_root *)e1;
1213	struct clone_root *cr2 = (struct clone_root *)e2;
1214
1215	if (cr1->root->root_key.objectid < cr2->root->root_key.objectid)
1216		return -1;
1217	if (cr1->root->root_key.objectid > cr2->root->root_key.objectid)
1218		return 1;
1219	return 0;
1220}
1221
1222/*
1223 * Called for every backref that is found for the current extent.
1224 * Results are collected in sctx->clone_roots->ino/offset/found_refs
1225 */
1226static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
1227{
1228	struct backref_ctx *bctx = ctx_;
1229	struct clone_root *found;
1230
1231	/* First check if the root is in the list of accepted clone sources */
1232	found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
1233			bctx->sctx->clone_roots_cnt,
1234			sizeof(struct clone_root),
1235			__clone_root_cmp_bsearch);
1236	if (!found)
1237		return 0;
1238
1239	if (found->root == bctx->sctx->send_root &&
1240	    ino == bctx->cur_objectid &&
1241	    offset == bctx->cur_offset) {
1242		bctx->found_itself = 1;
1243	}
1244
1245	/*
1246	 * Make sure we don't consider clones from send_root that are
1247	 * behind the current inode/offset.
1248	 */
1249	if (found->root == bctx->sctx->send_root) {
1250		/*
1251		 * If the source inode was not yet processed we can't issue a
1252		 * clone operation, as the source extent does not exist yet at
1253		 * the destination of the stream.
1254		 */
1255		if (ino > bctx->cur_objectid)
1256			return 0;
1257		/*
1258		 * We clone from the inode currently being sent as long as the
1259		 * source extent is already processed, otherwise we could try
1260		 * to clone from an extent that does not exist yet at the
1261		 * destination of the stream.
1262		 */
1263		if (ino == bctx->cur_objectid &&
1264		    offset + bctx->extent_len >
1265		    bctx->sctx->cur_inode_next_write_offset)
1266			return 0;
1267	}
1268
1269	bctx->found++;
1270	found->found_refs++;
1271	if (ino < found->ino) {
1272		found->ino = ino;
1273		found->offset = offset;
1274	} else if (found->ino == ino) {
1275		/*
1276		 * same extent found more then once in the same file.
1277		 */
1278		if (found->offset > offset + bctx->extent_len)
1279			found->offset = offset;
1280	}
1281
1282	return 0;
1283}
1284
1285/*
1286 * Given an inode, offset and extent item, it finds a good clone for a clone
1287 * instruction. Returns -ENOENT when none could be found. The function makes
1288 * sure that the returned clone is usable at the point where sending is at the
1289 * moment. This means, that no clones are accepted which lie behind the current
1290 * inode+offset.
1291 *
1292 * path must point to the extent item when called.
1293 */
1294static int find_extent_clone(struct send_ctx *sctx,
1295			     struct btrfs_path *path,
1296			     u64 ino, u64 data_offset,
1297			     u64 ino_size,
1298			     struct clone_root **found)
1299{
1300	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
1301	int ret;
1302	int extent_type;
1303	u64 logical;
1304	u64 disk_byte;
1305	u64 num_bytes;
1306	u64 extent_item_pos;
1307	u64 flags = 0;
1308	struct btrfs_file_extent_item *fi;
1309	struct extent_buffer *eb = path->nodes[0];
1310	struct backref_ctx *backref_ctx = NULL;
1311	struct clone_root *cur_clone_root;
1312	struct btrfs_key found_key;
1313	struct btrfs_path *tmp_path;
1314	struct btrfs_extent_item *ei;
1315	int compressed;
1316	u32 i;
1317
1318	tmp_path = alloc_path_for_send();
1319	if (!tmp_path)
1320		return -ENOMEM;
1321
1322	/* We only use this path under the commit sem */
1323	tmp_path->need_commit_sem = 0;
1324
1325	backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_KERNEL);
1326	if (!backref_ctx) {
1327		ret = -ENOMEM;
1328		goto out;
1329	}
1330
1331	if (data_offset >= ino_size) {
1332		/*
1333		 * There may be extents that lie behind the file's size.
1334		 * I at least had this in combination with snapshotting while
1335		 * writing large files.
1336		 */
1337		ret = 0;
1338		goto out;
1339	}
1340
1341	fi = btrfs_item_ptr(eb, path->slots[0],
1342			struct btrfs_file_extent_item);
1343	extent_type = btrfs_file_extent_type(eb, fi);
1344	if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1345		ret = -ENOENT;
1346		goto out;
1347	}
1348	compressed = btrfs_file_extent_compression(eb, fi);
1349
1350	num_bytes = btrfs_file_extent_num_bytes(eb, fi);
1351	disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
1352	if (disk_byte == 0) {
1353		ret = -ENOENT;
1354		goto out;
1355	}
1356	logical = disk_byte + btrfs_file_extent_offset(eb, fi);
1357
1358	down_read(&fs_info->commit_root_sem);
1359	ret = extent_from_logical(fs_info, disk_byte, tmp_path,
1360				  &found_key, &flags);
1361	up_read(&fs_info->commit_root_sem);
1362
1363	if (ret < 0)
1364		goto out;
1365	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1366		ret = -EIO;
1367		goto out;
1368	}
1369
1370	ei = btrfs_item_ptr(tmp_path->nodes[0], tmp_path->slots[0],
1371			    struct btrfs_extent_item);
1372	/*
1373	 * Backreference walking (iterate_extent_inodes() below) is currently
1374	 * too expensive when an extent has a large number of references, both
1375	 * in time spent and used memory. So for now just fallback to write
1376	 * operations instead of clone operations when an extent has more than
1377	 * a certain amount of references.
1378	 */
1379	if (btrfs_extent_refs(tmp_path->nodes[0], ei) > SEND_MAX_EXTENT_REFS) {
1380		ret = -ENOENT;
1381		goto out;
1382	}
1383	btrfs_release_path(tmp_path);
1384
1385	/*
1386	 * Setup the clone roots.
1387	 */
1388	for (i = 0; i < sctx->clone_roots_cnt; i++) {
1389		cur_clone_root = sctx->clone_roots + i;
1390		cur_clone_root->ino = (u64)-1;
1391		cur_clone_root->offset = 0;
1392		cur_clone_root->found_refs = 0;
1393	}
1394
1395	backref_ctx->sctx = sctx;
1396	backref_ctx->found = 0;
1397	backref_ctx->cur_objectid = ino;
1398	backref_ctx->cur_offset = data_offset;
1399	backref_ctx->found_itself = 0;
1400	backref_ctx->extent_len = num_bytes;
1401
1402	/*
1403	 * The last extent of a file may be too large due to page alignment.
1404	 * We need to adjust extent_len in this case so that the checks in
1405	 * __iterate_backrefs work.
1406	 */
1407	if (data_offset + num_bytes >= ino_size)
1408		backref_ctx->extent_len = ino_size - data_offset;
1409
1410	/*
1411	 * Now collect all backrefs.
1412	 */
1413	if (compressed == BTRFS_COMPRESS_NONE)
1414		extent_item_pos = logical - found_key.objectid;
1415	else
1416		extent_item_pos = 0;
1417	ret = iterate_extent_inodes(fs_info, found_key.objectid,
1418				    extent_item_pos, 1, __iterate_backrefs,
1419				    backref_ctx, false);
1420
1421	if (ret < 0)
1422		goto out;
1423
1424	if (!backref_ctx->found_itself) {
1425		/* found a bug in backref code? */
1426		ret = -EIO;
1427		btrfs_err(fs_info,
1428			  "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
1429			  ino, data_offset, disk_byte, found_key.objectid);
1430		goto out;
1431	}
1432
1433	btrfs_debug(fs_info,
1434		    "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
1435		    data_offset, ino, num_bytes, logical);
1436
1437	if (!backref_ctx->found)
1438		btrfs_debug(fs_info, "no clones found");
1439
1440	cur_clone_root = NULL;
1441	for (i = 0; i < sctx->clone_roots_cnt; i++) {
1442		if (sctx->clone_roots[i].found_refs) {
1443			if (!cur_clone_root)
1444				cur_clone_root = sctx->clone_roots + i;
1445			else if (sctx->clone_roots[i].root == sctx->send_root)
1446				/* prefer clones from send_root over others */
1447				cur_clone_root = sctx->clone_roots + i;
1448		}
1449
1450	}
1451
1452	if (cur_clone_root) {
1453		*found = cur_clone_root;
1454		ret = 0;
1455	} else {
1456		ret = -ENOENT;
1457	}
1458
1459out:
1460	btrfs_free_path(tmp_path);
1461	kfree(backref_ctx);
1462	return ret;
1463}
1464
1465static int read_symlink(struct btrfs_root *root,
1466			u64 ino,
1467			struct fs_path *dest)
1468{
1469	int ret;
1470	struct btrfs_path *path;
1471	struct btrfs_key key;
1472	struct btrfs_file_extent_item *ei;
1473	u8 type;
1474	u8 compression;
1475	unsigned long off;
1476	int len;
1477
1478	path = alloc_path_for_send();
1479	if (!path)
1480		return -ENOMEM;
1481
1482	key.objectid = ino;
1483	key.type = BTRFS_EXTENT_DATA_KEY;
1484	key.offset = 0;
1485	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1486	if (ret < 0)
1487		goto out;
1488	if (ret) {
1489		/*
1490		 * An empty symlink inode. Can happen in rare error paths when
1491		 * creating a symlink (transaction committed before the inode
1492		 * eviction handler removed the symlink inode items and a crash
1493		 * happened in between or the subvol was snapshoted in between).
1494		 * Print an informative message to dmesg/syslog so that the user
1495		 * can delete the symlink.
1496		 */
1497		btrfs_err(root->fs_info,
1498			  "Found empty symlink inode %llu at root %llu",
1499			  ino, root->root_key.objectid);
1500		ret = -EIO;
1501		goto out;
1502	}
1503
1504	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
1505			struct btrfs_file_extent_item);
1506	type = btrfs_file_extent_type(path->nodes[0], ei);
1507	compression = btrfs_file_extent_compression(path->nodes[0], ei);
1508	BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
1509	BUG_ON(compression);
1510
1511	off = btrfs_file_extent_inline_start(ei);
1512	len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
1513
1514	ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
1515
1516out:
1517	btrfs_free_path(path);
1518	return ret;
1519}
1520
1521/*
1522 * Helper function to generate a file name that is unique in the root of
1523 * send_root and parent_root. This is used to generate names for orphan inodes.
1524 */
1525static int gen_unique_name(struct send_ctx *sctx,
1526			   u64 ino, u64 gen,
1527			   struct fs_path *dest)
1528{
1529	int ret = 0;
1530	struct btrfs_path *path;
1531	struct btrfs_dir_item *di;
1532	char tmp[64];
1533	int len;
1534	u64 idx = 0;
1535
1536	path = alloc_path_for_send();
1537	if (!path)
1538		return -ENOMEM;
1539
1540	while (1) {
1541		len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
1542				ino, gen, idx);
1543		ASSERT(len < sizeof(tmp));
1544
1545		di = btrfs_lookup_dir_item(NULL, sctx->send_root,
1546				path, BTRFS_FIRST_FREE_OBJECTID,
1547				tmp, strlen(tmp), 0);
1548		btrfs_release_path(path);
1549		if (IS_ERR(di)) {
1550			ret = PTR_ERR(di);
1551			goto out;
1552		}
1553		if (di) {
1554			/* not unique, try again */
1555			idx++;
1556			continue;
1557		}
1558
1559		if (!sctx->parent_root) {
1560			/* unique */
1561			ret = 0;
1562			break;
1563		}
1564
1565		di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
1566				path, BTRFS_FIRST_FREE_OBJECTID,
1567				tmp, strlen(tmp), 0);
1568		btrfs_release_path(path);
1569		if (IS_ERR(di)) {
1570			ret = PTR_ERR(di);
1571			goto out;
1572		}
1573		if (di) {
1574			/* not unique, try again */
1575			idx++;
1576			continue;
1577		}
1578		/* unique */
1579		break;
1580	}
1581
1582	ret = fs_path_add(dest, tmp, strlen(tmp));
1583
1584out:
1585	btrfs_free_path(path);
1586	return ret;
1587}
1588
1589enum inode_state {
1590	inode_state_no_change,
1591	inode_state_will_create,
1592	inode_state_did_create,
1593	inode_state_will_delete,
1594	inode_state_did_delete,
1595};
1596
1597static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
1598{
1599	int ret;
1600	int left_ret;
1601	int right_ret;
1602	u64 left_gen;
1603	u64 right_gen;
1604
1605	ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
1606			NULL, NULL);
1607	if (ret < 0 && ret != -ENOENT)
1608		goto out;
1609	left_ret = ret;
1610
1611	if (!sctx->parent_root) {
1612		right_ret = -ENOENT;
1613	} else {
1614		ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
1615				NULL, NULL, NULL, NULL);
1616		if (ret < 0 && ret != -ENOENT)
1617			goto out;
1618		right_ret = ret;
1619	}
1620
1621	if (!left_ret && !right_ret) {
1622		if (left_gen == gen && right_gen == gen) {
1623			ret = inode_state_no_change;
1624		} else if (left_gen == gen) {
1625			if (ino < sctx->send_progress)
1626				ret = inode_state_did_create;
1627			else
1628				ret = inode_state_will_create;
1629		} else if (right_gen == gen) {
1630			if (ino < sctx->send_progress)
1631				ret = inode_state_did_delete;
1632			else
1633				ret = inode_state_will_delete;
1634		} else  {
1635			ret = -ENOENT;
1636		}
1637	} else if (!left_ret) {
1638		if (left_gen == gen) {
1639			if (ino < sctx->send_progress)
1640				ret = inode_state_did_create;
1641			else
1642				ret = inode_state_will_create;
1643		} else {
1644			ret = -ENOENT;
1645		}
1646	} else if (!right_ret) {
1647		if (right_gen == gen) {
1648			if (ino < sctx->send_progress)
1649				ret = inode_state_did_delete;
1650			else
1651				ret = inode_state_will_delete;
1652		} else {
1653			ret = -ENOENT;
1654		}
1655	} else {
1656		ret = -ENOENT;
1657	}
1658
1659out:
1660	return ret;
1661}
1662
1663static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
1664{
1665	int ret;
1666
1667	if (ino == BTRFS_FIRST_FREE_OBJECTID)
1668		return 1;
1669
1670	ret = get_cur_inode_state(sctx, ino, gen);
1671	if (ret < 0)
1672		goto out;
1673
1674	if (ret == inode_state_no_change ||
1675	    ret == inode_state_did_create ||
1676	    ret == inode_state_will_delete)
1677		ret = 1;
1678	else
1679		ret = 0;
1680
1681out:
1682	return ret;
1683}
1684
1685/*
1686 * Helper function to lookup a dir item in a dir.
1687 */
1688static int lookup_dir_item_inode(struct btrfs_root *root,
1689				 u64 dir, const char *name, int name_len,
1690				 u64 *found_inode,
1691				 u8 *found_type)
1692{
1693	int ret = 0;
1694	struct btrfs_dir_item *di;
1695	struct btrfs_key key;
1696	struct btrfs_path *path;
1697
1698	path = alloc_path_for_send();
1699	if (!path)
1700		return -ENOMEM;
1701
1702	di = btrfs_lookup_dir_item(NULL, root, path,
1703			dir, name, name_len, 0);
1704	if (IS_ERR_OR_NULL(di)) {
1705		ret = di ? PTR_ERR(di) : -ENOENT;
1706		goto out;
1707	}
1708	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
1709	if (key.type == BTRFS_ROOT_ITEM_KEY) {
1710		ret = -ENOENT;
1711		goto out;
1712	}
1713	*found_inode = key.objectid;
1714	*found_type = btrfs_dir_type(path->nodes[0], di);
1715
1716out:
1717	btrfs_free_path(path);
1718	return ret;
1719}
1720
1721/*
1722 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
1723 * generation of the parent dir and the name of the dir entry.
1724 */
1725static int get_first_ref(struct btrfs_root *root, u64 ino,
1726			 u64 *dir, u64 *dir_gen, struct fs_path *name)
1727{
1728	int ret;
1729	struct btrfs_key key;
1730	struct btrfs_key found_key;
1731	struct btrfs_path *path;
1732	int len;
1733	u64 parent_dir;
1734
1735	path = alloc_path_for_send();
1736	if (!path)
1737		return -ENOMEM;
1738
1739	key.objectid = ino;
1740	key.type = BTRFS_INODE_REF_KEY;
1741	key.offset = 0;
1742
1743	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
1744	if (ret < 0)
1745		goto out;
1746	if (!ret)
1747		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1748				path->slots[0]);
1749	if (ret || found_key.objectid != ino ||
1750	    (found_key.type != BTRFS_INODE_REF_KEY &&
1751	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1752		ret = -ENOENT;
1753		goto out;
1754	}
1755
1756	if (found_key.type == BTRFS_INODE_REF_KEY) {
1757		struct btrfs_inode_ref *iref;
1758		iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1759				      struct btrfs_inode_ref);
1760		len = btrfs_inode_ref_name_len(path->nodes[0], iref);
1761		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1762						     (unsigned long)(iref + 1),
1763						     len);
1764		parent_dir = found_key.offset;
1765	} else {
1766		struct btrfs_inode_extref *extref;
1767		extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1768					struct btrfs_inode_extref);
1769		len = btrfs_inode_extref_name_len(path->nodes[0], extref);
1770		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1771					(unsigned long)&extref->name, len);
1772		parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
1773	}
1774	if (ret < 0)
1775		goto out;
1776	btrfs_release_path(path);
1777
1778	if (dir_gen) {
1779		ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
1780				     NULL, NULL, NULL);
1781		if (ret < 0)
1782			goto out;
1783	}
1784
1785	*dir = parent_dir;
1786
1787out:
1788	btrfs_free_path(path);
1789	return ret;
1790}
1791
1792static int is_first_ref(struct btrfs_root *root,
1793			u64 ino, u64 dir,
1794			const char *name, int name_len)
1795{
1796	int ret;
1797	struct fs_path *tmp_name;
1798	u64 tmp_dir;
1799
1800	tmp_name = fs_path_alloc();
1801	if (!tmp_name)
1802		return -ENOMEM;
1803
1804	ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
1805	if (ret < 0)
1806		goto out;
1807
1808	if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
1809		ret = 0;
1810		goto out;
1811	}
1812
1813	ret = !memcmp(tmp_name->start, name, name_len);
1814
1815out:
1816	fs_path_free(tmp_name);
1817	return ret;
1818}
1819
1820/*
1821 * Used by process_recorded_refs to determine if a new ref would overwrite an
1822 * already existing ref. In case it detects an overwrite, it returns the
1823 * inode/gen in who_ino/who_gen.
1824 * When an overwrite is detected, process_recorded_refs does proper orphanizing
1825 * to make sure later references to the overwritten inode are possible.
1826 * Orphanizing is however only required for the first ref of an inode.
1827 * process_recorded_refs does an additional is_first_ref check to see if
1828 * orphanizing is really required.
1829 */
1830static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
1831			      const char *name, int name_len,
1832			      u64 *who_ino, u64 *who_gen, u64 *who_mode)
1833{
1834	int ret = 0;
1835	u64 gen;
1836	u64 other_inode = 0;
1837	u8 other_type = 0;
1838
1839	if (!sctx->parent_root)
1840		goto out;
1841
1842	ret = is_inode_existent(sctx, dir, dir_gen);
1843	if (ret <= 0)
1844		goto out;
1845
1846	/*
1847	 * If we have a parent root we need to verify that the parent dir was
1848	 * not deleted and then re-created, if it was then we have no overwrite
1849	 * and we can just unlink this entry.
1850	 */
1851	if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
1852		ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
1853				     NULL, NULL, NULL);
1854		if (ret < 0 && ret != -ENOENT)
1855			goto out;
1856		if (ret) {
1857			ret = 0;
1858			goto out;
1859		}
1860		if (gen != dir_gen)
1861			goto out;
1862	}
1863
1864	ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
1865			&other_inode, &other_type);
1866	if (ret < 0 && ret != -ENOENT)
1867		goto out;
1868	if (ret) {
1869		ret = 0;
1870		goto out;
1871	}
1872
1873	/*
1874	 * Check if the overwritten ref was already processed. If yes, the ref
1875	 * was already unlinked/moved, so we can safely assume that we will not
1876	 * overwrite anything at this point in time.
1877	 */
1878	if (other_inode > sctx->send_progress ||
1879	    is_waiting_for_move(sctx, other_inode)) {
1880		ret = get_inode_info(sctx->parent_root, other_inode, NULL,
1881				who_gen, who_mode, NULL, NULL, NULL);
1882		if (ret < 0)
1883			goto out;
1884
1885		ret = 1;
1886		*who_ino = other_inode;
1887	} else {
1888		ret = 0;
1889	}
1890
1891out:
1892	return ret;
1893}
1894
1895/*
1896 * Checks if the ref was overwritten by an already processed inode. This is
1897 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
1898 * thus the orphan name needs be used.
1899 * process_recorded_refs also uses it to avoid unlinking of refs that were
1900 * overwritten.
1901 */
1902static int did_overwrite_ref(struct send_ctx *sctx,
1903			    u64 dir, u64 dir_gen,
1904			    u64 ino, u64 ino_gen,
1905			    const char *name, int name_len)
1906{
1907	int ret = 0;
1908	u64 gen;
1909	u64 ow_inode;
1910	u8 other_type;
1911
1912	if (!sctx->parent_root)
1913		goto out;
1914
1915	ret = is_inode_existent(sctx, dir, dir_gen);
1916	if (ret <= 0)
1917		goto out;
1918
1919	if (dir != BTRFS_FIRST_FREE_OBJECTID) {
1920		ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL,
1921				     NULL, NULL, NULL);
1922		if (ret < 0 && ret != -ENOENT)
1923			goto out;
1924		if (ret) {
1925			ret = 0;
1926			goto out;
1927		}
1928		if (gen != dir_gen)
1929			goto out;
1930	}
1931
1932	/* check if the ref was overwritten by another ref */
1933	ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
1934			&ow_inode, &other_type);
1935	if (ret < 0 && ret != -ENOENT)
1936		goto out;
1937	if (ret) {
1938		/* was never and will never be overwritten */
1939		ret = 0;
1940		goto out;
1941	}
1942
1943	ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
1944			NULL, NULL);
1945	if (ret < 0)
1946		goto out;
1947
1948	if (ow_inode == ino && gen == ino_gen) {
1949		ret = 0;
1950		goto out;
1951	}
1952
1953	/*
1954	 * We know that it is or will be overwritten. Check this now.
1955	 * The current inode being processed might have been the one that caused
1956	 * inode 'ino' to be orphanized, therefore check if ow_inode matches
1957	 * the current inode being processed.
1958	 */
1959	if ((ow_inode < sctx->send_progress) ||
1960	    (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
1961	     gen == sctx->cur_inode_gen))
1962		ret = 1;
1963	else
1964		ret = 0;
1965
1966out:
1967	return ret;
1968}
1969
1970/*
1971 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
1972 * that got overwritten. This is used by process_recorded_refs to determine
1973 * if it has to use the path as returned by get_cur_path or the orphan name.
1974 */
1975static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
1976{
1977	int ret = 0;
1978	struct fs_path *name = NULL;
1979	u64 dir;
1980	u64 dir_gen;
1981
1982	if (!sctx->parent_root)
1983		goto out;
1984
1985	name = fs_path_alloc();
1986	if (!name)
1987		return -ENOMEM;
1988
1989	ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
1990	if (ret < 0)
1991		goto out;
1992
1993	ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
1994			name->start, fs_path_len(name));
1995
1996out:
1997	fs_path_free(name);
1998	return ret;
1999}
2000
2001/*
2002 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
2003 * so we need to do some special handling in case we have clashes. This function
2004 * takes care of this with the help of name_cache_entry::radix_list.
2005 * In case of error, nce is kfreed.
2006 */
2007static int name_cache_insert(struct send_ctx *sctx,
2008			     struct name_cache_entry *nce)
2009{
2010	int ret = 0;
2011	struct list_head *nce_head;
2012
2013	nce_head = radix_tree_lookup(&sctx->name_cache,
2014			(unsigned long)nce->ino);
2015	if (!nce_head) {
2016		nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
2017		if (!nce_head) {
2018			kfree(nce);
2019			return -ENOMEM;
2020		}
2021		INIT_LIST_HEAD(nce_head);
2022
2023		ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
2024		if (ret < 0) {
2025			kfree(nce_head);
2026			kfree(nce);
2027			return ret;
2028		}
2029	}
2030	list_add_tail(&nce->radix_list, nce_head);
2031	list_add_tail(&nce->list, &sctx->name_cache_list);
2032	sctx->name_cache_size++;
2033
2034	return ret;
2035}
2036
2037static void name_cache_delete(struct send_ctx *sctx,
2038			      struct name_cache_entry *nce)
2039{
2040	struct list_head *nce_head;
2041
2042	nce_head = radix_tree_lookup(&sctx->name_cache,
2043			(unsigned long)nce->ino);
2044	if (!nce_head) {
2045		btrfs_err(sctx->send_root->fs_info,
2046	      "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
2047			nce->ino, sctx->name_cache_size);
2048	}
2049
2050	list_del(&nce->radix_list);
2051	list_del(&nce->list);
2052	sctx->name_cache_size--;
2053
2054	/*
2055	 * We may not get to the final release of nce_head if the lookup fails
2056	 */
2057	if (nce_head && list_empty(nce_head)) {
2058		radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
2059		kfree(nce_head);
2060	}
2061}
2062
2063static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
2064						    u64 ino, u64 gen)
2065{
2066	struct list_head *nce_head;
2067	struct name_cache_entry *cur;
2068
2069	nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
2070	if (!nce_head)
2071		return NULL;
2072
2073	list_for_each_entry(cur, nce_head, radix_list) {
2074		if (cur->ino == ino && cur->gen == gen)
2075			return cur;
2076	}
2077	return NULL;
2078}
2079
2080/*
2081 * Remove some entries from the beginning of name_cache_list.
2082 */
2083static void name_cache_clean_unused(struct send_ctx *sctx)
2084{
2085	struct name_cache_entry *nce;
2086
2087	if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
2088		return;
2089
2090	while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
2091		nce = list_entry(sctx->name_cache_list.next,
2092				struct name_cache_entry, list);
2093		name_cache_delete(sctx, nce);
2094		kfree(nce);
2095	}
2096}
2097
2098static void name_cache_free(struct send_ctx *sctx)
2099{
2100	struct name_cache_entry *nce;
2101
2102	while (!list_empty(&sctx->name_cache_list)) {
2103		nce = list_entry(sctx->name_cache_list.next,
2104				struct name_cache_entry, list);
2105		name_cache_delete(sctx, nce);
2106		kfree(nce);
2107	}
2108}
2109
2110/*
2111 * Used by get_cur_path for each ref up to the root.
2112 * Returns 0 if it succeeded.
2113 * Returns 1 if the inode is not existent or got overwritten. In that case, the
2114 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
2115 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
2116 * Returns <0 in case of error.
2117 */
2118static int __get_cur_name_and_parent(struct send_ctx *sctx,
2119				     u64 ino, u64 gen,
2120				     u64 *parent_ino,
2121				     u64 *parent_gen,
2122				     struct fs_path *dest)
2123{
2124	int ret;
2125	int nce_ret;
2126	struct name_cache_entry *nce = NULL;
2127
2128	/*
2129	 * First check if we already did a call to this function with the same
2130	 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
2131	 * return the cached result.
2132	 */
2133	nce = name_cache_search(sctx, ino, gen);
2134	if (nce) {
2135		if (ino < sctx->send_progress && nce->need_later_update) {
2136			name_cache_delete(sctx, nce);
2137			kfree(nce);
2138			nce = NULL;
2139		} else {
2140			/*
2141			 * Removes the entry from the list and adds it back to
2142			 * the end.  This marks the entry as recently used so
2143			 * that name_cache_clean_unused does not remove it.
2144			 */
2145			list_move_tail(&nce->list, &sctx->name_cache_list);
2146
2147			*parent_ino = nce->parent_ino;
2148			*parent_gen = nce->parent_gen;
2149			ret = fs_path_add(dest, nce->name, nce->name_len);
2150			if (ret < 0)
2151				goto out;
2152			ret = nce->ret;
2153			goto out;
2154		}
2155	}
2156
2157	/*
2158	 * If the inode is not existent yet, add the orphan name and return 1.
2159	 * This should only happen for the parent dir that we determine in
2160	 * __record_new_ref
2161	 */
2162	ret = is_inode_existent(sctx, ino, gen);
2163	if (ret < 0)
2164		goto out;
2165
2166	if (!ret) {
2167		ret = gen_unique_name(sctx, ino, gen, dest);
2168		if (ret < 0)
2169			goto out;
2170		ret = 1;
2171		goto out_cache;
2172	}
2173
2174	/*
2175	 * Depending on whether the inode was already processed or not, use
2176	 * send_root or parent_root for ref lookup.
2177	 */
2178	if (ino < sctx->send_progress)
2179		ret = get_first_ref(sctx->send_root, ino,
2180				    parent_ino, parent_gen, dest);
2181	else
2182		ret = get_first_ref(sctx->parent_root, ino,
2183				    parent_ino, parent_gen, dest);
2184	if (ret < 0)
2185		goto out;
2186
2187	/*
2188	 * Check if the ref was overwritten by an inode's ref that was processed
2189	 * earlier. If yes, treat as orphan and return 1.
2190	 */
2191	ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
2192			dest->start, dest->end - dest->start);
2193	if (ret < 0)
2194		goto out;
2195	if (ret) {
2196		fs_path_reset(dest);
2197		ret = gen_unique_name(sctx, ino, gen, dest);
2198		if (ret < 0)
2199			goto out;
2200		ret = 1;
2201	}
2202
2203out_cache:
2204	/*
2205	 * Store the result of the lookup in the name cache.
2206	 */
2207	nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
2208	if (!nce) {
2209		ret = -ENOMEM;
2210		goto out;
2211	}
2212
2213	nce->ino = ino;
2214	nce->gen = gen;
2215	nce->parent_ino = *parent_ino;
2216	nce->parent_gen = *parent_gen;
2217	nce->name_len = fs_path_len(dest);
2218	nce->ret = ret;
2219	strcpy(nce->name, dest->start);
2220
2221	if (ino < sctx->send_progress)
2222		nce->need_later_update = 0;
2223	else
2224		nce->need_later_update = 1;
2225
2226	nce_ret = name_cache_insert(sctx, nce);
2227	if (nce_ret < 0)
2228		ret = nce_ret;
2229	name_cache_clean_unused(sctx);
2230
2231out:
2232	return ret;
2233}
2234
2235/*
2236 * Magic happens here. This function returns the first ref to an inode as it
2237 * would look like while receiving the stream at this point in time.
2238 * We walk the path up to the root. For every inode in between, we check if it
2239 * was already processed/sent. If yes, we continue with the parent as found
2240 * in send_root. If not, we continue with the parent as found in parent_root.
2241 * If we encounter an inode that was deleted at this point in time, we use the
2242 * inodes "orphan" name instead of the real name and stop. Same with new inodes
2243 * that were not created yet and overwritten inodes/refs.
2244 *
2245 * When do we have orphan inodes:
2246 * 1. When an inode is freshly created and thus no valid refs are available yet
2247 * 2. When a directory lost all it's refs (deleted) but still has dir items
2248 *    inside which were not processed yet (pending for move/delete). If anyone
2249 *    tried to get the path to the dir items, it would get a path inside that
2250 *    orphan directory.
2251 * 3. When an inode is moved around or gets new links, it may overwrite the ref
2252 *    of an unprocessed inode. If in that case the first ref would be
2253 *    overwritten, the overwritten inode gets "orphanized". Later when we
2254 *    process this overwritten inode, it is restored at a new place by moving
2255 *    the orphan inode.
2256 *
2257 * sctx->send_progress tells this function at which point in time receiving
2258 * would be.
2259 */
2260static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
2261			struct fs_path *dest)
2262{
2263	int ret = 0;
2264	struct fs_path *name = NULL;
2265	u64 parent_inode = 0;
2266	u64 parent_gen = 0;
2267	int stop = 0;
2268
2269	name = fs_path_alloc();
2270	if (!name) {
2271		ret = -ENOMEM;
2272		goto out;
2273	}
2274
2275	dest->reversed = 1;
2276	fs_path_reset(dest);
2277
2278	while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
2279		struct waiting_dir_move *wdm;
2280
2281		fs_path_reset(name);
2282
2283		if (is_waiting_for_rm(sctx, ino, gen)) {
2284			ret = gen_unique_name(sctx, ino, gen, name);
2285			if (ret < 0)
2286				goto out;
2287			ret = fs_path_add_path(dest, name);
2288			break;
2289		}
2290
2291		wdm = get_waiting_dir_move(sctx, ino);
2292		if (wdm && wdm->orphanized) {
2293			ret = gen_unique_name(sctx, ino, gen, name);
2294			stop = 1;
2295		} else if (wdm) {
2296			ret = get_first_ref(sctx->parent_root, ino,
2297					    &parent_inode, &parent_gen, name);
2298		} else {
2299			ret = __get_cur_name_and_parent(sctx, ino, gen,
2300							&parent_inode,
2301							&parent_gen, name);
2302			if (ret)
2303				stop = 1;
2304		}
2305
2306		if (ret < 0)
2307			goto out;
2308
2309		ret = fs_path_add_path(dest, name);
2310		if (ret < 0)
2311			goto out;
2312
2313		ino = parent_inode;
2314		gen = parent_gen;
2315	}
2316
2317out:
2318	fs_path_free(name);
2319	if (!ret)
2320		fs_path_unreverse(dest);
2321	return ret;
2322}
2323
2324/*
2325 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
2326 */
2327static int send_subvol_begin(struct send_ctx *sctx)
2328{
2329	int ret;
2330	struct btrfs_root *send_root = sctx->send_root;
2331	struct btrfs_root *parent_root = sctx->parent_root;
2332	struct btrfs_path *path;
2333	struct btrfs_key key;
2334	struct btrfs_root_ref *ref;
2335	struct extent_buffer *leaf;
2336	char *name = NULL;
2337	int namelen;
2338
2339	path = btrfs_alloc_path();
2340	if (!path)
2341		return -ENOMEM;
2342
2343	name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
2344	if (!name) {
2345		btrfs_free_path(path);
2346		return -ENOMEM;
2347	}
2348
2349	key.objectid = send_root->root_key.objectid;
2350	key.type = BTRFS_ROOT_BACKREF_KEY;
2351	key.offset = 0;
2352
2353	ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
2354				&key, path, 1, 0);
2355	if (ret < 0)
2356		goto out;
2357	if (ret) {
2358		ret = -ENOENT;
2359		goto out;
2360	}
2361
2362	leaf = path->nodes[0];
2363	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2364	if (key.type != BTRFS_ROOT_BACKREF_KEY ||
2365	    key.objectid != send_root->root_key.objectid) {
2366		ret = -ENOENT;
2367		goto out;
2368	}
2369	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
2370	namelen = btrfs_root_ref_name_len(leaf, ref);
2371	read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
2372	btrfs_release_path(path);
2373
2374	if (parent_root) {
2375		ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
2376		if (ret < 0)
2377			goto out;
2378	} else {
2379		ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
2380		if (ret < 0)
2381			goto out;
2382	}
2383
2384	TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
2385
2386	if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
2387		TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2388			    sctx->send_root->root_item.received_uuid);
2389	else
2390		TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2391			    sctx->send_root->root_item.uuid);
2392
2393	TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
2394		    btrfs_root_ctransid(&sctx->send_root->root_item));
2395	if (parent_root) {
2396		if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
2397			TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2398				     parent_root->root_item.received_uuid);
2399		else
2400			TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2401				     parent_root->root_item.uuid);
2402		TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
2403			    btrfs_root_ctransid(&sctx->parent_root->root_item));
2404	}
2405
2406	ret = send_cmd(sctx);
2407
2408tlv_put_failure:
2409out:
2410	btrfs_free_path(path);
2411	kfree(name);
2412	return ret;
2413}
2414
2415static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
2416{
2417	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2418	int ret = 0;
2419	struct fs_path *p;
2420
2421	btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
2422
2423	p = fs_path_alloc();
2424	if (!p)
2425		return -ENOMEM;
2426
2427	ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
2428	if (ret < 0)
2429		goto out;
2430
2431	ret = get_cur_path(sctx, ino, gen, p);
2432	if (ret < 0)
2433		goto out;
2434	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2435	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
2436
2437	ret = send_cmd(sctx);
2438
2439tlv_put_failure:
2440out:
2441	fs_path_free(p);
2442	return ret;
2443}
2444
2445static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
2446{
2447	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2448	int ret = 0;
2449	struct fs_path *p;
2450
2451	btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
2452
2453	p = fs_path_alloc();
2454	if (!p)
2455		return -ENOMEM;
2456
2457	ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
2458	if (ret < 0)
2459		goto out;
2460
2461	ret = get_cur_path(sctx, ino, gen, p);
2462	if (ret < 0)
2463		goto out;
2464	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2465	TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
2466
2467	ret = send_cmd(sctx);
2468
2469tlv_put_failure:
2470out:
2471	fs_path_free(p);
2472	return ret;
2473}
2474
2475static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
2476{
2477	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2478	int ret = 0;
2479	struct fs_path *p;
2480
2481	btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
2482		    ino, uid, gid);
2483
2484	p = fs_path_alloc();
2485	if (!p)
2486		return -ENOMEM;
2487
2488	ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
2489	if (ret < 0)
2490		goto out;
2491
2492	ret = get_cur_path(sctx, ino, gen, p);
2493	if (ret < 0)
2494		goto out;
2495	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2496	TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
2497	TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
2498
2499	ret = send_cmd(sctx);
2500
2501tlv_put_failure:
2502out:
2503	fs_path_free(p);
2504	return ret;
2505}
2506
2507static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
2508{
2509	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2510	int ret = 0;
2511	struct fs_path *p = NULL;
2512	struct btrfs_inode_item *ii;
2513	struct btrfs_path *path = NULL;
2514	struct extent_buffer *eb;
2515	struct btrfs_key key;
2516	int slot;
2517
2518	btrfs_debug(fs_info, "send_utimes %llu", ino);
2519
2520	p = fs_path_alloc();
2521	if (!p)
2522		return -ENOMEM;
2523
2524	path = alloc_path_for_send();
2525	if (!path) {
2526		ret = -ENOMEM;
2527		goto out;
2528	}
2529
2530	key.objectid = ino;
2531	key.type = BTRFS_INODE_ITEM_KEY;
2532	key.offset = 0;
2533	ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2534	if (ret > 0)
2535		ret = -ENOENT;
2536	if (ret < 0)
2537		goto out;
2538
2539	eb = path->nodes[0];
2540	slot = path->slots[0];
2541	ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
2542
2543	ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
2544	if (ret < 0)
2545		goto out;
2546
2547	ret = get_cur_path(sctx, ino, gen, p);
2548	if (ret < 0)
2549		goto out;
2550	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2551	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
2552	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
2553	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
2554	/* TODO Add otime support when the otime patches get into upstream */
2555
2556	ret = send_cmd(sctx);
2557
2558tlv_put_failure:
2559out:
2560	fs_path_free(p);
2561	btrfs_free_path(path);
2562	return ret;
2563}
2564
2565/*
2566 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
2567 * a valid path yet because we did not process the refs yet. So, the inode
2568 * is created as orphan.
2569 */
2570static int send_create_inode(struct send_ctx *sctx, u64 ino)
2571{
2572	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2573	int ret = 0;
2574	struct fs_path *p;
2575	int cmd;
2576	u64 gen;
2577	u64 mode;
2578	u64 rdev;
2579
2580	btrfs_debug(fs_info, "send_create_inode %llu", ino);
2581
2582	p = fs_path_alloc();
2583	if (!p)
2584		return -ENOMEM;
2585
2586	if (ino != sctx->cur_ino) {
2587		ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
2588				     NULL, NULL, &rdev);
2589		if (ret < 0)
2590			goto out;
2591	} else {
2592		gen = sctx->cur_inode_gen;
2593		mode = sctx->cur_inode_mode;
2594		rdev = sctx->cur_inode_rdev;
2595	}
2596
2597	if (S_ISREG(mode)) {
2598		cmd = BTRFS_SEND_C_MKFILE;
2599	} else if (S_ISDIR(mode)) {
2600		cmd = BTRFS_SEND_C_MKDIR;
2601	} else if (S_ISLNK(mode)) {
2602		cmd = BTRFS_SEND_C_SYMLINK;
2603	} else if (S_ISCHR(mode) || S_ISBLK(mode)) {
2604		cmd = BTRFS_SEND_C_MKNOD;
2605	} else if (S_ISFIFO(mode)) {
2606		cmd = BTRFS_SEND_C_MKFIFO;
2607	} else if (S_ISSOCK(mode)) {
2608		cmd = BTRFS_SEND_C_MKSOCK;
2609	} else {
2610		btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
2611				(int)(mode & S_IFMT));
2612		ret = -EOPNOTSUPP;
2613		goto out;
2614	}
2615
2616	ret = begin_cmd(sctx, cmd);
2617	if (ret < 0)
2618		goto out;
2619
2620	ret = gen_unique_name(sctx, ino, gen, p);
2621	if (ret < 0)
2622		goto out;
2623
2624	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2625	TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
2626
2627	if (S_ISLNK(mode)) {
2628		fs_path_reset(p);
2629		ret = read_symlink(sctx->send_root, ino, p);
2630		if (ret < 0)
2631			goto out;
2632		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
2633	} else if (S_ISCHR(mode) || S_ISBLK(mode) ||
2634		   S_ISFIFO(mode) || S_ISSOCK(mode)) {
2635		TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
2636		TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
2637	}
2638
2639	ret = send_cmd(sctx);
2640	if (ret < 0)
2641		goto out;
2642
2643
2644tlv_put_failure:
2645out:
2646	fs_path_free(p);
2647	return ret;
2648}
2649
2650/*
2651 * We need some special handling for inodes that get processed before the parent
2652 * directory got created. See process_recorded_refs for details.
2653 * This function does the check if we already created the dir out of order.
2654 */
2655static int did_create_dir(struct send_ctx *sctx, u64 dir)
2656{
2657	int ret = 0;
2658	struct btrfs_path *path = NULL;
2659	struct btrfs_key key;
2660	struct btrfs_key found_key;
2661	struct btrfs_key di_key;
2662	struct extent_buffer *eb;
2663	struct btrfs_dir_item *di;
2664	int slot;
2665
2666	path = alloc_path_for_send();
2667	if (!path) {
2668		ret = -ENOMEM;
2669		goto out;
2670	}
2671
2672	key.objectid = dir;
2673	key.type = BTRFS_DIR_INDEX_KEY;
2674	key.offset = 0;
2675	ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2676	if (ret < 0)
2677		goto out;
2678
2679	while (1) {
2680		eb = path->nodes[0];
2681		slot = path->slots[0];
2682		if (slot >= btrfs_header_nritems(eb)) {
2683			ret = btrfs_next_leaf(sctx->send_root, path);
2684			if (ret < 0) {
2685				goto out;
2686			} else if (ret > 0) {
2687				ret = 0;
2688				break;
2689			}
2690			continue;
2691		}
2692
2693		btrfs_item_key_to_cpu(eb, &found_key, slot);
2694		if (found_key.objectid != key.objectid ||
2695		    found_key.type != key.type) {
2696			ret = 0;
2697			goto out;
2698		}
2699
2700		di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
2701		btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2702
2703		if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
2704		    di_key.objectid < sctx->send_progress) {
2705			ret = 1;
2706			goto out;
2707		}
2708
2709		path->slots[0]++;
2710	}
2711
2712out:
2713	btrfs_free_path(path);
2714	return ret;
2715}
2716
2717/*
2718 * Only creates the inode if it is:
2719 * 1. Not a directory
2720 * 2. Or a directory which was not created already due to out of order
2721 *    directories. See did_create_dir and process_recorded_refs for details.
2722 */
2723static int send_create_inode_if_needed(struct send_ctx *sctx)
2724{
2725	int ret;
2726
2727	if (S_ISDIR(sctx->cur_inode_mode)) {
2728		ret = did_create_dir(sctx, sctx->cur_ino);
2729		if (ret < 0)
2730			goto out;
2731		if (ret) {
2732			ret = 0;
2733			goto out;
2734		}
2735	}
2736
2737	ret = send_create_inode(sctx, sctx->cur_ino);
2738	if (ret < 0)
2739		goto out;
2740
2741out:
2742	return ret;
2743}
2744
2745struct recorded_ref {
2746	struct list_head list;
2747	char *name;
2748	struct fs_path *full_path;
2749	u64 dir;
2750	u64 dir_gen;
2751	int name_len;
2752};
2753
2754static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
2755{
2756	ref->full_path = path;
2757	ref->name = (char *)kbasename(ref->full_path->start);
2758	ref->name_len = ref->full_path->end - ref->name;
2759}
2760
2761/*
2762 * We need to process new refs before deleted refs, but compare_tree gives us
2763 * everything mixed. So we first record all refs and later process them.
2764 * This function is a helper to record one ref.
2765 */
2766static int __record_ref(struct list_head *head, u64 dir,
2767		      u64 dir_gen, struct fs_path *path)
2768{
2769	struct recorded_ref *ref;
2770
2771	ref = kmalloc(sizeof(*ref), GFP_KERNEL);
2772	if (!ref)
2773		return -ENOMEM;
2774
2775	ref->dir = dir;
2776	ref->dir_gen = dir_gen;
2777	set_ref_path(ref, path);
2778	list_add_tail(&ref->list, head);
2779	return 0;
2780}
2781
2782static int dup_ref(struct recorded_ref *ref, struct list_head *list)
2783{
2784	struct recorded_ref *new;
2785
2786	new = kmalloc(sizeof(*ref), GFP_KERNEL);
2787	if (!new)
2788		return -ENOMEM;
2789
2790	new->dir = ref->dir;
2791	new->dir_gen = ref->dir_gen;
2792	new->full_path = NULL;
2793	INIT_LIST_HEAD(&new->list);
2794	list_add_tail(&new->list, list);
2795	return 0;
2796}
2797
2798static void __free_recorded_refs(struct list_head *head)
2799{
2800	struct recorded_ref *cur;
2801
2802	while (!list_empty(head)) {
2803		cur = list_entry(head->next, struct recorded_ref, list);
2804		fs_path_free(cur->full_path);
2805		list_del(&cur->list);
2806		kfree(cur);
2807	}
2808}
2809
2810static void free_recorded_refs(struct send_ctx *sctx)
2811{
2812	__free_recorded_refs(&sctx->new_refs);
2813	__free_recorded_refs(&sctx->deleted_refs);
2814}
2815
2816/*
2817 * Renames/moves a file/dir to its orphan name. Used when the first
2818 * ref of an unprocessed inode gets overwritten and for all non empty
2819 * directories.
2820 */
2821static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
2822			  struct fs_path *path)
2823{
2824	int ret;
2825	struct fs_path *orphan;
2826
2827	orphan = fs_path_alloc();
2828	if (!orphan)
2829		return -ENOMEM;
2830
2831	ret = gen_unique_name(sctx, ino, gen, orphan);
2832	if (ret < 0)
2833		goto out;
2834
2835	ret = send_rename(sctx, path, orphan);
2836
2837out:
2838	fs_path_free(orphan);
2839	return ret;
2840}
2841
2842static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx,
2843						   u64 dir_ino, u64 dir_gen)
2844{
2845	struct rb_node **p = &sctx->orphan_dirs.rb_node;
2846	struct rb_node *parent = NULL;
2847	struct orphan_dir_info *entry, *odi;
2848
2849	while (*p) {
2850		parent = *p;
2851		entry = rb_entry(parent, struct orphan_dir_info, node);
2852		if (dir_ino < entry->ino)
2853			p = &(*p)->rb_left;
2854		else if (dir_ino > entry->ino)
2855			p = &(*p)->rb_right;
2856		else if (dir_gen < entry->gen)
2857			p = &(*p)->rb_left;
2858		else if (dir_gen > entry->gen)
2859			p = &(*p)->rb_right;
2860		else
2861			return entry;
2862	}
2863
2864	odi = kmalloc(sizeof(*odi), GFP_KERNEL);
2865	if (!odi)
2866		return ERR_PTR(-ENOMEM);
2867	odi->ino = dir_ino;
2868	odi->gen = dir_gen;
2869	odi->last_dir_index_offset = 0;
2870
2871	rb_link_node(&odi->node, parent, p);
2872	rb_insert_color(&odi->node, &sctx->orphan_dirs);
2873	return odi;
2874}
2875
2876static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx,
2877						   u64 dir_ino, u64 gen)
2878{
2879	struct rb_node *n = sctx->orphan_dirs.rb_node;
2880	struct orphan_dir_info *entry;
2881
2882	while (n) {
2883		entry = rb_entry(n, struct orphan_dir_info, node);
2884		if (dir_ino < entry->ino)
2885			n = n->rb_left;
2886		else if (dir_ino > entry->ino)
2887			n = n->rb_right;
2888		else if (gen < entry->gen)
2889			n = n->rb_left;
2890		else if (gen > entry->gen)
2891			n = n->rb_right;
2892		else
2893			return entry;
2894	}
2895	return NULL;
2896}
2897
2898static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen)
2899{
2900	struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen);
2901
2902	return odi != NULL;
2903}
2904
2905static void free_orphan_dir_info(struct send_ctx *sctx,
2906				 struct orphan_dir_info *odi)
2907{
2908	if (!odi)
2909		return;
2910	rb_erase(&odi->node, &sctx->orphan_dirs);
2911	kfree(odi);
2912}
2913
2914/*
2915 * Returns 1 if a directory can be removed at this point in time.
2916 * We check this by iterating all dir items and checking if the inode behind
2917 * the dir item was already processed.
2918 */
2919static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
2920		     u64 send_progress)
2921{
2922	int ret = 0;
2923	struct btrfs_root *root = sctx->parent_root;
2924	struct btrfs_path *path;
2925	struct btrfs_key key;
2926	struct btrfs_key found_key;
2927	struct btrfs_key loc;
2928	struct btrfs_dir_item *di;
2929	struct orphan_dir_info *odi = NULL;
2930
2931	/*
2932	 * Don't try to rmdir the top/root subvolume dir.
2933	 */
2934	if (dir == BTRFS_FIRST_FREE_OBJECTID)
2935		return 0;
2936
2937	path = alloc_path_for_send();
2938	if (!path)
2939		return -ENOMEM;
2940
2941	key.objectid = dir;
2942	key.type = BTRFS_DIR_INDEX_KEY;
2943	key.offset = 0;
2944
2945	odi = get_orphan_dir_info(sctx, dir, dir_gen);
2946	if (odi)
2947		key.offset = odi->last_dir_index_offset;
2948
2949	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2950	if (ret < 0)
2951		goto out;
2952
2953	while (1) {
2954		struct waiting_dir_move *dm;
2955
2956		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2957			ret = btrfs_next_leaf(root, path);
2958			if (ret < 0)
2959				goto out;
2960			else if (ret > 0)
2961				break;
2962			continue;
2963		}
2964		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2965				      path->slots[0]);
2966		if (found_key.objectid != key.objectid ||
2967		    found_key.type != key.type)
2968			break;
2969
2970		di = btrfs_item_ptr(path->nodes[0], path->slots[0],
2971				struct btrfs_dir_item);
2972		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
2973
2974		dm = get_waiting_dir_move(sctx, loc.objectid);
2975		if (dm) {
2976			odi = add_orphan_dir_info(sctx, dir, dir_gen);
2977			if (IS_ERR(odi)) {
2978				ret = PTR_ERR(odi);
2979				goto out;
2980			}
2981			odi->gen = dir_gen;
2982			odi->last_dir_index_offset = found_key.offset;
2983			dm->rmdir_ino = dir;
2984			dm->rmdir_gen = dir_gen;
2985			ret = 0;
2986			goto out;
2987		}
2988
2989		if (loc.objectid > send_progress) {
2990			odi = add_orphan_dir_info(sctx, dir, dir_gen);
2991			if (IS_ERR(odi)) {
2992				ret = PTR_ERR(odi);
2993				goto out;
2994			}
2995			odi->gen = dir_gen;
2996			odi->last_dir_index_offset = found_key.offset;
2997			ret = 0;
2998			goto out;
2999		}
3000
3001		path->slots[0]++;
3002	}
3003	free_orphan_dir_info(sctx, odi);
3004
3005	ret = 1;
3006
3007out:
3008	btrfs_free_path(path);
3009	return ret;
3010}
3011
3012static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
3013{
3014	struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);
3015
3016	return entry != NULL;
3017}
3018
3019static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
3020{
3021	struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
3022	struct rb_node *parent = NULL;
3023	struct waiting_dir_move *entry, *dm;
3024
3025	dm = kmalloc(sizeof(*dm), GFP_KERNEL);
3026	if (!dm)
3027		return -ENOMEM;
3028	dm->ino = ino;
3029	dm->rmdir_ino = 0;
3030	dm->rmdir_gen = 0;
3031	dm->orphanized = orphanized;
3032
3033	while (*p) {
3034		parent = *p;
3035		entry = rb_entry(parent, struct waiting_dir_move, node);
3036		if (ino < entry->ino) {
3037			p = &(*p)->rb_left;
3038		} else if (ino > entry->ino) {
3039			p = &(*p)->rb_right;
3040		} else {
3041			kfree(dm);
3042			return -EEXIST;
3043		}
3044	}
3045
3046	rb_link_node(&dm->node, parent, p);
3047	rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
3048	return 0;
3049}
3050
3051static struct waiting_dir_move *
3052get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
3053{
3054	struct rb_node *n = sctx->waiting_dir_moves.rb_node;
3055	struct waiting_dir_move *entry;
3056
3057	while (n) {
3058		entry = rb_entry(n, struct waiting_dir_move, node);
3059		if (ino < entry->ino)
3060			n = n->rb_left;
3061		else if (ino > entry->ino)
3062			n = n->rb_right;
3063		else
3064			return entry;
3065	}
3066	return NULL;
3067}
3068
3069static void free_waiting_dir_move(struct send_ctx *sctx,
3070				  struct waiting_dir_move *dm)
3071{
3072	if (!dm)
3073		return;
3074	rb_erase(&dm->node, &sctx->waiting_dir_moves);
3075	kfree(dm);
3076}
3077
3078static int add_pending_dir_move(struct send_ctx *sctx,
3079				u64 ino,
3080				u64 ino_gen,
3081				u64 parent_ino,
3082				struct list_head *new_refs,
3083				struct list_head *deleted_refs,
3084				const bool is_orphan)
3085{
3086	struct rb_node **p = &sctx->pending_dir_moves.rb_node;
3087	struct rb_node *parent = NULL;
3088	struct pending_dir_move *entry = NULL, *pm;
3089	struct recorded_ref *cur;
3090	int exists = 0;
3091	int ret;
3092
3093	pm = kmalloc(sizeof(*pm), GFP_KERNEL);
3094	if (!pm)
3095		return -ENOMEM;
3096	pm->parent_ino = parent_ino;
3097	pm->ino = ino;
3098	pm->gen = ino_gen;
3099	INIT_LIST_HEAD(&pm->list);
3100	INIT_LIST_HEAD(&pm->update_refs);
3101	RB_CLEAR_NODE(&pm->node);
3102
3103	while (*p) {
3104		parent = *p;
3105		entry = rb_entry(parent, struct pending_dir_move, node);
3106		if (parent_ino < entry->parent_ino) {
3107			p = &(*p)->rb_left;
3108		} else if (parent_ino > entry->parent_ino) {
3109			p = &(*p)->rb_right;
3110		} else {
3111			exists = 1;
3112			break;
3113		}
3114	}
3115
3116	list_for_each_entry(cur, deleted_refs, list) {
3117		ret = dup_ref(cur, &pm->update_refs);
3118		if (ret < 0)
3119			goto out;
3120	}
3121	list_for_each_entry(cur, new_refs, list) {
3122		ret = dup_ref(cur, &pm->update_refs);
3123		if (ret < 0)
3124			goto out;
3125	}
3126
3127	ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
3128	if (ret)
3129		goto out;
3130
3131	if (exists) {
3132		list_add_tail(&pm->list, &entry->list);
3133	} else {
3134		rb_link_node(&pm->node, parent, p);
3135		rb_insert_color(&pm->node, &sctx->pending_dir_moves);
3136	}
3137	ret = 0;
3138out:
3139	if (ret) {
3140		__free_recorded_refs(&pm->update_refs);
3141		kfree(pm);
3142	}
3143	return ret;
3144}
3145
3146static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
3147						      u64 parent_ino)
3148{
3149	struct rb_node *n = sctx->pending_dir_moves.rb_node;
3150	struct pending_dir_move *entry;
3151
3152	while (n) {
3153		entry = rb_entry(n, struct pending_dir_move, node);
3154		if (parent_ino < entry->parent_ino)
3155			n = n->rb_left;
3156		else if (parent_ino > entry->parent_ino)
3157			n = n->rb_right;
3158		else
3159			return entry;
3160	}
3161	return NULL;
3162}
3163
3164static int path_loop(struct send_ctx *sctx, struct fs_path *name,
3165		     u64 ino, u64 gen, u64 *ancestor_ino)
3166{
3167	int ret = 0;
3168	u64 parent_inode = 0;
3169	u64 parent_gen = 0;
3170	u64 start_ino = ino;
3171
3172	*ancestor_ino = 0;
3173	while (ino != BTRFS_FIRST_FREE_OBJECTID) {
3174		fs_path_reset(name);
3175
3176		if (is_waiting_for_rm(sctx, ino, gen))
3177			break;
3178		if (is_waiting_for_move(sctx, ino)) {
3179			if (*ancestor_ino == 0)
3180				*ancestor_ino = ino;
3181			ret = get_first_ref(sctx->parent_root, ino,
3182					    &parent_inode, &parent_gen, name);
3183		} else {
3184			ret = __get_cur_name_and_parent(sctx, ino, gen,
3185							&parent_inode,
3186							&parent_gen, name);
3187			if (ret > 0) {
3188				ret = 0;
3189				break;
3190			}
3191		}
3192		if (ret < 0)
3193			break;
3194		if (parent_inode == start_ino) {
3195			ret = 1;
3196			if (*ancestor_ino == 0)
3197				*ancestor_ino = ino;
3198			break;
3199		}
3200		ino = parent_inode;
3201		gen = parent_gen;
3202	}
3203	return ret;
3204}
3205
3206static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3207{
3208	struct fs_path *from_path = NULL;
3209	struct fs_path *to_path = NULL;
3210	struct fs_path *name = NULL;
3211	u64 orig_progress = sctx->send_progress;
3212	struct recorded_ref *cur;
3213	u64 parent_ino, parent_gen;
3214	struct waiting_dir_move *dm = NULL;
3215	u64 rmdir_ino = 0;
3216	u64 rmdir_gen;
3217	u64 ancestor;
3218	bool is_orphan;
3219	int ret;
3220
3221	name = fs_path_alloc();
3222	from_path = fs_path_alloc();
3223	if (!name || !from_path) {
3224		ret = -ENOMEM;
3225		goto out;
3226	}
3227
3228	dm = get_waiting_dir_move(sctx, pm->ino);
3229	ASSERT(dm);
3230	rmdir_ino = dm->rmdir_ino;
3231	rmdir_gen = dm->rmdir_gen;
3232	is_orphan = dm->orphanized;
3233	free_waiting_dir_move(sctx, dm);
3234
3235	if (is_orphan) {
3236		ret = gen_unique_name(sctx, pm->ino,
3237				      pm->gen, from_path);
3238	} else {
3239		ret = get_first_ref(sctx->parent_root, pm->ino,
3240				    &parent_ino, &parent_gen, name);
3241		if (ret < 0)
3242			goto out;
3243		ret = get_cur_path(sctx, parent_ino, parent_gen,
3244				   from_path);
3245		if (ret < 0)
3246			goto out;
3247		ret = fs_path_add_path(from_path, name);
3248	}
3249	if (ret < 0)
3250		goto out;
3251
3252	sctx->send_progress = sctx->cur_ino + 1;
3253	ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
3254	if (ret < 0)
3255		goto out;
3256	if (ret) {
3257		LIST_HEAD(deleted_refs);
3258		ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
3259		ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
3260					   &pm->update_refs, &deleted_refs,
3261					   is_orphan);
3262		if (ret < 0)
3263			goto out;
3264		if (rmdir_ino) {
3265			dm = get_waiting_dir_move(sctx, pm->ino);
3266			ASSERT(dm);
3267			dm->rmdir_ino = rmdir_ino;
3268			dm->rmdir_gen = rmdir_gen;
3269		}
3270		goto out;
3271	}
3272	fs_path_reset(name);
3273	to_path = name;
3274	name = NULL;
3275	ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
3276	if (ret < 0)
3277		goto out;
3278
3279	ret = send_rename(sctx, from_path, to_path);
3280	if (ret < 0)
3281		goto out;
3282
3283	if (rmdir_ino) {
3284		struct orphan_dir_info *odi;
3285		u64 gen;
3286
3287		odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen);
3288		if (!odi) {
3289			/* already deleted */
3290			goto finish;
3291		}
3292		gen = odi->gen;
3293
3294		ret = can_rmdir(sctx, rmdir_ino, gen, sctx->cur_ino);
3295		if (ret < 0)
3296			goto out;
3297		if (!ret)
3298			goto finish;
3299
3300		name = fs_path_alloc();
3301		if (!name) {
3302			ret = -ENOMEM;
3303			goto out;
3304		}
3305		ret = get_cur_path(sctx, rmdir_ino, gen, name);
3306		if (ret < 0)
3307			goto out;
3308		ret = send_rmdir(sctx, name);
3309		if (ret < 0)
3310			goto out;
3311	}
3312
3313finish:
3314	ret = send_utimes(sctx, pm->ino, pm->gen);
3315	if (ret < 0)
3316		goto out;
3317
3318	/*
3319	 * After rename/move, need to update the utimes of both new parent(s)
3320	 * and old parent(s).
3321	 */
3322	list_for_each_entry(cur, &pm->update_refs, list) {
3323		/*
3324		 * The parent inode might have been deleted in the send snapshot
3325		 */
3326		ret = get_inode_info(sctx->send_root, cur->dir, NULL,
3327				     NULL, NULL, NULL, NULL, NULL);
3328		if (ret == -ENOENT) {
3329			ret = 0;
3330			continue;
3331		}
3332		if (ret < 0)
3333			goto out;
3334
3335		ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3336		if (ret < 0)
3337			goto out;
3338	}
3339
3340out:
3341	fs_path_free(name);
3342	fs_path_free(from_path);
3343	fs_path_free(to_path);
3344	sctx->send_progress = orig_progress;
3345
3346	return ret;
3347}
3348
3349static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
3350{
3351	if (!list_empty(&m->list))
3352		list_del(&m->list);
3353	if (!RB_EMPTY_NODE(&m->node))
3354		rb_erase(&m->node, &sctx->pending_dir_moves);
3355	__free_recorded_refs(&m->update_refs);
3356	kfree(m);
3357}
3358
3359static void tail_append_pending_moves(struct send_ctx *sctx,
3360				      struct pending_dir_move *moves,
3361				      struct list_head *stack)
3362{
3363	if (list_empty(&moves->list)) {
3364		list_add_tail(&moves->list, stack);
3365	} else {
3366		LIST_HEAD(list);
3367		list_splice_init(&moves->list, &list);
3368		list_add_tail(&moves->list, stack);
3369		list_splice_tail(&list, stack);
3370	}
3371	if (!RB_EMPTY_NODE(&moves->node)) {
3372		rb_erase(&moves->node, &sctx->pending_dir_moves);
3373		RB_CLEAR_NODE(&moves->node);
3374	}
3375}
3376
3377static int apply_children_dir_moves(struct send_ctx *sctx)
3378{
3379	struct pending_dir_move *pm;
3380	struct list_head stack;
3381	u64 parent_ino = sctx->cur_ino;
3382	int ret = 0;
3383
3384	pm = get_pending_dir_moves(sctx, parent_ino);
3385	if (!pm)
3386		return 0;
3387
3388	INIT_LIST_HEAD(&stack);
3389	tail_append_pending_moves(sctx, pm, &stack);
3390
3391	while (!list_empty(&stack)) {
3392		pm = list_first_entry(&stack, struct pending_dir_move, list);
3393		parent_ino = pm->ino;
3394		ret = apply_dir_move(sctx, pm);
3395		free_pending_move(sctx, pm);
3396		if (ret)
3397			goto out;
3398		pm = get_pending_dir_moves(sctx, parent_ino);
3399		if (pm)
3400			tail_append_pending_moves(sctx, pm, &stack);
3401	}
3402	return 0;
3403
3404out:
3405	while (!list_empty(&stack)) {
3406		pm = list_first_entry(&stack, struct pending_dir_move, list);
3407		free_pending_move(sctx, pm);
3408	}
3409	return ret;
3410}
3411
3412/*
3413 * We might need to delay a directory rename even when no ancestor directory
3414 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
3415 * renamed. This happens when we rename a directory to the old name (the name
3416 * in the parent root) of some other unrelated directory that got its rename
3417 * delayed due to some ancestor with higher number that got renamed.
3418 *
3419 * Example:
3420 *
3421 * Parent snapshot:
3422 * .                                       (ino 256)
3423 * |---- a/                                (ino 257)
3424 * |     |---- file                        (ino 260)
3425 * |
3426 * |---- b/                                (ino 258)
3427 * |---- c/                                (ino 259)
3428 *
3429 * Send snapshot:
3430 * .                                       (ino 256)
3431 * |---- a/                                (ino 258)
3432 * |---- x/                                (ino 259)
3433 *       |---- y/                          (ino 257)
3434 *             |----- file                 (ino 260)
3435 *
3436 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
3437 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
3438 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
3439 * must issue is:
3440 *
3441 * 1 - rename 259 from 'c' to 'x'
3442 * 2 - rename 257 from 'a' to 'x/y'
3443 * 3 - rename 258 from 'b' to 'a'
3444 *
3445 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
3446 * be done right away and < 0 on error.
3447 */
3448static int wait_for_dest_dir_move(struct send_ctx *sctx,
3449				  struct recorded_ref *parent_ref,
3450				  const bool is_orphan)
3451{
3452	struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
3453	struct btrfs_path *path;
3454	struct btrfs_key key;
3455	struct btrfs_key di_key;
3456	struct btrfs_dir_item *di;
3457	u64 left_gen;
3458	u64 right_gen;
3459	int ret = 0;
3460	struct waiting_dir_move *wdm;
3461
3462	if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
3463		return 0;
3464
3465	path = alloc_path_for_send();
3466	if (!path)
3467		return -ENOMEM;
3468
3469	key.objectid = parent_ref->dir;
3470	key.type = BTRFS_DIR_ITEM_KEY;
3471	key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
3472
3473	ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
3474	if (ret < 0) {
3475		goto out;
3476	} else if (ret > 0) {
3477		ret = 0;
3478		goto out;
3479	}
3480
3481	di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
3482				       parent_ref->name_len);
3483	if (!di) {
3484		ret = 0;
3485		goto out;
3486	}
3487	/*
3488	 * di_key.objectid has the number of the inode that has a dentry in the
3489	 * parent directory with the same name that sctx->cur_ino is being
3490	 * renamed to. We need to check if that inode is in the send root as
3491	 * well and if it is currently marked as an inode with a pending rename,
3492	 * if it is, we need to delay the rename of sctx->cur_ino as well, so
3493	 * that it happens after that other inode is renamed.
3494	 */
3495	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
3496	if (di_key.type != BTRFS_INODE_ITEM_KEY) {
3497		ret = 0;
3498		goto out;
3499	}
3500
3501	ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
3502			     &left_gen, NULL, NULL, NULL, NULL);
3503	if (ret < 0)
3504		goto out;
3505	ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
3506			     &right_gen, NULL, NULL, NULL, NULL);
3507	if (ret < 0) {
3508		if (ret == -ENOENT)
3509			ret = 0;
3510		goto out;
3511	}
3512
3513	/* Different inode, no need to delay the rename of sctx->cur_ino */
3514	if (right_gen != left_gen) {
3515		ret = 0;
3516		goto out;
3517	}
3518
3519	wdm = get_waiting_dir_move(sctx, di_key.objectid);
3520	if (wdm && !wdm->orphanized) {
3521		ret = add_pending_dir_move(sctx,
3522					   sctx->cur_ino,
3523					   sctx->cur_inode_gen,
3524					   di_key.objectid,
3525					   &sctx->new_refs,
3526					   &sctx->deleted_refs,
3527					   is_orphan);
3528		if (!ret)
3529			ret = 1;
3530	}
3531out:
3532	btrfs_free_path(path);
3533	return ret;
3534}
3535
3536/*
3537 * Check if inode ino2, or any of its ancestors, is inode ino1.
3538 * Return 1 if true, 0 if false and < 0 on error.
3539 */
3540static int check_ino_in_path(struct btrfs_root *root,
3541			     const u64 ino1,
3542			     const u64 ino1_gen,
3543			     const u64 ino2,
3544			     const u64 ino2_gen,
3545			     struct fs_path *fs_path)
3546{
3547	u64 ino = ino2;
3548
3549	if (ino1 == ino2)
3550		return ino1_gen == ino2_gen;
3551
3552	while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3553		u64 parent;
3554		u64 parent_gen;
3555		int ret;
3556
3557		fs_path_reset(fs_path);
3558		ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
3559		if (ret < 0)
3560			return ret;
3561		if (parent == ino1)
3562			return parent_gen == ino1_gen;
3563		ino = parent;
3564	}
3565	return 0;
3566}
3567
3568/*
3569 * Check if ino ino1 is an ancestor of inode ino2 in the given root for any
3570 * possible path (in case ino2 is not a directory and has multiple hard links).
3571 * Return 1 if true, 0 if false and < 0 on error.
3572 */
3573static int is_ancestor(struct btrfs_root *root,
3574		       const u64 ino1,
3575		       const u64 ino1_gen,
3576		       const u64 ino2,
3577		       struct fs_path *fs_path)
3578{
3579	bool free_fs_path = false;
3580	int ret = 0;
3581	struct btrfs_path *path = NULL;
3582	struct btrfs_key key;
3583
3584	if (!fs_path) {
3585		fs_path = fs_path_alloc();
3586		if (!fs_path)
3587			return -ENOMEM;
3588		free_fs_path = true;
3589	}
3590
3591	path = alloc_path_for_send();
3592	if (!path) {
3593		ret = -ENOMEM;
3594		goto out;
3595	}
3596
3597	key.objectid = ino2;
3598	key.type = BTRFS_INODE_REF_KEY;
3599	key.offset = 0;
3600
3601	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3602	if (ret < 0)
3603		goto out;
3604
3605	while (true) {
3606		struct extent_buffer *leaf = path->nodes[0];
3607		int slot = path->slots[0];
3608		u32 cur_offset = 0;
3609		u32 item_size;
3610
3611		if (slot >= btrfs_header_nritems(leaf)) {
3612			ret = btrfs_next_leaf(root, path);
3613			if (ret < 0)
3614				goto out;
3615			if (ret > 0)
3616				break;
3617			continue;
3618		}
3619
3620		btrfs_item_key_to_cpu(leaf, &key, slot);
3621		if (key.objectid != ino2)
3622			break;
3623		if (key.type != BTRFS_INODE_REF_KEY &&
3624		    key.type != BTRFS_INODE_EXTREF_KEY)
3625			break;
3626
3627		item_size = btrfs_item_size_nr(leaf, slot);
3628		while (cur_offset < item_size) {
3629			u64 parent;
3630			u64 parent_gen;
3631
3632			if (key.type == BTRFS_INODE_EXTREF_KEY) {
3633				unsigned long ptr;
3634				struct btrfs_inode_extref *extref;
3635
3636				ptr = btrfs_item_ptr_offset(leaf, slot);
3637				extref = (struct btrfs_inode_extref *)
3638					(ptr + cur_offset);
3639				parent = btrfs_inode_extref_parent(leaf,
3640								   extref);
3641				cur_offset += sizeof(*extref);
3642				cur_offset += btrfs_inode_extref_name_len(leaf,
3643								  extref);
3644			} else {
3645				parent = key.offset;
3646				cur_offset = item_size;
3647			}
3648
3649			ret = get_inode_info(root, parent, NULL, &parent_gen,
3650					     NULL, NULL, NULL, NULL);
3651			if (ret < 0)
3652				goto out;
3653			ret = check_ino_in_path(root, ino1, ino1_gen,
3654						parent, parent_gen, fs_path);
3655			if (ret)
3656				goto out;
3657		}
3658		path->slots[0]++;
3659	}
3660	ret = 0;
3661 out:
3662	btrfs_free_path(path);
3663	if (free_fs_path)
3664		fs_path_free(fs_path);
3665	return ret;
3666}
3667
3668static int wait_for_parent_move(struct send_ctx *sctx,
3669				struct recorded_ref *parent_ref,
3670				const bool is_orphan)
3671{
3672	int ret = 0;
3673	u64 ino = parent_ref->dir;
3674	u64 ino_gen = parent_ref->dir_gen;
3675	u64 parent_ino_before, parent_ino_after;
3676	struct fs_path *path_before = NULL;
3677	struct fs_path *path_after = NULL;
3678	int len1, len2;
3679
3680	path_after = fs_path_alloc();
3681	path_before = fs_path_alloc();
3682	if (!path_after || !path_before) {
3683		ret = -ENOMEM;
3684		goto out;
3685	}
3686
3687	/*
3688	 * Our current directory inode may not yet be renamed/moved because some
3689	 * ancestor (immediate or not) has to be renamed/moved first. So find if
3690	 * such ancestor exists and make sure our own rename/move happens after
3691	 * that ancestor is processed to avoid path build infinite loops (done
3692	 * at get_cur_path()).
3693	 */
3694	while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3695		u64 parent_ino_after_gen;
3696
3697		if (is_waiting_for_move(sctx, ino)) {
3698			/*
3699			 * If the current inode is an ancestor of ino in the
3700			 * parent root, we need to delay the rename of the
3701			 * current inode, otherwise don't delayed the rename
3702			 * because we can end up with a circular dependency
3703			 * of renames, resulting in some directories never
3704			 * getting the respective rename operations issued in
3705			 * the send stream or getting into infinite path build
3706			 * loops.
3707			 */
3708			ret = is_ancestor(sctx->parent_root,
3709					  sctx->cur_ino, sctx->cur_inode_gen,
3710					  ino, path_before);
3711			if (ret)
3712				break;
3713		}
3714
3715		fs_path_reset(path_before);
3716		fs_path_reset(path_after);
3717
3718		ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
3719				    &parent_ino_after_gen, path_after);
3720		if (ret < 0)
3721			goto out;
3722		ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
3723				    NULL, path_before);
3724		if (ret < 0 && ret != -ENOENT) {
3725			goto out;
3726		} else if (ret == -ENOENT) {
3727			ret = 0;
3728			break;
3729		}
3730
3731		len1 = fs_path_len(path_before);
3732		len2 = fs_path_len(path_after);
3733		if (ino > sctx->cur_ino &&
3734		    (parent_ino_before != parent_ino_after || len1 != len2 ||
3735		     memcmp(path_before->start, path_after->start, len1))) {
3736			u64 parent_ino_gen;
3737
3738			ret = get_inode_info(sctx->parent_root, ino, NULL,
3739					     &parent_ino_gen, NULL, NULL, NULL,
3740					     NULL);
3741			if (ret < 0)
3742				goto out;
3743			if (ino_gen == parent_ino_gen) {
3744				ret = 1;
3745				break;
3746			}
3747		}
3748		ino = parent_ino_after;
3749		ino_gen = parent_ino_after_gen;
3750	}
3751
3752out:
3753	fs_path_free(path_before);
3754	fs_path_free(path_after);
3755
3756	if (ret == 1) {
3757		ret = add_pending_dir_move(sctx,
3758					   sctx->cur_ino,
3759					   sctx->cur_inode_gen,
3760					   ino,
3761					   &sctx->new_refs,
3762					   &sctx->deleted_refs,
3763					   is_orphan);
3764		if (!ret)
3765			ret = 1;
3766	}
3767
3768	return ret;
3769}
3770
3771static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
3772{
3773	int ret;
3774	struct fs_path *new_path;
3775
3776	/*
3777	 * Our reference's name member points to its full_path member string, so
3778	 * we use here a new path.
3779	 */
3780	new_path = fs_path_alloc();
3781	if (!new_path)
3782		return -ENOMEM;
3783
3784	ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
3785	if (ret < 0) {
3786		fs_path_free(new_path);
3787		return ret;
3788	}
3789	ret = fs_path_add(new_path, ref->name, ref->name_len);
3790	if (ret < 0) {
3791		fs_path_free(new_path);
3792		return ret;
3793	}
3794
3795	fs_path_free(ref->full_path);
3796	set_ref_path(ref, new_path);
3797
3798	return 0;
3799}
3800
3801/*
3802 * When processing the new references for an inode we may orphanize an existing
3803 * directory inode because its old name conflicts with one of the new references
3804 * of the current inode. Later, when processing another new reference of our
3805 * inode, we might need to orphanize another inode, but the path we have in the
3806 * reference reflects the pre-orphanization name of the directory we previously
3807 * orphanized. For example:
3808 *
3809 * parent snapshot looks like:
3810 *
3811 * .                                     (ino 256)
3812 * |----- f1                             (ino 257)
3813 * |----- f2                             (ino 258)
3814 * |----- d1/                            (ino 259)
3815 *        |----- d2/                     (ino 260)
3816 *
3817 * send snapshot looks like:
3818 *
3819 * .                                     (ino 256)
3820 * |----- d1                             (ino 258)
3821 * |----- f2/                            (ino 259)
3822 *        |----- f2_link/                (ino 260)
3823 *        |       |----- f1              (ino 257)
3824 *        |
3825 *        |----- d2                      (ino 258)
3826 *
3827 * When processing inode 257 we compute the name for inode 259 as "d1", and we
3828 * cache it in the name cache. Later when we start processing inode 258, when
3829 * collecting all its new references we set a full path of "d1/d2" for its new
3830 * reference with name "d2". When we start processing the new references we
3831 * start by processing the new reference with name "d1", and this results in
3832 * orphanizing inode 259, since its old reference causes a conflict. Then we
3833 * move on the next new reference, with name "d2", and we find out we must
3834 * orphanize inode 260, as its old reference conflicts with ours - but for the
3835 * orphanization we use a source path corresponding to the path we stored in the
3836 * new reference, which is "d1/d2" and not "o259-6-0/d2" - this makes the
3837 * receiver fail since the path component "d1/" no longer exists, it was renamed
3838 * to "o259-6-0/" when processing the previous new reference. So in this case we
3839 * must recompute the path in the new reference and use it for the new
3840 * orphanization operation.
3841 */
3842static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
3843{
3844	char *name;
3845	int ret;
3846
3847	name = kmemdup(ref->name, ref->name_len, GFP_KERNEL);
3848	if (!name)
3849		return -ENOMEM;
3850
3851	fs_path_reset(ref->full_path);
3852	ret = get_cur_path(sctx, ref->dir, ref->dir_gen, ref->full_path);
3853	if (ret < 0)
3854		goto out;
3855
3856	ret = fs_path_add(ref->full_path, name, ref->name_len);
3857	if (ret < 0)
3858		goto out;
3859
3860	/* Update the reference's base name pointer. */
3861	set_ref_path(ref, ref->full_path);
3862out:
3863	kfree(name);
3864	return ret;
3865}
3866
3867/*
3868 * This does all the move/link/unlink/rmdir magic.
3869 */
3870static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3871{
3872	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
3873	int ret = 0;
3874	struct recorded_ref *cur;
3875	struct recorded_ref *cur2;
3876	struct list_head check_dirs;
3877	struct fs_path *valid_path = NULL;
3878	u64 ow_inode = 0;
3879	u64 ow_gen;
3880	u64 ow_mode;
3881	int did_overwrite = 0;
3882	int is_orphan = 0;
3883	u64 last_dir_ino_rm = 0;
3884	bool can_rename = true;
3885	bool orphanized_dir = false;
3886	bool orphanized_ancestor = false;
3887
3888	btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
3889
3890	/*
3891	 * This should never happen as the root dir always has the same ref
3892	 * which is always '..'
3893	 */
3894	BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
3895	INIT_LIST_HEAD(&check_dirs);
3896
3897	valid_path = fs_path_alloc();
3898	if (!valid_path) {
3899		ret = -ENOMEM;
3900		goto out;
3901	}
3902
3903	/*
3904	 * First, check if the first ref of the current inode was overwritten
3905	 * before. If yes, we know that the current inode was already orphanized
3906	 * and thus use the orphan name. If not, we can use get_cur_path to
3907	 * get the path of the first ref as it would like while receiving at
3908	 * this point in time.
3909	 * New inodes are always orphan at the beginning, so force to use the
3910	 * orphan name in this case.
3911	 * The first ref is stored in valid_path and will be updated if it
3912	 * gets moved around.
3913	 */
3914	if (!sctx->cur_inode_new) {
3915		ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
3916				sctx->cur_inode_gen);
3917		if (ret < 0)
3918			goto out;
3919		if (ret)
3920			did_overwrite = 1;
3921	}
3922	if (sctx->cur_inode_new || did_overwrite) {
3923		ret = gen_unique_name(sctx, sctx->cur_ino,
3924				sctx->cur_inode_gen, valid_path);
3925		if (ret < 0)
3926			goto out;
3927		is_orphan = 1;
3928	} else {
3929		ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3930				valid_path);
3931		if (ret < 0)
3932			goto out;
3933	}
3934
3935	/*
3936	 * Before doing any rename and link operations, do a first pass on the
3937	 * new references to orphanize any unprocessed inodes that may have a
3938	 * reference that conflicts with one of the new references of the current
3939	 * inode. This needs to happen first because a new reference may conflict
3940	 * with the old reference of a parent directory, so we must make sure
3941	 * that the path used for link and rename commands don't use an
3942	 * orphanized name when an ancestor was not yet orphanized.
3943	 *
3944	 * Example:
3945	 *
3946	 * Parent snapshot:
3947	 *
3948	 * .                                                      (ino 256)
3949	 * |----- testdir/                                        (ino 259)
3950	 * |          |----- a                                    (ino 257)
3951	 * |
3952	 * |----- b                                               (ino 258)
3953	 *
3954	 * Send snapshot:
3955	 *
3956	 * .                                                      (ino 256)
3957	 * |----- testdir_2/                                      (ino 259)
3958	 * |          |----- a                                    (ino 260)
3959	 * |
3960	 * |----- testdir                                         (ino 257)
3961	 * |----- b                                               (ino 257)
3962	 * |----- b2                                              (ino 258)
3963	 *
3964	 * Processing the new reference for inode 257 with name "b" may happen
3965	 * before processing the new reference with name "testdir". If so, we
3966	 * must make sure that by the time we send a link command to create the
3967	 * hard link "b", inode 259 was already orphanized, since the generated
3968	 * path in "valid_path" already contains the orphanized name for 259.
3969	 * We are processing inode 257, so only later when processing 259 we do
3970	 * the rename operation to change its temporary (orphanized) name to
3971	 * "testdir_2".
3972	 */
3973	list_for_each_entry(cur, &sctx->new_refs, list) {
3974		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
3975		if (ret < 0)
3976			goto out;
3977		if (ret == inode_state_will_create)
3978			continue;
3979
3980		/*
3981		 * Check if this new ref would overwrite the first ref of another
3982		 * unprocessed inode. If yes, orphanize the overwritten inode.
3983		 * If we find an overwritten ref that is not the first ref,
3984		 * simply unlink it.
3985		 */
3986		ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
3987				cur->name, cur->name_len,
3988				&ow_inode, &ow_gen, &ow_mode);
3989		if (ret < 0)
3990			goto out;
3991		if (ret) {
3992			ret = is_first_ref(sctx->parent_root,
3993					   ow_inode, cur->dir, cur->name,
3994					   cur->name_len);
3995			if (ret < 0)
3996				goto out;
3997			if (ret) {
3998				struct name_cache_entry *nce;
3999				struct waiting_dir_move *wdm;
4000
4001				if (orphanized_dir) {
4002					ret = refresh_ref_path(sctx, cur);
4003					if (ret < 0)
4004						goto out;
4005				}
4006
4007				ret = orphanize_inode(sctx, ow_inode, ow_gen,
4008						cur->full_path);
4009				if (ret < 0)
4010					goto out;
4011				if (S_ISDIR(ow_mode))
4012					orphanized_dir = true;
4013
4014				/*
4015				 * If ow_inode has its rename operation delayed
4016				 * make sure that its orphanized name is used in
4017				 * the source path when performing its rename
4018				 * operation.
4019				 */
4020				if (is_waiting_for_move(sctx, ow_inode)) {
4021					wdm = get_waiting_dir_move(sctx,
4022								   ow_inode);
4023					ASSERT(wdm);
4024					wdm->orphanized = true;
4025				}
4026
4027				/*
4028				 * Make sure we clear our orphanized inode's
4029				 * name from the name cache. This is because the
4030				 * inode ow_inode might be an ancestor of some
4031				 * other inode that will be orphanized as well
4032				 * later and has an inode number greater than
4033				 * sctx->send_progress. We need to prevent
4034				 * future name lookups from using the old name
4035				 * and get instead the orphan name.
4036				 */
4037				nce = name_cache_search(sctx, ow_inode, ow_gen);
4038				if (nce) {
4039					name_cache_delete(sctx, nce);
4040					kfree(nce);
4041				}
4042
4043				/*
4044				 * ow_inode might currently be an ancestor of
4045				 * cur_ino, therefore compute valid_path (the
4046				 * current path of cur_ino) again because it
4047				 * might contain the pre-orphanization name of
4048				 * ow_inode, which is no longer valid.
4049				 */
4050				ret = is_ancestor(sctx->parent_root,
4051						  ow_inode, ow_gen,
4052						  sctx->cur_ino, NULL);
4053				if (ret > 0) {
4054					orphanized_ancestor = true;
4055					fs_path_reset(valid_path);
4056					ret = get_cur_path(sctx, sctx->cur_ino,
4057							   sctx->cur_inode_gen,
4058							   valid_path);
4059				}
4060				if (ret < 0)
4061					goto out;
4062			} else {
4063				/*
4064				 * If we previously orphanized a directory that
4065				 * collided with a new reference that we already
4066				 * processed, recompute the current path because
4067				 * that directory may be part of the path.
4068				 */
4069				if (orphanized_dir) {
4070					ret = refresh_ref_path(sctx, cur);
4071					if (ret < 0)
4072						goto out;
4073				}
4074				ret = send_unlink(sctx, cur->full_path);
4075				if (ret < 0)
4076					goto out;
4077			}
4078		}
4079
4080	}
4081
4082	list_for_each_entry(cur, &sctx->new_refs, list) {
4083		/*
4084		 * We may have refs where the parent directory does not exist
4085		 * yet. This happens if the parent directories inum is higher
4086		 * than the current inum. To handle this case, we create the
4087		 * parent directory out of order. But we need to check if this
4088		 * did already happen before due to other refs in the same dir.
4089		 */
4090		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4091		if (ret < 0)
4092			goto out;
4093		if (ret == inode_state_will_create) {
4094			ret = 0;
4095			/*
4096			 * First check if any of the current inodes refs did
4097			 * already create the dir.
4098			 */
4099			list_for_each_entry(cur2, &sctx->new_refs, list) {
4100				if (cur == cur2)
4101					break;
4102				if (cur2->dir == cur->dir) {
4103					ret = 1;
4104					break;
4105				}
4106			}
4107
4108			/*
4109			 * If that did not happen, check if a previous inode
4110			 * did already create the dir.
4111			 */
4112			if (!ret)
4113				ret = did_create_dir(sctx, cur->dir);
4114			if (ret < 0)
4115				goto out;
4116			if (!ret) {
4117				ret = send_create_inode(sctx, cur->dir);
4118				if (ret < 0)
4119					goto out;
4120			}
4121		}
4122
4123		if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
4124			ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
4125			if (ret < 0)
4126				goto out;
4127			if (ret == 1) {
4128				can_rename = false;
4129				*pending_move = 1;
4130			}
4131		}
4132
4133		if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
4134		    can_rename) {
4135			ret = wait_for_parent_move(sctx, cur, is_orphan);
4136			if (ret < 0)
4137				goto out;
4138			if (ret == 1) {
4139				can_rename = false;
4140				*pending_move = 1;
4141			}
4142		}
4143
4144		/*
4145		 * link/move the ref to the new place. If we have an orphan
4146		 * inode, move it and update valid_path. If not, link or move
4147		 * it depending on the inode mode.
4148		 */
4149		if (is_orphan && can_rename) {
4150			ret = send_rename(sctx, valid_path, cur->full_path);
4151			if (ret < 0)
4152				goto out;
4153			is_orphan = 0;
4154			ret = fs_path_copy(valid_path, cur->full_path);
4155			if (ret < 0)
4156				goto out;
4157		} else if (can_rename) {
4158			if (S_ISDIR(sctx->cur_inode_mode)) {
4159				/*
4160				 * Dirs can't be linked, so move it. For moved
4161				 * dirs, we always have one new and one deleted
4162				 * ref. The deleted ref is ignored later.
4163				 */
4164				ret = send_rename(sctx, valid_path,
4165						  cur->full_path);
4166				if (!ret)
4167					ret = fs_path_copy(valid_path,
4168							   cur->full_path);
4169				if (ret < 0)
4170					goto out;
4171			} else {
4172				/*
4173				 * We might have previously orphanized an inode
4174				 * which is an ancestor of our current inode,
4175				 * so our reference's full path, which was
4176				 * computed before any such orphanizations, must
4177				 * be updated.
4178				 */
4179				if (orphanized_dir) {
4180					ret = update_ref_path(sctx, cur);
4181					if (ret < 0)
4182						goto out;
4183				}
4184				ret = send_link(sctx, cur->full_path,
4185						valid_path);
4186				if (ret < 0)
4187					goto out;
4188			}
4189		}
4190		ret = dup_ref(cur, &check_dirs);
4191		if (ret < 0)
4192			goto out;
4193	}
4194
4195	if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
4196		/*
4197		 * Check if we can already rmdir the directory. If not,
4198		 * orphanize it. For every dir item inside that gets deleted
4199		 * later, we do this check again and rmdir it then if possible.
4200		 * See the use of check_dirs for more details.
4201		 */
4202		ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4203				sctx->cur_ino);
4204		if (ret < 0)
4205			goto out;
4206		if (ret) {
4207			ret = send_rmdir(sctx, valid_path);
4208			if (ret < 0)
4209				goto out;
4210		} else if (!is_orphan) {
4211			ret = orphanize_inode(sctx, sctx->cur_ino,
4212					sctx->cur_inode_gen, valid_path);
4213			if (ret < 0)
4214				goto out;
4215			is_orphan = 1;
4216		}
4217
4218		list_for_each_entry(cur, &sctx->deleted_refs, list) {
4219			ret = dup_ref(cur, &check_dirs);
4220			if (ret < 0)
4221				goto out;
4222		}
4223	} else if (S_ISDIR(sctx->cur_inode_mode) &&
4224		   !list_empty(&sctx->deleted_refs)) {
4225		/*
4226		 * We have a moved dir. Add the old parent to check_dirs
4227		 */
4228		cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
4229				list);
4230		ret = dup_ref(cur, &check_dirs);
4231		if (ret < 0)
4232			goto out;
4233	} else if (!S_ISDIR(sctx->cur_inode_mode)) {
4234		/*
4235		 * We have a non dir inode. Go through all deleted refs and
4236		 * unlink them if they were not already overwritten by other
4237		 * inodes.
4238		 */
4239		list_for_each_entry(cur, &sctx->deleted_refs, list) {
4240			ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4241					sctx->cur_ino, sctx->cur_inode_gen,
4242					cur->name, cur->name_len);
4243			if (ret < 0)
4244				goto out;
4245			if (!ret) {
4246				/*
4247				 * If we orphanized any ancestor before, we need
4248				 * to recompute the full path for deleted names,
4249				 * since any such path was computed before we
4250				 * processed any references and orphanized any
4251				 * ancestor inode.
4252				 */
4253				if (orphanized_ancestor) {
4254					ret = update_ref_path(sctx, cur);
4255					if (ret < 0)
4256						goto out;
4257				}
4258				ret = send_unlink(sctx, cur->full_path);
4259				if (ret < 0)
4260					goto out;
4261			}
4262			ret = dup_ref(cur, &check_dirs);
4263			if (ret < 0)
4264				goto out;
4265		}
4266		/*
4267		 * If the inode is still orphan, unlink the orphan. This may
4268		 * happen when a previous inode did overwrite the first ref
4269		 * of this inode and no new refs were added for the current
4270		 * inode. Unlinking does not mean that the inode is deleted in
4271		 * all cases. There may still be links to this inode in other
4272		 * places.
4273		 */
4274		if (is_orphan) {
4275			ret = send_unlink(sctx, valid_path);
4276			if (ret < 0)
4277				goto out;
4278		}
4279	}
4280
4281	/*
4282	 * We did collect all parent dirs where cur_inode was once located. We
4283	 * now go through all these dirs and check if they are pending for
4284	 * deletion and if it's finally possible to perform the rmdir now.
4285	 * We also update the inode stats of the parent dirs here.
4286	 */
4287	list_for_each_entry(cur, &check_dirs, list) {
4288		/*
4289		 * In case we had refs into dirs that were not processed yet,
4290		 * we don't need to do the utime and rmdir logic for these dirs.
4291		 * The dir will be processed later.
4292		 */
4293		if (cur->dir > sctx->cur_ino)
4294			continue;
4295
4296		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4297		if (ret < 0)
4298			goto out;
4299
4300		if (ret == inode_state_did_create ||
4301		    ret == inode_state_no_change) {
4302			/* TODO delayed utimes */
4303			ret = send_utimes(sctx, cur->dir, cur->dir_gen);
4304			if (ret < 0)
4305				goto out;
4306		} else if (ret == inode_state_did_delete &&
4307			   cur->dir != last_dir_ino_rm) {
4308			ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
4309					sctx->cur_ino);
4310			if (ret < 0)
4311				goto out;
4312			if (ret) {
4313				ret = get_cur_path(sctx, cur->dir,
4314						   cur->dir_gen, valid_path);
4315				if (ret < 0)
4316					goto out;
4317				ret = send_rmdir(sctx, valid_path);
4318				if (ret < 0)
4319					goto out;
4320				last_dir_ino_rm = cur->dir;
4321			}
4322		}
4323	}
4324
4325	ret = 0;
4326
4327out:
4328	__free_recorded_refs(&check_dirs);
4329	free_recorded_refs(sctx);
4330	fs_path_free(valid_path);
4331	return ret;
4332}
4333
4334static int record_ref(struct btrfs_root *root, u64 dir, struct fs_path *name,
4335		      void *ctx, struct list_head *refs)
4336{
4337	int ret = 0;
4338	struct send_ctx *sctx = ctx;
4339	struct fs_path *p;
4340	u64 gen;
4341
4342	p = fs_path_alloc();
4343	if (!p)
4344		return -ENOMEM;
4345
4346	ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
4347			NULL, NULL);
4348	if (ret < 0)
4349		goto out;
4350
4351	ret = get_cur_path(sctx, dir, gen, p);
4352	if (ret < 0)
4353		goto out;
4354	ret = fs_path_add_path(p, name);
4355	if (ret < 0)
4356		goto out;
4357
4358	ret = __record_ref(refs, dir, gen, p);
4359
4360out:
4361	if (ret)
4362		fs_path_free(p);
4363	return ret;
4364}
4365
4366static int __record_new_ref(int num, u64 dir, int index,
4367			    struct fs_path *name,
4368			    void *ctx)
4369{
4370	struct send_ctx *sctx = ctx;
4371	return record_ref(sctx->send_root, dir, name, ctx, &sctx->new_refs);
4372}
4373
4374
4375static int __record_deleted_ref(int num, u64 dir, int index,
4376				struct fs_path *name,
4377				void *ctx)
4378{
4379	struct send_ctx *sctx = ctx;
4380	return record_ref(sctx->parent_root, dir, name, ctx,
4381			  &sctx->deleted_refs);
4382}
4383
4384static int record_new_ref(struct send_ctx *sctx)
4385{
4386	int ret;
4387
4388	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4389				sctx->cmp_key, 0, __record_new_ref, sctx);
4390	if (ret < 0)
4391		goto out;
4392	ret = 0;
4393
4394out:
4395	return ret;
4396}
4397
4398static int record_deleted_ref(struct send_ctx *sctx)
4399{
4400	int ret;
4401
4402	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4403				sctx->cmp_key, 0, __record_deleted_ref, sctx);
4404	if (ret < 0)
4405		goto out;
4406	ret = 0;
4407
4408out:
4409	return ret;
4410}
4411
4412struct find_ref_ctx {
4413	u64 dir;
4414	u64 dir_gen;
4415	struct btrfs_root *root;
4416	struct fs_path *name;
4417	int found_idx;
4418};
4419
4420static int __find_iref(int num, u64 dir, int index,
4421		       struct fs_path *name,
4422		       void *ctx_)
4423{
4424	struct find_ref_ctx *ctx = ctx_;
4425	u64 dir_gen;
4426	int ret;
4427
4428	if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
4429	    strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
4430		/*
4431		 * To avoid doing extra lookups we'll only do this if everything
4432		 * else matches.
4433		 */
4434		ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
4435				     NULL, NULL, NULL);
4436		if (ret)
4437			return ret;
4438		if (dir_gen != ctx->dir_gen)
4439			return 0;
4440		ctx->found_idx = num;
4441		return 1;
4442	}
4443	return 0;
4444}
4445
4446static int find_iref(struct btrfs_root *root,
4447		     struct btrfs_path *path,
4448		     struct btrfs_key *key,
4449		     u64 dir, u64 dir_gen, struct fs_path *name)
4450{
4451	int ret;
4452	struct find_ref_ctx ctx;
4453
4454	ctx.dir = dir;
4455	ctx.name = name;
4456	ctx.dir_gen = dir_gen;
4457	ctx.found_idx = -1;
4458	ctx.root = root;
4459
4460	ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
4461	if (ret < 0)
4462		return ret;
4463
4464	if (ctx.found_idx == -1)
4465		return -ENOENT;
4466
4467	return ctx.found_idx;
4468}
4469
4470static int __record_changed_new_ref(int num, u64 dir, int index,
4471				    struct fs_path *name,
4472				    void *ctx)
4473{
4474	u64 dir_gen;
4475	int ret;
4476	struct send_ctx *sctx = ctx;
4477
4478	ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
4479			     NULL, NULL, NULL);
4480	if (ret)
4481		return ret;
4482
4483	ret = find_iref(sctx->parent_root, sctx->right_path,
4484			sctx->cmp_key, dir, dir_gen, name);
4485	if (ret == -ENOENT)
4486		ret = __record_new_ref(num, dir, index, name, sctx);
4487	else if (ret > 0)
4488		ret = 0;
4489
4490	return ret;
4491}
4492
4493static int __record_changed_deleted_ref(int num, u64 dir, int index,
4494					struct fs_path *name,
4495					void *ctx)
4496{
4497	u64 dir_gen;
4498	int ret;
4499	struct send_ctx *sctx = ctx;
4500
4501	ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
4502			     NULL, NULL, NULL);
4503	if (ret)
4504		return ret;
4505
4506	ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
4507			dir, dir_gen, name);
4508	if (ret == -ENOENT)
4509		ret = __record_deleted_ref(num, dir, index, name, sctx);
4510	else if (ret > 0)
4511		ret = 0;
4512
4513	return ret;
4514}
4515
4516static int record_changed_ref(struct send_ctx *sctx)
4517{
4518	int ret = 0;
4519
4520	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4521			sctx->cmp_key, 0, __record_changed_new_ref, sctx);
4522	if (ret < 0)
4523		goto out;
4524	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4525			sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
4526	if (ret < 0)
4527		goto out;
4528	ret = 0;
4529
4530out:
4531	return ret;
4532}
4533
4534/*
4535 * Record and process all refs at once. Needed when an inode changes the
4536 * generation number, which means that it was deleted and recreated.
4537 */
4538static int process_all_refs(struct send_ctx *sctx,
4539			    enum btrfs_compare_tree_result cmd)
4540{
4541	int ret;
4542	struct btrfs_root *root;
4543	struct btrfs_path *path;
4544	struct btrfs_key key;
4545	struct btrfs_key found_key;
4546	struct extent_buffer *eb;
4547	int slot;
4548	iterate_inode_ref_t cb;
4549	int pending_move = 0;
4550
4551	path = alloc_path_for_send();
4552	if (!path)
4553		return -ENOMEM;
4554
4555	if (cmd == BTRFS_COMPARE_TREE_NEW) {
4556		root = sctx->send_root;
4557		cb = __record_new_ref;
4558	} else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
4559		root = sctx->parent_root;
4560		cb = __record_deleted_ref;
4561	} else {
4562		btrfs_err(sctx->send_root->fs_info,
4563				"Wrong command %d in process_all_refs", cmd);
4564		ret = -EINVAL;
4565		goto out;
4566	}
4567
4568	key.objectid = sctx->cmp_key->objectid;
4569	key.type = BTRFS_INODE_REF_KEY;
4570	key.offset = 0;
4571	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4572	if (ret < 0)
4573		goto out;
4574
4575	while (1) {
4576		eb = path->nodes[0];
4577		slot = path->slots[0];
4578		if (slot >= btrfs_header_nritems(eb)) {
4579			ret = btrfs_next_leaf(root, path);
4580			if (ret < 0)
4581				goto out;
4582			else if (ret > 0)
4583				break;
4584			continue;
4585		}
4586
4587		btrfs_item_key_to_cpu(eb, &found_key, slot);
4588
4589		if (found_key.objectid != key.objectid ||
4590		    (found_key.type != BTRFS_INODE_REF_KEY &&
4591		     found_key.type != BTRFS_INODE_EXTREF_KEY))
4592			break;
4593
4594		ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
4595		if (ret < 0)
4596			goto out;
4597
4598		path->slots[0]++;
4599	}
4600	btrfs_release_path(path);
4601
4602	/*
4603	 * We don't actually care about pending_move as we are simply
4604	 * re-creating this inode and will be rename'ing it into place once we
4605	 * rename the parent directory.
4606	 */
4607	ret = process_recorded_refs(sctx, &pending_move);
4608out:
4609	btrfs_free_path(path);
4610	return ret;
4611}
4612
4613static int send_set_xattr(struct send_ctx *sctx,
4614			  struct fs_path *path,
4615			  const char *name, int name_len,
4616			  const char *data, int data_len)
4617{
4618	int ret = 0;
4619
4620	ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
4621	if (ret < 0)
4622		goto out;
4623
4624	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4625	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4626	TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
4627
4628	ret = send_cmd(sctx);
4629
4630tlv_put_failure:
4631out:
4632	return ret;
4633}
4634
4635static int send_remove_xattr(struct send_ctx *sctx,
4636			  struct fs_path *path,
4637			  const char *name, int name_len)
4638{
4639	int ret = 0;
4640
4641	ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
4642	if (ret < 0)
4643		goto out;
4644
4645	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4646	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4647
4648	ret = send_cmd(sctx);
4649
4650tlv_put_failure:
4651out:
4652	return ret;
4653}
4654
4655static int __process_new_xattr(int num, struct btrfs_key *di_key,
4656			       const char *name, int name_len,
4657			       const char *data, int data_len,
4658			       u8 type, void *ctx)
4659{
4660	int ret;
4661	struct send_ctx *sctx = ctx;
4662	struct fs_path *p;
4663	struct posix_acl_xattr_header dummy_acl;
4664
4665	/* Capabilities are emitted by finish_inode_if_needed */
4666	if (!strncmp(name, XATTR_NAME_CAPS, name_len))
4667		return 0;
4668
4669	p = fs_path_alloc();
4670	if (!p)
4671		return -ENOMEM;
4672
4673	/*
4674	 * This hack is needed because empty acls are stored as zero byte
4675	 * data in xattrs. Problem with that is, that receiving these zero byte
4676	 * acls will fail later. To fix this, we send a dummy acl list that
4677	 * only contains the version number and no entries.
4678	 */
4679	if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
4680	    !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
4681		if (data_len == 0) {
4682			dummy_acl.a_version =
4683					cpu_to_le32(POSIX_ACL_XATTR_VERSION);
4684			data = (char *)&dummy_acl;
4685			data_len = sizeof(dummy_acl);
4686		}
4687	}
4688
4689	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4690	if (ret < 0)
4691		goto out;
4692
4693	ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
4694
4695out:
4696	fs_path_free(p);
4697	return ret;
4698}
4699
4700static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
4701				   const char *name, int name_len,
4702				   const char *data, int data_len,
4703				   u8 type, void *ctx)
4704{
4705	int ret;
4706	struct send_ctx *sctx = ctx;
4707	struct fs_path *p;
4708
4709	p = fs_path_alloc();
4710	if (!p)
4711		return -ENOMEM;
4712
4713	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4714	if (ret < 0)
4715		goto out;
4716
4717	ret = send_remove_xattr(sctx, p, name, name_len);
4718
4719out:
4720	fs_path_free(p);
4721	return ret;
4722}
4723
4724static int process_new_xattr(struct send_ctx *sctx)
4725{
4726	int ret = 0;
4727
4728	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4729			       __process_new_xattr, sctx);
4730
4731	return ret;
4732}
4733
4734static int process_deleted_xattr(struct send_ctx *sctx)
4735{
4736	return iterate_dir_item(sctx->parent_root, sctx->right_path,
4737				__process_deleted_xattr, sctx);
4738}
4739
4740struct find_xattr_ctx {
4741	const char *name;
4742	int name_len;
4743	int found_idx;
4744	char *found_data;
4745	int found_data_len;
4746};
4747
4748static int __find_xattr(int num, struct btrfs_key *di_key,
4749			const char *name, int name_len,
4750			const char *data, int data_len,
4751			u8 type, void *vctx)
4752{
4753	struct find_xattr_ctx *ctx = vctx;
4754
4755	if (name_len == ctx->name_len &&
4756	    strncmp(name, ctx->name, name_len) == 0) {
4757		ctx->found_idx = num;
4758		ctx->found_data_len = data_len;
4759		ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
4760		if (!ctx->found_data)
4761			return -ENOMEM;
4762		return 1;
4763	}
4764	return 0;
4765}
4766
4767static int find_xattr(struct btrfs_root *root,
4768		      struct btrfs_path *path,
4769		      struct btrfs_key *key,
4770		      const char *name, int name_len,
4771		      char **data, int *data_len)
4772{
4773	int ret;
4774	struct find_xattr_ctx ctx;
4775
4776	ctx.name = name;
4777	ctx.name_len = name_len;
4778	ctx.found_idx = -1;
4779	ctx.found_data = NULL;
4780	ctx.found_data_len = 0;
4781
4782	ret = iterate_dir_item(root, path, __find_xattr, &ctx);
4783	if (ret < 0)
4784		return ret;
4785
4786	if (ctx.found_idx == -1)
4787		return -ENOENT;
4788	if (data) {
4789		*data = ctx.found_data;
4790		*data_len = ctx.found_data_len;
4791	} else {
4792		kfree(ctx.found_data);
4793	}
4794	return ctx.found_idx;
4795}
4796
4797
4798static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
4799				       const char *name, int name_len,
4800				       const char *data, int data_len,
4801				       u8 type, void *ctx)
4802{
4803	int ret;
4804	struct send_ctx *sctx = ctx;
4805	char *found_data = NULL;
4806	int found_data_len  = 0;
4807
4808	ret = find_xattr(sctx->parent_root, sctx->right_path,
4809			 sctx->cmp_key, name, name_len, &found_data,
4810			 &found_data_len);
4811	if (ret == -ENOENT) {
4812		ret = __process_new_xattr(num, di_key, name, name_len, data,
4813				data_len, type, ctx);
4814	} else if (ret >= 0) {
4815		if (data_len != found_data_len ||
4816		    memcmp(data, found_data, data_len)) {
4817			ret = __process_new_xattr(num, di_key, name, name_len,
4818					data, data_len, type, ctx);
4819		} else {
4820			ret = 0;
4821		}
4822	}
4823
4824	kfree(found_data);
4825	return ret;
4826}
4827
4828static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
4829					   const char *name, int name_len,
4830					   const char *data, int data_len,
4831					   u8 type, void *ctx)
4832{
4833	int ret;
4834	struct send_ctx *sctx = ctx;
4835
4836	ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
4837			 name, name_len, NULL, NULL);
4838	if (ret == -ENOENT)
4839		ret = __process_deleted_xattr(num, di_key, name, name_len, data,
4840				data_len, type, ctx);
4841	else if (ret >= 0)
4842		ret = 0;
4843
4844	return ret;
4845}
4846
4847static int process_changed_xattr(struct send_ctx *sctx)
4848{
4849	int ret = 0;
4850
4851	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4852			__process_changed_new_xattr, sctx);
4853	if (ret < 0)
4854		goto out;
4855	ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
4856			__process_changed_deleted_xattr, sctx);
4857
4858out:
4859	return ret;
4860}
4861
4862static int process_all_new_xattrs(struct send_ctx *sctx)
4863{
4864	int ret;
4865	struct btrfs_root *root;
4866	struct btrfs_path *path;
4867	struct btrfs_key key;
4868	struct btrfs_key found_key;
4869	struct extent_buffer *eb;
4870	int slot;
4871
4872	path = alloc_path_for_send();
4873	if (!path)
4874		return -ENOMEM;
4875
4876	root = sctx->send_root;
4877
4878	key.objectid = sctx->cmp_key->objectid;
4879	key.type = BTRFS_XATTR_ITEM_KEY;
4880	key.offset = 0;
4881	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4882	if (ret < 0)
4883		goto out;
4884
4885	while (1) {
4886		eb = path->nodes[0];
4887		slot = path->slots[0];
4888		if (slot >= btrfs_header_nritems(eb)) {
4889			ret = btrfs_next_leaf(root, path);
4890			if (ret < 0) {
4891				goto out;
4892			} else if (ret > 0) {
4893				ret = 0;
4894				break;
4895			}
4896			continue;
4897		}
4898
4899		btrfs_item_key_to_cpu(eb, &found_key, slot);
4900		if (found_key.objectid != key.objectid ||
4901		    found_key.type != key.type) {
4902			ret = 0;
4903			goto out;
4904		}
4905
4906		ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
4907		if (ret < 0)
4908			goto out;
4909
4910		path->slots[0]++;
4911	}
4912
4913out:
4914	btrfs_free_path(path);
4915	return ret;
4916}
4917
4918static inline u64 max_send_read_size(const struct send_ctx *sctx)
4919{
4920	return sctx->send_max_size - SZ_16K;
4921}
4922
4923static int put_data_header(struct send_ctx *sctx, u32 len)
4924{
4925	struct btrfs_tlv_header *hdr;
4926
4927	if (sctx->send_max_size - sctx->send_size < sizeof(*hdr) + len)
4928		return -EOVERFLOW;
4929	hdr = (struct btrfs_tlv_header *)(sctx->send_buf + sctx->send_size);
4930	put_unaligned_le16(BTRFS_SEND_A_DATA, &hdr->tlv_type);
4931	put_unaligned_le16(len, &hdr->tlv_len);
4932	sctx->send_size += sizeof(*hdr);
4933	return 0;
4934}
4935
4936static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
4937{
4938	struct btrfs_root *root = sctx->send_root;
4939	struct btrfs_fs_info *fs_info = root->fs_info;
4940	struct inode *inode;
4941	struct page *page;
4942	pgoff_t index = offset >> PAGE_SHIFT;
4943	pgoff_t last_index;
4944	unsigned pg_offset = offset_in_page(offset);
4945	int ret;
4946
4947	ret = put_data_header(sctx, len);
4948	if (ret)
4949		return ret;
4950
4951	inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root);
4952	if (IS_ERR(inode))
4953		return PTR_ERR(inode);
4954
4955	last_index = (offset + len - 1) >> PAGE_SHIFT;
4956
4957	/* initial readahead */
4958	memset(&sctx->ra, 0, sizeof(struct file_ra_state));
4959	file_ra_state_init(&sctx->ra, inode->i_mapping);
4960
4961	while (index <= last_index) {
4962		unsigned cur_len = min_t(unsigned, len,
4963					 PAGE_SIZE - pg_offset);
4964
4965		page = find_lock_page(inode->i_mapping, index);
4966		if (!page) {
4967			page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
4968				NULL, index, last_index + 1 - index);
4969
4970			page = find_or_create_page(inode->i_mapping, index,
4971					GFP_KERNEL);
4972			if (!page) {
4973				ret = -ENOMEM;
4974				break;
4975			}
4976		}
4977
4978		if (PageReadahead(page)) {
4979			page_cache_async_readahead(inode->i_mapping, &sctx->ra,
4980				NULL, page, index, last_index + 1 - index);
4981		}
4982
4983		if (!PageUptodate(page)) {
4984			btrfs_readpage(NULL, page);
4985			lock_page(page);
4986			if (!PageUptodate(page)) {
4987				unlock_page(page);
4988				put_page(page);
4989				ret = -EIO;
4990				break;
4991			}
4992		}
4993
4994		memcpy_from_page(sctx->send_buf + sctx->send_size, page,
4995				 pg_offset, cur_len);
4996		unlock_page(page);
4997		put_page(page);
4998		index++;
4999		pg_offset = 0;
5000		len -= cur_len;
5001		sctx->send_size += cur_len;
5002	}
5003	iput(inode);
5004	return ret;
5005}
5006
5007/*
5008 * Read some bytes from the current inode/file and send a write command to
5009 * user space.
5010 */
5011static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
5012{
5013	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
5014	int ret = 0;
5015	struct fs_path *p;
5016
5017	p = fs_path_alloc();
5018	if (!p)
5019		return -ENOMEM;
5020
5021	btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
5022
5023	ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5024	if (ret < 0)
5025		goto out;
5026
5027	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5028	if (ret < 0)
5029		goto out;
5030
5031	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5032	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5033	ret = put_file_data(sctx, offset, len);
5034	if (ret < 0)
5035		goto out;
5036
5037	ret = send_cmd(sctx);
5038
5039tlv_put_failure:
5040out:
5041	fs_path_free(p);
5042	return ret;
5043}
5044
5045/*
5046 * Send a clone command to user space.
5047 */
5048static int send_clone(struct send_ctx *sctx,
5049		      u64 offset, u32 len,
5050		      struct clone_root *clone_root)
5051{
5052	int ret = 0;
5053	struct fs_path *p;
5054	u64 gen;
5055
5056	btrfs_debug(sctx->send_root->fs_info,
5057		    "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
5058		    offset, len, clone_root->root->root_key.objectid,
5059		    clone_root->ino, clone_root->offset);
5060
5061	p = fs_path_alloc();
5062	if (!p)
5063		return -ENOMEM;
5064
5065	ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
5066	if (ret < 0)
5067		goto out;
5068
5069	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5070	if (ret < 0)
5071		goto out;
5072
5073	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5074	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
5075	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5076
5077	if (clone_root->root == sctx->send_root) {
5078		ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
5079				&gen, NULL, NULL, NULL, NULL);
5080		if (ret < 0)
5081			goto out;
5082		ret = get_cur_path(sctx, clone_root->ino, gen, p);
5083	} else {
5084		ret = get_inode_path(clone_root->root, clone_root->ino, p);
5085	}
5086	if (ret < 0)
5087		goto out;
5088
5089	/*
5090	 * If the parent we're using has a received_uuid set then use that as
5091	 * our clone source as that is what we will look for when doing a
5092	 * receive.
5093	 *
5094	 * This covers the case that we create a snapshot off of a received
5095	 * subvolume and then use that as the parent and try to receive on a
5096	 * different host.
5097	 */
5098	if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
5099		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
5100			     clone_root->root->root_item.received_uuid);
5101	else
5102		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
5103			     clone_root->root->root_item.uuid);
5104	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
5105		    btrfs_root_ctransid(&clone_root->root->root_item));
5106	TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
5107	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
5108			clone_root->offset);
5109
5110	ret = send_cmd(sctx);
5111
5112tlv_put_failure:
5113out:
5114	fs_path_free(p);
5115	return ret;
5116}
5117
5118/*
5119 * Send an update extent command to user space.
5120 */
5121static int send_update_extent(struct send_ctx *sctx,
5122			      u64 offset, u32 len)
5123{
5124	int ret = 0;
5125	struct fs_path *p;
5126
5127	p = fs_path_alloc();
5128	if (!p)
5129		return -ENOMEM;
5130
5131	ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
5132	if (ret < 0)
5133		goto out;
5134
5135	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5136	if (ret < 0)
5137		goto out;
5138
5139	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5140	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5141	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
5142
5143	ret = send_cmd(sctx);
5144
5145tlv_put_failure:
5146out:
5147	fs_path_free(p);
5148	return ret;
5149}
5150
5151static int send_hole(struct send_ctx *sctx, u64 end)
5152{
5153	struct fs_path *p = NULL;
5154	u64 read_size = max_send_read_size(sctx);
5155	u64 offset = sctx->cur_inode_last_extent;
5156	int ret = 0;
5157
5158	/*
5159	 * A hole that starts at EOF or beyond it. Since we do not yet support
5160	 * fallocate (for extent preallocation and hole punching), sending a
5161	 * write of zeroes starting at EOF or beyond would later require issuing
5162	 * a truncate operation which would undo the write and achieve nothing.
5163	 */
5164	if (offset >= sctx->cur_inode_size)
5165		return 0;
5166
5167	/*
5168	 * Don't go beyond the inode's i_size due to prealloc extents that start
5169	 * after the i_size.
5170	 */
5171	end = min_t(u64, end, sctx->cur_inode_size);
5172
5173	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5174		return send_update_extent(sctx, offset, end - offset);
5175
5176	p = fs_path_alloc();
5177	if (!p)
5178		return -ENOMEM;
5179	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5180	if (ret < 0)
5181		goto tlv_put_failure;
5182	while (offset < end) {
5183		u64 len = min(end - offset, read_size);
5184
5185		ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5186		if (ret < 0)
5187			break;
5188		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5189		TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5190		ret = put_data_header(sctx, len);
5191		if (ret < 0)
5192			break;
5193		memset(sctx->send_buf + sctx->send_size, 0, len);
5194		sctx->send_size += len;
5195		ret = send_cmd(sctx);
5196		if (ret < 0)
5197			break;
5198		offset += len;
5199	}
5200	sctx->cur_inode_next_write_offset = offset;
5201tlv_put_failure:
5202	fs_path_free(p);
5203	return ret;
5204}
5205
5206static int send_extent_data(struct send_ctx *sctx,
5207			    const u64 offset,
5208			    const u64 len)
5209{
5210	u64 read_size = max_send_read_size(sctx);
5211	u64 sent = 0;
5212
5213	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5214		return send_update_extent(sctx, offset, len);
5215
5216	while (sent < len) {
5217		u64 size = min(len - sent, read_size);
5218		int ret;
5219
5220		ret = send_write(sctx, offset + sent, size);
5221		if (ret < 0)
5222			return ret;
5223		sent += size;
5224	}
5225	return 0;
5226}
5227
5228/*
5229 * Search for a capability xattr related to sctx->cur_ino. If the capability is
5230 * found, call send_set_xattr function to emit it.
5231 *
5232 * Return 0 if there isn't a capability, or when the capability was emitted
5233 * successfully, or < 0 if an error occurred.
5234 */
5235static int send_capabilities(struct send_ctx *sctx)
5236{
5237	struct fs_path *fspath = NULL;
5238	struct btrfs_path *path;
5239	struct btrfs_dir_item *di;
5240	struct extent_buffer *leaf;
5241	unsigned long data_ptr;
5242	char *buf = NULL;
5243	int buf_len;
5244	int ret = 0;
5245
5246	path = alloc_path_for_send();
5247	if (!path)
5248		return -ENOMEM;
5249
5250	di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino,
5251				XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0);
5252	if (!di) {
5253		/* There is no xattr for this inode */
5254		goto out;
5255	} else if (IS_ERR(di)) {
5256		ret = PTR_ERR(di);
5257		goto out;
5258	}
5259
5260	leaf = path->nodes[0];
5261	buf_len = btrfs_dir_data_len(leaf, di);
5262
5263	fspath = fs_path_alloc();
5264	buf = kmalloc(buf_len, GFP_KERNEL);
5265	if (!fspath || !buf) {
5266		ret = -ENOMEM;
5267		goto out;
5268	}
5269
5270	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
5271	if (ret < 0)
5272		goto out;
5273
5274	data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di);
5275	read_extent_buffer(leaf, buf, data_ptr, buf_len);
5276
5277	ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS,
5278			strlen(XATTR_NAME_CAPS), buf, buf_len);
5279out:
5280	kfree(buf);
5281	fs_path_free(fspath);
5282	btrfs_free_path(path);
5283	return ret;
5284}
5285
5286static int clone_range(struct send_ctx *sctx,
5287		       struct clone_root *clone_root,
5288		       const u64 disk_byte,
5289		       u64 data_offset,
5290		       u64 offset,
5291		       u64 len)
5292{
5293	struct btrfs_path *path;
5294	struct btrfs_key key;
5295	int ret;
5296	u64 clone_src_i_size = 0;
5297
5298	/*
5299	 * Prevent cloning from a zero offset with a length matching the sector
5300	 * size because in some scenarios this will make the receiver fail.
5301	 *
5302	 * For example, if in the source filesystem the extent at offset 0
5303	 * has a length of sectorsize and it was written using direct IO, then
5304	 * it can never be an inline extent (even if compression is enabled).
5305	 * Then this extent can be cloned in the original filesystem to a non
5306	 * zero file offset, but it may not be possible to clone in the
5307	 * destination filesystem because it can be inlined due to compression
5308	 * on the destination filesystem (as the receiver's write operations are
5309	 * always done using buffered IO). The same happens when the original
5310	 * filesystem does not have compression enabled but the destination
5311	 * filesystem has.
5312	 */
5313	if (clone_root->offset == 0 &&
5314	    len == sctx->send_root->fs_info->sectorsize)
5315		return send_extent_data(sctx, offset, len);
5316
5317	path = alloc_path_for_send();
5318	if (!path)
5319		return -ENOMEM;
5320
5321	/*
5322	 * There are inodes that have extents that lie behind its i_size. Don't
5323	 * accept clones from these extents.
5324	 */
5325	ret = __get_inode_info(clone_root->root, path, clone_root->ino,
5326			       &clone_src_i_size, NULL, NULL, NULL, NULL, NULL);
5327	btrfs_release_path(path);
5328	if (ret < 0)
5329		goto out;
5330
5331	/*
5332	 * We can't send a clone operation for the entire range if we find
5333	 * extent items in the respective range in the source file that
5334	 * refer to different extents or if we find holes.
5335	 * So check for that and do a mix of clone and regular write/copy
5336	 * operations if needed.
5337	 *
5338	 * Example:
5339	 *
5340	 * mkfs.btrfs -f /dev/sda
5341	 * mount /dev/sda /mnt
5342	 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
5343	 * cp --reflink=always /mnt/foo /mnt/bar
5344	 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
5345	 * btrfs subvolume snapshot -r /mnt /mnt/snap
5346	 *
5347	 * If when we send the snapshot and we are processing file bar (which
5348	 * has a higher inode number than foo) we blindly send a clone operation
5349	 * for the [0, 100K[ range from foo to bar, the receiver ends up getting
5350	 * a file bar that matches the content of file foo - iow, doesn't match
5351	 * the content from bar in the original filesystem.
5352	 */
5353	key.objectid = clone_root->ino;
5354	key.type = BTRFS_EXTENT_DATA_KEY;
5355	key.offset = clone_root->offset;
5356	ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
5357	if (ret < 0)
5358		goto out;
5359	if (ret > 0 && path->slots[0] > 0) {
5360		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
5361		if (key.objectid == clone_root->ino &&
5362		    key.type == BTRFS_EXTENT_DATA_KEY)
5363			path->slots[0]--;
5364	}
5365
5366	while (true) {
5367		struct extent_buffer *leaf = path->nodes[0];
5368		int slot = path->slots[0];
5369		struct btrfs_file_extent_item *ei;
5370		u8 type;
5371		u64 ext_len;
5372		u64 clone_len;
5373		u64 clone_data_offset;
5374
5375		if (slot >= btrfs_header_nritems(leaf)) {
5376			ret = btrfs_next_leaf(clone_root->root, path);
5377			if (ret < 0)
5378				goto out;
5379			else if (ret > 0)
5380				break;
5381			continue;
5382		}
5383
5384		btrfs_item_key_to_cpu(leaf, &key, slot);
5385
5386		/*
5387		 * We might have an implicit trailing hole (NO_HOLES feature
5388		 * enabled). We deal with it after leaving this loop.
5389		 */
5390		if (key.objectid != clone_root->ino ||
5391		    key.type != BTRFS_EXTENT_DATA_KEY)
5392			break;
5393
5394		ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5395		type = btrfs_file_extent_type(leaf, ei);
5396		if (type == BTRFS_FILE_EXTENT_INLINE) {
5397			ext_len = btrfs_file_extent_ram_bytes(leaf, ei);
5398			ext_len = PAGE_ALIGN(ext_len);
5399		} else {
5400			ext_len = btrfs_file_extent_num_bytes(leaf, ei);
5401		}
5402
5403		if (key.offset + ext_len <= clone_root->offset)
5404			goto next;
5405
5406		if (key.offset > clone_root->offset) {
5407			/* Implicit hole, NO_HOLES feature enabled. */
5408			u64 hole_len = key.offset - clone_root->offset;
5409
5410			if (hole_len > len)
5411				hole_len = len;
5412			ret = send_extent_data(sctx, offset, hole_len);
5413			if (ret < 0)
5414				goto out;
5415
5416			len -= hole_len;
5417			if (len == 0)
5418				break;
5419			offset += hole_len;
5420			clone_root->offset += hole_len;
5421			data_offset += hole_len;
5422		}
5423
5424		if (key.offset >= clone_root->offset + len)
5425			break;
5426
5427		if (key.offset >= clone_src_i_size)
5428			break;
5429
5430		if (key.offset + ext_len > clone_src_i_size)
5431			ext_len = clone_src_i_size - key.offset;
5432
5433		clone_data_offset = btrfs_file_extent_offset(leaf, ei);
5434		if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) {
5435			clone_root->offset = key.offset;
5436			if (clone_data_offset < data_offset &&
5437				clone_data_offset + ext_len > data_offset) {
5438				u64 extent_offset;
5439
5440				extent_offset = data_offset - clone_data_offset;
5441				ext_len -= extent_offset;
5442				clone_data_offset += extent_offset;
5443				clone_root->offset += extent_offset;
5444			}
5445		}
5446
5447		clone_len = min_t(u64, ext_len, len);
5448
5449		if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
5450		    clone_data_offset == data_offset) {
5451			const u64 src_end = clone_root->offset + clone_len;
5452			const u64 sectorsize = SZ_64K;
5453
5454			/*
5455			 * We can't clone the last block, when its size is not
5456			 * sector size aligned, into the middle of a file. If we
5457			 * do so, the receiver will get a failure (-EINVAL) when
5458			 * trying to clone or will silently corrupt the data in
5459			 * the destination file if it's on a kernel without the
5460			 * fix introduced by commit ac765f83f1397646
5461			 * ("Btrfs: fix data corruption due to cloning of eof
5462			 * block).
5463			 *
5464			 * So issue a clone of the aligned down range plus a
5465			 * regular write for the eof block, if we hit that case.
5466			 *
5467			 * Also, we use the maximum possible sector size, 64K,
5468			 * because we don't know what's the sector size of the
5469			 * filesystem that receives the stream, so we have to
5470			 * assume the largest possible sector size.
5471			 */
5472			if (src_end == clone_src_i_size &&
5473			    !IS_ALIGNED(src_end, sectorsize) &&
5474			    offset + clone_len < sctx->cur_inode_size) {
5475				u64 slen;
5476
5477				slen = ALIGN_DOWN(src_end - clone_root->offset,
5478						  sectorsize);
5479				if (slen > 0) {
5480					ret = send_clone(sctx, offset, slen,
5481							 clone_root);
5482					if (ret < 0)
5483						goto out;
5484				}
5485				ret = send_extent_data(sctx, offset + slen,
5486						       clone_len - slen);
5487			} else {
5488				ret = send_clone(sctx, offset, clone_len,
5489						 clone_root);
5490			}
5491		} else {
5492			ret = send_extent_data(sctx, offset, clone_len);
5493		}
5494
5495		if (ret < 0)
5496			goto out;
5497
5498		len -= clone_len;
5499		if (len == 0)
5500			break;
5501		offset += clone_len;
5502		clone_root->offset += clone_len;
5503
5504		/*
5505		 * If we are cloning from the file we are currently processing,
5506		 * and using the send root as the clone root, we must stop once
5507		 * the current clone offset reaches the current eof of the file
5508		 * at the receiver, otherwise we would issue an invalid clone
5509		 * operation (source range going beyond eof) and cause the
5510		 * receiver to fail. So if we reach the current eof, bail out
5511		 * and fallback to a regular write.
5512		 */
5513		if (clone_root->root == sctx->send_root &&
5514		    clone_root->ino == sctx->cur_ino &&
5515		    clone_root->offset >= sctx->cur_inode_next_write_offset)
5516			break;
5517
5518		data_offset += clone_len;
5519next:
5520		path->slots[0]++;
5521	}
5522
5523	if (len > 0)
5524		ret = send_extent_data(sctx, offset, len);
5525	else
5526		ret = 0;
5527out:
5528	btrfs_free_path(path);
5529	return ret;
5530}
5531
5532static int send_write_or_clone(struct send_ctx *sctx,
5533			       struct btrfs_path *path,
5534			       struct btrfs_key *key,
5535			       struct clone_root *clone_root)
5536{
5537	int ret = 0;
5538	u64 offset = key->offset;
5539	u64 end;
5540	u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
5541
5542	end = min_t(u64, btrfs_file_extent_end(path), sctx->cur_inode_size);
5543	if (offset >= end)
5544		return 0;
5545
5546	if (clone_root && IS_ALIGNED(end, bs)) {
5547		struct btrfs_file_extent_item *ei;
5548		u64 disk_byte;
5549		u64 data_offset;
5550
5551		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5552				    struct btrfs_file_extent_item);
5553		disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
5554		data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
5555		ret = clone_range(sctx, clone_root, disk_byte, data_offset,
5556				  offset, end - offset);
5557	} else {
5558		ret = send_extent_data(sctx, offset, end - offset);
5559	}
5560	sctx->cur_inode_next_write_offset = end;
5561	return ret;
5562}
5563
5564static int is_extent_unchanged(struct send_ctx *sctx,
5565			       struct btrfs_path *left_path,
5566			       struct btrfs_key *ekey)
5567{
5568	int ret = 0;
5569	struct btrfs_key key;
5570	struct btrfs_path *path = NULL;
5571	struct extent_buffer *eb;
5572	int slot;
5573	struct btrfs_key found_key;
5574	struct btrfs_file_extent_item *ei;
5575	u64 left_disknr;
5576	u64 right_disknr;
5577	u64 left_offset;
5578	u64 right_offset;
5579	u64 left_offset_fixed;
5580	u64 left_len;
5581	u64 right_len;
5582	u64 left_gen;
5583	u64 right_gen;
5584	u8 left_type;
5585	u8 right_type;
5586
5587	path = alloc_path_for_send();
5588	if (!path)
5589		return -ENOMEM;
5590
5591	eb = left_path->nodes[0];
5592	slot = left_path->slots[0];
5593	ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5594	left_type = btrfs_file_extent_type(eb, ei);
5595
5596	if (left_type != BTRFS_FILE_EXTENT_REG) {
5597		ret = 0;
5598		goto out;
5599	}
5600	left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5601	left_len = btrfs_file_extent_num_bytes(eb, ei);
5602	left_offset = btrfs_file_extent_offset(eb, ei);
5603	left_gen = btrfs_file_extent_generation(eb, ei);
5604
5605	/*
5606	 * Following comments will refer to these graphics. L is the left
5607	 * extents which we are checking at the moment. 1-8 are the right
5608	 * extents that we iterate.
5609	 *
5610	 *       |-----L-----|
5611	 * |-1-|-2a-|-3-|-4-|-5-|-6-|
5612	 *
5613	 *       |-----L-----|
5614	 * |--1--|-2b-|...(same as above)
5615	 *
5616	 * Alternative situation. Happens on files where extents got split.
5617	 *       |-----L-----|
5618	 * |-----------7-----------|-6-|
5619	 *
5620	 * Alternative situation. Happens on files which got larger.
5621	 *       |-----L-----|
5622	 * |-8-|
5623	 * Nothing follows after 8.
5624	 */
5625
5626	key.objectid = ekey->objectid;
5627	key.type = BTRFS_EXTENT_DATA_KEY;
5628	key.offset = ekey->offset;
5629	ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
5630	if (ret < 0)
5631		goto out;
5632	if (ret) {
5633		ret = 0;
5634		goto out;
5635	}
5636
5637	/*
5638	 * Handle special case where the right side has no extents at all.
5639	 */
5640	eb = path->nodes[0];
5641	slot = path->slots[0];
5642	btrfs_item_key_to_cpu(eb, &found_key, slot);
5643	if (found_key.objectid != key.objectid ||
5644	    found_key.type != key.type) {
5645		/* If we're a hole then just pretend nothing changed */
5646		ret = (left_disknr) ? 0 : 1;
5647		goto out;
5648	}
5649
5650	/*
5651	 * We're now on 2a, 2b or 7.
5652	 */
5653	key = found_key;
5654	while (key.offset < ekey->offset + left_len) {
5655		ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5656		right_type = btrfs_file_extent_type(eb, ei);
5657		if (right_type != BTRFS_FILE_EXTENT_REG &&
5658		    right_type != BTRFS_FILE_EXTENT_INLINE) {
5659			ret = 0;
5660			goto out;
5661		}
5662
5663		if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5664			right_len = btrfs_file_extent_ram_bytes(eb, ei);
5665			right_len = PAGE_ALIGN(right_len);
5666		} else {
5667			right_len = btrfs_file_extent_num_bytes(eb, ei);
5668		}
5669
5670		/*
5671		 * Are we at extent 8? If yes, we know the extent is changed.
5672		 * This may only happen on the first iteration.
5673		 */
5674		if (found_key.offset + right_len <= ekey->offset) {
5675			/* If we're a hole just pretend nothing changed */
5676			ret = (left_disknr) ? 0 : 1;
5677			goto out;
5678		}
5679
5680		/*
5681		 * We just wanted to see if when we have an inline extent, what
5682		 * follows it is a regular extent (wanted to check the above
5683		 * condition for inline extents too). This should normally not
5684		 * happen but it's possible for example when we have an inline
5685		 * compressed extent representing data with a size matching
5686		 * the page size (currently the same as sector size).
5687		 */
5688		if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5689			ret = 0;
5690			goto out;
5691		}
5692
5693		right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5694		right_offset = btrfs_file_extent_offset(eb, ei);
5695		right_gen = btrfs_file_extent_generation(eb, ei);
5696
5697		left_offset_fixed = left_offset;
5698		if (key.offset < ekey->offset) {
5699			/* Fix the right offset for 2a and 7. */
5700			right_offset += ekey->offset - key.offset;
5701		} else {
5702			/* Fix the left offset for all behind 2a and 2b */
5703			left_offset_fixed += key.offset - ekey->offset;
5704		}
5705
5706		/*
5707		 * Check if we have the same extent.
5708		 */
5709		if (left_disknr != right_disknr ||
5710		    left_offset_fixed != right_offset ||
5711		    left_gen != right_gen) {
5712			ret = 0;
5713			goto out;
5714		}
5715
5716		/*
5717		 * Go to the next extent.
5718		 */
5719		ret = btrfs_next_item(sctx->parent_root, path);
5720		if (ret < 0)
5721			goto out;
5722		if (!ret) {
5723			eb = path->nodes[0];
5724			slot = path->slots[0];
5725			btrfs_item_key_to_cpu(eb, &found_key, slot);
5726		}
5727		if (ret || found_key.objectid != key.objectid ||
5728		    found_key.type != key.type) {
5729			key.offset += right_len;
5730			break;
5731		}
5732		if (found_key.offset != key.offset + right_len) {
5733			ret = 0;
5734			goto out;
5735		}
5736		key = found_key;
5737	}
5738
5739	/*
5740	 * We're now behind the left extent (treat as unchanged) or at the end
5741	 * of the right side (treat as changed).
5742	 */
5743	if (key.offset >= ekey->offset + left_len)
5744		ret = 1;
5745	else
5746		ret = 0;
5747
5748
5749out:
5750	btrfs_free_path(path);
5751	return ret;
5752}
5753
5754static int get_last_extent(struct send_ctx *sctx, u64 offset)
5755{
5756	struct btrfs_path *path;
5757	struct btrfs_root *root = sctx->send_root;
5758	struct btrfs_key key;
5759	int ret;
5760
5761	path = alloc_path_for_send();
5762	if (!path)
5763		return -ENOMEM;
5764
5765	sctx->cur_inode_last_extent = 0;
5766
5767	key.objectid = sctx->cur_ino;
5768	key.type = BTRFS_EXTENT_DATA_KEY;
5769	key.offset = offset;
5770	ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
5771	if (ret < 0)
5772		goto out;
5773	ret = 0;
5774	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
5775	if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
5776		goto out;
5777
5778	sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
5779out:
5780	btrfs_free_path(path);
5781	return ret;
5782}
5783
5784static int range_is_hole_in_parent(struct send_ctx *sctx,
5785				   const u64 start,
5786				   const u64 end)
5787{
5788	struct btrfs_path *path;
5789	struct btrfs_key key;
5790	struct btrfs_root *root = sctx->parent_root;
5791	u64 search_start = start;
5792	int ret;
5793
5794	path = alloc_path_for_send();
5795	if (!path)
5796		return -ENOMEM;
5797
5798	key.objectid = sctx->cur_ino;
5799	key.type = BTRFS_EXTENT_DATA_KEY;
5800	key.offset = search_start;
5801	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5802	if (ret < 0)
5803		goto out;
5804	if (ret > 0 && path->slots[0] > 0)
5805		path->slots[0]--;
5806
5807	while (search_start < end) {
5808		struct extent_buffer *leaf = path->nodes[0];
5809		int slot = path->slots[0];
5810		struct btrfs_file_extent_item *fi;
5811		u64 extent_end;
5812
5813		if (slot >= btrfs_header_nritems(leaf)) {
5814			ret = btrfs_next_leaf(root, path);
5815			if (ret < 0)
5816				goto out;
5817			else if (ret > 0)
5818				break;
5819			continue;
5820		}
5821
5822		btrfs_item_key_to_cpu(leaf, &key, slot);
5823		if (key.objectid < sctx->cur_ino ||
5824		    key.type < BTRFS_EXTENT_DATA_KEY)
5825			goto next;
5826		if (key.objectid > sctx->cur_ino ||
5827		    key.type > BTRFS_EXTENT_DATA_KEY ||
5828		    key.offset >= end)
5829			break;
5830
5831		fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5832		extent_end = btrfs_file_extent_end(path);
5833		if (extent_end <= start)
5834			goto next;
5835		if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
5836			search_start = extent_end;
5837			goto next;
5838		}
5839		ret = 0;
5840		goto out;
5841next:
5842		path->slots[0]++;
5843	}
5844	ret = 1;
5845out:
5846	btrfs_free_path(path);
5847	return ret;
5848}
5849
5850static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
5851			   struct btrfs_key *key)
5852{
5853	int ret = 0;
5854
5855	if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
5856		return 0;
5857
5858	if (sctx->cur_inode_last_extent == (u64)-1) {
5859		ret = get_last_extent(sctx, key->offset - 1);
5860		if (ret)
5861			return ret;
5862	}
5863
5864	if (path->slots[0] == 0 &&
5865	    sctx->cur_inode_last_extent < key->offset) {
5866		/*
5867		 * We might have skipped entire leafs that contained only
5868		 * file extent items for our current inode. These leafs have
5869		 * a generation number smaller (older) than the one in the
5870		 * current leaf and the leaf our last extent came from, and
5871		 * are located between these 2 leafs.
5872		 */
5873		ret = get_last_extent(sctx, key->offset - 1);
5874		if (ret)
5875			return ret;
5876	}
5877
5878	if (sctx->cur_inode_last_extent < key->offset) {
5879		ret = range_is_hole_in_parent(sctx,
5880					      sctx->cur_inode_last_extent,
5881					      key->offset);
5882		if (ret < 0)
5883			return ret;
5884		else if (ret == 0)
5885			ret = send_hole(sctx, key->offset);
5886		else
5887			ret = 0;
5888	}
5889	sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
5890	return ret;
5891}
5892
5893static int process_extent(struct send_ctx *sctx,
5894			  struct btrfs_path *path,
5895			  struct btrfs_key *key)
5896{
5897	struct clone_root *found_clone = NULL;
5898	int ret = 0;
5899
5900	if (S_ISLNK(sctx->cur_inode_mode))
5901		return 0;
5902
5903	if (sctx->parent_root && !sctx->cur_inode_new) {
5904		ret = is_extent_unchanged(sctx, path, key);
5905		if (ret < 0)
5906			goto out;
5907		if (ret) {
5908			ret = 0;
5909			goto out_hole;
5910		}
5911	} else {
5912		struct btrfs_file_extent_item *ei;
5913		u8 type;
5914
5915		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5916				    struct btrfs_file_extent_item);
5917		type = btrfs_file_extent_type(path->nodes[0], ei);
5918		if (type == BTRFS_FILE_EXTENT_PREALLOC ||
5919		    type == BTRFS_FILE_EXTENT_REG) {
5920			/*
5921			 * The send spec does not have a prealloc command yet,
5922			 * so just leave a hole for prealloc'ed extents until
5923			 * we have enough commands queued up to justify rev'ing
5924			 * the send spec.
5925			 */
5926			if (type == BTRFS_FILE_EXTENT_PREALLOC) {
5927				ret = 0;
5928				goto out;
5929			}
5930
5931			/* Have a hole, just skip it. */
5932			if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
5933				ret = 0;
5934				goto out;
5935			}
5936		}
5937	}
5938
5939	ret = find_extent_clone(sctx, path, key->objectid, key->offset,
5940			sctx->cur_inode_size, &found_clone);
5941	if (ret != -ENOENT && ret < 0)
5942		goto out;
5943
5944	ret = send_write_or_clone(sctx, path, key, found_clone);
5945	if (ret)
5946		goto out;
5947out_hole:
5948	ret = maybe_send_hole(sctx, path, key);
5949out:
5950	return ret;
5951}
5952
5953static int process_all_extents(struct send_ctx *sctx)
5954{
5955	int ret;
5956	struct btrfs_root *root;
5957	struct btrfs_path *path;
5958	struct btrfs_key key;
5959	struct btrfs_key found_key;
5960	struct extent_buffer *eb;
5961	int slot;
5962
5963	root = sctx->send_root;
5964	path = alloc_path_for_send();
5965	if (!path)
5966		return -ENOMEM;
5967
5968	key.objectid = sctx->cmp_key->objectid;
5969	key.type = BTRFS_EXTENT_DATA_KEY;
5970	key.offset = 0;
5971	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5972	if (ret < 0)
5973		goto out;
5974
5975	while (1) {
5976		eb = path->nodes[0];
5977		slot = path->slots[0];
5978
5979		if (slot >= btrfs_header_nritems(eb)) {
5980			ret = btrfs_next_leaf(root, path);
5981			if (ret < 0) {
5982				goto out;
5983			} else if (ret > 0) {
5984				ret = 0;
5985				break;
5986			}
5987			continue;
5988		}
5989
5990		btrfs_item_key_to_cpu(eb, &found_key, slot);
5991
5992		if (found_key.objectid != key.objectid ||
5993		    found_key.type != key.type) {
5994			ret = 0;
5995			goto out;
5996		}
5997
5998		ret = process_extent(sctx, path, &found_key);
5999		if (ret < 0)
6000			goto out;
6001
6002		path->slots[0]++;
6003	}
6004
6005out:
6006	btrfs_free_path(path);
6007	return ret;
6008}
6009
6010static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
6011					   int *pending_move,
6012					   int *refs_processed)
6013{
6014	int ret = 0;
6015
6016	if (sctx->cur_ino == 0)
6017		goto out;
6018	if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
6019	    sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
6020		goto out;
6021	if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
6022		goto out;
6023
6024	ret = process_recorded_refs(sctx, pending_move);
6025	if (ret < 0)
6026		goto out;
6027
6028	*refs_processed = 1;
6029out:
6030	return ret;
6031}
6032
6033static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
6034{
6035	int ret = 0;
6036	u64 left_mode;
6037	u64 left_uid;
6038	u64 left_gid;
6039	u64 right_mode;
6040	u64 right_uid;
6041	u64 right_gid;
6042	int need_chmod = 0;
6043	int need_chown = 0;
6044	int need_truncate = 1;
6045	int pending_move = 0;
6046	int refs_processed = 0;
6047
6048	if (sctx->ignore_cur_inode)
6049		return 0;
6050
6051	ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
6052					      &refs_processed);
6053	if (ret < 0)
6054		goto out;
6055
6056	/*
6057	 * We have processed the refs and thus need to advance send_progress.
6058	 * Now, calls to get_cur_xxx will take the updated refs of the current
6059	 * inode into account.
6060	 *
6061	 * On the other hand, if our current inode is a directory and couldn't
6062	 * be moved/renamed because its parent was renamed/moved too and it has
6063	 * a higher inode number, we can only move/rename our current inode
6064	 * after we moved/renamed its parent. Therefore in this case operate on
6065	 * the old path (pre move/rename) of our current inode, and the
6066	 * move/rename will be performed later.
6067	 */
6068	if (refs_processed && !pending_move)
6069		sctx->send_progress = sctx->cur_ino + 1;
6070
6071	if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
6072		goto out;
6073	if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
6074		goto out;
6075
6076	ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
6077			&left_mode, &left_uid, &left_gid, NULL);
6078	if (ret < 0)
6079		goto out;
6080
6081	if (!sctx->parent_root || sctx->cur_inode_new) {
6082		need_chown = 1;
6083		if (!S_ISLNK(sctx->cur_inode_mode))
6084			need_chmod = 1;
6085		if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size)
6086			need_truncate = 0;
6087	} else {
6088		u64 old_size;
6089
6090		ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
6091				&old_size, NULL, &right_mode, &right_uid,
6092				&right_gid, NULL);
6093		if (ret < 0)
6094			goto out;
6095
6096		if (left_uid != right_uid || left_gid != right_gid)
6097			need_chown = 1;
6098		if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
6099			need_chmod = 1;
6100		if ((old_size == sctx->cur_inode_size) ||
6101		    (sctx->cur_inode_size > old_size &&
6102		     sctx->cur_inode_next_write_offset == sctx->cur_inode_size))
6103			need_truncate = 0;
6104	}
6105
6106	if (S_ISREG(sctx->cur_inode_mode)) {
6107		if (need_send_hole(sctx)) {
6108			if (sctx->cur_inode_last_extent == (u64)-1 ||
6109			    sctx->cur_inode_last_extent <
6110			    sctx->cur_inode_size) {
6111				ret = get_last_extent(sctx, (u64)-1);
6112				if (ret)
6113					goto out;
6114			}
6115			if (sctx->cur_inode_last_extent <
6116			    sctx->cur_inode_size) {
6117				ret = send_hole(sctx, sctx->cur_inode_size);
6118				if (ret)
6119					goto out;
6120			}
6121		}
6122		if (need_truncate) {
6123			ret = send_truncate(sctx, sctx->cur_ino,
6124					    sctx->cur_inode_gen,
6125					    sctx->cur_inode_size);
6126			if (ret < 0)
6127				goto out;
6128		}
6129	}
6130
6131	if (need_chown) {
6132		ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
6133				left_uid, left_gid);
6134		if (ret < 0)
6135			goto out;
6136	}
6137	if (need_chmod) {
6138		ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
6139				left_mode);
6140		if (ret < 0)
6141			goto out;
6142	}
6143
6144	ret = send_capabilities(sctx);
6145	if (ret < 0)
6146		goto out;
6147
6148	/*
6149	 * If other directory inodes depended on our current directory
6150	 * inode's move/rename, now do their move/rename operations.
6151	 */
6152	if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
6153		ret = apply_children_dir_moves(sctx);
6154		if (ret)
6155			goto out;
6156		/*
6157		 * Need to send that every time, no matter if it actually
6158		 * changed between the two trees as we have done changes to
6159		 * the inode before. If our inode is a directory and it's
6160		 * waiting to be moved/renamed, we will send its utimes when
6161		 * it's moved/renamed, therefore we don't need to do it here.
6162		 */
6163		sctx->send_progress = sctx->cur_ino + 1;
6164		ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
6165		if (ret < 0)
6166			goto out;
6167	}
6168
6169out:
6170	return ret;
6171}
6172
6173struct parent_paths_ctx {
6174	struct list_head *refs;
6175	struct send_ctx *sctx;
6176};
6177
6178static int record_parent_ref(int num, u64 dir, int index, struct fs_path *name,
6179			     void *ctx)
6180{
6181	struct parent_paths_ctx *ppctx = ctx;
6182
6183	return record_ref(ppctx->sctx->parent_root, dir, name, ppctx->sctx,
6184			  ppctx->refs);
6185}
6186
6187/*
6188 * Issue unlink operations for all paths of the current inode found in the
6189 * parent snapshot.
6190 */
6191static int btrfs_unlink_all_paths(struct send_ctx *sctx)
6192{
6193	LIST_HEAD(deleted_refs);
6194	struct btrfs_path *path;
6195	struct btrfs_key key;
6196	struct parent_paths_ctx ctx;
6197	int ret;
6198
6199	path = alloc_path_for_send();
6200	if (!path)
6201		return -ENOMEM;
6202
6203	key.objectid = sctx->cur_ino;
6204	key.type = BTRFS_INODE_REF_KEY;
6205	key.offset = 0;
6206	ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
6207	if (ret < 0)
6208		goto out;
6209
6210	ctx.refs = &deleted_refs;
6211	ctx.sctx = sctx;
6212
6213	while (true) {
6214		struct extent_buffer *eb = path->nodes[0];
6215		int slot = path->slots[0];
6216
6217		if (slot >= btrfs_header_nritems(eb)) {
6218			ret = btrfs_next_leaf(sctx->parent_root, path);
6219			if (ret < 0)
6220				goto out;
6221			else if (ret > 0)
6222				break;
6223			continue;
6224		}
6225
6226		btrfs_item_key_to_cpu(eb, &key, slot);
6227		if (key.objectid != sctx->cur_ino)
6228			break;
6229		if (key.type != BTRFS_INODE_REF_KEY &&
6230		    key.type != BTRFS_INODE_EXTREF_KEY)
6231			break;
6232
6233		ret = iterate_inode_ref(sctx->parent_root, path, &key, 1,
6234					record_parent_ref, &ctx);
6235		if (ret < 0)
6236			goto out;
6237
6238		path->slots[0]++;
6239	}
6240
6241	while (!list_empty(&deleted_refs)) {
6242		struct recorded_ref *ref;
6243
6244		ref = list_first_entry(&deleted_refs, struct recorded_ref, list);
6245		ret = send_unlink(sctx, ref->full_path);
6246		if (ret < 0)
6247			goto out;
6248		fs_path_free(ref->full_path);
6249		list_del(&ref->list);
6250		kfree(ref);
6251	}
6252	ret = 0;
6253out:
6254	btrfs_free_path(path);
6255	if (ret)
6256		__free_recorded_refs(&deleted_refs);
6257	return ret;
6258}
6259
6260static int changed_inode(struct send_ctx *sctx,
6261			 enum btrfs_compare_tree_result result)
6262{
6263	int ret = 0;
6264	struct btrfs_key *key = sctx->cmp_key;
6265	struct btrfs_inode_item *left_ii = NULL;
6266	struct btrfs_inode_item *right_ii = NULL;
6267	u64 left_gen = 0;
6268	u64 right_gen = 0;
6269
6270	sctx->cur_ino = key->objectid;
6271	sctx->cur_inode_new_gen = 0;
6272	sctx->cur_inode_last_extent = (u64)-1;
6273	sctx->cur_inode_next_write_offset = 0;
6274	sctx->ignore_cur_inode = false;
6275
6276	/*
6277	 * Set send_progress to current inode. This will tell all get_cur_xxx
6278	 * functions that the current inode's refs are not updated yet. Later,
6279	 * when process_recorded_refs is finished, it is set to cur_ino + 1.
6280	 */
6281	sctx->send_progress = sctx->cur_ino;
6282
6283	if (result == BTRFS_COMPARE_TREE_NEW ||
6284	    result == BTRFS_COMPARE_TREE_CHANGED) {
6285		left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
6286				sctx->left_path->slots[0],
6287				struct btrfs_inode_item);
6288		left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
6289				left_ii);
6290	} else {
6291		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
6292				sctx->right_path->slots[0],
6293				struct btrfs_inode_item);
6294		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
6295				right_ii);
6296	}
6297	if (result == BTRFS_COMPARE_TREE_CHANGED) {
6298		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
6299				sctx->right_path->slots[0],
6300				struct btrfs_inode_item);
6301
6302		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
6303				right_ii);
6304
6305		/*
6306		 * The cur_ino = root dir case is special here. We can't treat
6307		 * the inode as deleted+reused because it would generate a
6308		 * stream that tries to delete/mkdir the root dir.
6309		 */
6310		if (left_gen != right_gen &&
6311		    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
6312			sctx->cur_inode_new_gen = 1;
6313	}
6314
6315	/*
6316	 * Normally we do not find inodes with a link count of zero (orphans)
6317	 * because the most common case is to create a snapshot and use it
6318	 * for a send operation. However other less common use cases involve
6319	 * using a subvolume and send it after turning it to RO mode just
6320	 * after deleting all hard links of a file while holding an open
6321	 * file descriptor against it or turning a RO snapshot into RW mode,
6322	 * keep an open file descriptor against a file, delete it and then
6323	 * turn the snapshot back to RO mode before using it for a send
6324	 * operation. So if we find such cases, ignore the inode and all its
6325	 * items completely if it's a new inode, or if it's a changed inode
6326	 * make sure all its previous paths (from the parent snapshot) are all
6327	 * unlinked and all other the inode items are ignored.
6328	 */
6329	if (result == BTRFS_COMPARE_TREE_NEW ||
6330	    result == BTRFS_COMPARE_TREE_CHANGED) {
6331		u32 nlinks;
6332
6333		nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
6334		if (nlinks == 0) {
6335			sctx->ignore_cur_inode = true;
6336			if (result == BTRFS_COMPARE_TREE_CHANGED)
6337				ret = btrfs_unlink_all_paths(sctx);
6338			goto out;
6339		}
6340	}
6341
6342	if (result == BTRFS_COMPARE_TREE_NEW) {
6343		sctx->cur_inode_gen = left_gen;
6344		sctx->cur_inode_new = 1;
6345		sctx->cur_inode_deleted = 0;
6346		sctx->cur_inode_size = btrfs_inode_size(
6347				sctx->left_path->nodes[0], left_ii);
6348		sctx->cur_inode_mode = btrfs_inode_mode(
6349				sctx->left_path->nodes[0], left_ii);
6350		sctx->cur_inode_rdev = btrfs_inode_rdev(
6351				sctx->left_path->nodes[0], left_ii);
6352		if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
6353			ret = send_create_inode_if_needed(sctx);
6354	} else if (result == BTRFS_COMPARE_TREE_DELETED) {
6355		sctx->cur_inode_gen = right_gen;
6356		sctx->cur_inode_new = 0;
6357		sctx->cur_inode_deleted = 1;
6358		sctx->cur_inode_size = btrfs_inode_size(
6359				sctx->right_path->nodes[0], right_ii);
6360		sctx->cur_inode_mode = btrfs_inode_mode(
6361				sctx->right_path->nodes[0], right_ii);
6362	} else if (result == BTRFS_COMPARE_TREE_CHANGED) {
6363		/*
6364		 * We need to do some special handling in case the inode was
6365		 * reported as changed with a changed generation number. This
6366		 * means that the original inode was deleted and new inode
6367		 * reused the same inum. So we have to treat the old inode as
6368		 * deleted and the new one as new.
6369		 */
6370		if (sctx->cur_inode_new_gen) {
6371			/*
6372			 * First, process the inode as if it was deleted.
6373			 */
6374			sctx->cur_inode_gen = right_gen;
6375			sctx->cur_inode_new = 0;
6376			sctx->cur_inode_deleted = 1;
6377			sctx->cur_inode_size = btrfs_inode_size(
6378					sctx->right_path->nodes[0], right_ii);
6379			sctx->cur_inode_mode = btrfs_inode_mode(
6380					sctx->right_path->nodes[0], right_ii);
6381			ret = process_all_refs(sctx,
6382					BTRFS_COMPARE_TREE_DELETED);
6383			if (ret < 0)
6384				goto out;
6385
6386			/*
6387			 * Now process the inode as if it was new.
6388			 */
6389			sctx->cur_inode_gen = left_gen;
6390			sctx->cur_inode_new = 1;
6391			sctx->cur_inode_deleted = 0;
6392			sctx->cur_inode_size = btrfs_inode_size(
6393					sctx->left_path->nodes[0], left_ii);
6394			sctx->cur_inode_mode = btrfs_inode_mode(
6395					sctx->left_path->nodes[0], left_ii);
6396			sctx->cur_inode_rdev = btrfs_inode_rdev(
6397					sctx->left_path->nodes[0], left_ii);
6398			ret = send_create_inode_if_needed(sctx);
6399			if (ret < 0)
6400				goto out;
6401
6402			ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
6403			if (ret < 0)
6404				goto out;
6405			/*
6406			 * Advance send_progress now as we did not get into
6407			 * process_recorded_refs_if_needed in the new_gen case.
6408			 */
6409			sctx->send_progress = sctx->cur_ino + 1;
6410
6411			/*
6412			 * Now process all extents and xattrs of the inode as if
6413			 * they were all new.
6414			 */
6415			ret = process_all_extents(sctx);
6416			if (ret < 0)
6417				goto out;
6418			ret = process_all_new_xattrs(sctx);
6419			if (ret < 0)
6420				goto out;
6421		} else {
6422			sctx->cur_inode_gen = left_gen;
6423			sctx->cur_inode_new = 0;
6424			sctx->cur_inode_new_gen = 0;
6425			sctx->cur_inode_deleted = 0;
6426			sctx->cur_inode_size = btrfs_inode_size(
6427					sctx->left_path->nodes[0], left_ii);
6428			sctx->cur_inode_mode = btrfs_inode_mode(
6429					sctx->left_path->nodes[0], left_ii);
6430		}
6431	}
6432
6433out:
6434	return ret;
6435}
6436
6437/*
6438 * We have to process new refs before deleted refs, but compare_trees gives us
6439 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
6440 * first and later process them in process_recorded_refs.
6441 * For the cur_inode_new_gen case, we skip recording completely because
6442 * changed_inode did already initiate processing of refs. The reason for this is
6443 * that in this case, compare_tree actually compares the refs of 2 different
6444 * inodes. To fix this, process_all_refs is used in changed_inode to handle all
6445 * refs of the right tree as deleted and all refs of the left tree as new.
6446 */
6447static int changed_ref(struct send_ctx *sctx,
6448		       enum btrfs_compare_tree_result result)
6449{
6450	int ret = 0;
6451
6452	if (sctx->cur_ino != sctx->cmp_key->objectid) {
6453		inconsistent_snapshot_error(sctx, result, "reference");
6454		return -EIO;
6455	}
6456
6457	if (!sctx->cur_inode_new_gen &&
6458	    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
6459		if (result == BTRFS_COMPARE_TREE_NEW)
6460			ret = record_new_ref(sctx);
6461		else if (result == BTRFS_COMPARE_TREE_DELETED)
6462			ret = record_deleted_ref(sctx);
6463		else if (result == BTRFS_COMPARE_TREE_CHANGED)
6464			ret = record_changed_ref(sctx);
6465	}
6466
6467	return ret;
6468}
6469
6470/*
6471 * Process new/deleted/changed xattrs. We skip processing in the
6472 * cur_inode_new_gen case because changed_inode did already initiate processing
6473 * of xattrs. The reason is the same as in changed_ref
6474 */
6475static int changed_xattr(struct send_ctx *sctx,
6476			 enum btrfs_compare_tree_result result)
6477{
6478	int ret = 0;
6479
6480	if (sctx->cur_ino != sctx->cmp_key->objectid) {
6481		inconsistent_snapshot_error(sctx, result, "xattr");
6482		return -EIO;
6483	}
6484
6485	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6486		if (result == BTRFS_COMPARE_TREE_NEW)
6487			ret = process_new_xattr(sctx);
6488		else if (result == BTRFS_COMPARE_TREE_DELETED)
6489			ret = process_deleted_xattr(sctx);
6490		else if (result == BTRFS_COMPARE_TREE_CHANGED)
6491			ret = process_changed_xattr(sctx);
6492	}
6493
6494	return ret;
6495}
6496
6497/*
6498 * Process new/deleted/changed extents. We skip processing in the
6499 * cur_inode_new_gen case because changed_inode did already initiate processing
6500 * of extents. The reason is the same as in changed_ref
6501 */
6502static int changed_extent(struct send_ctx *sctx,
6503			  enum btrfs_compare_tree_result result)
6504{
6505	int ret = 0;
6506
6507	/*
6508	 * We have found an extent item that changed without the inode item
6509	 * having changed. This can happen either after relocation (where the
6510	 * disk_bytenr of an extent item is replaced at
6511	 * relocation.c:replace_file_extents()) or after deduplication into a
6512	 * file in both the parent and send snapshots (where an extent item can
6513	 * get modified or replaced with a new one). Note that deduplication
6514	 * updates the inode item, but it only changes the iversion (sequence
6515	 * field in the inode item) of the inode, so if a file is deduplicated
6516	 * the same amount of times in both the parent and send snapshots, its
6517	 * iversion becomes the same in both snapshots, whence the inode item is
6518	 * the same on both snapshots.
6519	 */
6520	if (sctx->cur_ino != sctx->cmp_key->objectid)
6521		return 0;
6522
6523	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6524		if (result != BTRFS_COMPARE_TREE_DELETED)
6525			ret = process_extent(sctx, sctx->left_path,
6526					sctx->cmp_key);
6527	}
6528
6529	return ret;
6530}
6531
6532static int dir_changed(struct send_ctx *sctx, u64 dir)
6533{
6534	u64 orig_gen, new_gen;
6535	int ret;
6536
6537	ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
6538			     NULL, NULL);
6539	if (ret)
6540		return ret;
6541
6542	ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
6543			     NULL, NULL, NULL);
6544	if (ret)
6545		return ret;
6546
6547	return (orig_gen != new_gen) ? 1 : 0;
6548}
6549
6550static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
6551			struct btrfs_key *key)
6552{
6553	struct btrfs_inode_extref *extref;
6554	struct extent_buffer *leaf;
6555	u64 dirid = 0, last_dirid = 0;
6556	unsigned long ptr;
6557	u32 item_size;
6558	u32 cur_offset = 0;
6559	int ref_name_len;
6560	int ret = 0;
6561
6562	/* Easy case, just check this one dirid */
6563	if (key->type == BTRFS_INODE_REF_KEY) {
6564		dirid = key->offset;
6565
6566		ret = dir_changed(sctx, dirid);
6567		goto out;
6568	}
6569
6570	leaf = path->nodes[0];
6571	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
6572	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
6573	while (cur_offset < item_size) {
6574		extref = (struct btrfs_inode_extref *)(ptr +
6575						       cur_offset);
6576		dirid = btrfs_inode_extref_parent(leaf, extref);
6577		ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
6578		cur_offset += ref_name_len + sizeof(*extref);
6579		if (dirid == last_dirid)
6580			continue;
6581		ret = dir_changed(sctx, dirid);
6582		if (ret)
6583			break;
6584		last_dirid = dirid;
6585	}
6586out:
6587	return ret;
6588}
6589
6590/*
6591 * Updates compare related fields in sctx and simply forwards to the actual
6592 * changed_xxx functions.
6593 */
6594static int changed_cb(struct btrfs_path *left_path,
6595		      struct btrfs_path *right_path,
6596		      struct btrfs_key *key,
6597		      enum btrfs_compare_tree_result result,
6598		      struct send_ctx *sctx)
6599{
6600	int ret = 0;
6601
6602	if (result == BTRFS_COMPARE_TREE_SAME) {
6603		if (key->type == BTRFS_INODE_REF_KEY ||
6604		    key->type == BTRFS_INODE_EXTREF_KEY) {
6605			ret = compare_refs(sctx, left_path, key);
6606			if (!ret)
6607				return 0;
6608			if (ret < 0)
6609				return ret;
6610		} else if (key->type == BTRFS_EXTENT_DATA_KEY) {
6611			return maybe_send_hole(sctx, left_path, key);
6612		} else {
6613			return 0;
6614		}
6615		result = BTRFS_COMPARE_TREE_CHANGED;
6616		ret = 0;
6617	}
6618
6619	sctx->left_path = left_path;
6620	sctx->right_path = right_path;
6621	sctx->cmp_key = key;
6622
6623	ret = finish_inode_if_needed(sctx, 0);
6624	if (ret < 0)
6625		goto out;
6626
6627	/* Ignore non-FS objects */
6628	if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
6629	    key->objectid == BTRFS_FREE_SPACE_OBJECTID)
6630		goto out;
6631
6632	if (key->type == BTRFS_INODE_ITEM_KEY) {
6633		ret = changed_inode(sctx, result);
6634	} else if (!sctx->ignore_cur_inode) {
6635		if (key->type == BTRFS_INODE_REF_KEY ||
6636		    key->type == BTRFS_INODE_EXTREF_KEY)
6637			ret = changed_ref(sctx, result);
6638		else if (key->type == BTRFS_XATTR_ITEM_KEY)
6639			ret = changed_xattr(sctx, result);
6640		else if (key->type == BTRFS_EXTENT_DATA_KEY)
6641			ret = changed_extent(sctx, result);
6642	}
6643
6644out:
6645	return ret;
6646}
6647
6648static int full_send_tree(struct send_ctx *sctx)
6649{
6650	int ret;
6651	struct btrfs_root *send_root = sctx->send_root;
6652	struct btrfs_key key;
6653	struct btrfs_path *path;
6654	struct extent_buffer *eb;
6655	int slot;
6656
6657	path = alloc_path_for_send();
6658	if (!path)
6659		return -ENOMEM;
6660	path->reada = READA_FORWARD_ALWAYS;
6661
6662	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
6663	key.type = BTRFS_INODE_ITEM_KEY;
6664	key.offset = 0;
6665
6666	ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
6667	if (ret < 0)
6668		goto out;
6669	if (ret)
6670		goto out_finish;
6671
6672	while (1) {
6673		eb = path->nodes[0];
6674		slot = path->slots[0];
6675		btrfs_item_key_to_cpu(eb, &key, slot);
6676
6677		ret = changed_cb(path, NULL, &key,
6678				 BTRFS_COMPARE_TREE_NEW, sctx);
6679		if (ret < 0)
6680			goto out;
6681
6682		ret = btrfs_next_item(send_root, path);
6683		if (ret < 0)
6684			goto out;
6685		if (ret) {
6686			ret  = 0;
6687			break;
6688		}
6689	}
6690
6691out_finish:
6692	ret = finish_inode_if_needed(sctx, 1);
6693
6694out:
6695	btrfs_free_path(path);
6696	return ret;
6697}
6698
6699static int tree_move_down(struct btrfs_path *path, int *level, u64 reada_min_gen)
6700{
6701	struct extent_buffer *eb;
6702	struct extent_buffer *parent = path->nodes[*level];
6703	int slot = path->slots[*level];
6704	const int nritems = btrfs_header_nritems(parent);
6705	u64 reada_max;
6706	u64 reada_done = 0;
6707
6708	BUG_ON(*level == 0);
6709	eb = btrfs_read_node_slot(parent, slot);
6710	if (IS_ERR(eb))
6711		return PTR_ERR(eb);
6712
6713	/*
6714	 * Trigger readahead for the next leaves we will process, so that it is
6715	 * very likely that when we need them they are already in memory and we
6716	 * will not block on disk IO. For nodes we only do readahead for one,
6717	 * since the time window between processing nodes is typically larger.
6718	 */
6719	reada_max = (*level == 1 ? SZ_128K : eb->fs_info->nodesize);
6720
6721	for (slot++; slot < nritems && reada_done < reada_max; slot++) {
6722		if (btrfs_node_ptr_generation(parent, slot) > reada_min_gen) {
6723			btrfs_readahead_node_child(parent, slot);
6724			reada_done += eb->fs_info->nodesize;
6725		}
6726	}
6727
6728	path->nodes[*level - 1] = eb;
6729	path->slots[*level - 1] = 0;
6730	(*level)--;
6731	return 0;
6732}
6733
6734static int tree_move_next_or_upnext(struct btrfs_path *path,
6735				    int *level, int root_level)
6736{
6737	int ret = 0;
6738	int nritems;
6739	nritems = btrfs_header_nritems(path->nodes[*level]);
6740
6741	path->slots[*level]++;
6742
6743	while (path->slots[*level] >= nritems) {
6744		if (*level == root_level)
6745			return -1;
6746
6747		/* move upnext */
6748		path->slots[*level] = 0;
6749		free_extent_buffer(path->nodes[*level]);
6750		path->nodes[*level] = NULL;
6751		(*level)++;
6752		path->slots[*level]++;
6753
6754		nritems = btrfs_header_nritems(path->nodes[*level]);
6755		ret = 1;
6756	}
6757	return ret;
6758}
6759
6760/*
6761 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
6762 * or down.
6763 */
6764static int tree_advance(struct btrfs_path *path,
6765			int *level, int root_level,
6766			int allow_down,
6767			struct btrfs_key *key,
6768			u64 reada_min_gen)
6769{
6770	int ret;
6771
6772	if (*level == 0 || !allow_down) {
6773		ret = tree_move_next_or_upnext(path, level, root_level);
6774	} else {
6775		ret = tree_move_down(path, level, reada_min_gen);
6776	}
6777	if (ret >= 0) {
6778		if (*level == 0)
6779			btrfs_item_key_to_cpu(path->nodes[*level], key,
6780					path->slots[*level]);
6781		else
6782			btrfs_node_key_to_cpu(path->nodes[*level], key,
6783					path->slots[*level]);
6784	}
6785	return ret;
6786}
6787
6788static int tree_compare_item(struct btrfs_path *left_path,
6789			     struct btrfs_path *right_path,
6790			     char *tmp_buf)
6791{
6792	int cmp;
6793	int len1, len2;
6794	unsigned long off1, off2;
6795
6796	len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
6797	len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
6798	if (len1 != len2)
6799		return 1;
6800
6801	off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
6802	off2 = btrfs_item_ptr_offset(right_path->nodes[0],
6803				right_path->slots[0]);
6804
6805	read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
6806
6807	cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
6808	if (cmp)
6809		return 1;
6810	return 0;
6811}
6812
6813/*
6814 * This function compares two trees and calls the provided callback for
6815 * every changed/new/deleted item it finds.
6816 * If shared tree blocks are encountered, whole subtrees are skipped, making
6817 * the compare pretty fast on snapshotted subvolumes.
6818 *
6819 * This currently works on commit roots only. As commit roots are read only,
6820 * we don't do any locking. The commit roots are protected with transactions.
6821 * Transactions are ended and rejoined when a commit is tried in between.
6822 *
6823 * This function checks for modifications done to the trees while comparing.
6824 * If it detects a change, it aborts immediately.
6825 */
6826static int btrfs_compare_trees(struct btrfs_root *left_root,
6827			struct btrfs_root *right_root, struct send_ctx *sctx)
6828{
6829	struct btrfs_fs_info *fs_info = left_root->fs_info;
6830	int ret;
6831	int cmp;
6832	struct btrfs_path *left_path = NULL;
6833	struct btrfs_path *right_path = NULL;
6834	struct btrfs_key left_key;
6835	struct btrfs_key right_key;
6836	char *tmp_buf = NULL;
6837	int left_root_level;
6838	int right_root_level;
6839	int left_level;
6840	int right_level;
6841	int left_end_reached;
6842	int right_end_reached;
6843	int advance_left;
6844	int advance_right;
6845	u64 left_blockptr;
6846	u64 right_blockptr;
6847	u64 left_gen;
6848	u64 right_gen;
6849	u64 reada_min_gen;
6850
6851	left_path = btrfs_alloc_path();
6852	if (!left_path) {
6853		ret = -ENOMEM;
6854		goto out;
6855	}
6856	right_path = btrfs_alloc_path();
6857	if (!right_path) {
6858		ret = -ENOMEM;
6859		goto out;
6860	}
6861
6862	tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
6863	if (!tmp_buf) {
6864		ret = -ENOMEM;
6865		goto out;
6866	}
6867
6868	left_path->search_commit_root = 1;
6869	left_path->skip_locking = 1;
6870	right_path->search_commit_root = 1;
6871	right_path->skip_locking = 1;
6872
6873	/*
6874	 * Strategy: Go to the first items of both trees. Then do
6875	 *
6876	 * If both trees are at level 0
6877	 *   Compare keys of current items
6878	 *     If left < right treat left item as new, advance left tree
6879	 *       and repeat
6880	 *     If left > right treat right item as deleted, advance right tree
6881	 *       and repeat
6882	 *     If left == right do deep compare of items, treat as changed if
6883	 *       needed, advance both trees and repeat
6884	 * If both trees are at the same level but not at level 0
6885	 *   Compare keys of current nodes/leafs
6886	 *     If left < right advance left tree and repeat
6887	 *     If left > right advance right tree and repeat
6888	 *     If left == right compare blockptrs of the next nodes/leafs
6889	 *       If they match advance both trees but stay at the same level
6890	 *         and repeat
6891	 *       If they don't match advance both trees while allowing to go
6892	 *         deeper and repeat
6893	 * If tree levels are different
6894	 *   Advance the tree that needs it and repeat
6895	 *
6896	 * Advancing a tree means:
6897	 *   If we are at level 0, try to go to the next slot. If that's not
6898	 *   possible, go one level up and repeat. Stop when we found a level
6899	 *   where we could go to the next slot. We may at this point be on a
6900	 *   node or a leaf.
6901	 *
6902	 *   If we are not at level 0 and not on shared tree blocks, go one
6903	 *   level deeper.
6904	 *
6905	 *   If we are not at level 0 and on shared tree blocks, go one slot to
6906	 *   the right if possible or go up and right.
6907	 */
6908
6909	down_read(&fs_info->commit_root_sem);
6910	left_level = btrfs_header_level(left_root->commit_root);
6911	left_root_level = left_level;
6912	left_path->nodes[left_level] =
6913			btrfs_clone_extent_buffer(left_root->commit_root);
6914	if (!left_path->nodes[left_level]) {
6915		up_read(&fs_info->commit_root_sem);
6916		ret = -ENOMEM;
6917		goto out;
6918	}
6919
6920	right_level = btrfs_header_level(right_root->commit_root);
6921	right_root_level = right_level;
6922	right_path->nodes[right_level] =
6923			btrfs_clone_extent_buffer(right_root->commit_root);
6924	if (!right_path->nodes[right_level]) {
6925		up_read(&fs_info->commit_root_sem);
6926		ret = -ENOMEM;
6927		goto out;
6928	}
6929	/*
6930	 * Our right root is the parent root, while the left root is the "send"
6931	 * root. We know that all new nodes/leaves in the left root must have
6932	 * a generation greater than the right root's generation, so we trigger
6933	 * readahead for those nodes and leaves of the left root, as we know we
6934	 * will need to read them at some point.
6935	 */
6936	reada_min_gen = btrfs_header_generation(right_root->commit_root);
6937	up_read(&fs_info->commit_root_sem);
6938
6939	if (left_level == 0)
6940		btrfs_item_key_to_cpu(left_path->nodes[left_level],
6941				&left_key, left_path->slots[left_level]);
6942	else
6943		btrfs_node_key_to_cpu(left_path->nodes[left_level],
6944				&left_key, left_path->slots[left_level]);
6945	if (right_level == 0)
6946		btrfs_item_key_to_cpu(right_path->nodes[right_level],
6947				&right_key, right_path->slots[right_level]);
6948	else
6949		btrfs_node_key_to_cpu(right_path->nodes[right_level],
6950				&right_key, right_path->slots[right_level]);
6951
6952	left_end_reached = right_end_reached = 0;
6953	advance_left = advance_right = 0;
6954
6955	while (1) {
6956		cond_resched();
6957		if (advance_left && !left_end_reached) {
6958			ret = tree_advance(left_path, &left_level,
6959					left_root_level,
6960					advance_left != ADVANCE_ONLY_NEXT,
6961					&left_key, reada_min_gen);
6962			if (ret == -1)
6963				left_end_reached = ADVANCE;
6964			else if (ret < 0)
6965				goto out;
6966			advance_left = 0;
6967		}
6968		if (advance_right && !right_end_reached) {
6969			ret = tree_advance(right_path, &right_level,
6970					right_root_level,
6971					advance_right != ADVANCE_ONLY_NEXT,
6972					&right_key, reada_min_gen);
6973			if (ret == -1)
6974				right_end_reached = ADVANCE;
6975			else if (ret < 0)
6976				goto out;
6977			advance_right = 0;
6978		}
6979
6980		if (left_end_reached && right_end_reached) {
6981			ret = 0;
6982			goto out;
6983		} else if (left_end_reached) {
6984			if (right_level == 0) {
6985				ret = changed_cb(left_path, right_path,
6986						&right_key,
6987						BTRFS_COMPARE_TREE_DELETED,
6988						sctx);
6989				if (ret < 0)
6990					goto out;
6991			}
6992			advance_right = ADVANCE;
6993			continue;
6994		} else if (right_end_reached) {
6995			if (left_level == 0) {
6996				ret = changed_cb(left_path, right_path,
6997						&left_key,
6998						BTRFS_COMPARE_TREE_NEW,
6999						sctx);
7000				if (ret < 0)
7001					goto out;
7002			}
7003			advance_left = ADVANCE;
7004			continue;
7005		}
7006
7007		if (left_level == 0 && right_level == 0) {
7008			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
7009			if (cmp < 0) {
7010				ret = changed_cb(left_path, right_path,
7011						&left_key,
7012						BTRFS_COMPARE_TREE_NEW,
7013						sctx);
7014				if (ret < 0)
7015					goto out;
7016				advance_left = ADVANCE;
7017			} else if (cmp > 0) {
7018				ret = changed_cb(left_path, right_path,
7019						&right_key,
7020						BTRFS_COMPARE_TREE_DELETED,
7021						sctx);
7022				if (ret < 0)
7023					goto out;
7024				advance_right = ADVANCE;
7025			} else {
7026				enum btrfs_compare_tree_result result;
7027
7028				WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
7029				ret = tree_compare_item(left_path, right_path,
7030							tmp_buf);
7031				if (ret)
7032					result = BTRFS_COMPARE_TREE_CHANGED;
7033				else
7034					result = BTRFS_COMPARE_TREE_SAME;
7035				ret = changed_cb(left_path, right_path,
7036						 &left_key, result, sctx);
7037				if (ret < 0)
7038					goto out;
7039				advance_left = ADVANCE;
7040				advance_right = ADVANCE;
7041			}
7042		} else if (left_level == right_level) {
7043			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
7044			if (cmp < 0) {
7045				advance_left = ADVANCE;
7046			} else if (cmp > 0) {
7047				advance_right = ADVANCE;
7048			} else {
7049				left_blockptr = btrfs_node_blockptr(
7050						left_path->nodes[left_level],
7051						left_path->slots[left_level]);
7052				right_blockptr = btrfs_node_blockptr(
7053						right_path->nodes[right_level],
7054						right_path->slots[right_level]);
7055				left_gen = btrfs_node_ptr_generation(
7056						left_path->nodes[left_level],
7057						left_path->slots[left_level]);
7058				right_gen = btrfs_node_ptr_generation(
7059						right_path->nodes[right_level],
7060						right_path->slots[right_level]);
7061				if (left_blockptr == right_blockptr &&
7062				    left_gen == right_gen) {
7063					/*
7064					 * As we're on a shared block, don't
7065					 * allow to go deeper.
7066					 */
7067					advance_left = ADVANCE_ONLY_NEXT;
7068					advance_right = ADVANCE_ONLY_NEXT;
7069				} else {
7070					advance_left = ADVANCE;
7071					advance_right = ADVANCE;
7072				}
7073			}
7074		} else if (left_level < right_level) {
7075			advance_right = ADVANCE;
7076		} else {
7077			advance_left = ADVANCE;
7078		}
7079	}
7080
7081out:
7082	btrfs_free_path(left_path);
7083	btrfs_free_path(right_path);
7084	kvfree(tmp_buf);
7085	return ret;
7086}
7087
7088static int send_subvol(struct send_ctx *sctx)
7089{
7090	int ret;
7091
7092	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
7093		ret = send_header(sctx);
7094		if (ret < 0)
7095			goto out;
7096	}
7097
7098	ret = send_subvol_begin(sctx);
7099	if (ret < 0)
7100		goto out;
7101
7102	if (sctx->parent_root) {
7103		ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root, sctx);
7104		if (ret < 0)
7105			goto out;
7106		ret = finish_inode_if_needed(sctx, 1);
7107		if (ret < 0)
7108			goto out;
7109	} else {
7110		ret = full_send_tree(sctx);
7111		if (ret < 0)
7112			goto out;
7113	}
7114
7115out:
7116	free_recorded_refs(sctx);
7117	return ret;
7118}
7119
7120/*
7121 * If orphan cleanup did remove any orphans from a root, it means the tree
7122 * was modified and therefore the commit root is not the same as the current
7123 * root anymore. This is a problem, because send uses the commit root and
7124 * therefore can see inode items that don't exist in the current root anymore,
7125 * and for example make calls to btrfs_iget, which will do tree lookups based
7126 * on the current root and not on the commit root. Those lookups will fail,
7127 * returning a -ESTALE error, and making send fail with that error. So make
7128 * sure a send does not see any orphans we have just removed, and that it will
7129 * see the same inodes regardless of whether a transaction commit happened
7130 * before it started (meaning that the commit root will be the same as the
7131 * current root) or not.
7132 */
7133static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
7134{
7135	int i;
7136	struct btrfs_trans_handle *trans = NULL;
7137
7138again:
7139	if (sctx->parent_root &&
7140	    sctx->parent_root->node != sctx->parent_root->commit_root)
7141		goto commit_trans;
7142
7143	for (i = 0; i < sctx->clone_roots_cnt; i++)
7144		if (sctx->clone_roots[i].root->node !=
7145		    sctx->clone_roots[i].root->commit_root)
7146			goto commit_trans;
7147
7148	if (trans)
7149		return btrfs_end_transaction(trans);
7150
7151	return 0;
7152
7153commit_trans:
7154	/* Use any root, all fs roots will get their commit roots updated. */
7155	if (!trans) {
7156		trans = btrfs_join_transaction(sctx->send_root);
7157		if (IS_ERR(trans))
7158			return PTR_ERR(trans);
7159		goto again;
7160	}
7161
7162	return btrfs_commit_transaction(trans);
7163}
7164
7165/*
7166 * Make sure any existing dellaloc is flushed for any root used by a send
7167 * operation so that we do not miss any data and we do not race with writeback
7168 * finishing and changing a tree while send is using the tree. This could
7169 * happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and
7170 * a send operation then uses the subvolume.
7171 * After flushing delalloc ensure_commit_roots_uptodate() must be called.
7172 */
7173static int flush_delalloc_roots(struct send_ctx *sctx)
7174{
7175	struct btrfs_root *root = sctx->parent_root;
7176	int ret;
7177	int i;
7178
7179	if (root) {
7180		ret = btrfs_start_delalloc_snapshot(root, false);
7181		if (ret)
7182			return ret;
7183		btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
7184	}
7185
7186	for (i = 0; i < sctx->clone_roots_cnt; i++) {
7187		root = sctx->clone_roots[i].root;
7188		ret = btrfs_start_delalloc_snapshot(root, false);
7189		if (ret)
7190			return ret;
7191		btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
7192	}
7193
7194	return 0;
7195}
7196
7197static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
7198{
7199	spin_lock(&root->root_item_lock);
7200	root->send_in_progress--;
7201	/*
7202	 * Not much left to do, we don't know why it's unbalanced and
7203	 * can't blindly reset it to 0.
7204	 */
7205	if (root->send_in_progress < 0)
7206		btrfs_err(root->fs_info,
7207			  "send_in_progress unbalanced %d root %llu",
7208			  root->send_in_progress, root->root_key.objectid);
7209	spin_unlock(&root->root_item_lock);
7210}
7211
7212static void dedupe_in_progress_warn(const struct btrfs_root *root)
7213{
7214	btrfs_warn_rl(root->fs_info,
7215"cannot use root %llu for send while deduplications on it are in progress (%d in progress)",
7216		      root->root_key.objectid, root->dedupe_in_progress);
7217}
7218
7219long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
7220{
7221	int ret = 0;
7222	struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
7223	struct btrfs_fs_info *fs_info = send_root->fs_info;
7224	struct btrfs_root *clone_root;
7225	struct send_ctx *sctx = NULL;
7226	u32 i;
7227	u64 *clone_sources_tmp = NULL;
7228	int clone_sources_to_rollback = 0;
7229	size_t alloc_size;
7230	int sort_clone_roots = 0;
7231
7232	if (!capable(CAP_SYS_ADMIN))
7233		return -EPERM;
7234
7235	/*
7236	 * The subvolume must remain read-only during send, protect against
7237	 * making it RW. This also protects against deletion.
7238	 */
7239	spin_lock(&send_root->root_item_lock);
7240	if (btrfs_root_readonly(send_root) && send_root->dedupe_in_progress) {
7241		dedupe_in_progress_warn(send_root);
7242		spin_unlock(&send_root->root_item_lock);
7243		return -EAGAIN;
7244	}
7245	send_root->send_in_progress++;
7246	spin_unlock(&send_root->root_item_lock);
7247
7248	/*
7249	 * Userspace tools do the checks and warn the user if it's
7250	 * not RO.
7251	 */
7252	if (!btrfs_root_readonly(send_root)) {
7253		ret = -EPERM;
7254		goto out;
7255	}
7256
7257	/*
7258	 * Check that we don't overflow at later allocations, we request
7259	 * clone_sources_count + 1 items, and compare to unsigned long inside
7260	 * access_ok.
7261	 */
7262	if (arg->clone_sources_count >
7263	    ULONG_MAX / sizeof(struct clone_root) - 1) {
7264		ret = -EINVAL;
7265		goto out;
7266	}
7267
7268	if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
7269		ret = -EINVAL;
7270		goto out;
7271	}
7272
7273	sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
7274	if (!sctx) {
7275		ret = -ENOMEM;
7276		goto out;
7277	}
7278
7279	INIT_LIST_HEAD(&sctx->new_refs);
7280	INIT_LIST_HEAD(&sctx->deleted_refs);
7281	INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
7282	INIT_LIST_HEAD(&sctx->name_cache_list);
7283
7284	sctx->flags = arg->flags;
7285
7286	sctx->send_filp = fget(arg->send_fd);
7287	if (!sctx->send_filp) {
7288		ret = -EBADF;
7289		goto out;
7290	}
7291
7292	sctx->send_root = send_root;
7293	/*
7294	 * Unlikely but possible, if the subvolume is marked for deletion but
7295	 * is slow to remove the directory entry, send can still be started
7296	 */
7297	if (btrfs_root_dead(sctx->send_root)) {
7298		ret = -EPERM;
7299		goto out;
7300	}
7301
7302	sctx->clone_roots_cnt = arg->clone_sources_count;
7303
7304	sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
7305	sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
7306	if (!sctx->send_buf) {
7307		ret = -ENOMEM;
7308		goto out;
7309	}
7310
7311	sctx->pending_dir_moves = RB_ROOT;
7312	sctx->waiting_dir_moves = RB_ROOT;
7313	sctx->orphan_dirs = RB_ROOT;
7314
7315	sctx->clone_roots = kvcalloc(sizeof(*sctx->clone_roots),
7316				     arg->clone_sources_count + 1,
7317				     GFP_KERNEL);
7318	if (!sctx->clone_roots) {
7319		ret = -ENOMEM;
7320		goto out;
7321	}
7322
7323	alloc_size = array_size(sizeof(*arg->clone_sources),
7324				arg->clone_sources_count);
7325
7326	if (arg->clone_sources_count) {
7327		clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
7328		if (!clone_sources_tmp) {
7329			ret = -ENOMEM;
7330			goto out;
7331		}
7332
7333		ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
7334				alloc_size);
7335		if (ret) {
7336			ret = -EFAULT;
7337			goto out;
7338		}
7339
7340		for (i = 0; i < arg->clone_sources_count; i++) {
7341			clone_root = btrfs_get_fs_root(fs_info,
7342						clone_sources_tmp[i], true);
7343			if (IS_ERR(clone_root)) {
7344				ret = PTR_ERR(clone_root);
7345				goto out;
7346			}
7347			spin_lock(&clone_root->root_item_lock);
7348			if (!btrfs_root_readonly(clone_root) ||
7349			    btrfs_root_dead(clone_root)) {
7350				spin_unlock(&clone_root->root_item_lock);
7351				btrfs_put_root(clone_root);
7352				ret = -EPERM;
7353				goto out;
7354			}
7355			if (clone_root->dedupe_in_progress) {
7356				dedupe_in_progress_warn(clone_root);
7357				spin_unlock(&clone_root->root_item_lock);
7358				btrfs_put_root(clone_root);
7359				ret = -EAGAIN;
7360				goto out;
7361			}
7362			clone_root->send_in_progress++;
7363			spin_unlock(&clone_root->root_item_lock);
7364
7365			sctx->clone_roots[i].root = clone_root;
7366			clone_sources_to_rollback = i + 1;
7367		}
7368		kvfree(clone_sources_tmp);
7369		clone_sources_tmp = NULL;
7370	}
7371
7372	if (arg->parent_root) {
7373		sctx->parent_root = btrfs_get_fs_root(fs_info, arg->parent_root,
7374						      true);
7375		if (IS_ERR(sctx->parent_root)) {
7376			ret = PTR_ERR(sctx->parent_root);
7377			goto out;
7378		}
7379
7380		spin_lock(&sctx->parent_root->root_item_lock);
7381		sctx->parent_root->send_in_progress++;
7382		if (!btrfs_root_readonly(sctx->parent_root) ||
7383				btrfs_root_dead(sctx->parent_root)) {
7384			spin_unlock(&sctx->parent_root->root_item_lock);
7385			ret = -EPERM;
7386			goto out;
7387		}
7388		if (sctx->parent_root->dedupe_in_progress) {
7389			dedupe_in_progress_warn(sctx->parent_root);
7390			spin_unlock(&sctx->parent_root->root_item_lock);
7391			ret = -EAGAIN;
7392			goto out;
7393		}
7394		spin_unlock(&sctx->parent_root->root_item_lock);
7395	}
7396
7397	/*
7398	 * Clones from send_root are allowed, but only if the clone source
7399	 * is behind the current send position. This is checked while searching
7400	 * for possible clone sources.
7401	 */
7402	sctx->clone_roots[sctx->clone_roots_cnt++].root =
7403		btrfs_grab_root(sctx->send_root);
7404
7405	/* We do a bsearch later */
7406	sort(sctx->clone_roots, sctx->clone_roots_cnt,
7407			sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
7408			NULL);
7409	sort_clone_roots = 1;
7410
7411	ret = flush_delalloc_roots(sctx);
7412	if (ret)
7413		goto out;
7414
7415	ret = ensure_commit_roots_uptodate(sctx);
7416	if (ret)
7417		goto out;
7418
7419	spin_lock(&fs_info->send_reloc_lock);
7420	if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) {
7421		spin_unlock(&fs_info->send_reloc_lock);
7422		btrfs_warn_rl(fs_info,
7423		"cannot run send because a relocation operation is in progress");
7424		ret = -EAGAIN;
7425		goto out;
7426	}
7427	fs_info->send_in_progress++;
7428	spin_unlock(&fs_info->send_reloc_lock);
7429
7430	ret = send_subvol(sctx);
7431	spin_lock(&fs_info->send_reloc_lock);
7432	fs_info->send_in_progress--;
7433	spin_unlock(&fs_info->send_reloc_lock);
7434	if (ret < 0)
7435		goto out;
7436
7437	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
7438		ret = begin_cmd(sctx, BTRFS_SEND_C_END);
7439		if (ret < 0)
7440			goto out;
7441		ret = send_cmd(sctx);
7442		if (ret < 0)
7443			goto out;
7444	}
7445
7446out:
7447	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
7448	while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
7449		struct rb_node *n;
7450		struct pending_dir_move *pm;
7451
7452		n = rb_first(&sctx->pending_dir_moves);
7453		pm = rb_entry(n, struct pending_dir_move, node);
7454		while (!list_empty(&pm->list)) {
7455			struct pending_dir_move *pm2;
7456
7457			pm2 = list_first_entry(&pm->list,
7458					       struct pending_dir_move, list);
7459			free_pending_move(sctx, pm2);
7460		}
7461		free_pending_move(sctx, pm);
7462	}
7463
7464	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
7465	while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
7466		struct rb_node *n;
7467		struct waiting_dir_move *dm;
7468
7469		n = rb_first(&sctx->waiting_dir_moves);
7470		dm = rb_entry(n, struct waiting_dir_move, node);
7471		rb_erase(&dm->node, &sctx->waiting_dir_moves);
7472		kfree(dm);
7473	}
7474
7475	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
7476	while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
7477		struct rb_node *n;
7478		struct orphan_dir_info *odi;
7479
7480		n = rb_first(&sctx->orphan_dirs);
7481		odi = rb_entry(n, struct orphan_dir_info, node);
7482		free_orphan_dir_info(sctx, odi);
7483	}
7484
7485	if (sort_clone_roots) {
7486		for (i = 0; i < sctx->clone_roots_cnt; i++) {
7487			btrfs_root_dec_send_in_progress(
7488					sctx->clone_roots[i].root);
7489			btrfs_put_root(sctx->clone_roots[i].root);
7490		}
7491	} else {
7492		for (i = 0; sctx && i < clone_sources_to_rollback; i++) {
7493			btrfs_root_dec_send_in_progress(
7494					sctx->clone_roots[i].root);
7495			btrfs_put_root(sctx->clone_roots[i].root);
7496		}
7497
7498		btrfs_root_dec_send_in_progress(send_root);
7499	}
7500	if (sctx && !IS_ERR_OR_NULL(sctx->parent_root)) {
7501		btrfs_root_dec_send_in_progress(sctx->parent_root);
7502		btrfs_put_root(sctx->parent_root);
7503	}
7504
7505	kvfree(clone_sources_tmp);
7506
7507	if (sctx) {
7508		if (sctx->send_filp)
7509			fput(sctx->send_filp);
7510
7511		kvfree(sctx->clone_roots);
7512		kvfree(sctx->send_buf);
7513
7514		name_cache_free(sctx);
7515
7516		kfree(sctx);
7517	}
7518
7519	return ret;
7520}