1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2012 Alexander Block. All rights reserved.
4 */
5
6#include <linux/bsearch.h>
7#include <linux/fs.h>
8#include <linux/file.h>
9#include <linux/sort.h>
10#include <linux/mount.h>
11#include <linux/xattr.h>
12#include <linux/posix_acl_xattr.h>
13#include <linux/radix-tree.h>
14#include <linux/vmalloc.h>
15#include <linux/string.h>
16#include <linux/compat.h>
17#include <linux/crc32c.h>
18
19#include "send.h"
20#include "backref.h"
21#include "locking.h"
22#include "disk-io.h"
23#include "btrfs_inode.h"
24#include "transaction.h"
25#include "compression.h"
26
27/*
28 * A fs_path is a helper to dynamically build path names with unknown size.
29 * It reallocates the internal buffer on demand.
30 * It allows fast adding of path elements on the right side (normal path) and
31 * fast adding to the left side (reversed path). A reversed path can also be
32 * unreversed if needed.
33 */
34struct fs_path {
35 union {
36 struct {
37 char *start;
38 char *end;
39
40 char *buf;
41 unsigned short buf_len:15;
42 unsigned short reversed:1;
43 char inline_buf[];
44 };
		/*
		 * Average path length does not exceed 200 bytes, so we get
		 * better packing in the slab and a higher chance to satisfy
		 * an allocation later during send.
		 */
50 char pad[256];
51 };
52};
53#define FS_PATH_INLINE_SIZE \
54 (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
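
/*
 * Illustrative sketch (not compiled, names are hypothetical): the usual
 * fs_path life cycle using the helpers defined below:
 *
 *	struct fs_path *p;
 *	int ret;
 *
 *	p = fs_path_alloc();
 *	if (!p)
 *		return -ENOMEM;
 *	ret = fs_path_add(p, "some_dir", 8);
 *	if (!ret)
 *		ret = fs_path_add(p, "some_file", 9);
 *	... p->start now points to "some_dir/some_file" ...
 *	fs_path_free(p);
 */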
55
56
57/* reused for each extent */
58struct clone_root {
59 struct btrfs_root *root;
60 u64 ino;
61 u64 offset;
62
63 u64 found_refs;
64};
65
66#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
67#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
68
69struct send_ctx {
70 struct file *send_filp;
71 loff_t send_off;
72 char *send_buf;
73 u32 send_size;
74 u32 send_max_size;
75 u64 total_send_size;
76 u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
77 u64 flags; /* 'flags' member of btrfs_ioctl_send_args is u64 */
78
79 struct btrfs_root *send_root;
80 struct btrfs_root *parent_root;
81 struct clone_root *clone_roots;
82 int clone_roots_cnt;
83
84 /* current state of the compare_tree call */
85 struct btrfs_path *left_path;
86 struct btrfs_path *right_path;
87 struct btrfs_key *cmp_key;
88
	/*
	 * Info about the currently processed inode. In case of deleted
	 * inodes, these are the values from the deleted inode.
	 */
93 u64 cur_ino;
94 u64 cur_inode_gen;
95 int cur_inode_new;
96 int cur_inode_new_gen;
97 int cur_inode_deleted;
98 u64 cur_inode_size;
99 u64 cur_inode_mode;
100 u64 cur_inode_rdev;
101 u64 cur_inode_last_extent;
102 u64 cur_inode_next_write_offset;
103 bool ignore_cur_inode;
104
105 u64 send_progress;
106
107 struct list_head new_refs;
108 struct list_head deleted_refs;
109
110 struct radix_tree_root name_cache;
111 struct list_head name_cache_list;
112 int name_cache_size;
113
114 struct file_ra_state ra;
115
116 char *read_buf;
117
118 /*
	 * We process inodes in increasing order of their inode numbers, so if
	 * before an incremental send we reverse the parent/child relationship
	 * of directories such that a directory with a lower inode number was
	 * the parent of a directory with a higher inode number, and the one
	 * becoming the new parent got renamed too, we can't rename/move the
	 * directory with the lower inode number when we finish processing it -
	 * we must process the directory with the higher inode number first,
	 * then rename/move it and then rename/move the directory with the
	 * lower inode number. An example follows.
128 *
129 * Tree state when the first send was performed:
130 *
131 * .
132 * |-- a (ino 257)
133 * |-- b (ino 258)
134 * |
135 * |
136 * |-- c (ino 259)
137 * | |-- d (ino 260)
138 * |
139 * |-- c2 (ino 261)
140 *
141 * Tree state when the second (incremental) send is performed:
142 *
143 * .
144 * |-- a (ino 257)
145 * |-- b (ino 258)
146 * |-- c2 (ino 261)
147 * |-- d2 (ino 260)
148 * |-- cc (ino 259)
149 *
	 * The sequence of steps that led to the second state was:
151 *
152 * mv /a/b/c/d /a/b/c2/d2
153 * mv /a/b/c /a/b/c2/d2/cc
154 *
155 * "c" has lower inode number, but we can't move it (2nd mv operation)
156 * before we move "d", which has higher inode number.
157 *
158 * So we just memorize which move/rename operations must be performed
159 * later when their respective parent is processed and moved/renamed.
160 */
161
162 /* Indexed by parent directory inode number. */
163 struct rb_root pending_dir_moves;
164
165 /*
166 * Reverse index, indexed by the inode number of a directory that
167 * is waiting for the move/rename of its immediate parent before its
168 * own move/rename can be performed.
169 */
170 struct rb_root waiting_dir_moves;
171
172 /*
173 * A directory that is going to be rm'ed might have a child directory
174 * which is in the pending directory moves index above. In this case,
175 * the directory can only be removed after the move/rename of its child
176 * is performed. Example:
177 *
178 * Parent snapshot:
179 *
180 * . (ino 256)
181 * |-- a/ (ino 257)
182 * |-- b/ (ino 258)
183 * |-- c/ (ino 259)
184 * | |-- x/ (ino 260)
185 * |
186 * |-- y/ (ino 261)
187 *
188 * Send snapshot:
189 *
190 * . (ino 256)
191 * |-- a/ (ino 257)
192 * |-- b/ (ino 258)
193 * |-- YY/ (ino 261)
194 * |-- x/ (ino 260)
195 *
	 * Sequence of steps that led to the send snapshot:
197 * rm -f /a/b/c/foo.txt
198 * mv /a/b/y /a/b/YY
199 * mv /a/b/c/x /a/b/YY
200 * rmdir /a/b/c
201 *
202 * When the child is processed, its move/rename is delayed until its
203 * parent is processed (as explained above), but all other operations
204 * like update utimes, chown, chgrp, etc, are performed and the paths
205 * that it uses for those operations must use the orphanized name of
206 * its parent (the directory we're going to rm later), so we need to
207 * memorize that name.
208 *
209 * Indexed by the inode number of the directory to be deleted.
210 */
211 struct rb_root orphan_dirs;
212};
213
214struct pending_dir_move {
215 struct rb_node node;
216 struct list_head list;
217 u64 parent_ino;
218 u64 ino;
219 u64 gen;
220 struct list_head update_refs;
221};
222
223struct waiting_dir_move {
224 struct rb_node node;
225 u64 ino;
226 /*
227 * There might be some directory that could not be removed because it
228 * was waiting for this directory inode to be moved first. Therefore
229 * after this directory is moved, we can try to rmdir the ino rmdir_ino.
230 */
231 u64 rmdir_ino;
232 bool orphanized;
233};
234
235struct orphan_dir_info {
236 struct rb_node node;
237 u64 ino;
238 u64 gen;
239 u64 last_dir_index_offset;
240};
241
242struct name_cache_entry {
243 struct list_head list;
244 /*
245 * radix_tree has only 32bit entries but we need to handle 64bit inums.
246 * We use the lower 32bit of the 64bit inum to store it in the tree. If
	 * more than one inum would fall into the same entry, we use radix_list
248 * to store the additional entries. radix_list is also used to store
249 * entries where two entries have the same inum but different
250 * generations.
251 */
252 struct list_head radix_list;
253 u64 ino;
254 u64 gen;
255 u64 parent_ino;
256 u64 parent_gen;
257 int ret;
258 int need_later_update;
259 int name_len;
260 char name[];
261};
262
263#define ADVANCE 1
264#define ADVANCE_ONLY_NEXT -1
265
266enum btrfs_compare_tree_result {
267 BTRFS_COMPARE_TREE_NEW,
268 BTRFS_COMPARE_TREE_DELETED,
269 BTRFS_COMPARE_TREE_CHANGED,
270 BTRFS_COMPARE_TREE_SAME,
271};
272typedef int (*btrfs_changed_cb_t)(struct btrfs_path *left_path,
273 struct btrfs_path *right_path,
274 struct btrfs_key *key,
275 enum btrfs_compare_tree_result result,
276 void *ctx);
277
278__cold
279static void inconsistent_snapshot_error(struct send_ctx *sctx,
280 enum btrfs_compare_tree_result result,
281 const char *what)
282{
283 const char *result_string;
284
285 switch (result) {
286 case BTRFS_COMPARE_TREE_NEW:
287 result_string = "new";
288 break;
289 case BTRFS_COMPARE_TREE_DELETED:
290 result_string = "deleted";
291 break;
292 case BTRFS_COMPARE_TREE_CHANGED:
293 result_string = "updated";
294 break;
295 case BTRFS_COMPARE_TREE_SAME:
296 ASSERT(0);
297 result_string = "unchanged";
298 break;
299 default:
300 ASSERT(0);
301 result_string = "unexpected";
302 }
303
304 btrfs_err(sctx->send_root->fs_info,
305 "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
306 result_string, what, sctx->cmp_key->objectid,
307 sctx->send_root->root_key.objectid,
308 (sctx->parent_root ?
309 sctx->parent_root->root_key.objectid : 0));
310}
311
312static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
313
314static struct waiting_dir_move *
315get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
316
317static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);
318
319static int need_send_hole(struct send_ctx *sctx)
320{
321 return (sctx->parent_root && !sctx->cur_inode_new &&
322 !sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
323 S_ISREG(sctx->cur_inode_mode));
324}
325
326static void fs_path_reset(struct fs_path *p)
327{
328 if (p->reversed) {
329 p->start = p->buf + p->buf_len - 1;
330 p->end = p->start;
331 *p->start = 0;
332 } else {
333 p->start = p->buf;
334 p->end = p->start;
335 *p->start = 0;
336 }
337}
338
339static struct fs_path *fs_path_alloc(void)
340{
341 struct fs_path *p;
342
343 p = kmalloc(sizeof(*p), GFP_KERNEL);
344 if (!p)
345 return NULL;
346 p->reversed = 0;
347 p->buf = p->inline_buf;
348 p->buf_len = FS_PATH_INLINE_SIZE;
349 fs_path_reset(p);
350 return p;
351}
352
353static struct fs_path *fs_path_alloc_reversed(void)
354{
355 struct fs_path *p;
356
357 p = fs_path_alloc();
358 if (!p)
359 return NULL;
360 p->reversed = 1;
361 fs_path_reset(p);
362 return p;
363}
364
365static void fs_path_free(struct fs_path *p)
366{
367 if (!p)
368 return;
369 if (p->buf != p->inline_buf)
370 kfree(p->buf);
371 kfree(p);
372}
373
374static int fs_path_len(struct fs_path *p)
375{
376 return p->end - p->start;
377}
378
379static int fs_path_ensure_buf(struct fs_path *p, int len)
380{
381 char *tmp_buf;
382 int path_len;
383 int old_buf_len;
384
385 len++;
386
387 if (p->buf_len >= len)
388 return 0;
389
390 if (len > PATH_MAX) {
391 WARN_ON(1);
392 return -ENOMEM;
393 }
394
395 path_len = p->end - p->start;
396 old_buf_len = p->buf_len;
397
398 /*
399 * First time the inline_buf does not suffice
400 */
401 if (p->buf == p->inline_buf) {
402 tmp_buf = kmalloc(len, GFP_KERNEL);
403 if (tmp_buf)
404 memcpy(tmp_buf, p->buf, old_buf_len);
405 } else {
406 tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
407 }
408 if (!tmp_buf)
409 return -ENOMEM;
410 p->buf = tmp_buf;
	/*
	 * The real size of the buffer is bigger; this will let the fast path
	 * happen most of the time.
	 */
415 p->buf_len = ksize(p->buf);
416
417 if (p->reversed) {
418 tmp_buf = p->buf + old_buf_len - path_len - 1;
419 p->end = p->buf + p->buf_len - 1;
420 p->start = p->end - path_len;
421 memmove(p->start, tmp_buf, path_len + 1);
422 } else {
423 p->start = p->buf;
424 p->end = p->start + path_len;
425 }
426 return 0;
427}
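
/*
 * Sketch of the growth behaviour above (sizes are hypothetical): a path
 * that outgrows FS_PATH_INLINE_SIZE is first copied into a kmalloc'ed
 * buffer, any later growth goes through krealloc(). buf_len is then taken
 * from ksize(), so the slightly oversized allocation lets subsequent
 * fs_path_ensure_buf() calls return early via the fast path. For reversed
 * paths the existing contents are moved to the end of the new buffer so
 * that prepending stays cheap.
 */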
428
429static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
430 char **prepared)
431{
432 int ret;
433 int new_len;
434
435 new_len = p->end - p->start + name_len;
436 if (p->start != p->end)
437 new_len++;
438 ret = fs_path_ensure_buf(p, new_len);
439 if (ret < 0)
440 goto out;
441
442 if (p->reversed) {
443 if (p->start != p->end)
444 *--p->start = '/';
445 p->start -= name_len;
446 *prepared = p->start;
447 } else {
448 if (p->start != p->end)
449 *p->end++ = '/';
450 *prepared = p->end;
451 p->end += name_len;
452 *p->end = 0;
453 }
454
455out:
456 return ret;
457}
458
459static int fs_path_add(struct fs_path *p, const char *name, int name_len)
460{
461 int ret;
462 char *prepared;
463
464 ret = fs_path_prepare_for_add(p, name_len, &prepared);
465 if (ret < 0)
466 goto out;
467 memcpy(prepared, name, name_len);
468
469out:
470 return ret;
471}
472
473static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
474{
475 int ret;
476 char *prepared;
477
478 ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
479 if (ret < 0)
480 goto out;
481 memcpy(prepared, p2->start, p2->end - p2->start);
482
483out:
484 return ret;
485}
486
487static int fs_path_add_from_extent_buffer(struct fs_path *p,
488 struct extent_buffer *eb,
489 unsigned long off, int len)
490{
491 int ret;
492 char *prepared;
493
494 ret = fs_path_prepare_for_add(p, len, &prepared);
495 if (ret < 0)
496 goto out;
497
498 read_extent_buffer(eb, prepared, off, len);
499
500out:
501 return ret;
502}
503
504static int fs_path_copy(struct fs_path *p, struct fs_path *from)
505{
506 int ret;
507
508 p->reversed = from->reversed;
509 fs_path_reset(p);
510
511 ret = fs_path_add_path(p, from);
512
513 return ret;
514}
515
516
517static void fs_path_unreverse(struct fs_path *p)
518{
519 char *tmp;
520 int len;
521
522 if (!p->reversed)
523 return;
524
525 tmp = p->start;
526 len = p->end - p->start;
527 p->start = p->buf;
528 p->end = p->start + len;
529 memmove(p->start, tmp, len + 1);
530 p->reversed = 0;
531}
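
/*
 * Worked example for reversed paths (names are hypothetical): get_cur_path()
 * builds paths from the inode up to the subvolume root, so it uses a
 * reversed fs_path and prepends each component:
 *
 *	p = fs_path_alloc_reversed();
 *	fs_path_add(p, "file", 4);	p->start: "file"
 *	fs_path_add(p, "dir", 3);	p->start: "dir/file"
 *	fs_path_add(p, "top", 3);	p->start: "top/dir/file"
 *
 * fs_path_unreverse() then moves the result to the front of the buffer so
 * the path can be used like a normal one.
 */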
532
533static struct btrfs_path *alloc_path_for_send(void)
534{
535 struct btrfs_path *path;
536
537 path = btrfs_alloc_path();
538 if (!path)
539 return NULL;
540 path->search_commit_root = 1;
541 path->skip_locking = 1;
542 path->need_commit_sem = 1;
543 return path;
544}
545
546static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
547{
548 int ret;
549 u32 pos = 0;
550
551 while (pos < len) {
552 ret = kernel_write(filp, buf + pos, len - pos, off);
553 /* TODO handle that correctly */
554 /*if (ret == -ERESTARTSYS) {
555 continue;
556 }*/
557 if (ret < 0)
558 return ret;
559 if (ret == 0) {
560 return -EIO;
561 }
562 pos += ret;
563 }
564
565 return 0;
566}
567
568static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
569{
570 struct btrfs_tlv_header *hdr;
571 int total_len = sizeof(*hdr) + len;
572 int left = sctx->send_max_size - sctx->send_size;
573
574 if (unlikely(left < total_len))
575 return -EOVERFLOW;
576
577 hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
578 hdr->tlv_type = cpu_to_le16(attr);
579 hdr->tlv_len = cpu_to_le16(len);
580 memcpy(hdr + 1, data, len);
581 sctx->send_size += total_len;
582
583 return 0;
584}
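
/*
 * Resulting buffer layout for one attribute (sketch): tlv_put() writes a
 * btrfs_tlv_header directly into send_buf, followed by the raw value. For
 * a u64 attribute emitted via tlv_put_u64() this is:
 *
 *	[ le16 tlv_type ][ le16 tlv_len = 8 ][ 8 bytes of le64 value ]
 *
 * send_size is advanced past header and payload; -EOVERFLOW is returned
 * if the attribute would not fit into send_max_size.
 */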
585
586#define TLV_PUT_DEFINE_INT(bits) \
587 static int tlv_put_u##bits(struct send_ctx *sctx, \
588 u##bits attr, u##bits value) \
589 { \
590 __le##bits __tmp = cpu_to_le##bits(value); \
591 return tlv_put(sctx, attr, &__tmp, sizeof(__tmp)); \
592 }
593
594TLV_PUT_DEFINE_INT(64)
595
596static int tlv_put_string(struct send_ctx *sctx, u16 attr,
597 const char *str, int len)
598{
599 if (len == -1)
600 len = strlen(str);
601 return tlv_put(sctx, attr, str, len);
602}
603
604static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
605 const u8 *uuid)
606{
607 return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
608}
609
610static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
611 struct extent_buffer *eb,
612 struct btrfs_timespec *ts)
613{
614 struct btrfs_timespec bts;
615 read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
616 return tlv_put(sctx, attr, &bts, sizeof(bts));
617}
618
619
620#define TLV_PUT(sctx, attrtype, data, attrlen) \
621 do { \
622 ret = tlv_put(sctx, attrtype, data, attrlen); \
623 if (ret < 0) \
624 goto tlv_put_failure; \
625 } while (0)
626
627#define TLV_PUT_INT(sctx, attrtype, bits, value) \
628 do { \
629 ret = tlv_put_u##bits(sctx, attrtype, value); \
630 if (ret < 0) \
631 goto tlv_put_failure; \
632 } while (0)
633
634#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
635#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
636#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
637#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
638#define TLV_PUT_STRING(sctx, attrtype, str, len) \
639 do { \
640 ret = tlv_put_string(sctx, attrtype, str, len); \
641 if (ret < 0) \
642 goto tlv_put_failure; \
643 } while (0)
644#define TLV_PUT_PATH(sctx, attrtype, p) \
645 do { \
646 ret = tlv_put_string(sctx, attrtype, p->start, \
647 p->end - p->start); \
648 if (ret < 0) \
649 goto tlv_put_failure; \
650 } while(0)
651#define TLV_PUT_UUID(sctx, attrtype, uuid) \
652 do { \
653 ret = tlv_put_uuid(sctx, attrtype, uuid); \
654 if (ret < 0) \
655 goto tlv_put_failure; \
656 } while (0)
657#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
658 do { \
659 ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
660 if (ret < 0) \
661 goto tlv_put_failure; \
662 } while (0)
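
/*
 * Sketch of how the helpers and macros above are combined; this mirrors
 * send_rename() below and is not additional code:
 *
 *	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
 *	if (ret < 0)
 *		goto out;
 *	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
 *	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
 *	ret = send_cmd(sctx);
 *	tlv_put_failure:
 *	out:
 *	return ret;
 *
 * Every user of the TLV_PUT* macros must provide a local tlv_put_failure
 * label, since the macros jump to it on error.
 */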
663
664static int send_header(struct send_ctx *sctx)
665{
666 struct btrfs_stream_header hdr;
667
668 strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
669 hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);
670
671 return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
672 &sctx->send_off);
673}
674
675/*
676 * For each command/item we want to send to userspace, we call this function.
677 */
678static int begin_cmd(struct send_ctx *sctx, int cmd)
679{
680 struct btrfs_cmd_header *hdr;
681
682 if (WARN_ON(!sctx->send_buf))
683 return -EINVAL;
684
685 BUG_ON(sctx->send_size);
686
687 sctx->send_size += sizeof(*hdr);
688 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
689 hdr->cmd = cpu_to_le16(cmd);
690
691 return 0;
692}
693
694static int send_cmd(struct send_ctx *sctx)
695{
696 int ret;
697 struct btrfs_cmd_header *hdr;
698 u32 crc;
699
700 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
701 hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
702 hdr->crc = 0;
703
704 crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
705 hdr->crc = cpu_to_le32(crc);
706
707 ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
708 &sctx->send_off);
709
710 sctx->total_send_size += sctx->send_size;
711 sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
712 sctx->send_size = 0;
713
714 return ret;
715}
716
717/*
718 * Sends a move instruction to user space
719 */
720static int send_rename(struct send_ctx *sctx,
721 struct fs_path *from, struct fs_path *to)
722{
723 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
724 int ret;
725
726 btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);
727
728 ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
729 if (ret < 0)
730 goto out;
731
732 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
733 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
734
735 ret = send_cmd(sctx);
736
737tlv_put_failure:
738out:
739 return ret;
740}
741
742/*
743 * Sends a link instruction to user space
744 */
745static int send_link(struct send_ctx *sctx,
746 struct fs_path *path, struct fs_path *lnk)
747{
748 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
749 int ret;
750
751 btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);
752
753 ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
754 if (ret < 0)
755 goto out;
756
757 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
758 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);
759
760 ret = send_cmd(sctx);
761
762tlv_put_failure:
763out:
764 return ret;
765}
766
767/*
768 * Sends an unlink instruction to user space
769 */
770static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
771{
772 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
773 int ret;
774
775 btrfs_debug(fs_info, "send_unlink %s", path->start);
776
777 ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
778 if (ret < 0)
779 goto out;
780
781 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
782
783 ret = send_cmd(sctx);
784
785tlv_put_failure:
786out:
787 return ret;
788}
789
790/*
791 * Sends a rmdir instruction to user space
792 */
793static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
794{
795 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
796 int ret;
797
798 btrfs_debug(fs_info, "send_rmdir %s", path->start);
799
800 ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
801 if (ret < 0)
802 goto out;
803
804 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
805
806 ret = send_cmd(sctx);
807
808tlv_put_failure:
809out:
810 return ret;
811}
812
813/*
814 * Helper function to retrieve some fields from an inode item.
815 */
816static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
817 u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
818 u64 *gid, u64 *rdev)
819{
820 int ret;
821 struct btrfs_inode_item *ii;
822 struct btrfs_key key;
823
824 key.objectid = ino;
825 key.type = BTRFS_INODE_ITEM_KEY;
826 key.offset = 0;
827 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
828 if (ret) {
829 if (ret > 0)
830 ret = -ENOENT;
831 return ret;
832 }
833
834 ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
835 struct btrfs_inode_item);
836 if (size)
837 *size = btrfs_inode_size(path->nodes[0], ii);
838 if (gen)
839 *gen = btrfs_inode_generation(path->nodes[0], ii);
840 if (mode)
841 *mode = btrfs_inode_mode(path->nodes[0], ii);
842 if (uid)
843 *uid = btrfs_inode_uid(path->nodes[0], ii);
844 if (gid)
845 *gid = btrfs_inode_gid(path->nodes[0], ii);
846 if (rdev)
847 *rdev = btrfs_inode_rdev(path->nodes[0], ii);
848
849 return ret;
850}
851
852static int get_inode_info(struct btrfs_root *root,
853 u64 ino, u64 *size, u64 *gen,
854 u64 *mode, u64 *uid, u64 *gid,
855 u64 *rdev)
856{
857 struct btrfs_path *path;
858 int ret;
859
860 path = alloc_path_for_send();
861 if (!path)
862 return -ENOMEM;
863 ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
864 rdev);
865 btrfs_free_path(path);
866 return ret;
867}
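
/*
 * Typical usage (sketch): callers pass NULL for the fields they do not
 * need, e.g. to fetch only the generation and mode of an inode:
 *
 *	ret = get_inode_info(root, ino, NULL, &gen, &mode, NULL, NULL, NULL);
 *	if (ret < 0)
 *		return ret;
 *
 * -ENOENT is returned when no inode item is found for the given ino.
 */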
868
869typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
870 struct fs_path *p,
871 void *ctx);
872
873/*
874 * Helper function to iterate the entries in ONE btrfs_inode_ref or
875 * btrfs_inode_extref.
876 * The iterate callback may return a non zero value to stop iteration. This can
877 * be a negative value for error codes or 1 to simply stop it.
878 *
879 * path must point to the INODE_REF or INODE_EXTREF when called.
880 */
881static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
882 struct btrfs_key *found_key, int resolve,
883 iterate_inode_ref_t iterate, void *ctx)
884{
885 struct extent_buffer *eb = path->nodes[0];
886 struct btrfs_item *item;
887 struct btrfs_inode_ref *iref;
888 struct btrfs_inode_extref *extref;
889 struct btrfs_path *tmp_path;
890 struct fs_path *p;
891 u32 cur = 0;
892 u32 total;
893 int slot = path->slots[0];
894 u32 name_len;
895 char *start;
896 int ret = 0;
897 int num = 0;
898 int index;
899 u64 dir;
900 unsigned long name_off;
901 unsigned long elem_size;
902 unsigned long ptr;
903
904 p = fs_path_alloc_reversed();
905 if (!p)
906 return -ENOMEM;
907
908 tmp_path = alloc_path_for_send();
909 if (!tmp_path) {
910 fs_path_free(p);
911 return -ENOMEM;
912 }
913
914
915 if (found_key->type == BTRFS_INODE_REF_KEY) {
916 ptr = (unsigned long)btrfs_item_ptr(eb, slot,
917 struct btrfs_inode_ref);
918 item = btrfs_item_nr(slot);
919 total = btrfs_item_size(eb, item);
920 elem_size = sizeof(*iref);
921 } else {
922 ptr = btrfs_item_ptr_offset(eb, slot);
923 total = btrfs_item_size_nr(eb, slot);
924 elem_size = sizeof(*extref);
925 }
926
927 while (cur < total) {
928 fs_path_reset(p);
929
930 if (found_key->type == BTRFS_INODE_REF_KEY) {
931 iref = (struct btrfs_inode_ref *)(ptr + cur);
932 name_len = btrfs_inode_ref_name_len(eb, iref);
933 name_off = (unsigned long)(iref + 1);
934 index = btrfs_inode_ref_index(eb, iref);
935 dir = found_key->offset;
936 } else {
937 extref = (struct btrfs_inode_extref *)(ptr + cur);
938 name_len = btrfs_inode_extref_name_len(eb, extref);
939 name_off = (unsigned long)&extref->name;
940 index = btrfs_inode_extref_index(eb, extref);
941 dir = btrfs_inode_extref_parent(eb, extref);
942 }
943
944 if (resolve) {
945 start = btrfs_ref_to_path(root, tmp_path, name_len,
946 name_off, eb, dir,
947 p->buf, p->buf_len);
948 if (IS_ERR(start)) {
949 ret = PTR_ERR(start);
950 goto out;
951 }
952 if (start < p->buf) {
				/* overflow, try again with a larger buffer */
954 ret = fs_path_ensure_buf(p,
955 p->buf_len + p->buf - start);
956 if (ret < 0)
957 goto out;
958 start = btrfs_ref_to_path(root, tmp_path,
959 name_len, name_off,
960 eb, dir,
961 p->buf, p->buf_len);
962 if (IS_ERR(start)) {
963 ret = PTR_ERR(start);
964 goto out;
965 }
966 BUG_ON(start < p->buf);
967 }
968 p->start = start;
969 } else {
970 ret = fs_path_add_from_extent_buffer(p, eb, name_off,
971 name_len);
972 if (ret < 0)
973 goto out;
974 }
975
976 cur += elem_size + name_len;
977 ret = iterate(num, dir, index, p, ctx);
978 if (ret)
979 goto out;
980 num++;
981 }
982
983out:
984 btrfs_free_path(tmp_path);
985 fs_path_free(p);
986 return ret;
987}
988
989typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
990 const char *name, int name_len,
991 const char *data, int data_len,
992 u8 type, void *ctx);
993
994/*
995 * Helper function to iterate the entries in ONE btrfs_dir_item.
996 * The iterate callback may return a non zero value to stop iteration. This can
997 * be a negative value for error codes or 1 to simply stop it.
998 *
999 * path must point to the dir item when called.
1000 */
1001static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
1002 iterate_dir_item_t iterate, void *ctx)
1003{
1004 int ret = 0;
1005 struct extent_buffer *eb;
1006 struct btrfs_item *item;
1007 struct btrfs_dir_item *di;
1008 struct btrfs_key di_key;
1009 char *buf = NULL;
1010 int buf_len;
1011 u32 name_len;
1012 u32 data_len;
1013 u32 cur;
1014 u32 len;
1015 u32 total;
1016 int slot;
1017 int num;
1018 u8 type;
1019
1020 /*
1021 * Start with a small buffer (1 page). If later we end up needing more
1022 * space, which can happen for xattrs on a fs with a leaf size greater
	 * than the page size, attempt to increase the buffer. Typically xattr
1024 * values are small.
1025 */
1026 buf_len = PATH_MAX;
1027 buf = kmalloc(buf_len, GFP_KERNEL);
1028 if (!buf) {
1029 ret = -ENOMEM;
1030 goto out;
1031 }
1032
1033 eb = path->nodes[0];
1034 slot = path->slots[0];
1035 item = btrfs_item_nr(slot);
1036 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
1037 cur = 0;
1038 len = 0;
1039 total = btrfs_item_size(eb, item);
1040
1041 num = 0;
1042 while (cur < total) {
1043 name_len = btrfs_dir_name_len(eb, di);
1044 data_len = btrfs_dir_data_len(eb, di);
1045 type = btrfs_dir_type(eb, di);
1046 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
1047
1048 if (type == BTRFS_FT_XATTR) {
1049 if (name_len > XATTR_NAME_MAX) {
1050 ret = -ENAMETOOLONG;
1051 goto out;
1052 }
1053 if (name_len + data_len >
1054 BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
1055 ret = -E2BIG;
1056 goto out;
1057 }
1058 } else {
1059 /*
1060 * Path too long
1061 */
1062 if (name_len + data_len > PATH_MAX) {
1063 ret = -ENAMETOOLONG;
1064 goto out;
1065 }
1066 }
1067
1068 if (name_len + data_len > buf_len) {
1069 buf_len = name_len + data_len;
1070 if (is_vmalloc_addr(buf)) {
1071 vfree(buf);
1072 buf = NULL;
1073 } else {
1074 char *tmp = krealloc(buf, buf_len,
1075 GFP_KERNEL | __GFP_NOWARN);
1076
1077 if (!tmp)
1078 kfree(buf);
1079 buf = tmp;
1080 }
1081 if (!buf) {
1082 buf = kvmalloc(buf_len, GFP_KERNEL);
1083 if (!buf) {
1084 ret = -ENOMEM;
1085 goto out;
1086 }
1087 }
1088 }
1089
1090 read_extent_buffer(eb, buf, (unsigned long)(di + 1),
1091 name_len + data_len);
1092
1093 len = sizeof(*di) + name_len + data_len;
1094 di = (struct btrfs_dir_item *)((char *)di + len);
1095 cur += len;
1096
1097 ret = iterate(num, &di_key, buf, name_len, buf + name_len,
1098 data_len, type, ctx);
1099 if (ret < 0)
1100 goto out;
1101 if (ret) {
1102 ret = 0;
1103 goto out;
1104 }
1105
1106 num++;
1107 }
1108
1109out:
1110 kvfree(buf);
1111 return ret;
1112}
1113
1114static int __copy_first_ref(int num, u64 dir, int index,
1115 struct fs_path *p, void *ctx)
1116{
1117 int ret;
1118 struct fs_path *pt = ctx;
1119
1120 ret = fs_path_copy(pt, p);
1121 if (ret < 0)
1122 return ret;
1123
1124 /* we want the first only */
1125 return 1;
1126}
1127
1128/*
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, the remaining ones are ignored.
1131 */
1132static int get_inode_path(struct btrfs_root *root,
1133 u64 ino, struct fs_path *path)
1134{
1135 int ret;
1136 struct btrfs_key key, found_key;
1137 struct btrfs_path *p;
1138
1139 p = alloc_path_for_send();
1140 if (!p)
1141 return -ENOMEM;
1142
1143 fs_path_reset(path);
1144
1145 key.objectid = ino;
1146 key.type = BTRFS_INODE_REF_KEY;
1147 key.offset = 0;
1148
1149 ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
1150 if (ret < 0)
1151 goto out;
1152 if (ret) {
1153 ret = 1;
1154 goto out;
1155 }
1156 btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
1157 if (found_key.objectid != ino ||
1158 (found_key.type != BTRFS_INODE_REF_KEY &&
1159 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1160 ret = -ENOENT;
1161 goto out;
1162 }
1163
1164 ret = iterate_inode_ref(root, p, &found_key, 1,
1165 __copy_first_ref, path);
1166 if (ret < 0)
1167 goto out;
1168 ret = 0;
1169
1170out:
1171 btrfs_free_path(p);
1172 return ret;
1173}
1174
1175struct backref_ctx {
1176 struct send_ctx *sctx;
1177
1178 /* number of total found references */
1179 u64 found;
1180
1181 /*
1182 * used for clones found in send_root. clones found behind cur_objectid
1183 * and cur_offset are not considered as allowed clones.
1184 */
1185 u64 cur_objectid;
1186 u64 cur_offset;
1187
1188 /* may be truncated in case it's the last extent in a file */
1189 u64 extent_len;
1190
1191 /* data offset in the file extent item */
1192 u64 data_offset;
1193
1194 /* Just to check for bugs in backref resolving */
1195 int found_itself;
1196};
1197
1198static int __clone_root_cmp_bsearch(const void *key, const void *elt)
1199{
1200 u64 root = (u64)(uintptr_t)key;
1201 struct clone_root *cr = (struct clone_root *)elt;
1202
1203 if (root < cr->root->root_key.objectid)
1204 return -1;
1205 if (root > cr->root->root_key.objectid)
1206 return 1;
1207 return 0;
1208}
1209
1210static int __clone_root_cmp_sort(const void *e1, const void *e2)
1211{
1212 struct clone_root *cr1 = (struct clone_root *)e1;
1213 struct clone_root *cr2 = (struct clone_root *)e2;
1214
1215 if (cr1->root->root_key.objectid < cr2->root->root_key.objectid)
1216 return -1;
1217 if (cr1->root->root_key.objectid > cr2->root->root_key.objectid)
1218 return 1;
1219 return 0;
1220}
1221
1222/*
1223 * Called for every backref that is found for the current extent.
1224 * Results are collected in sctx->clone_roots->ino/offset/found_refs
1225 */
1226static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
1227{
1228 struct backref_ctx *bctx = ctx_;
1229 struct clone_root *found;
1230
1231 /* First check if the root is in the list of accepted clone sources */
1232 found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
1233 bctx->sctx->clone_roots_cnt,
1234 sizeof(struct clone_root),
1235 __clone_root_cmp_bsearch);
1236 if (!found)
1237 return 0;
1238
1239 if (found->root == bctx->sctx->send_root &&
1240 ino == bctx->cur_objectid &&
1241 offset == bctx->cur_offset) {
1242 bctx->found_itself = 1;
1243 }
1244
1245 /*
1246 * Make sure we don't consider clones from send_root that are
1247 * behind the current inode/offset.
1248 */
1249 if (found->root == bctx->sctx->send_root) {
1250 /*
1251 * TODO for the moment we don't accept clones from the inode
		 * that is currently being sent. We may change this when
1253 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
1254 * file.
1255 */
1256 if (ino >= bctx->cur_objectid)
1257 return 0;
1258 }
1259
1260 bctx->found++;
1261 found->found_refs++;
1262 if (ino < found->ino) {
1263 found->ino = ino;
1264 found->offset = offset;
1265 } else if (found->ino == ino) {
1266 /*
		 * Same extent found more than once in the same file.
1268 */
1269 if (found->offset > offset + bctx->extent_len)
1270 found->offset = offset;
1271 }
1272
1273 return 0;
1274}
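
/*
 * Example (roots and inodes are hypothetical): if the extent currently
 * being processed is also referenced by (root 259, ino 300, offset 0) and
 * root 259 is one of the clone sources passed to the send ioctl, the
 * bsearch above finds its clone_root, found_refs is incremented and the
 * lowest inode/offset seen so far is recorded. References from send_root
 * whose inode number is not lower than the inode currently being sent are
 * skipped, since such ranges either belong to the file being sent right
 * now or have not been received yet.
 */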
1275
1276/*
1277 * Given an inode, offset and extent item, it finds a good clone for a clone
1278 * instruction. Returns -ENOENT when none could be found. The function makes
1279 * sure that the returned clone is usable at the point where sending is at the
 * moment. This means that no clones are accepted which lie behind the current
1281 * inode+offset.
1282 *
1283 * path must point to the extent item when called.
1284 */
1285static int find_extent_clone(struct send_ctx *sctx,
1286 struct btrfs_path *path,
1287 u64 ino, u64 data_offset,
1288 u64 ino_size,
1289 struct clone_root **found)
1290{
1291 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
1292 int ret;
1293 int extent_type;
1294 u64 logical;
1295 u64 disk_byte;
1296 u64 num_bytes;
1297 u64 extent_item_pos;
1298 u64 flags = 0;
1299 struct btrfs_file_extent_item *fi;
1300 struct extent_buffer *eb = path->nodes[0];
1301 struct backref_ctx *backref_ctx = NULL;
1302 struct clone_root *cur_clone_root;
1303 struct btrfs_key found_key;
1304 struct btrfs_path *tmp_path;
1305 int compressed;
1306 u32 i;
1307
1308 tmp_path = alloc_path_for_send();
1309 if (!tmp_path)
1310 return -ENOMEM;
1311
1312 /* We only use this path under the commit sem */
1313 tmp_path->need_commit_sem = 0;
1314
1315 backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_KERNEL);
1316 if (!backref_ctx) {
1317 ret = -ENOMEM;
1318 goto out;
1319 }
1320
1321 if (data_offset >= ino_size) {
1322 /*
		 * There may be extents that lie beyond the file's size.
		 * This can happen in combination with snapshotting while
		 * writing large files.
1326 */
1327 ret = 0;
1328 goto out;
1329 }
1330
1331 fi = btrfs_item_ptr(eb, path->slots[0],
1332 struct btrfs_file_extent_item);
1333 extent_type = btrfs_file_extent_type(eb, fi);
1334 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1335 ret = -ENOENT;
1336 goto out;
1337 }
1338 compressed = btrfs_file_extent_compression(eb, fi);
1339
1340 num_bytes = btrfs_file_extent_num_bytes(eb, fi);
1341 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
1342 if (disk_byte == 0) {
1343 ret = -ENOENT;
1344 goto out;
1345 }
1346 logical = disk_byte + btrfs_file_extent_offset(eb, fi);
1347
1348 down_read(&fs_info->commit_root_sem);
1349 ret = extent_from_logical(fs_info, disk_byte, tmp_path,
1350 &found_key, &flags);
1351 up_read(&fs_info->commit_root_sem);
1352 btrfs_release_path(tmp_path);
1353
1354 if (ret < 0)
1355 goto out;
1356 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1357 ret = -EIO;
1358 goto out;
1359 }
1360
1361 /*
1362 * Setup the clone roots.
1363 */
1364 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1365 cur_clone_root = sctx->clone_roots + i;
1366 cur_clone_root->ino = (u64)-1;
1367 cur_clone_root->offset = 0;
1368 cur_clone_root->found_refs = 0;
1369 }
1370
1371 backref_ctx->sctx = sctx;
1372 backref_ctx->found = 0;
1373 backref_ctx->cur_objectid = ino;
1374 backref_ctx->cur_offset = data_offset;
1375 backref_ctx->found_itself = 0;
1376 backref_ctx->extent_len = num_bytes;
1377 /*
1378 * For non-compressed extents iterate_extent_inodes() gives us extent
1379 * offsets that already take into account the data offset, but not for
1380 * compressed extents, since the offset is logical and not relative to
1381 * the physical extent locations. We must take this into account to
1382 * avoid sending clone offsets that go beyond the source file's size,
1383 * which would result in the clone ioctl failing with -EINVAL on the
1384 * receiving end.
1385 */
1386 if (compressed == BTRFS_COMPRESS_NONE)
1387 backref_ctx->data_offset = 0;
1388 else
1389 backref_ctx->data_offset = btrfs_file_extent_offset(eb, fi);
1390
1391 /*
1392 * The last extent of a file may be too large due to page alignment.
1393 * We need to adjust extent_len in this case so that the checks in
1394 * __iterate_backrefs work.
1395 */
1396 if (data_offset + num_bytes >= ino_size)
1397 backref_ctx->extent_len = ino_size - data_offset;
1398
1399 /*
1400 * Now collect all backrefs.
1401 */
1402 if (compressed == BTRFS_COMPRESS_NONE)
1403 extent_item_pos = logical - found_key.objectid;
1404 else
1405 extent_item_pos = 0;
1406 ret = iterate_extent_inodes(fs_info, found_key.objectid,
1407 extent_item_pos, 1, __iterate_backrefs,
1408 backref_ctx, false);
1409
1410 if (ret < 0)
1411 goto out;
1412
1413 if (!backref_ctx->found_itself) {
1414 /* found a bug in backref code? */
1415 ret = -EIO;
1416 btrfs_err(fs_info,
1417 "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
1418 ino, data_offset, disk_byte, found_key.objectid);
1419 goto out;
1420 }
1421
1422 btrfs_debug(fs_info,
1423 "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
1424 data_offset, ino, num_bytes, logical);
1425
1426 if (!backref_ctx->found)
1427 btrfs_debug(fs_info, "no clones found");
1428
1429 cur_clone_root = NULL;
1430 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1431 if (sctx->clone_roots[i].found_refs) {
1432 if (!cur_clone_root)
1433 cur_clone_root = sctx->clone_roots + i;
1434 else if (sctx->clone_roots[i].root == sctx->send_root)
1435 /* prefer clones from send_root over others */
1436 cur_clone_root = sctx->clone_roots + i;
1437 }
1438
1439 }
1440
1441 if (cur_clone_root) {
1442 *found = cur_clone_root;
1443 ret = 0;
1444 } else {
1445 ret = -ENOENT;
1446 }
1447
1448out:
1449 btrfs_free_path(tmp_path);
1450 kfree(backref_ctx);
1451 return ret;
1452}
1453
1454static int read_symlink(struct btrfs_root *root,
1455 u64 ino,
1456 struct fs_path *dest)
1457{
1458 int ret;
1459 struct btrfs_path *path;
1460 struct btrfs_key key;
1461 struct btrfs_file_extent_item *ei;
1462 u8 type;
1463 u8 compression;
1464 unsigned long off;
1465 int len;
1466
1467 path = alloc_path_for_send();
1468 if (!path)
1469 return -ENOMEM;
1470
1471 key.objectid = ino;
1472 key.type = BTRFS_EXTENT_DATA_KEY;
1473 key.offset = 0;
1474 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1475 if (ret < 0)
1476 goto out;
1477 if (ret) {
1478 /*
1479 * An empty symlink inode. Can happen in rare error paths when
1480 * creating a symlink (transaction committed before the inode
1481 * eviction handler removed the symlink inode items and a crash
		 * happened in between or the subvol was snapshotted in between).
1483 * Print an informative message to dmesg/syslog so that the user
1484 * can delete the symlink.
1485 */
1486 btrfs_err(root->fs_info,
1487 "Found empty symlink inode %llu at root %llu",
1488 ino, root->root_key.objectid);
1489 ret = -EIO;
1490 goto out;
1491 }
1492
1493 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
1494 struct btrfs_file_extent_item);
1495 type = btrfs_file_extent_type(path->nodes[0], ei);
1496 compression = btrfs_file_extent_compression(path->nodes[0], ei);
1497 BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
1498 BUG_ON(compression);
1499
1500 off = btrfs_file_extent_inline_start(ei);
1501 len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
1502
1503 ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
1504
1505out:
1506 btrfs_free_path(path);
1507 return ret;
1508}
1509
1510/*
1511 * Helper function to generate a file name that is unique in the root of
1512 * send_root and parent_root. This is used to generate names for orphan inodes.
1513 */
1514static int gen_unique_name(struct send_ctx *sctx,
1515 u64 ino, u64 gen,
1516 struct fs_path *dest)
1517{
1518 int ret = 0;
1519 struct btrfs_path *path;
1520 struct btrfs_dir_item *di;
1521 char tmp[64];
1522 int len;
1523 u64 idx = 0;
1524
1525 path = alloc_path_for_send();
1526 if (!path)
1527 return -ENOMEM;
1528
1529 while (1) {
1530 len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
1531 ino, gen, idx);
1532 ASSERT(len < sizeof(tmp));
1533
1534 di = btrfs_lookup_dir_item(NULL, sctx->send_root,
1535 path, BTRFS_FIRST_FREE_OBJECTID,
1536 tmp, strlen(tmp), 0);
1537 btrfs_release_path(path);
1538 if (IS_ERR(di)) {
1539 ret = PTR_ERR(di);
1540 goto out;
1541 }
1542 if (di) {
1543 /* not unique, try again */
1544 idx++;
1545 continue;
1546 }
1547
1548 if (!sctx->parent_root) {
1549 /* unique */
1550 ret = 0;
1551 break;
1552 }
1553
1554 di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
1555 path, BTRFS_FIRST_FREE_OBJECTID,
1556 tmp, strlen(tmp), 0);
1557 btrfs_release_path(path);
1558 if (IS_ERR(di)) {
1559 ret = PTR_ERR(di);
1560 goto out;
1561 }
1562 if (di) {
1563 /* not unique, try again */
1564 idx++;
1565 continue;
1566 }
1567 /* unique */
1568 break;
1569 }
1570
1571 ret = fs_path_add(dest, tmp, strlen(tmp));
1572
1573out:
1574 btrfs_free_path(path);
1575 return ret;
1576}
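
/*
 * Example (values are hypothetical): for ino 261 with generation 7 the
 * candidate names are "o261-7-0", "o261-7-1", ... and the first one that
 * exists in neither the send root's nor the parent root's top level
 * directory is used.
 */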
1577
1578enum inode_state {
1579 inode_state_no_change,
1580 inode_state_will_create,
1581 inode_state_did_create,
1582 inode_state_will_delete,
1583 inode_state_did_delete,
1584};
1585
1586static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
1587{
1588 int ret;
1589 int left_ret;
1590 int right_ret;
1591 u64 left_gen;
1592 u64 right_gen;
1593
1594 ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
1595 NULL, NULL);
1596 if (ret < 0 && ret != -ENOENT)
1597 goto out;
1598 left_ret = ret;
1599
1600 if (!sctx->parent_root) {
1601 right_ret = -ENOENT;
1602 } else {
1603 ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
1604 NULL, NULL, NULL, NULL);
1605 if (ret < 0 && ret != -ENOENT)
1606 goto out;
1607 right_ret = ret;
1608 }
1609
1610 if (!left_ret && !right_ret) {
1611 if (left_gen == gen && right_gen == gen) {
1612 ret = inode_state_no_change;
1613 } else if (left_gen == gen) {
1614 if (ino < sctx->send_progress)
1615 ret = inode_state_did_create;
1616 else
1617 ret = inode_state_will_create;
1618 } else if (right_gen == gen) {
1619 if (ino < sctx->send_progress)
1620 ret = inode_state_did_delete;
1621 else
1622 ret = inode_state_will_delete;
1623 } else {
1624 ret = -ENOENT;
1625 }
1626 } else if (!left_ret) {
1627 if (left_gen == gen) {
1628 if (ino < sctx->send_progress)
1629 ret = inode_state_did_create;
1630 else
1631 ret = inode_state_will_create;
1632 } else {
1633 ret = -ENOENT;
1634 }
1635 } else if (!right_ret) {
1636 if (right_gen == gen) {
1637 if (ino < sctx->send_progress)
1638 ret = inode_state_did_delete;
1639 else
1640 ret = inode_state_will_delete;
1641 } else {
1642 ret = -ENOENT;
1643 }
1644 } else {
1645 ret = -ENOENT;
1646 }
1647
1648out:
1649 return ret;
1650}
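
/*
 * Summary of the possible return values above, assuming the generation
 * matches in each case:
 *
 *	inode found in		ino already processed	ino not yet processed
 *	send and parent root	inode_state_no_change	inode_state_no_change
 *	send root only		inode_state_did_create	inode_state_will_create
 *	parent root only	inode_state_did_delete	inode_state_will_delete
 *
 * "Already processed" means ino < sctx->send_progress. A generation
 * mismatch results in -ENOENT.
 */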
1651
1652static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
1653{
1654 int ret;
1655
1656 if (ino == BTRFS_FIRST_FREE_OBJECTID)
1657 return 1;
1658
1659 ret = get_cur_inode_state(sctx, ino, gen);
1660 if (ret < 0)
1661 goto out;
1662
1663 if (ret == inode_state_no_change ||
1664 ret == inode_state_did_create ||
1665 ret == inode_state_will_delete)
1666 ret = 1;
1667 else
1668 ret = 0;
1669
1670out:
1671 return ret;
1672}
1673
1674/*
1675 * Helper function to lookup a dir item in a dir.
1676 */
1677static int lookup_dir_item_inode(struct btrfs_root *root,
1678 u64 dir, const char *name, int name_len,
1679 u64 *found_inode,
1680 u8 *found_type)
1681{
1682 int ret = 0;
1683 struct btrfs_dir_item *di;
1684 struct btrfs_key key;
1685 struct btrfs_path *path;
1686
1687 path = alloc_path_for_send();
1688 if (!path)
1689 return -ENOMEM;
1690
1691 di = btrfs_lookup_dir_item(NULL, root, path,
1692 dir, name, name_len, 0);
1693 if (IS_ERR_OR_NULL(di)) {
1694 ret = di ? PTR_ERR(di) : -ENOENT;
1695 goto out;
1696 }
1697 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
1698 if (key.type == BTRFS_ROOT_ITEM_KEY) {
1699 ret = -ENOENT;
1700 goto out;
1701 }
1702 *found_inode = key.objectid;
1703 *found_type = btrfs_dir_type(path->nodes[0], di);
1704
1705out:
1706 btrfs_free_path(path);
1707 return ret;
1708}
1709
1710/*
1711 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
1712 * generation of the parent dir and the name of the dir entry.
1713 */
1714static int get_first_ref(struct btrfs_root *root, u64 ino,
1715 u64 *dir, u64 *dir_gen, struct fs_path *name)
1716{
1717 int ret;
1718 struct btrfs_key key;
1719 struct btrfs_key found_key;
1720 struct btrfs_path *path;
1721 int len;
1722 u64 parent_dir;
1723
1724 path = alloc_path_for_send();
1725 if (!path)
1726 return -ENOMEM;
1727
1728 key.objectid = ino;
1729 key.type = BTRFS_INODE_REF_KEY;
1730 key.offset = 0;
1731
1732 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
1733 if (ret < 0)
1734 goto out;
1735 if (!ret)
1736 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1737 path->slots[0]);
1738 if (ret || found_key.objectid != ino ||
1739 (found_key.type != BTRFS_INODE_REF_KEY &&
1740 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1741 ret = -ENOENT;
1742 goto out;
1743 }
1744
1745 if (found_key.type == BTRFS_INODE_REF_KEY) {
1746 struct btrfs_inode_ref *iref;
1747 iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1748 struct btrfs_inode_ref);
1749 len = btrfs_inode_ref_name_len(path->nodes[0], iref);
1750 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1751 (unsigned long)(iref + 1),
1752 len);
1753 parent_dir = found_key.offset;
1754 } else {
1755 struct btrfs_inode_extref *extref;
1756 extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1757 struct btrfs_inode_extref);
1758 len = btrfs_inode_extref_name_len(path->nodes[0], extref);
1759 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1760 (unsigned long)&extref->name, len);
1761 parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
1762 }
1763 if (ret < 0)
1764 goto out;
1765 btrfs_release_path(path);
1766
1767 if (dir_gen) {
1768 ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
1769 NULL, NULL, NULL);
1770 if (ret < 0)
1771 goto out;
1772 }
1773
1774 *dir = parent_dir;
1775
1776out:
1777 btrfs_free_path(path);
1778 return ret;
1779}
1780
1781static int is_first_ref(struct btrfs_root *root,
1782 u64 ino, u64 dir,
1783 const char *name, int name_len)
1784{
1785 int ret;
1786 struct fs_path *tmp_name;
1787 u64 tmp_dir;
1788
1789 tmp_name = fs_path_alloc();
1790 if (!tmp_name)
1791 return -ENOMEM;
1792
1793 ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
1794 if (ret < 0)
1795 goto out;
1796
1797 if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
1798 ret = 0;
1799 goto out;
1800 }
1801
1802 ret = !memcmp(tmp_name->start, name, name_len);
1803
1804out:
1805 fs_path_free(tmp_name);
1806 return ret;
1807}
1808
1809/*
1810 * Used by process_recorded_refs to determine if a new ref would overwrite an
1811 * already existing ref. In case it detects an overwrite, it returns the
1812 * inode/gen in who_ino/who_gen.
1813 * When an overwrite is detected, process_recorded_refs does proper orphanizing
1814 * to make sure later references to the overwritten inode are possible.
1815 * Orphanizing is however only required for the first ref of an inode.
1816 * process_recorded_refs does an additional is_first_ref check to see if
1817 * orphanizing is really required.
1818 */
1819static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
1820 const char *name, int name_len,
1821 u64 *who_ino, u64 *who_gen, u64 *who_mode)
1822{
1823 int ret = 0;
1824 u64 gen;
1825 u64 other_inode = 0;
1826 u8 other_type = 0;
1827
1828 if (!sctx->parent_root)
1829 goto out;
1830
1831 ret = is_inode_existent(sctx, dir, dir_gen);
1832 if (ret <= 0)
1833 goto out;
1834
1835 /*
1836 * If we have a parent root we need to verify that the parent dir was
1837 * not deleted and then re-created, if it was then we have no overwrite
1838 * and we can just unlink this entry.
1839 */
1840 if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
1841 ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
1842 NULL, NULL, NULL);
1843 if (ret < 0 && ret != -ENOENT)
1844 goto out;
1845 if (ret) {
1846 ret = 0;
1847 goto out;
1848 }
1849 if (gen != dir_gen)
1850 goto out;
1851 }
1852
1853 ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
1854 &other_inode, &other_type);
1855 if (ret < 0 && ret != -ENOENT)
1856 goto out;
1857 if (ret) {
1858 ret = 0;
1859 goto out;
1860 }
1861
1862 /*
1863 * Check if the overwritten ref was already processed. If yes, the ref
1864 * was already unlinked/moved, so we can safely assume that we will not
1865 * overwrite anything at this point in time.
1866 */
1867 if (other_inode > sctx->send_progress ||
1868 is_waiting_for_move(sctx, other_inode)) {
1869 ret = get_inode_info(sctx->parent_root, other_inode, NULL,
1870 who_gen, who_mode, NULL, NULL, NULL);
1871 if (ret < 0)
1872 goto out;
1873
1874 ret = 1;
1875 *who_ino = other_inode;
1876 } else {
1877 ret = 0;
1878 }
1879
1880out:
1881 return ret;
1882}
1883
1884/*
1885 * Checks if the ref was overwritten by an already processed inode. This is
1886 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
 * thus the orphan name needs to be used.
1888 * process_recorded_refs also uses it to avoid unlinking of refs that were
1889 * overwritten.
1890 */
1891static int did_overwrite_ref(struct send_ctx *sctx,
1892 u64 dir, u64 dir_gen,
1893 u64 ino, u64 ino_gen,
1894 const char *name, int name_len)
1895{
1896 int ret = 0;
1897 u64 gen;
1898 u64 ow_inode;
1899 u8 other_type;
1900
1901 if (!sctx->parent_root)
1902 goto out;
1903
1904 ret = is_inode_existent(sctx, dir, dir_gen);
1905 if (ret <= 0)
1906 goto out;
1907
1908 if (dir != BTRFS_FIRST_FREE_OBJECTID) {
1909 ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL,
1910 NULL, NULL, NULL);
1911 if (ret < 0 && ret != -ENOENT)
1912 goto out;
1913 if (ret) {
1914 ret = 0;
1915 goto out;
1916 }
1917 if (gen != dir_gen)
1918 goto out;
1919 }
1920
1921 /* check if the ref was overwritten by another ref */
1922 ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
1923 &ow_inode, &other_type);
1924 if (ret < 0 && ret != -ENOENT)
1925 goto out;
1926 if (ret) {
1927 /* was never and will never be overwritten */
1928 ret = 0;
1929 goto out;
1930 }
1931
1932 ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
1933 NULL, NULL);
1934 if (ret < 0)
1935 goto out;
1936
1937 if (ow_inode == ino && gen == ino_gen) {
1938 ret = 0;
1939 goto out;
1940 }
1941
1942 /*
1943 * We know that it is or will be overwritten. Check this now.
1944 * The current inode being processed might have been the one that caused
1945 * inode 'ino' to be orphanized, therefore check if ow_inode matches
1946 * the current inode being processed.
1947 */
1948 if ((ow_inode < sctx->send_progress) ||
1949 (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
1950 gen == sctx->cur_inode_gen))
1951 ret = 1;
1952 else
1953 ret = 0;
1954
1955out:
1956 return ret;
1957}
1958
1959/*
1960 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
1961 * that got overwritten. This is used by process_recorded_refs to determine
1962 * if it has to use the path as returned by get_cur_path or the orphan name.
1963 */
1964static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
1965{
1966 int ret = 0;
1967 struct fs_path *name = NULL;
1968 u64 dir;
1969 u64 dir_gen;
1970
1971 if (!sctx->parent_root)
1972 goto out;
1973
1974 name = fs_path_alloc();
1975 if (!name)
1976 return -ENOMEM;
1977
1978 ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
1979 if (ret < 0)
1980 goto out;
1981
1982 ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
1983 name->start, fs_path_len(name));
1984
1985out:
1986 fs_path_free(name);
1987 return ret;
1988}
1989
1990/*
1991 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
1992 * so we need to do some special handling in case we have clashes. This function
1993 * takes care of this with the help of name_cache_entry::radix_list.
1994 * In case of error, nce is kfreed.
1995 */
1996static int name_cache_insert(struct send_ctx *sctx,
1997 struct name_cache_entry *nce)
1998{
1999 int ret = 0;
2000 struct list_head *nce_head;
2001
2002 nce_head = radix_tree_lookup(&sctx->name_cache,
2003 (unsigned long)nce->ino);
2004 if (!nce_head) {
2005 nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
2006 if (!nce_head) {
2007 kfree(nce);
2008 return -ENOMEM;
2009 }
2010 INIT_LIST_HEAD(nce_head);
2011
2012 ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
2013 if (ret < 0) {
2014 kfree(nce_head);
2015 kfree(nce);
2016 return ret;
2017 }
2018 }
2019 list_add_tail(&nce->radix_list, nce_head);
2020 list_add_tail(&nce->list, &sctx->name_cache_list);
2021 sctx->name_cache_size++;
2022
2023 return ret;
2024}
2025
2026static void name_cache_delete(struct send_ctx *sctx,
2027 struct name_cache_entry *nce)
2028{
2029 struct list_head *nce_head;
2030
2031 nce_head = radix_tree_lookup(&sctx->name_cache,
2032 (unsigned long)nce->ino);
2033 if (!nce_head) {
2034 btrfs_err(sctx->send_root->fs_info,
2035 "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
2036 nce->ino, sctx->name_cache_size);
2037 }
2038
2039 list_del(&nce->radix_list);
2040 list_del(&nce->list);
2041 sctx->name_cache_size--;
2042
2043 /*
2044 * We may not get to the final release of nce_head if the lookup fails
2045 */
2046 if (nce_head && list_empty(nce_head)) {
2047 radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
2048 kfree(nce_head);
2049 }
2050}
2051
2052static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
2053 u64 ino, u64 gen)
2054{
2055 struct list_head *nce_head;
2056 struct name_cache_entry *cur;
2057
2058 nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
2059 if (!nce_head)
2060 return NULL;
2061
2062 list_for_each_entry(cur, nce_head, radix_list) {
2063 if (cur->ino == ino && cur->gen == gen)
2064 return cur;
2065 }
2066 return NULL;
2067}
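
/*
 * Example (inums are hypothetical, 32 bit kernel): inodes 5 and 2^32 + 5
 * both truncate to radix slot 5 and end up on the same nce_head list;
 * name_cache_search() walks that list and matches on the full (ino, gen)
 * pair. The same list also separates entries that share an inum but have
 * different generations.
 */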
2068
2069/*
2070 * Removes the entry from the list and adds it back to the end. This marks the
2071 * entry as recently used so that name_cache_clean_unused does not remove it.
2072 */
2073static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
2074{
2075 list_del(&nce->list);
2076 list_add_tail(&nce->list, &sctx->name_cache_list);
2077}
2078
2079/*
2080 * Remove some entries from the beginning of name_cache_list.
2081 */
2082static void name_cache_clean_unused(struct send_ctx *sctx)
2083{
2084 struct name_cache_entry *nce;
2085
2086 if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
2087 return;
2088
2089 while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
2090 nce = list_entry(sctx->name_cache_list.next,
2091 struct name_cache_entry, list);
2092 name_cache_delete(sctx, nce);
2093 kfree(nce);
2094 }
2095}
2096
2097static void name_cache_free(struct send_ctx *sctx)
2098{
2099 struct name_cache_entry *nce;
2100
2101 while (!list_empty(&sctx->name_cache_list)) {
2102 nce = list_entry(sctx->name_cache_list.next,
2103 struct name_cache_entry, list);
2104 name_cache_delete(sctx, nce);
2105 kfree(nce);
2106 }
2107}
2108
2109/*
2110 * Used by get_cur_path for each ref up to the root.
2111 * Returns 0 if it succeeded.
2112 * Returns 1 if the inode is not existent or got overwritten. In that case, the
 * Returns 1 if the inode does not exist or got overwritten. In that case, the
2114 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
2115 * Returns <0 in case of error.
2116 */
2117static int __get_cur_name_and_parent(struct send_ctx *sctx,
2118 u64 ino, u64 gen,
2119 u64 *parent_ino,
2120 u64 *parent_gen,
2121 struct fs_path *dest)
2122{
2123 int ret;
2124 int nce_ret;
2125 struct name_cache_entry *nce = NULL;
2126
2127 /*
2128 * First check if we already did a call to this function with the same
2129 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
2130 * return the cached result.
2131 */
2132 nce = name_cache_search(sctx, ino, gen);
2133 if (nce) {
2134 if (ino < sctx->send_progress && nce->need_later_update) {
2135 name_cache_delete(sctx, nce);
2136 kfree(nce);
2137 nce = NULL;
2138 } else {
2139 name_cache_used(sctx, nce);
2140 *parent_ino = nce->parent_ino;
2141 *parent_gen = nce->parent_gen;
2142 ret = fs_path_add(dest, nce->name, nce->name_len);
2143 if (ret < 0)
2144 goto out;
2145 ret = nce->ret;
2146 goto out;
2147 }
2148 }
2149
2150 /*
	 * If the inode does not exist yet, add the orphan name and return 1.
	 * This should only happen for the parent dir that we determine in
	 * __record_new_ref.
2154 */
2155 ret = is_inode_existent(sctx, ino, gen);
2156 if (ret < 0)
2157 goto out;
2158
2159 if (!ret) {
2160 ret = gen_unique_name(sctx, ino, gen, dest);
2161 if (ret < 0)
2162 goto out;
2163 ret = 1;
2164 goto out_cache;
2165 }
2166
2167 /*
2168 * Depending on whether the inode was already processed or not, use
2169 * send_root or parent_root for ref lookup.
2170 */
2171 if (ino < sctx->send_progress)
2172 ret = get_first_ref(sctx->send_root, ino,
2173 parent_ino, parent_gen, dest);
2174 else
2175 ret = get_first_ref(sctx->parent_root, ino,
2176 parent_ino, parent_gen, dest);
2177 if (ret < 0)
2178 goto out;
2179
2180 /*
2181 * Check if the ref was overwritten by an inode's ref that was processed
2182 * earlier. If yes, treat as orphan and return 1.
2183 */
2184 ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
2185 dest->start, dest->end - dest->start);
2186 if (ret < 0)
2187 goto out;
2188 if (ret) {
2189 fs_path_reset(dest);
2190 ret = gen_unique_name(sctx, ino, gen, dest);
2191 if (ret < 0)
2192 goto out;
2193 ret = 1;
2194 }
2195
2196out_cache:
2197 /*
2198 * Store the result of the lookup in the name cache.
2199 */
2200 nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
2201 if (!nce) {
2202 ret = -ENOMEM;
2203 goto out;
2204 }
2205
2206 nce->ino = ino;
2207 nce->gen = gen;
2208 nce->parent_ino = *parent_ino;
2209 nce->parent_gen = *parent_gen;
2210 nce->name_len = fs_path_len(dest);
2211 nce->ret = ret;
2212 strcpy(nce->name, dest->start);
2213
2214 if (ino < sctx->send_progress)
2215 nce->need_later_update = 0;
2216 else
2217 nce->need_later_update = 1;
2218
2219 nce_ret = name_cache_insert(sctx, nce);
2220 if (nce_ret < 0)
2221 ret = nce_ret;
2222 name_cache_clean_unused(sctx);
2223
2224out:
2225 return ret;
2226}
2227
/*
 * Magic happens here. This function returns the first ref to an inode as it
 * would look while receiving the stream at this point in time.
 * We walk the path up to the root. For every inode in between, we check if it
 * was already processed/sent. If yes, we continue with the parent as found
 * in send_root. If not, we continue with the parent as found in parent_root.
 * If we encounter an inode that was deleted at this point in time, we use the
 * inode's "orphan" name instead of the real name and stop. Same with new inodes
 * that were not created yet and overwritten inodes/refs.
 *
 * When do we have orphan inodes:
 * 1. When an inode is freshly created and thus no valid refs are available yet
 * 2. When a directory lost all its refs (deleted) but still has dir items
 *    inside which were not processed yet (pending for move/delete). If anyone
 *    tried to get the path to the dir items, it would get a path inside that
 *    orphan directory.
 * 3. When an inode is moved around or gets new links, it may overwrite the ref
 *    of an unprocessed inode. If in that case the first ref would be
 *    overwritten, the overwritten inode gets "orphanized". Later when we
 *    process this overwritten inode, it is restored at a new place by moving
 *    the orphan inode.
 *
 * sctx->send_progress tells this function at which point in time receiving
 * would be.
 */
2253static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
2254 struct fs_path *dest)
2255{
2256 int ret = 0;
2257 struct fs_path *name = NULL;
2258 u64 parent_inode = 0;
2259 u64 parent_gen = 0;
2260 int stop = 0;
2261
2262 name = fs_path_alloc();
2263 if (!name) {
2264 ret = -ENOMEM;
2265 goto out;
2266 }
2267
2268 dest->reversed = 1;
2269 fs_path_reset(dest);
2270
2271 while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
2272 struct waiting_dir_move *wdm;
2273
2274 fs_path_reset(name);
2275
2276 if (is_waiting_for_rm(sctx, ino)) {
2277 ret = gen_unique_name(sctx, ino, gen, name);
2278 if (ret < 0)
2279 goto out;
2280 ret = fs_path_add_path(dest, name);
2281 break;
2282 }
2283
2284 wdm = get_waiting_dir_move(sctx, ino);
2285 if (wdm && wdm->orphanized) {
2286 ret = gen_unique_name(sctx, ino, gen, name);
2287 stop = 1;
2288 } else if (wdm) {
2289 ret = get_first_ref(sctx->parent_root, ino,
2290 &parent_inode, &parent_gen, name);
2291 } else {
2292 ret = __get_cur_name_and_parent(sctx, ino, gen,
2293 &parent_inode,
2294 &parent_gen, name);
2295 if (ret)
2296 stop = 1;
2297 }
2298
2299 if (ret < 0)
2300 goto out;
2301
2302 ret = fs_path_add_path(dest, name);
2303 if (ret < 0)
2304 goto out;
2305
2306 ino = parent_inode;
2307 gen = parent_gen;
2308 }
2309
2310out:
2311 fs_path_free(name);
2312 if (!ret)
2313 fs_path_unreverse(dest);
2314 return ret;
2315}
2316
2317/*
2318 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
2319 */
2320static int send_subvol_begin(struct send_ctx *sctx)
2321{
2322 int ret;
2323 struct btrfs_root *send_root = sctx->send_root;
2324 struct btrfs_root *parent_root = sctx->parent_root;
2325 struct btrfs_path *path;
2326 struct btrfs_key key;
2327 struct btrfs_root_ref *ref;
2328 struct extent_buffer *leaf;
2329 char *name = NULL;
2330 int namelen;
2331
2332 path = btrfs_alloc_path();
2333 if (!path)
2334 return -ENOMEM;
2335
2336 name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
2337 if (!name) {
2338 btrfs_free_path(path);
2339 return -ENOMEM;
2340 }
2341
2342 key.objectid = send_root->root_key.objectid;
2343 key.type = BTRFS_ROOT_BACKREF_KEY;
2344 key.offset = 0;
2345
2346 ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
2347 &key, path, 1, 0);
2348 if (ret < 0)
2349 goto out;
2350 if (ret) {
2351 ret = -ENOENT;
2352 goto out;
2353 }
2354
2355 leaf = path->nodes[0];
2356 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2357 if (key.type != BTRFS_ROOT_BACKREF_KEY ||
2358 key.objectid != send_root->root_key.objectid) {
2359 ret = -ENOENT;
2360 goto out;
2361 }
2362 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
2363 namelen = btrfs_root_ref_name_len(leaf, ref);
2364 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
2365 btrfs_release_path(path);
2366
2367 if (parent_root) {
2368 ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
2369 if (ret < 0)
2370 goto out;
2371 } else {
2372 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
2373 if (ret < 0)
2374 goto out;
2375 }
2376
2377 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
2378
2379 if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
2380 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2381 sctx->send_root->root_item.received_uuid);
2382 else
2383 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2384 sctx->send_root->root_item.uuid);
2385
2386 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
2387 le64_to_cpu(sctx->send_root->root_item.ctransid));
2388 if (parent_root) {
2389 if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
2390 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2391 parent_root->root_item.received_uuid);
2392 else
2393 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2394 parent_root->root_item.uuid);
2395 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
2396 le64_to_cpu(sctx->parent_root->root_item.ctransid));
2397 }
2398
2399 ret = send_cmd(sctx);
2400
2401tlv_put_failure:
2402out:
2403 btrfs_free_path(path);
2404 kfree(name);
2405 return ret;
2406}
2407
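/*
 * The following helpers each emit a single attribute-updating command
 * (truncate, chmod, chown, utimes) for an inode, using the path the inode
 * currently has on the receiving side.
 */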
2408static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
2409{
2410 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2411 int ret = 0;
2412 struct fs_path *p;
2413
2414 btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
2415
2416 p = fs_path_alloc();
2417 if (!p)
2418 return -ENOMEM;
2419
2420 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
2421 if (ret < 0)
2422 goto out;
2423
2424 ret = get_cur_path(sctx, ino, gen, p);
2425 if (ret < 0)
2426 goto out;
2427 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2428 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
2429
2430 ret = send_cmd(sctx);
2431
2432tlv_put_failure:
2433out:
2434 fs_path_free(p);
2435 return ret;
2436}
2437
2438static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
2439{
2440 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2441 int ret = 0;
2442 struct fs_path *p;
2443
2444 btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
2445
2446 p = fs_path_alloc();
2447 if (!p)
2448 return -ENOMEM;
2449
2450 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
2451 if (ret < 0)
2452 goto out;
2453
2454 ret = get_cur_path(sctx, ino, gen, p);
2455 if (ret < 0)
2456 goto out;
2457 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2458 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
2459
2460 ret = send_cmd(sctx);
2461
2462tlv_put_failure:
2463out:
2464 fs_path_free(p);
2465 return ret;
2466}
2467
2468static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
2469{
2470 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2471 int ret = 0;
2472 struct fs_path *p;
2473
2474 btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
2475 ino, uid, gid);
2476
2477 p = fs_path_alloc();
2478 if (!p)
2479 return -ENOMEM;
2480
2481 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
2482 if (ret < 0)
2483 goto out;
2484
2485 ret = get_cur_path(sctx, ino, gen, p);
2486 if (ret < 0)
2487 goto out;
2488 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2489 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
2490 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
2491
2492 ret = send_cmd(sctx);
2493
2494tlv_put_failure:
2495out:
2496 fs_path_free(p);
2497 return ret;
2498}
2499
2500static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
2501{
2502 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2503 int ret = 0;
2504 struct fs_path *p = NULL;
2505 struct btrfs_inode_item *ii;
2506 struct btrfs_path *path = NULL;
2507 struct extent_buffer *eb;
2508 struct btrfs_key key;
2509 int slot;
2510
2511 btrfs_debug(fs_info, "send_utimes %llu", ino);
2512
2513 p = fs_path_alloc();
2514 if (!p)
2515 return -ENOMEM;
2516
2517 path = alloc_path_for_send();
2518 if (!path) {
2519 ret = -ENOMEM;
2520 goto out;
2521 }
2522
2523 key.objectid = ino;
2524 key.type = BTRFS_INODE_ITEM_KEY;
2525 key.offset = 0;
2526 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2527 if (ret > 0)
2528 ret = -ENOENT;
2529 if (ret < 0)
2530 goto out;
2531
2532 eb = path->nodes[0];
2533 slot = path->slots[0];
2534 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
2535
2536 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
2537 if (ret < 0)
2538 goto out;
2539
2540 ret = get_cur_path(sctx, ino, gen, p);
2541 if (ret < 0)
2542 goto out;
2543 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2544 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
2545 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
2546 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
2547 /* TODO Add otime support when the otime patches get into upstream */
2548
2549 ret = send_cmd(sctx);
2550
2551tlv_put_failure:
2552out:
2553 fs_path_free(p);
2554 btrfs_free_path(path);
2555 return ret;
2556}
2557
/*
 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
 * a valid path yet because we did not process the refs yet. So the inode
 * is created as an orphan.
 */
2563static int send_create_inode(struct send_ctx *sctx, u64 ino)
2564{
2565 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2566 int ret = 0;
2567 struct fs_path *p;
2568 int cmd;
2569 u64 gen;
2570 u64 mode;
2571 u64 rdev;
2572
2573 btrfs_debug(fs_info, "send_create_inode %llu", ino);
2574
2575 p = fs_path_alloc();
2576 if (!p)
2577 return -ENOMEM;
2578
2579 if (ino != sctx->cur_ino) {
2580 ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
2581 NULL, NULL, &rdev);
2582 if (ret < 0)
2583 goto out;
2584 } else {
2585 gen = sctx->cur_inode_gen;
2586 mode = sctx->cur_inode_mode;
2587 rdev = sctx->cur_inode_rdev;
2588 }
2589
2590 if (S_ISREG(mode)) {
2591 cmd = BTRFS_SEND_C_MKFILE;
2592 } else if (S_ISDIR(mode)) {
2593 cmd = BTRFS_SEND_C_MKDIR;
2594 } else if (S_ISLNK(mode)) {
2595 cmd = BTRFS_SEND_C_SYMLINK;
2596 } else if (S_ISCHR(mode) || S_ISBLK(mode)) {
2597 cmd = BTRFS_SEND_C_MKNOD;
2598 } else if (S_ISFIFO(mode)) {
2599 cmd = BTRFS_SEND_C_MKFIFO;
2600 } else if (S_ISSOCK(mode)) {
2601 cmd = BTRFS_SEND_C_MKSOCK;
2602 } else {
2603 btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
2604 (int)(mode & S_IFMT));
2605 ret = -EOPNOTSUPP;
2606 goto out;
2607 }
2608
2609 ret = begin_cmd(sctx, cmd);
2610 if (ret < 0)
2611 goto out;
2612
2613 ret = gen_unique_name(sctx, ino, gen, p);
2614 if (ret < 0)
2615 goto out;
2616
2617 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2618 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
2619
2620 if (S_ISLNK(mode)) {
2621 fs_path_reset(p);
2622 ret = read_symlink(sctx->send_root, ino, p);
2623 if (ret < 0)
2624 goto out;
2625 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
2626 } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
2627 S_ISFIFO(mode) || S_ISSOCK(mode)) {
2628 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
2629 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
2630 }
2631
2632 ret = send_cmd(sctx);
2633 if (ret < 0)
2634 goto out;
2635
2636
2637tlv_put_failure:
2638out:
2639 fs_path_free(p);
2640 return ret;
2641}
2642
/*
 * We need some special handling for inodes that get processed before their
 * parent directory is created. See process_recorded_refs for details.
 * This function checks if we already created the dir out of order.
 */
2648static int did_create_dir(struct send_ctx *sctx, u64 dir)
2649{
2650 int ret = 0;
2651 struct btrfs_path *path = NULL;
2652 struct btrfs_key key;
2653 struct btrfs_key found_key;
2654 struct btrfs_key di_key;
2655 struct extent_buffer *eb;
2656 struct btrfs_dir_item *di;
2657 int slot;
2658
2659 path = alloc_path_for_send();
2660 if (!path) {
2661 ret = -ENOMEM;
2662 goto out;
2663 }
2664
2665 key.objectid = dir;
2666 key.type = BTRFS_DIR_INDEX_KEY;
2667 key.offset = 0;
2668 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2669 if (ret < 0)
2670 goto out;
2671
2672 while (1) {
2673 eb = path->nodes[0];
2674 slot = path->slots[0];
2675 if (slot >= btrfs_header_nritems(eb)) {
2676 ret = btrfs_next_leaf(sctx->send_root, path);
2677 if (ret < 0) {
2678 goto out;
2679 } else if (ret > 0) {
2680 ret = 0;
2681 break;
2682 }
2683 continue;
2684 }
2685
2686 btrfs_item_key_to_cpu(eb, &found_key, slot);
2687 if (found_key.objectid != key.objectid ||
2688 found_key.type != key.type) {
2689 ret = 0;
2690 goto out;
2691 }
2692
2693 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
2694 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2695
2696 if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
2697 di_key.objectid < sctx->send_progress) {
2698 ret = 1;
2699 goto out;
2700 }
2701
2702 path->slots[0]++;
2703 }
2704
2705out:
2706 btrfs_free_path(path);
2707 return ret;
2708}
2709
2710/*
2711 * Only creates the inode if it is:
2712 * 1. Not a directory
2713 * 2. Or a directory which was not created already due to out of order
2714 * directories. See did_create_dir and process_recorded_refs for details.
2715 */
2716static int send_create_inode_if_needed(struct send_ctx *sctx)
2717{
2718 int ret;
2719
2720 if (S_ISDIR(sctx->cur_inode_mode)) {
2721 ret = did_create_dir(sctx, sctx->cur_ino);
2722 if (ret < 0)
2723 goto out;
2724 if (ret) {
2725 ret = 0;
2726 goto out;
2727 }
2728 }
2729
2730 ret = send_create_inode(sctx, sctx->cur_ino);
2731 if (ret < 0)
2732 goto out;
2733
2734out:
2735 return ret;
2736}
2737
2738struct recorded_ref {
2739 struct list_head list;
2740 char *name;
2741 struct fs_path *full_path;
2742 u64 dir;
2743 u64 dir_gen;
2744 int name_len;
2745};
2746
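/*
 * Point the ref's name at the last component of full_path. The name is not
 * copied, so it stays valid only as long as full_path does.
 */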
2747static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
2748{
2749 ref->full_path = path;
2750 ref->name = (char *)kbasename(ref->full_path->start);
2751 ref->name_len = ref->full_path->end - ref->name;
2752}
2753
2754/*
2755 * We need to process new refs before deleted refs, but compare_tree gives us
2756 * everything mixed. So we first record all refs and later process them.
2757 * This function is a helper to record one ref.
2758 */
2759static int __record_ref(struct list_head *head, u64 dir,
2760 u64 dir_gen, struct fs_path *path)
2761{
2762 struct recorded_ref *ref;
2763
2764 ref = kmalloc(sizeof(*ref), GFP_KERNEL);
2765 if (!ref)
2766 return -ENOMEM;
2767
2768 ref->dir = dir;
2769 ref->dir_gen = dir_gen;
2770 set_ref_path(ref, path);
2771 list_add_tail(&ref->list, head);
2772 return 0;
2773}
2774
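/*
 * Duplicate a recorded ref into the given list. Only dir and dir_gen are
 * copied; full_path is left NULL because the users of these duplicates
 * (check_dirs and the pending move update_refs lists) only need the parent
 * directory information.
 */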
2775static int dup_ref(struct recorded_ref *ref, struct list_head *list)
2776{
2777 struct recorded_ref *new;
2778
2779 new = kmalloc(sizeof(*ref), GFP_KERNEL);
2780 if (!new)
2781 return -ENOMEM;
2782
2783 new->dir = ref->dir;
2784 new->dir_gen = ref->dir_gen;
2785 new->full_path = NULL;
2786 INIT_LIST_HEAD(&new->list);
2787 list_add_tail(&new->list, list);
2788 return 0;
2789}
2790
2791static void __free_recorded_refs(struct list_head *head)
2792{
2793 struct recorded_ref *cur;
2794
2795 while (!list_empty(head)) {
2796 cur = list_entry(head->next, struct recorded_ref, list);
2797 fs_path_free(cur->full_path);
2798 list_del(&cur->list);
2799 kfree(cur);
2800 }
2801}
2802
2803static void free_recorded_refs(struct send_ctx *sctx)
2804{
2805 __free_recorded_refs(&sctx->new_refs);
2806 __free_recorded_refs(&sctx->deleted_refs);
2807}
2808
/*
 * Renames/moves a file/dir to its orphan name. Used when the first
 * ref of an unprocessed inode gets overwritten and for all non-empty
 * directories.
 */
2814static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
2815 struct fs_path *path)
2816{
2817 int ret;
2818 struct fs_path *orphan;
2819
2820 orphan = fs_path_alloc();
2821 if (!orphan)
2822 return -ENOMEM;
2823
2824 ret = gen_unique_name(sctx, ino, gen, orphan);
2825 if (ret < 0)
2826 goto out;
2827
2828 ret = send_rename(sctx, path, orphan);
2829
2830out:
2831 fs_path_free(orphan);
2832 return ret;
2833}
2834
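/*
 * Orphan dir infos are kept in a red-black tree keyed by inode number. They
 * track directories whose removal has to be delayed because they still
 * contain entries that were not processed yet (see can_rmdir()).
 */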
2835static struct orphan_dir_info *
2836add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
2837{
2838 struct rb_node **p = &sctx->orphan_dirs.rb_node;
2839 struct rb_node *parent = NULL;
2840 struct orphan_dir_info *entry, *odi;
2841
2842 while (*p) {
2843 parent = *p;
2844 entry = rb_entry(parent, struct orphan_dir_info, node);
2845 if (dir_ino < entry->ino) {
2846 p = &(*p)->rb_left;
2847 } else if (dir_ino > entry->ino) {
2848 p = &(*p)->rb_right;
2849 } else {
2850 return entry;
2851 }
2852 }
2853
2854 odi = kmalloc(sizeof(*odi), GFP_KERNEL);
2855 if (!odi)
2856 return ERR_PTR(-ENOMEM);
2857 odi->ino = dir_ino;
2858 odi->gen = 0;
2859 odi->last_dir_index_offset = 0;
2860
2861 rb_link_node(&odi->node, parent, p);
2862 rb_insert_color(&odi->node, &sctx->orphan_dirs);
2863 return odi;
2864}
2865
2866static struct orphan_dir_info *
2867get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
2868{
2869 struct rb_node *n = sctx->orphan_dirs.rb_node;
2870 struct orphan_dir_info *entry;
2871
2872 while (n) {
2873 entry = rb_entry(n, struct orphan_dir_info, node);
2874 if (dir_ino < entry->ino)
2875 n = n->rb_left;
2876 else if (dir_ino > entry->ino)
2877 n = n->rb_right;
2878 else
2879 return entry;
2880 }
2881 return NULL;
2882}
2883
2884static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
2885{
2886 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);
2887
2888 return odi != NULL;
2889}
2890
2891static void free_orphan_dir_info(struct send_ctx *sctx,
2892 struct orphan_dir_info *odi)
2893{
2894 if (!odi)
2895 return;
2896 rb_erase(&odi->node, &sctx->orphan_dirs);
2897 kfree(odi);
2898}
2899
2900/*
2901 * Returns 1 if a directory can be removed at this point in time.
2902 * We check this by iterating all dir items and checking if the inode behind
2903 * the dir item was already processed.
2904 */
2905static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
2906 u64 send_progress)
2907{
2908 int ret = 0;
2909 struct btrfs_root *root = sctx->parent_root;
2910 struct btrfs_path *path;
2911 struct btrfs_key key;
2912 struct btrfs_key found_key;
2913 struct btrfs_key loc;
2914 struct btrfs_dir_item *di;
2915 struct orphan_dir_info *odi = NULL;
2916
2917 /*
2918 * Don't try to rmdir the top/root subvolume dir.
2919 */
2920 if (dir == BTRFS_FIRST_FREE_OBJECTID)
2921 return 0;
2922
2923 path = alloc_path_for_send();
2924 if (!path)
2925 return -ENOMEM;
2926
2927 key.objectid = dir;
2928 key.type = BTRFS_DIR_INDEX_KEY;
2929 key.offset = 0;
2930
2931 odi = get_orphan_dir_info(sctx, dir);
2932 if (odi)
2933 key.offset = odi->last_dir_index_offset;
2934
2935 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2936 if (ret < 0)
2937 goto out;
2938
2939 while (1) {
2940 struct waiting_dir_move *dm;
2941
2942 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2943 ret = btrfs_next_leaf(root, path);
2944 if (ret < 0)
2945 goto out;
2946 else if (ret > 0)
2947 break;
2948 continue;
2949 }
2950 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2951 path->slots[0]);
2952 if (found_key.objectid != key.objectid ||
2953 found_key.type != key.type)
2954 break;
2955
2956 di = btrfs_item_ptr(path->nodes[0], path->slots[0],
2957 struct btrfs_dir_item);
2958 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
2959
2960 dm = get_waiting_dir_move(sctx, loc.objectid);
2961 if (dm) {
2962 odi = add_orphan_dir_info(sctx, dir);
2963 if (IS_ERR(odi)) {
2964 ret = PTR_ERR(odi);
2965 goto out;
2966 }
2967 odi->gen = dir_gen;
2968 odi->last_dir_index_offset = found_key.offset;
2969 dm->rmdir_ino = dir;
2970 ret = 0;
2971 goto out;
2972 }
2973
2974 if (loc.objectid > send_progress) {
2975 odi = add_orphan_dir_info(sctx, dir);
2976 if (IS_ERR(odi)) {
2977 ret = PTR_ERR(odi);
2978 goto out;
2979 }
2980 odi->gen = dir_gen;
2981 odi->last_dir_index_offset = found_key.offset;
2982 ret = 0;
2983 goto out;
2984 }
2985
2986 path->slots[0]++;
2987 }
2988 free_orphan_dir_info(sctx, odi);
2989
2990 ret = 1;
2991
2992out:
2993 btrfs_free_path(path);
2994 return ret;
2995}
2996
2997static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
2998{
2999 struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);
3000
3001 return entry != NULL;
3002}
3003
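/*
 * Insert an entry into the tree of directories that wait for a pending
 * rename/move. Returns -EEXIST if an entry for this inode already exists.
 */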
3004static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
3005{
3006 struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
3007 struct rb_node *parent = NULL;
3008 struct waiting_dir_move *entry, *dm;
3009
3010 dm = kmalloc(sizeof(*dm), GFP_KERNEL);
3011 if (!dm)
3012 return -ENOMEM;
3013 dm->ino = ino;
3014 dm->rmdir_ino = 0;
3015 dm->orphanized = orphanized;
3016
3017 while (*p) {
3018 parent = *p;
3019 entry = rb_entry(parent, struct waiting_dir_move, node);
3020 if (ino < entry->ino) {
3021 p = &(*p)->rb_left;
3022 } else if (ino > entry->ino) {
3023 p = &(*p)->rb_right;
3024 } else {
3025 kfree(dm);
3026 return -EEXIST;
3027 }
3028 }
3029
3030 rb_link_node(&dm->node, parent, p);
3031 rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
3032 return 0;
3033}
3034
3035static struct waiting_dir_move *
3036get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
3037{
3038 struct rb_node *n = sctx->waiting_dir_moves.rb_node;
3039 struct waiting_dir_move *entry;
3040
3041 while (n) {
3042 entry = rb_entry(n, struct waiting_dir_move, node);
3043 if (ino < entry->ino)
3044 n = n->rb_left;
3045 else if (ino > entry->ino)
3046 n = n->rb_right;
3047 else
3048 return entry;
3049 }
3050 return NULL;
3051}
3052
3053static void free_waiting_dir_move(struct send_ctx *sctx,
3054 struct waiting_dir_move *dm)
3055{
3056 if (!dm)
3057 return;
3058 rb_erase(&dm->node, &sctx->waiting_dir_moves);
3059 kfree(dm);
3060}
3061
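/*
 * Record that the rename/move of inode 'ino' must be delayed until its new
 * parent 'parent_ino' has been processed. Pending moves are grouped per
 * parent in a red-black tree, and the new/deleted refs are duplicated into
 * pm->update_refs so that apply_dir_move() can later update the utimes of
 * the affected parent directories.
 */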
3062static int add_pending_dir_move(struct send_ctx *sctx,
3063 u64 ino,
3064 u64 ino_gen,
3065 u64 parent_ino,
3066 struct list_head *new_refs,
3067 struct list_head *deleted_refs,
3068 const bool is_orphan)
3069{
3070 struct rb_node **p = &sctx->pending_dir_moves.rb_node;
3071 struct rb_node *parent = NULL;
3072 struct pending_dir_move *entry = NULL, *pm;
3073 struct recorded_ref *cur;
3074 int exists = 0;
3075 int ret;
3076
3077 pm = kmalloc(sizeof(*pm), GFP_KERNEL);
3078 if (!pm)
3079 return -ENOMEM;
3080 pm->parent_ino = parent_ino;
3081 pm->ino = ino;
3082 pm->gen = ino_gen;
3083 INIT_LIST_HEAD(&pm->list);
3084 INIT_LIST_HEAD(&pm->update_refs);
3085 RB_CLEAR_NODE(&pm->node);
3086
3087 while (*p) {
3088 parent = *p;
3089 entry = rb_entry(parent, struct pending_dir_move, node);
3090 if (parent_ino < entry->parent_ino) {
3091 p = &(*p)->rb_left;
3092 } else if (parent_ino > entry->parent_ino) {
3093 p = &(*p)->rb_right;
3094 } else {
3095 exists = 1;
3096 break;
3097 }
3098 }
3099
3100 list_for_each_entry(cur, deleted_refs, list) {
3101 ret = dup_ref(cur, &pm->update_refs);
3102 if (ret < 0)
3103 goto out;
3104 }
3105 list_for_each_entry(cur, new_refs, list) {
3106 ret = dup_ref(cur, &pm->update_refs);
3107 if (ret < 0)
3108 goto out;
3109 }
3110
3111 ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
3112 if (ret)
3113 goto out;
3114
3115 if (exists) {
3116 list_add_tail(&pm->list, &entry->list);
3117 } else {
3118 rb_link_node(&pm->node, parent, p);
3119 rb_insert_color(&pm->node, &sctx->pending_dir_moves);
3120 }
3121 ret = 0;
3122out:
3123 if (ret) {
3124 __free_recorded_refs(&pm->update_refs);
3125 kfree(pm);
3126 }
3127 return ret;
3128}
3129
3130static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
3131 u64 parent_ino)
3132{
3133 struct rb_node *n = sctx->pending_dir_moves.rb_node;
3134 struct pending_dir_move *entry;
3135
3136 while (n) {
3137 entry = rb_entry(n, struct pending_dir_move, node);
3138 if (parent_ino < entry->parent_ino)
3139 n = n->rb_left;
3140 else if (parent_ino > entry->parent_ino)
3141 n = n->rb_right;
3142 else
3143 return entry;
3144 }
3145 return NULL;
3146}
3147
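/*
 * Walk from 'ino' towards the subvolume root and detect whether building its
 * current path would loop back to 'ino' itself due to directories that still
 * wait for a pending move. Returns 1 (with the offending ancestor stored in
 * *ancestor_ino) if a loop exists, 0 if not, and < 0 on error.
 */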
3148static int path_loop(struct send_ctx *sctx, struct fs_path *name,
3149 u64 ino, u64 gen, u64 *ancestor_ino)
3150{
3151 int ret = 0;
3152 u64 parent_inode = 0;
3153 u64 parent_gen = 0;
3154 u64 start_ino = ino;
3155
3156 *ancestor_ino = 0;
3157 while (ino != BTRFS_FIRST_FREE_OBJECTID) {
3158 fs_path_reset(name);
3159
3160 if (is_waiting_for_rm(sctx, ino))
3161 break;
3162 if (is_waiting_for_move(sctx, ino)) {
3163 if (*ancestor_ino == 0)
3164 *ancestor_ino = ino;
3165 ret = get_first_ref(sctx->parent_root, ino,
3166 &parent_inode, &parent_gen, name);
3167 } else {
3168 ret = __get_cur_name_and_parent(sctx, ino, gen,
3169 &parent_inode,
3170 &parent_gen, name);
3171 if (ret > 0) {
3172 ret = 0;
3173 break;
3174 }
3175 }
3176 if (ret < 0)
3177 break;
3178 if (parent_inode == start_ino) {
3179 ret = 1;
3180 if (*ancestor_ino == 0)
3181 *ancestor_ino = ino;
3182 break;
3183 }
3184 ino = parent_inode;
3185 gen = parent_gen;
3186 }
3187 return ret;
3188}
3189
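/*
 * Perform the delayed rename/move described by a pending_dir_move: build the
 * source path (the orphan name or the path in the parent root), re-check for
 * path loops (re-delaying the move if one is found), send the rename, handle
 * the delayed rmdir of a directory that could not be removed before this
 * move, and finally update the utimes of all affected parent directories.
 */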
3190static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3191{
3192 struct fs_path *from_path = NULL;
3193 struct fs_path *to_path = NULL;
3194 struct fs_path *name = NULL;
3195 u64 orig_progress = sctx->send_progress;
3196 struct recorded_ref *cur;
3197 u64 parent_ino, parent_gen;
3198 struct waiting_dir_move *dm = NULL;
3199 u64 rmdir_ino = 0;
3200 u64 ancestor;
3201 bool is_orphan;
3202 int ret;
3203
3204 name = fs_path_alloc();
3205 from_path = fs_path_alloc();
3206 if (!name || !from_path) {
3207 ret = -ENOMEM;
3208 goto out;
3209 }
3210
3211 dm = get_waiting_dir_move(sctx, pm->ino);
3212 ASSERT(dm);
3213 rmdir_ino = dm->rmdir_ino;
3214 is_orphan = dm->orphanized;
3215 free_waiting_dir_move(sctx, dm);
3216
3217 if (is_orphan) {
3218 ret = gen_unique_name(sctx, pm->ino,
3219 pm->gen, from_path);
3220 } else {
3221 ret = get_first_ref(sctx->parent_root, pm->ino,
3222 &parent_ino, &parent_gen, name);
3223 if (ret < 0)
3224 goto out;
3225 ret = get_cur_path(sctx, parent_ino, parent_gen,
3226 from_path);
3227 if (ret < 0)
3228 goto out;
3229 ret = fs_path_add_path(from_path, name);
3230 }
3231 if (ret < 0)
3232 goto out;
3233
3234 sctx->send_progress = sctx->cur_ino + 1;
3235 ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
3236 if (ret < 0)
3237 goto out;
3238 if (ret) {
3239 LIST_HEAD(deleted_refs);
3240 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
3241 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
3242 &pm->update_refs, &deleted_refs,
3243 is_orphan);
3244 if (ret < 0)
3245 goto out;
3246 if (rmdir_ino) {
3247 dm = get_waiting_dir_move(sctx, pm->ino);
3248 ASSERT(dm);
3249 dm->rmdir_ino = rmdir_ino;
3250 }
3251 goto out;
3252 }
3253 fs_path_reset(name);
3254 to_path = name;
3255 name = NULL;
3256 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
3257 if (ret < 0)
3258 goto out;
3259
3260 ret = send_rename(sctx, from_path, to_path);
3261 if (ret < 0)
3262 goto out;
3263
3264 if (rmdir_ino) {
3265 struct orphan_dir_info *odi;
3266 u64 gen;
3267
3268 odi = get_orphan_dir_info(sctx, rmdir_ino);
3269 if (!odi) {
3270 /* already deleted */
3271 goto finish;
3272 }
3273 gen = odi->gen;
3274
3275 ret = can_rmdir(sctx, rmdir_ino, gen, sctx->cur_ino);
3276 if (ret < 0)
3277 goto out;
3278 if (!ret)
3279 goto finish;
3280
3281 name = fs_path_alloc();
3282 if (!name) {
3283 ret = -ENOMEM;
3284 goto out;
3285 }
3286 ret = get_cur_path(sctx, rmdir_ino, gen, name);
3287 if (ret < 0)
3288 goto out;
3289 ret = send_rmdir(sctx, name);
3290 if (ret < 0)
3291 goto out;
3292 }
3293
3294finish:
3295 ret = send_utimes(sctx, pm->ino, pm->gen);
3296 if (ret < 0)
3297 goto out;
3298
3299 /*
3300 * After rename/move, need to update the utimes of both new parent(s)
3301 * and old parent(s).
3302 */
3303 list_for_each_entry(cur, &pm->update_refs, list) {
3304 /*
3305 * The parent inode might have been deleted in the send snapshot
3306 */
3307 ret = get_inode_info(sctx->send_root, cur->dir, NULL,
3308 NULL, NULL, NULL, NULL, NULL);
3309 if (ret == -ENOENT) {
3310 ret = 0;
3311 continue;
3312 }
3313 if (ret < 0)
3314 goto out;
3315
3316 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3317 if (ret < 0)
3318 goto out;
3319 }
3320
3321out:
3322 fs_path_free(name);
3323 fs_path_free(from_path);
3324 fs_path_free(to_path);
3325 sctx->send_progress = orig_progress;
3326
3327 return ret;
3328}
3329
3330static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
3331{
3332 if (!list_empty(&m->list))
3333 list_del(&m->list);
3334 if (!RB_EMPTY_NODE(&m->node))
3335 rb_erase(&m->node, &sctx->pending_dir_moves);
3336 __free_recorded_refs(&m->update_refs);
3337 kfree(m);
3338}
3339
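/*
 * Append a pending move, and any moves chained to it, to the tail of the
 * processing stack and detach it from the pending_dir_moves tree.
 */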
3340static void tail_append_pending_moves(struct send_ctx *sctx,
3341 struct pending_dir_move *moves,
3342 struct list_head *stack)
3343{
3344 if (list_empty(&moves->list)) {
3345 list_add_tail(&moves->list, stack);
3346 } else {
3347 LIST_HEAD(list);
3348 list_splice_init(&moves->list, &list);
3349 list_add_tail(&moves->list, stack);
3350 list_splice_tail(&list, stack);
3351 }
3352 if (!RB_EMPTY_NODE(&moves->node)) {
3353 rb_erase(&moves->node, &sctx->pending_dir_moves);
3354 RB_CLEAR_NODE(&moves->node);
3355 }
3356}
3357
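/*
 * After finishing the current inode, apply all directory moves that were
 * waiting for it and, transitively, the moves waiting for those directories.
 */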
3358static int apply_children_dir_moves(struct send_ctx *sctx)
3359{
3360 struct pending_dir_move *pm;
3361 struct list_head stack;
3362 u64 parent_ino = sctx->cur_ino;
3363 int ret = 0;
3364
3365 pm = get_pending_dir_moves(sctx, parent_ino);
3366 if (!pm)
3367 return 0;
3368
3369 INIT_LIST_HEAD(&stack);
3370 tail_append_pending_moves(sctx, pm, &stack);
3371
3372 while (!list_empty(&stack)) {
3373 pm = list_first_entry(&stack, struct pending_dir_move, list);
3374 parent_ino = pm->ino;
3375 ret = apply_dir_move(sctx, pm);
3376 free_pending_move(sctx, pm);
3377 if (ret)
3378 goto out;
3379 pm = get_pending_dir_moves(sctx, parent_ino);
3380 if (pm)
3381 tail_append_pending_moves(sctx, pm, &stack);
3382 }
3383 return 0;
3384
3385out:
3386 while (!list_empty(&stack)) {
3387 pm = list_first_entry(&stack, struct pending_dir_move, list);
3388 free_pending_move(sctx, pm);
3389 }
3390 return ret;
3391}
3392
3393/*
 * We might need to delay a directory rename even when no ancestor directory
 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
 * renamed. This happens when we rename a directory to the old name (the name
 * in the parent root) of some other unrelated directory that got its rename
 * delayed due to an ancestor with a higher inode number that got renamed.
3399 *
3400 * Example:
3401 *
3402 * Parent snapshot:
3403 * . (ino 256)
3404 * |---- a/ (ino 257)
3405 * | |---- file (ino 260)
3406 * |
3407 * |---- b/ (ino 258)
3408 * |---- c/ (ino 259)
3409 *
3410 * Send snapshot:
3411 * . (ino 256)
3412 * |---- a/ (ino 258)
3413 * |---- x/ (ino 259)
3414 * |---- y/ (ino 257)
3415 * |----- file (ino 260)
3416 *
 * Here we cannot rename 258 from 'b' to 'a' without the rename of inode 257
 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
 * must issue is:
3421 *
3422 * 1 - rename 259 from 'c' to 'x'
3423 * 2 - rename 257 from 'a' to 'x/y'
3424 * 3 - rename 258 from 'b' to 'a'
3425 *
3426 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
3427 * be done right away and < 0 on error.
3428 */
3429static int wait_for_dest_dir_move(struct send_ctx *sctx,
3430 struct recorded_ref *parent_ref,
3431 const bool is_orphan)
3432{
3433 struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
3434 struct btrfs_path *path;
3435 struct btrfs_key key;
3436 struct btrfs_key di_key;
3437 struct btrfs_dir_item *di;
3438 u64 left_gen;
3439 u64 right_gen;
3440 int ret = 0;
3441 struct waiting_dir_move *wdm;
3442
3443 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
3444 return 0;
3445
3446 path = alloc_path_for_send();
3447 if (!path)
3448 return -ENOMEM;
3449
3450 key.objectid = parent_ref->dir;
3451 key.type = BTRFS_DIR_ITEM_KEY;
3452 key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
3453
3454 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
3455 if (ret < 0) {
3456 goto out;
3457 } else if (ret > 0) {
3458 ret = 0;
3459 goto out;
3460 }
3461
3462 di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
3463 parent_ref->name_len);
3464 if (!di) {
3465 ret = 0;
3466 goto out;
3467 }
	/*
	 * di_key.objectid has the number of the inode that has a dentry in the
	 * parent directory with the same name that sctx->cur_ino is being
	 * renamed to. We need to check if that inode is in the send root as
	 * well and if it is currently marked as an inode with a pending rename.
	 * If it is, we need to delay the rename of sctx->cur_ino as well, so
	 * that it happens after that other inode is renamed.
	 */
3476 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
3477 if (di_key.type != BTRFS_INODE_ITEM_KEY) {
3478 ret = 0;
3479 goto out;
3480 }
3481
3482 ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
3483 &left_gen, NULL, NULL, NULL, NULL);
3484 if (ret < 0)
3485 goto out;
3486 ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
3487 &right_gen, NULL, NULL, NULL, NULL);
3488 if (ret < 0) {
3489 if (ret == -ENOENT)
3490 ret = 0;
3491 goto out;
3492 }
3493
3494 /* Different inode, no need to delay the rename of sctx->cur_ino */
3495 if (right_gen != left_gen) {
3496 ret = 0;
3497 goto out;
3498 }
3499
3500 wdm = get_waiting_dir_move(sctx, di_key.objectid);
3501 if (wdm && !wdm->orphanized) {
3502 ret = add_pending_dir_move(sctx,
3503 sctx->cur_ino,
3504 sctx->cur_inode_gen,
3505 di_key.objectid,
3506 &sctx->new_refs,
3507 &sctx->deleted_refs,
3508 is_orphan);
3509 if (!ret)
3510 ret = 1;
3511 }
3512out:
3513 btrfs_free_path(path);
3514 return ret;
3515}
3516
3517/*
3518 * Check if inode ino2, or any of its ancestors, is inode ino1.
3519 * Return 1 if true, 0 if false and < 0 on error.
3520 */
3521static int check_ino_in_path(struct btrfs_root *root,
3522 const u64 ino1,
3523 const u64 ino1_gen,
3524 const u64 ino2,
3525 const u64 ino2_gen,
3526 struct fs_path *fs_path)
3527{
3528 u64 ino = ino2;
3529
3530 if (ino1 == ino2)
3531 return ino1_gen == ino2_gen;
3532
3533 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3534 u64 parent;
3535 u64 parent_gen;
3536 int ret;
3537
3538 fs_path_reset(fs_path);
3539 ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
3540 if (ret < 0)
3541 return ret;
3542 if (parent == ino1)
3543 return parent_gen == ino1_gen;
3544 ino = parent;
3545 }
3546 return 0;
3547}
3548
/*
 * Check if inode ino1 is an ancestor of inode ino2 in the given root for any
 * possible path (in case ino2 is not a directory and has multiple hard links).
 * Return 1 if true, 0 if false and < 0 on error.
 */
3554static int is_ancestor(struct btrfs_root *root,
3555 const u64 ino1,
3556 const u64 ino1_gen,
3557 const u64 ino2,
3558 struct fs_path *fs_path)
3559{
3560 bool free_fs_path = false;
3561 int ret = 0;
3562 struct btrfs_path *path = NULL;
3563 struct btrfs_key key;
3564
3565 if (!fs_path) {
3566 fs_path = fs_path_alloc();
3567 if (!fs_path)
3568 return -ENOMEM;
3569 free_fs_path = true;
3570 }
3571
3572 path = alloc_path_for_send();
3573 if (!path) {
3574 ret = -ENOMEM;
3575 goto out;
3576 }
3577
3578 key.objectid = ino2;
3579 key.type = BTRFS_INODE_REF_KEY;
3580 key.offset = 0;
3581
3582 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3583 if (ret < 0)
3584 goto out;
3585
3586 while (true) {
3587 struct extent_buffer *leaf = path->nodes[0];
3588 int slot = path->slots[0];
3589 u32 cur_offset = 0;
3590 u32 item_size;
3591
3592 if (slot >= btrfs_header_nritems(leaf)) {
3593 ret = btrfs_next_leaf(root, path);
3594 if (ret < 0)
3595 goto out;
3596 if (ret > 0)
3597 break;
3598 continue;
3599 }
3600
3601 btrfs_item_key_to_cpu(leaf, &key, slot);
3602 if (key.objectid != ino2)
3603 break;
3604 if (key.type != BTRFS_INODE_REF_KEY &&
3605 key.type != BTRFS_INODE_EXTREF_KEY)
3606 break;
3607
3608 item_size = btrfs_item_size_nr(leaf, slot);
3609 while (cur_offset < item_size) {
3610 u64 parent;
3611 u64 parent_gen;
3612
3613 if (key.type == BTRFS_INODE_EXTREF_KEY) {
3614 unsigned long ptr;
3615 struct btrfs_inode_extref *extref;
3616
3617 ptr = btrfs_item_ptr_offset(leaf, slot);
3618 extref = (struct btrfs_inode_extref *)
3619 (ptr + cur_offset);
3620 parent = btrfs_inode_extref_parent(leaf,
3621 extref);
3622 cur_offset += sizeof(*extref);
3623 cur_offset += btrfs_inode_extref_name_len(leaf,
3624 extref);
3625 } else {
3626 parent = key.offset;
3627 cur_offset = item_size;
3628 }
3629
3630 ret = get_inode_info(root, parent, NULL, &parent_gen,
3631 NULL, NULL, NULL, NULL);
3632 if (ret < 0)
3633 goto out;
3634 ret = check_ino_in_path(root, ino1, ino1_gen,
3635 parent, parent_gen, fs_path);
3636 if (ret)
3637 goto out;
3638 }
3639 path->slots[0]++;
3640 }
3641 ret = 0;
3642 out:
3643 btrfs_free_path(path);
3644 if (free_fs_path)
3645 fs_path_free(fs_path);
3646 return ret;
3647}
3648
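/*
 * Check if the rename/move of the current inode has to be delayed because an
 * ancestor in the send snapshot either has a pending move of its own (and
 * delaying avoids a rename cycle) or has a higher inode number and was
 * renamed/moved between the two snapshots, so it will only be renamed after
 * the current inode. Returns 1 if the move was queued as pending, 0 if it can
 * be done right away and < 0 on error.
 */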
3649static int wait_for_parent_move(struct send_ctx *sctx,
3650 struct recorded_ref *parent_ref,
3651 const bool is_orphan)
3652{
3653 int ret = 0;
3654 u64 ino = parent_ref->dir;
3655 u64 ino_gen = parent_ref->dir_gen;
3656 u64 parent_ino_before, parent_ino_after;
3657 struct fs_path *path_before = NULL;
3658 struct fs_path *path_after = NULL;
3659 int len1, len2;
3660
3661 path_after = fs_path_alloc();
3662 path_before = fs_path_alloc();
3663 if (!path_after || !path_before) {
3664 ret = -ENOMEM;
3665 goto out;
3666 }
3667
	/*
	 * Our current directory inode may not yet be renamed/moved because some
	 * ancestor (immediate or not) has to be renamed/moved first. So find out
	 * if such an ancestor exists and make sure our own rename/move happens
	 * after that ancestor is processed, to avoid path build infinite loops
	 * (done at get_cur_path()).
	 */
3675 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3676 u64 parent_ino_after_gen;
3677
3678 if (is_waiting_for_move(sctx, ino)) {
			/*
			 * If the current inode is an ancestor of ino in the
			 * parent root, we need to delay the rename of the
			 * current inode, otherwise don't delay the rename
			 * because we can end up with a circular dependency
			 * of renames, resulting in some directories never
			 * getting the respective rename operations issued in
			 * the send stream or getting into infinite path build
			 * loops.
			 */
3689 ret = is_ancestor(sctx->parent_root,
3690 sctx->cur_ino, sctx->cur_inode_gen,
3691 ino, path_before);
3692 if (ret)
3693 break;
3694 }
3695
3696 fs_path_reset(path_before);
3697 fs_path_reset(path_after);
3698
3699 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
3700 &parent_ino_after_gen, path_after);
3701 if (ret < 0)
3702 goto out;
3703 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
3704 NULL, path_before);
3705 if (ret < 0 && ret != -ENOENT) {
3706 goto out;
3707 } else if (ret == -ENOENT) {
3708 ret = 0;
3709 break;
3710 }
3711
3712 len1 = fs_path_len(path_before);
3713 len2 = fs_path_len(path_after);
3714 if (ino > sctx->cur_ino &&
3715 (parent_ino_before != parent_ino_after || len1 != len2 ||
3716 memcmp(path_before->start, path_after->start, len1))) {
3717 u64 parent_ino_gen;
3718
3719 ret = get_inode_info(sctx->parent_root, ino, NULL,
3720 &parent_ino_gen, NULL, NULL, NULL,
3721 NULL);
3722 if (ret < 0)
3723 goto out;
3724 if (ino_gen == parent_ino_gen) {
3725 ret = 1;
3726 break;
3727 }
3728 }
3729 ino = parent_ino_after;
3730 ino_gen = parent_ino_after_gen;
3731 }
3732
3733out:
3734 fs_path_free(path_before);
3735 fs_path_free(path_after);
3736
3737 if (ret == 1) {
3738 ret = add_pending_dir_move(sctx,
3739 sctx->cur_ino,
3740 sctx->cur_inode_gen,
3741 ino,
3742 &sctx->new_refs,
3743 &sctx->deleted_refs,
3744 is_orphan);
3745 if (!ret)
3746 ret = 1;
3747 }
3748
3749 return ret;
3750}
3751
3752static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
3753{
3754 int ret;
3755 struct fs_path *new_path;
3756
	/*
	 * Our reference's name member points to its full_path member string,
	 * so we use a new path here.
	 */
3761 new_path = fs_path_alloc();
3762 if (!new_path)
3763 return -ENOMEM;
3764
3765 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
3766 if (ret < 0) {
3767 fs_path_free(new_path);
3768 return ret;
3769 }
3770 ret = fs_path_add(new_path, ref->name, ref->name_len);
3771 if (ret < 0) {
3772 fs_path_free(new_path);
3773 return ret;
3774 }
3775
3776 fs_path_free(ref->full_path);
3777 set_ref_path(ref, new_path);
3778
3779 return 0;
3780}
3781
3782/*
3783 * This does all the move/link/unlink/rmdir magic.
3784 */
3785static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3786{
3787 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
3788 int ret = 0;
3789 struct recorded_ref *cur;
3790 struct recorded_ref *cur2;
3791 struct list_head check_dirs;
3792 struct fs_path *valid_path = NULL;
3793 u64 ow_inode = 0;
3794 u64 ow_gen;
3795 u64 ow_mode;
3796 int did_overwrite = 0;
3797 int is_orphan = 0;
3798 u64 last_dir_ino_rm = 0;
3799 bool can_rename = true;
3800 bool orphanized_dir = false;
3801 bool orphanized_ancestor = false;
3802
3803 btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
3804
3805 /*
3806 * This should never happen as the root dir always has the same ref
3807 * which is always '..'
3808 */
3809 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
3810 INIT_LIST_HEAD(&check_dirs);
3811
3812 valid_path = fs_path_alloc();
3813 if (!valid_path) {
3814 ret = -ENOMEM;
3815 goto out;
3816 }
3817
	/*
	 * First, check if the first ref of the current inode was overwritten
	 * before. If yes, we know that the current inode was already orphanized
	 * and thus use the orphan name. If not, we can use get_cur_path to
	 * get the path of the first ref as it would look while receiving at
	 * this point in time.
	 * New inodes are always orphans at the beginning, so force the use of
	 * the orphan name in this case.
	 * The first ref is stored in valid_path and will be updated if it
	 * gets moved around.
	 */
3829 if (!sctx->cur_inode_new) {
3830 ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
3831 sctx->cur_inode_gen);
3832 if (ret < 0)
3833 goto out;
3834 if (ret)
3835 did_overwrite = 1;
3836 }
3837 if (sctx->cur_inode_new || did_overwrite) {
3838 ret = gen_unique_name(sctx, sctx->cur_ino,
3839 sctx->cur_inode_gen, valid_path);
3840 if (ret < 0)
3841 goto out;
3842 is_orphan = 1;
3843 } else {
3844 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3845 valid_path);
3846 if (ret < 0)
3847 goto out;
3848 }
3849
3850 list_for_each_entry(cur, &sctx->new_refs, list) {
		/*
		 * We may have refs where the parent directory does not exist
		 * yet. This happens if the parent directory's inode number is
		 * higher than the current inode number. To handle this case,
		 * we create the parent directory out of order. But we need to
		 * check if this already happened before due to other refs in
		 * the same dir.
		 */
3858 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
3859 if (ret < 0)
3860 goto out;
3861 if (ret == inode_state_will_create) {
3862 ret = 0;
3863 /*
3864 * First check if any of the current inodes refs did
3865 * already create the dir.
3866 */
3867 list_for_each_entry(cur2, &sctx->new_refs, list) {
3868 if (cur == cur2)
3869 break;
3870 if (cur2->dir == cur->dir) {
3871 ret = 1;
3872 break;
3873 }
3874 }
3875
3876 /*
3877 * If that did not happen, check if a previous inode
3878 * did already create the dir.
3879 */
3880 if (!ret)
3881 ret = did_create_dir(sctx, cur->dir);
3882 if (ret < 0)
3883 goto out;
3884 if (!ret) {
3885 ret = send_create_inode(sctx, cur->dir);
3886 if (ret < 0)
3887 goto out;
3888 }
3889 }
3890
3891 /*
3892 * Check if this new ref would overwrite the first ref of
3893 * another unprocessed inode. If yes, orphanize the
3894 * overwritten inode. If we find an overwritten ref that is
3895 * not the first ref, simply unlink it.
3896 */
3897 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
3898 cur->name, cur->name_len,
3899 &ow_inode, &ow_gen, &ow_mode);
3900 if (ret < 0)
3901 goto out;
3902 if (ret) {
3903 ret = is_first_ref(sctx->parent_root,
3904 ow_inode, cur->dir, cur->name,
3905 cur->name_len);
3906 if (ret < 0)
3907 goto out;
3908 if (ret) {
3909 struct name_cache_entry *nce;
3910 struct waiting_dir_move *wdm;
3911
3912 ret = orphanize_inode(sctx, ow_inode, ow_gen,
3913 cur->full_path);
3914 if (ret < 0)
3915 goto out;
3916 if (S_ISDIR(ow_mode))
3917 orphanized_dir = true;
3918
				/*
				 * If ow_inode has its rename operation delayed,
				 * make sure that its orphanized name is used in
				 * the source path when performing its rename
				 * operation.
				 */
3925 if (is_waiting_for_move(sctx, ow_inode)) {
3926 wdm = get_waiting_dir_move(sctx,
3927 ow_inode);
3928 ASSERT(wdm);
3929 wdm->orphanized = true;
3930 }
3931
				/*
				 * Make sure we clear our orphanized inode's
				 * name from the name cache. This is because the
				 * inode ow_inode might be an ancestor of some
				 * other inode that will be orphanized as well
				 * later and has an inode number greater than
				 * sctx->send_progress. We need to prevent
				 * future name lookups from using the old name
				 * and make them use the orphan name instead.
				 */
3942 nce = name_cache_search(sctx, ow_inode, ow_gen);
3943 if (nce) {
3944 name_cache_delete(sctx, nce);
3945 kfree(nce);
3946 }
3947
3948 /*
3949 * ow_inode might currently be an ancestor of
3950 * cur_ino, therefore compute valid_path (the
3951 * current path of cur_ino) again because it
3952 * might contain the pre-orphanization name of
3953 * ow_inode, which is no longer valid.
3954 */
3955 ret = is_ancestor(sctx->parent_root,
3956 ow_inode, ow_gen,
3957 sctx->cur_ino, NULL);
3958 if (ret > 0) {
3959 orphanized_ancestor = true;
3960 fs_path_reset(valid_path);
3961 ret = get_cur_path(sctx, sctx->cur_ino,
3962 sctx->cur_inode_gen,
3963 valid_path);
3964 }
3965 if (ret < 0)
3966 goto out;
3967 } else {
3968 ret = send_unlink(sctx, cur->full_path);
3969 if (ret < 0)
3970 goto out;
3971 }
3972 }
3973
3974 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
3975 ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
3976 if (ret < 0)
3977 goto out;
3978 if (ret == 1) {
3979 can_rename = false;
3980 *pending_move = 1;
3981 }
3982 }
3983
3984 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
3985 can_rename) {
3986 ret = wait_for_parent_move(sctx, cur, is_orphan);
3987 if (ret < 0)
3988 goto out;
3989 if (ret == 1) {
3990 can_rename = false;
3991 *pending_move = 1;
3992 }
3993 }
3994
3995 /*
3996 * link/move the ref to the new place. If we have an orphan
3997 * inode, move it and update valid_path. If not, link or move
3998 * it depending on the inode mode.
3999 */
4000 if (is_orphan && can_rename) {
4001 ret = send_rename(sctx, valid_path, cur->full_path);
4002 if (ret < 0)
4003 goto out;
4004 is_orphan = 0;
4005 ret = fs_path_copy(valid_path, cur->full_path);
4006 if (ret < 0)
4007 goto out;
4008 } else if (can_rename) {
4009 if (S_ISDIR(sctx->cur_inode_mode)) {
4010 /*
4011 * Dirs can't be linked, so move it. For moved
4012 * dirs, we always have one new and one deleted
4013 * ref. The deleted ref is ignored later.
4014 */
4015 ret = send_rename(sctx, valid_path,
4016 cur->full_path);
4017 if (!ret)
4018 ret = fs_path_copy(valid_path,
4019 cur->full_path);
4020 if (ret < 0)
4021 goto out;
4022 } else {
4023 /*
4024 * We might have previously orphanized an inode
4025 * which is an ancestor of our current inode,
4026 * so our reference's full path, which was
4027 * computed before any such orphanizations, must
4028 * be updated.
4029 */
4030 if (orphanized_dir) {
4031 ret = update_ref_path(sctx, cur);
4032 if (ret < 0)
4033 goto out;
4034 }
4035 ret = send_link(sctx, cur->full_path,
4036 valid_path);
4037 if (ret < 0)
4038 goto out;
4039 }
4040 }
4041 ret = dup_ref(cur, &check_dirs);
4042 if (ret < 0)
4043 goto out;
4044 }
4045
4046 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
4047 /*
4048 * Check if we can already rmdir the directory. If not,
4049 * orphanize it. For every dir item inside that gets deleted
4050 * later, we do this check again and rmdir it then if possible.
4051 * See the use of check_dirs for more details.
4052 */
4053 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4054 sctx->cur_ino);
4055 if (ret < 0)
4056 goto out;
4057 if (ret) {
4058 ret = send_rmdir(sctx, valid_path);
4059 if (ret < 0)
4060 goto out;
4061 } else if (!is_orphan) {
4062 ret = orphanize_inode(sctx, sctx->cur_ino,
4063 sctx->cur_inode_gen, valid_path);
4064 if (ret < 0)
4065 goto out;
4066 is_orphan = 1;
4067 }
4068
4069 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4070 ret = dup_ref(cur, &check_dirs);
4071 if (ret < 0)
4072 goto out;
4073 }
4074 } else if (S_ISDIR(sctx->cur_inode_mode) &&
4075 !list_empty(&sctx->deleted_refs)) {
		/*
		 * We have a moved dir. Add the old parent to check_dirs.
		 */
4079 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
4080 list);
4081 ret = dup_ref(cur, &check_dirs);
4082 if (ret < 0)
4083 goto out;
4084 } else if (!S_ISDIR(sctx->cur_inode_mode)) {
4085 /*
4086 * We have a non dir inode. Go through all deleted refs and
4087 * unlink them if they were not already overwritten by other
4088 * inodes.
4089 */
4090 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4091 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4092 sctx->cur_ino, sctx->cur_inode_gen,
4093 cur->name, cur->name_len);
4094 if (ret < 0)
4095 goto out;
4096 if (!ret) {
4097 /*
4098 * If we orphanized any ancestor before, we need
4099 * to recompute the full path for deleted names,
4100 * since any such path was computed before we
4101 * processed any references and orphanized any
4102 * ancestor inode.
4103 */
4104 if (orphanized_ancestor) {
4105 ret = update_ref_path(sctx, cur);
4106 if (ret < 0)
4107 goto out;
4108 }
4109 ret = send_unlink(sctx, cur->full_path);
4110 if (ret < 0)
4111 goto out;
4112 }
4113 ret = dup_ref(cur, &check_dirs);
4114 if (ret < 0)
4115 goto out;
4116 }
		/*
		 * If the inode is still an orphan, unlink the orphan. This may
		 * happen when a previous inode overwrote the first ref of this
		 * inode and no new refs were added for the current inode.
		 * Unlinking does not mean that the inode is deleted in all
		 * cases. There may still be links to this inode in other
		 * places.
		 */
4125 if (is_orphan) {
4126 ret = send_unlink(sctx, valid_path);
4127 if (ret < 0)
4128 goto out;
4129 }
4130 }
4131
	/*
	 * We have collected all parent dirs where cur_inode was once located.
	 * We now go through all these dirs and check if they are pending for
	 * deletion and if it's finally possible to perform the rmdir now.
	 * We also update the utimes of the parent dirs here.
	 */
4138 list_for_each_entry(cur, &check_dirs, list) {
4139 /*
4140 * In case we had refs into dirs that were not processed yet,
4141 * we don't need to do the utime and rmdir logic for these dirs.
4142 * The dir will be processed later.
4143 */
4144 if (cur->dir > sctx->cur_ino)
4145 continue;
4146
4147 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4148 if (ret < 0)
4149 goto out;
4150
4151 if (ret == inode_state_did_create ||
4152 ret == inode_state_no_change) {
4153 /* TODO delayed utimes */
4154 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
4155 if (ret < 0)
4156 goto out;
4157 } else if (ret == inode_state_did_delete &&
4158 cur->dir != last_dir_ino_rm) {
4159 ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
4160 sctx->cur_ino);
4161 if (ret < 0)
4162 goto out;
4163 if (ret) {
4164 ret = get_cur_path(sctx, cur->dir,
4165 cur->dir_gen, valid_path);
4166 if (ret < 0)
4167 goto out;
4168 ret = send_rmdir(sctx, valid_path);
4169 if (ret < 0)
4170 goto out;
4171 last_dir_ino_rm = cur->dir;
4172 }
4173 }
4174 }
4175
4176 ret = 0;
4177
4178out:
4179 __free_recorded_refs(&check_dirs);
4180 free_recorded_refs(sctx);
4181 fs_path_free(valid_path);
4182 return ret;
4183}
4184
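/*
 * Record a single ref (parent dir plus name) into the given list, resolving
 * the parent directory to its current receive-side path first.
 */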
4185static int record_ref(struct btrfs_root *root, u64 dir, struct fs_path *name,
4186 void *ctx, struct list_head *refs)
4187{
4188 int ret = 0;
4189 struct send_ctx *sctx = ctx;
4190 struct fs_path *p;
4191 u64 gen;
4192
4193 p = fs_path_alloc();
4194 if (!p)
4195 return -ENOMEM;
4196
4197 ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
4198 NULL, NULL);
4199 if (ret < 0)
4200 goto out;
4201
4202 ret = get_cur_path(sctx, dir, gen, p);
4203 if (ret < 0)
4204 goto out;
4205 ret = fs_path_add_path(p, name);
4206 if (ret < 0)
4207 goto out;
4208
4209 ret = __record_ref(refs, dir, gen, p);
4210
4211out:
4212 if (ret)
4213 fs_path_free(p);
4214 return ret;
4215}
4216
4217static int __record_new_ref(int num, u64 dir, int index,
4218 struct fs_path *name,
4219 void *ctx)
4220{
4221 struct send_ctx *sctx = ctx;
4222 return record_ref(sctx->send_root, dir, name, ctx, &sctx->new_refs);
4223}
4224
4225
4226static int __record_deleted_ref(int num, u64 dir, int index,
4227 struct fs_path *name,
4228 void *ctx)
4229{
4230 struct send_ctx *sctx = ctx;
4231 return record_ref(sctx->parent_root, dir, name, ctx,
4232 &sctx->deleted_refs);
4233}
4234
4235static int record_new_ref(struct send_ctx *sctx)
4236{
4237 int ret;
4238
4239 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4240 sctx->cmp_key, 0, __record_new_ref, sctx);
4241 if (ret < 0)
4242 goto out;
4243 ret = 0;
4244
4245out:
4246 return ret;
4247}
4248
4249static int record_deleted_ref(struct send_ctx *sctx)
4250{
4251 int ret;
4252
4253 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4254 sctx->cmp_key, 0, __record_deleted_ref, sctx);
4255 if (ret < 0)
4256 goto out;
4257 ret = 0;
4258
4259out:
4260 return ret;
4261}
4262
4263struct find_ref_ctx {
4264 u64 dir;
4265 u64 dir_gen;
4266 struct btrfs_root *root;
4267 struct fs_path *name;
4268 int found_idx;
4269};
4270
4271static int __find_iref(int num, u64 dir, int index,
4272 struct fs_path *name,
4273 void *ctx_)
4274{
4275 struct find_ref_ctx *ctx = ctx_;
4276 u64 dir_gen;
4277 int ret;
4278
4279 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
4280 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
4281 /*
4282 * To avoid doing extra lookups we'll only do this if everything
4283 * else matches.
4284 */
4285 ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
4286 NULL, NULL, NULL);
4287 if (ret)
4288 return ret;
4289 if (dir_gen != ctx->dir_gen)
4290 return 0;
4291 ctx->found_idx = num;
4292 return 1;
4293 }
4294 return 0;
4295}
4296
4297static int find_iref(struct btrfs_root *root,
4298 struct btrfs_path *path,
4299 struct btrfs_key *key,
4300 u64 dir, u64 dir_gen, struct fs_path *name)
4301{
4302 int ret;
4303 struct find_ref_ctx ctx;
4304
4305 ctx.dir = dir;
4306 ctx.name = name;
4307 ctx.dir_gen = dir_gen;
4308 ctx.found_idx = -1;
4309 ctx.root = root;
4310
4311 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
4312 if (ret < 0)
4313 return ret;
4314
4315 if (ctx.found_idx == -1)
4316 return -ENOENT;
4317
4318 return ctx.found_idx;
4319}
4320
4321static int __record_changed_new_ref(int num, u64 dir, int index,
4322 struct fs_path *name,
4323 void *ctx)
4324{
4325 u64 dir_gen;
4326 int ret;
4327 struct send_ctx *sctx = ctx;
4328
4329 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
4330 NULL, NULL, NULL);
4331 if (ret)
4332 return ret;
4333
4334 ret = find_iref(sctx->parent_root, sctx->right_path,
4335 sctx->cmp_key, dir, dir_gen, name);
4336 if (ret == -ENOENT)
4337 ret = __record_new_ref(num, dir, index, name, sctx);
4338 else if (ret > 0)
4339 ret = 0;
4340
4341 return ret;
4342}
4343
4344static int __record_changed_deleted_ref(int num, u64 dir, int index,
4345 struct fs_path *name,
4346 void *ctx)
4347{
4348 u64 dir_gen;
4349 int ret;
4350 struct send_ctx *sctx = ctx;
4351
4352 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
4353 NULL, NULL, NULL);
4354 if (ret)
4355 return ret;
4356
4357 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
4358 dir, dir_gen, name);
4359 if (ret == -ENOENT)
4360 ret = __record_deleted_ref(num, dir, index, name, sctx);
4361 else if (ret > 0)
4362 ret = 0;
4363
4364 return ret;
4365}
4366
4367static int record_changed_ref(struct send_ctx *sctx)
4368{
4369 int ret = 0;
4370
4371 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4372 sctx->cmp_key, 0, __record_changed_new_ref, sctx);
4373 if (ret < 0)
4374 goto out;
4375 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4376 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
4377 if (ret < 0)
4378 goto out;
4379 ret = 0;
4380
4381out:
4382 return ret;
4383}
4384
4385/*
4386 * Record and process all refs at once. Needed when an inode changes the
4387 * generation number, which means that it was deleted and recreated.
4388 */
4389static int process_all_refs(struct send_ctx *sctx,
4390 enum btrfs_compare_tree_result cmd)
4391{
4392 int ret;
4393 struct btrfs_root *root;
4394 struct btrfs_path *path;
4395 struct btrfs_key key;
4396 struct btrfs_key found_key;
4397 struct extent_buffer *eb;
4398 int slot;
4399 iterate_inode_ref_t cb;
4400 int pending_move = 0;
4401
4402 path = alloc_path_for_send();
4403 if (!path)
4404 return -ENOMEM;
4405
4406 if (cmd == BTRFS_COMPARE_TREE_NEW) {
4407 root = sctx->send_root;
4408 cb = __record_new_ref;
4409 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
4410 root = sctx->parent_root;
4411 cb = __record_deleted_ref;
4412 } else {
4413 btrfs_err(sctx->send_root->fs_info,
4414 "Wrong command %d in process_all_refs", cmd);
4415 ret = -EINVAL;
4416 goto out;
4417 }
4418
4419 key.objectid = sctx->cmp_key->objectid;
4420 key.type = BTRFS_INODE_REF_KEY;
4421 key.offset = 0;
4422 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4423 if (ret < 0)
4424 goto out;
4425
4426 while (1) {
4427 eb = path->nodes[0];
4428 slot = path->slots[0];
4429 if (slot >= btrfs_header_nritems(eb)) {
4430 ret = btrfs_next_leaf(root, path);
4431 if (ret < 0)
4432 goto out;
4433 else if (ret > 0)
4434 break;
4435 continue;
4436 }
4437
4438 btrfs_item_key_to_cpu(eb, &found_key, slot);
4439
4440 if (found_key.objectid != key.objectid ||
4441 (found_key.type != BTRFS_INODE_REF_KEY &&
4442 found_key.type != BTRFS_INODE_EXTREF_KEY))
4443 break;
4444
4445 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
4446 if (ret < 0)
4447 goto out;
4448
4449 path->slots[0]++;
4450 }
4451 btrfs_release_path(path);
4452
4453 /*
	 * We don't actually care about pending_move as we are simply
	 * re-creating this inode and will be renaming it into place once we
	 * rename the parent directory.
4457 */
4458 ret = process_recorded_refs(sctx, &pending_move);
4459out:
4460 btrfs_free_path(path);
4461 return ret;
4462}
4463
4464static int send_set_xattr(struct send_ctx *sctx,
4465 struct fs_path *path,
4466 const char *name, int name_len,
4467 const char *data, int data_len)
4468{
4469 int ret = 0;
4470
4471 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
4472 if (ret < 0)
4473 goto out;
4474
4475 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4476 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4477 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
4478
4479 ret = send_cmd(sctx);
4480
4481tlv_put_failure:
4482out:
4483 return ret;
4484}
4485
4486static int send_remove_xattr(struct send_ctx *sctx,
4487 struct fs_path *path,
4488 const char *name, int name_len)
4489{
4490 int ret = 0;
4491
4492 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
4493 if (ret < 0)
4494 goto out;
4495
4496 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4497 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4498
4499 ret = send_cmd(sctx);
4500
4501tlv_put_failure:
4502out:
4503 return ret;
4504}
4505
4506static int __process_new_xattr(int num, struct btrfs_key *di_key,
4507 const char *name, int name_len,
4508 const char *data, int data_len,
4509 u8 type, void *ctx)
4510{
4511 int ret;
4512 struct send_ctx *sctx = ctx;
4513 struct fs_path *p;
4514 struct posix_acl_xattr_header dummy_acl;
4515
4516 p = fs_path_alloc();
4517 if (!p)
4518 return -ENOMEM;
4519
4520 /*
	 * This hack is needed because empty acls are stored as zero byte
	 * data in xattrs. The problem is that receiving these zero byte acls
	 * will fail later. To work around this, we send a dummy acl list that
	 * contains only the version number and no entries.
4525 */
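	/*
	 * Sketch of the substituted payload (assuming the generic
	 * posix_acl_xattr_header layout): it consists of a single __le32
	 * a_version field and no posix_acl_xattr_entry records, so data_len
	 * becomes sizeof(dummy_acl) (4 bytes).
	 */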
4526 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
4527 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
4528 if (data_len == 0) {
4529 dummy_acl.a_version =
4530 cpu_to_le32(POSIX_ACL_XATTR_VERSION);
4531 data = (char *)&dummy_acl;
4532 data_len = sizeof(dummy_acl);
4533 }
4534 }
4535
4536 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4537 if (ret < 0)
4538 goto out;
4539
4540 ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
4541
4542out:
4543 fs_path_free(p);
4544 return ret;
4545}
4546
4547static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
4548 const char *name, int name_len,
4549 const char *data, int data_len,
4550 u8 type, void *ctx)
4551{
4552 int ret;
4553 struct send_ctx *sctx = ctx;
4554 struct fs_path *p;
4555
4556 p = fs_path_alloc();
4557 if (!p)
4558 return -ENOMEM;
4559
4560 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4561 if (ret < 0)
4562 goto out;
4563
4564 ret = send_remove_xattr(sctx, p, name, name_len);
4565
4566out:
4567 fs_path_free(p);
4568 return ret;
4569}
4570
4571static int process_new_xattr(struct send_ctx *sctx)
4572{
4573 int ret = 0;
4574
4575 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4576 __process_new_xattr, sctx);
4577
4578 return ret;
4579}
4580
4581static int process_deleted_xattr(struct send_ctx *sctx)
4582{
4583 return iterate_dir_item(sctx->parent_root, sctx->right_path,
4584 __process_deleted_xattr, sctx);
4585}
4586
4587struct find_xattr_ctx {
4588 const char *name;
4589 int name_len;
4590 int found_idx;
4591 char *found_data;
4592 int found_data_len;
4593};
4594
4595static int __find_xattr(int num, struct btrfs_key *di_key,
4596 const char *name, int name_len,
4597 const char *data, int data_len,
4598 u8 type, void *vctx)
4599{
4600 struct find_xattr_ctx *ctx = vctx;
4601
4602 if (name_len == ctx->name_len &&
4603 strncmp(name, ctx->name, name_len) == 0) {
4604 ctx->found_idx = num;
4605 ctx->found_data_len = data_len;
4606 ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
4607 if (!ctx->found_data)
4608 return -ENOMEM;
4609 return 1;
4610 }
4611 return 0;
4612}
4613
4614static int find_xattr(struct btrfs_root *root,
4615 struct btrfs_path *path,
4616 struct btrfs_key *key,
4617 const char *name, int name_len,
4618 char **data, int *data_len)
4619{
4620 int ret;
4621 struct find_xattr_ctx ctx;
4622
4623 ctx.name = name;
4624 ctx.name_len = name_len;
4625 ctx.found_idx = -1;
4626 ctx.found_data = NULL;
4627 ctx.found_data_len = 0;
4628
4629 ret = iterate_dir_item(root, path, __find_xattr, &ctx);
4630 if (ret < 0)
4631 return ret;
4632
4633 if (ctx.found_idx == -1)
4634 return -ENOENT;
4635 if (data) {
4636 *data = ctx.found_data;
4637 *data_len = ctx.found_data_len;
4638 } else {
4639 kfree(ctx.found_data);
4640 }
4641 return ctx.found_idx;
4642}
4643
4644
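/*
 * For a changed xattr item, emit a set_xattr command only if the xattr does
 * not exist in the parent snapshot or its data differs from what the parent
 * snapshot has.
 */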
4645static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
4646 const char *name, int name_len,
4647 const char *data, int data_len,
4648 u8 type, void *ctx)
4649{
4650 int ret;
4651 struct send_ctx *sctx = ctx;
4652 char *found_data = NULL;
4653 int found_data_len = 0;
4654
4655 ret = find_xattr(sctx->parent_root, sctx->right_path,
4656 sctx->cmp_key, name, name_len, &found_data,
4657 &found_data_len);
4658 if (ret == -ENOENT) {
4659 ret = __process_new_xattr(num, di_key, name, name_len, data,
4660 data_len, type, ctx);
4661 } else if (ret >= 0) {
4662 if (data_len != found_data_len ||
4663 memcmp(data, found_data, data_len)) {
4664 ret = __process_new_xattr(num, di_key, name, name_len,
4665 data, data_len, type, ctx);
4666 } else {
4667 ret = 0;
4668 }
4669 }
4670
4671 kfree(found_data);
4672 return ret;
4673}
4674
4675static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
4676 const char *name, int name_len,
4677 const char *data, int data_len,
4678 u8 type, void *ctx)
4679{
4680 int ret;
4681 struct send_ctx *sctx = ctx;
4682
4683 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
4684 name, name_len, NULL, NULL);
4685 if (ret == -ENOENT)
4686 ret = __process_deleted_xattr(num, di_key, name, name_len, data,
4687 data_len, type, ctx);
4688 else if (ret >= 0)
4689 ret = 0;
4690
4691 return ret;
4692}
4693
4694static int process_changed_xattr(struct send_ctx *sctx)
4695{
4696 int ret = 0;
4697
4698 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4699 __process_changed_new_xattr, sctx);
4700 if (ret < 0)
4701 goto out;
4702 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
4703 __process_changed_deleted_xattr, sctx);
4704
4705out:
4706 return ret;
4707}
4708
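/*
 * Iterate all xattr items of the current inode in the send root and emit a
 * set_xattr command for each one. Used when the whole inode has to be treated
 * as new (e.g. after a generation change).
 */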
4709static int process_all_new_xattrs(struct send_ctx *sctx)
4710{
4711 int ret;
4712 struct btrfs_root *root;
4713 struct btrfs_path *path;
4714 struct btrfs_key key;
4715 struct btrfs_key found_key;
4716 struct extent_buffer *eb;
4717 int slot;
4718
4719 path = alloc_path_for_send();
4720 if (!path)
4721 return -ENOMEM;
4722
4723 root = sctx->send_root;
4724
4725 key.objectid = sctx->cmp_key->objectid;
4726 key.type = BTRFS_XATTR_ITEM_KEY;
4727 key.offset = 0;
4728 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4729 if (ret < 0)
4730 goto out;
4731
4732 while (1) {
4733 eb = path->nodes[0];
4734 slot = path->slots[0];
4735 if (slot >= btrfs_header_nritems(eb)) {
4736 ret = btrfs_next_leaf(root, path);
4737 if (ret < 0) {
4738 goto out;
4739 } else if (ret > 0) {
4740 ret = 0;
4741 break;
4742 }
4743 continue;
4744 }
4745
4746 btrfs_item_key_to_cpu(eb, &found_key, slot);
4747 if (found_key.objectid != key.objectid ||
4748 found_key.type != key.type) {
4749 ret = 0;
4750 goto out;
4751 }
4752
4753 ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
4754 if (ret < 0)
4755 goto out;
4756
4757 path->slots[0]++;
4758 }
4759
4760out:
4761 btrfs_free_path(path);
4762 return ret;
4763}
4764
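/*
 * Copy up to @len bytes of the current inode's data, starting at @offset, from
 * the page cache into sctx->read_buf, triggering readahead and page reads as
 * needed. Returns the number of bytes copied (clamped to the inode's size) or
 * a negative error code.
 */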
4765static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4766{
4767 struct btrfs_root *root = sctx->send_root;
4768 struct btrfs_fs_info *fs_info = root->fs_info;
4769 struct inode *inode;
4770 struct page *page;
4771 char *addr;
4772 struct btrfs_key key;
4773 pgoff_t index = offset >> PAGE_SHIFT;
4774 pgoff_t last_index;
4775 unsigned pg_offset = offset_in_page(offset);
4776 ssize_t ret = 0;
4777
4778 key.objectid = sctx->cur_ino;
4779 key.type = BTRFS_INODE_ITEM_KEY;
4780 key.offset = 0;
4781
4782 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
4783 if (IS_ERR(inode))
4784 return PTR_ERR(inode);
4785
4786 if (offset + len > i_size_read(inode)) {
4787 if (offset > i_size_read(inode))
4788 len = 0;
4789 else
			len = i_size_read(inode) - offset;
4791 }
4792 if (len == 0)
4793 goto out;
4794
4795 last_index = (offset + len - 1) >> PAGE_SHIFT;
4796
4797 /* initial readahead */
4798 memset(&sctx->ra, 0, sizeof(struct file_ra_state));
4799 file_ra_state_init(&sctx->ra, inode->i_mapping);
4800
4801 while (index <= last_index) {
4802 unsigned cur_len = min_t(unsigned, len,
4803 PAGE_SIZE - pg_offset);
4804
4805 page = find_lock_page(inode->i_mapping, index);
4806 if (!page) {
4807 page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
4808 NULL, index, last_index + 1 - index);
4809
4810 page = find_or_create_page(inode->i_mapping, index,
4811 GFP_KERNEL);
4812 if (!page) {
4813 ret = -ENOMEM;
4814 break;
4815 }
4816 }
4817
4818 if (PageReadahead(page)) {
4819 page_cache_async_readahead(inode->i_mapping, &sctx->ra,
4820 NULL, page, index, last_index + 1 - index);
4821 }
4822
4823 if (!PageUptodate(page)) {
4824 btrfs_readpage(NULL, page);
4825 lock_page(page);
4826 if (!PageUptodate(page)) {
4827 unlock_page(page);
4828 put_page(page);
4829 ret = -EIO;
4830 break;
4831 }
4832 }
4833
4834 addr = kmap(page);
4835 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
4836 kunmap(page);
4837 unlock_page(page);
4838 put_page(page);
4839 index++;
4840 pg_offset = 0;
4841 len -= cur_len;
4842 ret += cur_len;
4843 }
4844out:
4845 iput(inode);
4846 return ret;
4847}
4848
4849/*
4850 * Read some bytes from the current inode/file and send a write command to
4851 * user space.
4852 */
4853static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
4854{
4855 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
4856 int ret = 0;
4857 struct fs_path *p;
4858 ssize_t num_read = 0;
4859
4860 p = fs_path_alloc();
4861 if (!p)
4862 return -ENOMEM;
4863
4864 btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
4865
4866 num_read = fill_read_buf(sctx, offset, len);
4867 if (num_read <= 0) {
4868 if (num_read < 0)
4869 ret = num_read;
4870 goto out;
4871 }
4872
4873 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
4874 if (ret < 0)
4875 goto out;
4876
4877 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4878 if (ret < 0)
4879 goto out;
4880
4881 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4882 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4883 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);
4884
4885 ret = send_cmd(sctx);
4886
4887tlv_put_failure:
4888out:
4889 fs_path_free(p);
4890 if (ret < 0)
4891 return ret;
4892 return num_read;
4893}
4894
4895/*
4896 * Send a clone command to user space.
4897 */
4898static int send_clone(struct send_ctx *sctx,
4899 u64 offset, u32 len,
4900 struct clone_root *clone_root)
4901{
4902 int ret = 0;
4903 struct fs_path *p;
4904 u64 gen;
4905
4906 btrfs_debug(sctx->send_root->fs_info,
4907 "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
4908 offset, len, clone_root->root->root_key.objectid,
4909 clone_root->ino, clone_root->offset);
4910
4911 p = fs_path_alloc();
4912 if (!p)
4913 return -ENOMEM;
4914
4915 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
4916 if (ret < 0)
4917 goto out;
4918
4919 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4920 if (ret < 0)
4921 goto out;
4922
4923 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4924 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
4925 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4926
4927 if (clone_root->root == sctx->send_root) {
4928 ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
4929 &gen, NULL, NULL, NULL, NULL);
4930 if (ret < 0)
4931 goto out;
4932 ret = get_cur_path(sctx, clone_root->ino, gen, p);
4933 } else {
4934 ret = get_inode_path(clone_root->root, clone_root->ino, p);
4935 }
4936 if (ret < 0)
4937 goto out;
4938
4939 /*
4940 * If the parent we're using has a received_uuid set then use that as
4941 * our clone source as that is what we will look for when doing a
4942 * receive.
4943 *
4944 * This covers the case that we create a snapshot off of a received
4945 * subvolume and then use that as the parent and try to receive on a
4946 * different host.
4947 */
4948 if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
4949 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
4950 clone_root->root->root_item.received_uuid);
4951 else
4952 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
4953 clone_root->root->root_item.uuid);
4954 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
4955 le64_to_cpu(clone_root->root->root_item.ctransid));
4956 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
4957 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
4958 clone_root->offset);
4959
4960 ret = send_cmd(sctx);
4961
4962tlv_put_failure:
4963out:
4964 fs_path_free(p);
4965 return ret;
4966}
4967
4968/*
4969 * Send an update extent command to user space.
4970 */
4971static int send_update_extent(struct send_ctx *sctx,
4972 u64 offset, u32 len)
4973{
4974 int ret = 0;
4975 struct fs_path *p;
4976
4977 p = fs_path_alloc();
4978 if (!p)
4979 return -ENOMEM;
4980
4981 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
4982 if (ret < 0)
4983 goto out;
4984
4985 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4986 if (ret < 0)
4987 goto out;
4988
4989 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4990 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4991 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
4992
4993 ret = send_cmd(sctx);
4994
4995tlv_put_failure:
4996out:
4997 fs_path_free(p);
4998 return ret;
4999}
5000
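/*
 * Send a hole, i.e. a run of zeroes, for the file range starting at
 * cur_inode_last_extent and ending at @end (capped to the inode's size).
 * Without BTRFS_SEND_FLAG_NO_FILE_DATA this is done with one or more write
 * commands filled with zeroes; with the flag set a single update_extent
 * command is sent instead.
 */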
5001static int send_hole(struct send_ctx *sctx, u64 end)
5002{
5003 struct fs_path *p = NULL;
5004 u64 offset = sctx->cur_inode_last_extent;
5005 u64 len;
5006 int ret = 0;
5007
5008 /*
5009 * A hole that starts at EOF or beyond it. Since we do not yet support
5010 * fallocate (for extent preallocation and hole punching), sending a
5011 * write of zeroes starting at EOF or beyond would later require issuing
5012 * a truncate operation which would undo the write and achieve nothing.
5013 */
5014 if (offset >= sctx->cur_inode_size)
5015 return 0;
5016
5017 /*
5018 * Don't go beyond the inode's i_size due to prealloc extents that start
5019 * after the i_size.
5020 */
5021 end = min_t(u64, end, sctx->cur_inode_size);
5022
5023 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5024 return send_update_extent(sctx, offset, end - offset);
5025
5026 p = fs_path_alloc();
5027 if (!p)
5028 return -ENOMEM;
5029 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5030 if (ret < 0)
5031 goto tlv_put_failure;
5032 memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
5033 while (offset < end) {
5034 len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);
5035
5036 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5037 if (ret < 0)
5038 break;
5039 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5040 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5041 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
5042 ret = send_cmd(sctx);
5043 if (ret < 0)
5044 break;
5045 offset += len;
5046 }
5047 sctx->cur_inode_next_write_offset = offset;
5048tlv_put_failure:
5049 fs_path_free(p);
5050 return ret;
5051}
5052
5053static int send_extent_data(struct send_ctx *sctx,
5054 const u64 offset,
5055 const u64 len)
5056{
5057 u64 sent = 0;
5058
5059 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5060 return send_update_extent(sctx, offset, len);
5061
5062 while (sent < len) {
5063 u64 size = len - sent;
5064 int ret;
5065
5066 if (size > BTRFS_SEND_READ_SIZE)
5067 size = BTRFS_SEND_READ_SIZE;
5068 ret = send_write(sctx, offset + sent, size);
5069 if (ret < 0)
5070 return ret;
5071 if (!ret)
5072 break;
5073 sent += ret;
5074 }
5075 return 0;
5076}
5077
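/*
 * Try to satisfy the file range [@offset, @offset + @len) of the inode being
 * sent by issuing clone commands from @clone_root, falling back to regular
 * write commands (send_extent_data) for any part of the range that cannot be
 * cloned safely.
 */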
5078static int clone_range(struct send_ctx *sctx,
5079 struct clone_root *clone_root,
5080 const u64 disk_byte,
5081 u64 data_offset,
5082 u64 offset,
5083 u64 len)
5084{
5085 struct btrfs_path *path;
5086 struct btrfs_key key;
5087 int ret;
5088 u64 clone_src_i_size = 0;
5089
5090 /*
5091 * Prevent cloning from a zero offset with a length matching the sector
5092 * size because in some scenarios this will make the receiver fail.
5093 *
5094 * For example, if in the source filesystem the extent at offset 0
5095 * has a length of sectorsize and it was written using direct IO, then
5096 * it can never be an inline extent (even if compression is enabled).
5097 * Then this extent can be cloned in the original filesystem to a non
5098 * zero file offset, but it may not be possible to clone in the
5099 * destination filesystem because it can be inlined due to compression
5100 * on the destination filesystem (as the receiver's write operations are
5101 * always done using buffered IO). The same happens when the original
5102 * filesystem does not have compression enabled but the destination
5103 * filesystem has.
5104 */
5105 if (clone_root->offset == 0 &&
5106 len == sctx->send_root->fs_info->sectorsize)
5107 return send_extent_data(sctx, offset, len);
5108
5109 path = alloc_path_for_send();
5110 if (!path)
5111 return -ENOMEM;
5112
5113 /*
	 * There are inodes that have extents lying beyond their i_size. Don't
	 * accept clones from these extents.
5116 */
5117 ret = __get_inode_info(clone_root->root, path, clone_root->ino,
5118 &clone_src_i_size, NULL, NULL, NULL, NULL, NULL);
5119 btrfs_release_path(path);
5120 if (ret < 0)
5121 goto out;
5122
5123 /*
5124 * We can't send a clone operation for the entire range if we find
5125 * extent items in the respective range in the source file that
5126 * refer to different extents or if we find holes.
5127 * So check for that and do a mix of clone and regular write/copy
5128 * operations if needed.
5129 *
5130 * Example:
5131 *
5132 * mkfs.btrfs -f /dev/sda
5133 * mount /dev/sda /mnt
5134 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
5135 * cp --reflink=always /mnt/foo /mnt/bar
5136 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
5137 * btrfs subvolume snapshot -r /mnt /mnt/snap
5138 *
5139 * If when we send the snapshot and we are processing file bar (which
5140 * has a higher inode number than foo) we blindly send a clone operation
5141 * for the [0, 100K[ range from foo to bar, the receiver ends up getting
5142 * a file bar that matches the content of file foo - iow, doesn't match
5143 * the content from bar in the original filesystem.
5144 */
5145 key.objectid = clone_root->ino;
5146 key.type = BTRFS_EXTENT_DATA_KEY;
5147 key.offset = clone_root->offset;
5148 ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
5149 if (ret < 0)
5150 goto out;
5151 if (ret > 0 && path->slots[0] > 0) {
5152 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
5153 if (key.objectid == clone_root->ino &&
5154 key.type == BTRFS_EXTENT_DATA_KEY)
5155 path->slots[0]--;
5156 }
5157
5158 while (true) {
5159 struct extent_buffer *leaf = path->nodes[0];
5160 int slot = path->slots[0];
5161 struct btrfs_file_extent_item *ei;
5162 u8 type;
5163 u64 ext_len;
5164 u64 clone_len;
5165 u64 clone_data_offset;
5166
5167 if (slot >= btrfs_header_nritems(leaf)) {
5168 ret = btrfs_next_leaf(clone_root->root, path);
5169 if (ret < 0)
5170 goto out;
5171 else if (ret > 0)
5172 break;
5173 continue;
5174 }
5175
5176 btrfs_item_key_to_cpu(leaf, &key, slot);
5177
5178 /*
5179 * We might have an implicit trailing hole (NO_HOLES feature
5180 * enabled). We deal with it after leaving this loop.
5181 */
5182 if (key.objectid != clone_root->ino ||
5183 key.type != BTRFS_EXTENT_DATA_KEY)
5184 break;
5185
5186 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5187 type = btrfs_file_extent_type(leaf, ei);
5188 if (type == BTRFS_FILE_EXTENT_INLINE) {
5189 ext_len = btrfs_file_extent_ram_bytes(leaf, ei);
5190 ext_len = PAGE_ALIGN(ext_len);
5191 } else {
5192 ext_len = btrfs_file_extent_num_bytes(leaf, ei);
5193 }
5194
5195 if (key.offset + ext_len <= clone_root->offset)
5196 goto next;
5197
5198 if (key.offset > clone_root->offset) {
5199 /* Implicit hole, NO_HOLES feature enabled. */
5200 u64 hole_len = key.offset - clone_root->offset;
5201
5202 if (hole_len > len)
5203 hole_len = len;
5204 ret = send_extent_data(sctx, offset, hole_len);
5205 if (ret < 0)
5206 goto out;
5207
5208 len -= hole_len;
5209 if (len == 0)
5210 break;
5211 offset += hole_len;
5212 clone_root->offset += hole_len;
5213 data_offset += hole_len;
5214 }
5215
5216 if (key.offset >= clone_root->offset + len)
5217 break;
5218
5219 if (key.offset >= clone_src_i_size)
5220 break;
5221
5222 if (key.offset + ext_len > clone_src_i_size)
5223 ext_len = clone_src_i_size - key.offset;
5224
5225 clone_data_offset = btrfs_file_extent_offset(leaf, ei);
5226 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) {
5227 clone_root->offset = key.offset;
5228 if (clone_data_offset < data_offset &&
5229 clone_data_offset + ext_len > data_offset) {
5230 u64 extent_offset;
5231
5232 extent_offset = data_offset - clone_data_offset;
5233 ext_len -= extent_offset;
5234 clone_data_offset += extent_offset;
5235 clone_root->offset += extent_offset;
5236 }
5237 }
5238
5239 clone_len = min_t(u64, ext_len, len);
5240
5241 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
5242 clone_data_offset == data_offset) {
5243 const u64 src_end = clone_root->offset + clone_len;
5244 const u64 sectorsize = SZ_64K;
5245
5246 /*
5247 * We can't clone the last block, when its size is not
5248 * sector size aligned, into the middle of a file. If we
5249 * do so, the receiver will get a failure (-EINVAL) when
5250 * trying to clone or will silently corrupt the data in
5251 * the destination file if it's on a kernel without the
5252 * fix introduced by commit ac765f83f1397646
5253 * ("Btrfs: fix data corruption due to cloning of eof
			 * block").
5255 *
5256 * So issue a clone of the aligned down range plus a
5257 * regular write for the eof block, if we hit that case.
5258 *
5259 * Also, we use the maximum possible sector size, 64K,
5260 * because we don't know what's the sector size of the
5261 * filesystem that receives the stream, so we have to
5262 * assume the largest possible sector size.
5263 */
5264 if (src_end == clone_src_i_size &&
5265 !IS_ALIGNED(src_end, sectorsize) &&
5266 offset + clone_len < sctx->cur_inode_size) {
5267 u64 slen;
5268
5269 slen = ALIGN_DOWN(src_end - clone_root->offset,
5270 sectorsize);
5271 if (slen > 0) {
5272 ret = send_clone(sctx, offset, slen,
5273 clone_root);
5274 if (ret < 0)
5275 goto out;
5276 }
5277 ret = send_extent_data(sctx, offset + slen,
5278 clone_len - slen);
5279 } else {
5280 ret = send_clone(sctx, offset, clone_len,
5281 clone_root);
5282 }
5283 } else {
5284 ret = send_extent_data(sctx, offset, clone_len);
5285 }
5286
5287 if (ret < 0)
5288 goto out;
5289
5290 len -= clone_len;
5291 if (len == 0)
5292 break;
5293 offset += clone_len;
5294 clone_root->offset += clone_len;
5295 data_offset += clone_len;
5296next:
5297 path->slots[0]++;
5298 }
5299
5300 if (len > 0)
5301 ret = send_extent_data(sctx, offset, len);
5302 else
5303 ret = 0;
5304out:
5305 btrfs_free_path(path);
5306 return ret;
5307}
5308
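/*
 * Emit either clone commands or plain write commands for the file extent item
 * at @key, depending on whether a usable clone source was found and the end of
 * the range is block size aligned.
 */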
5309static int send_write_or_clone(struct send_ctx *sctx,
5310 struct btrfs_path *path,
5311 struct btrfs_key *key,
5312 struct clone_root *clone_root)
5313{
5314 int ret = 0;
5315 struct btrfs_file_extent_item *ei;
5316 u64 offset = key->offset;
5317 u64 len;
5318 u8 type;
5319 u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
5320
5321 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5322 struct btrfs_file_extent_item);
5323 type = btrfs_file_extent_type(path->nodes[0], ei);
5324 if (type == BTRFS_FILE_EXTENT_INLINE) {
5325 len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
5326 /*
		 * It is possible the inline item won't cover the whole page,
		 * but there may be items after this page. Make sure to send
		 * the whole thing.
5330 */
5331 len = PAGE_ALIGN(len);
5332 } else {
5333 len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
5334 }
5335
5336 if (offset >= sctx->cur_inode_size) {
5337 ret = 0;
5338 goto out;
5339 }
5340 if (offset + len > sctx->cur_inode_size)
5341 len = sctx->cur_inode_size - offset;
5342 if (len == 0) {
5343 ret = 0;
5344 goto out;
5345 }
5346
5347 if (clone_root && IS_ALIGNED(offset + len, bs)) {
5348 u64 disk_byte;
5349 u64 data_offset;
5350
5351 disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
5352 data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
5353 ret = clone_range(sctx, clone_root, disk_byte, data_offset,
5354 offset, len);
5355 } else {
5356 ret = send_extent_data(sctx, offset, len);
5357 }
5358 sctx->cur_inode_next_write_offset = offset + len;
5359out:
5360 return ret;
5361}
5362
5363static int is_extent_unchanged(struct send_ctx *sctx,
5364 struct btrfs_path *left_path,
5365 struct btrfs_key *ekey)
5366{
5367 int ret = 0;
5368 struct btrfs_key key;
5369 struct btrfs_path *path = NULL;
5370 struct extent_buffer *eb;
5371 int slot;
5372 struct btrfs_key found_key;
5373 struct btrfs_file_extent_item *ei;
5374 u64 left_disknr;
5375 u64 right_disknr;
5376 u64 left_offset;
5377 u64 right_offset;
5378 u64 left_offset_fixed;
5379 u64 left_len;
5380 u64 right_len;
5381 u64 left_gen;
5382 u64 right_gen;
5383 u8 left_type;
5384 u8 right_type;
5385
5386 path = alloc_path_for_send();
5387 if (!path)
5388 return -ENOMEM;
5389
5390 eb = left_path->nodes[0];
5391 slot = left_path->slots[0];
5392 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5393 left_type = btrfs_file_extent_type(eb, ei);
5394
5395 if (left_type != BTRFS_FILE_EXTENT_REG) {
5396 ret = 0;
5397 goto out;
5398 }
5399 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5400 left_len = btrfs_file_extent_num_bytes(eb, ei);
5401 left_offset = btrfs_file_extent_offset(eb, ei);
5402 left_gen = btrfs_file_extent_generation(eb, ei);
5403
5404 /*
	 * The following comments refer to this diagram. L is the left extent
	 * which we are checking at the moment. 1-8 are the right extents that
	 * we iterate over.
5408 *
5409 * |-----L-----|
5410 * |-1-|-2a-|-3-|-4-|-5-|-6-|
5411 *
5412 * |-----L-----|
5413 * |--1--|-2b-|...(same as above)
5414 *
5415 * Alternative situation. Happens on files where extents got split.
5416 * |-----L-----|
5417 * |-----------7-----------|-6-|
5418 *
5419 * Alternative situation. Happens on files which got larger.
5420 * |-----L-----|
5421 * |-8-|
5422 * Nothing follows after 8.
5423 */
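	/*
	 * Illustrative example (made up numbers): L covers the file range
	 * [64K, 192K) with left_offset 64K into disk extent D, while the
	 * parent snapshot has a single extent 7 covering [0, 256K) with
	 * right_offset 0 into the same disk extent D. On the first iteration
	 * key.offset (0) is smaller than ekey->offset (64K), so right_offset
	 * is fixed up to 64K, which matches left_offset_fixed (64K); with
	 * equal disk_bytenr and generation the whole range ends up being
	 * reported as unchanged.
	 */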
5424
5425 key.objectid = ekey->objectid;
5426 key.type = BTRFS_EXTENT_DATA_KEY;
5427 key.offset = ekey->offset;
5428 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
5429 if (ret < 0)
5430 goto out;
5431 if (ret) {
5432 ret = 0;
5433 goto out;
5434 }
5435
5436 /*
5437 * Handle special case where the right side has no extents at all.
5438 */
5439 eb = path->nodes[0];
5440 slot = path->slots[0];
5441 btrfs_item_key_to_cpu(eb, &found_key, slot);
5442 if (found_key.objectid != key.objectid ||
5443 found_key.type != key.type) {
5444 /* If we're a hole then just pretend nothing changed */
5445 ret = (left_disknr) ? 0 : 1;
5446 goto out;
5447 }
5448
5449 /*
5450 * We're now on 2a, 2b or 7.
5451 */
5452 key = found_key;
5453 while (key.offset < ekey->offset + left_len) {
5454 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5455 right_type = btrfs_file_extent_type(eb, ei);
5456 if (right_type != BTRFS_FILE_EXTENT_REG &&
5457 right_type != BTRFS_FILE_EXTENT_INLINE) {
5458 ret = 0;
5459 goto out;
5460 }
5461
5462 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5463 right_len = btrfs_file_extent_ram_bytes(eb, ei);
5464 right_len = PAGE_ALIGN(right_len);
5465 } else {
5466 right_len = btrfs_file_extent_num_bytes(eb, ei);
5467 }
5468
5469 /*
5470 * Are we at extent 8? If yes, we know the extent is changed.
5471 * This may only happen on the first iteration.
5472 */
5473 if (found_key.offset + right_len <= ekey->offset) {
5474 /* If we're a hole just pretend nothing changed */
5475 ret = (left_disknr) ? 0 : 1;
5476 goto out;
5477 }
5478
5479 /*
		 * For an inline extent we only wanted to check whether what
		 * follows it is a regular extent (i.e. apply the above
		 * condition to inline extents too). Hitting an inline extent
		 * here should normally not happen, but it's possible, for
		 * example, when we have an inline compressed extent
		 * representing data with a size matching the page size
		 * (currently the same as the sector size).
5486 */
5487 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5488 ret = 0;
5489 goto out;
5490 }
5491
5492 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5493 right_offset = btrfs_file_extent_offset(eb, ei);
5494 right_gen = btrfs_file_extent_generation(eb, ei);
5495
5496 left_offset_fixed = left_offset;
5497 if (key.offset < ekey->offset) {
5498 /* Fix the right offset for 2a and 7. */
5499 right_offset += ekey->offset - key.offset;
5500 } else {
5501 /* Fix the left offset for all behind 2a and 2b */
5502 left_offset_fixed += key.offset - ekey->offset;
5503 }
5504
5505 /*
5506 * Check if we have the same extent.
5507 */
5508 if (left_disknr != right_disknr ||
5509 left_offset_fixed != right_offset ||
5510 left_gen != right_gen) {
5511 ret = 0;
5512 goto out;
5513 }
5514
5515 /*
5516 * Go to the next extent.
5517 */
5518 ret = btrfs_next_item(sctx->parent_root, path);
5519 if (ret < 0)
5520 goto out;
5521 if (!ret) {
5522 eb = path->nodes[0];
5523 slot = path->slots[0];
5524 btrfs_item_key_to_cpu(eb, &found_key, slot);
5525 }
5526 if (ret || found_key.objectid != key.objectid ||
5527 found_key.type != key.type) {
5528 key.offset += right_len;
5529 break;
5530 }
5531 if (found_key.offset != key.offset + right_len) {
5532 ret = 0;
5533 goto out;
5534 }
5535 key = found_key;
5536 }
5537
5538 /*
5539 * We're now behind the left extent (treat as unchanged) or at the end
5540 * of the right side (treat as changed).
5541 */
5542 if (key.offset >= ekey->offset + left_len)
5543 ret = 1;
5544 else
5545 ret = 0;
5546
5547
5548out:
5549 btrfs_free_path(path);
5550 return ret;
5551}
5552
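/*
 * Find the extent of the current inode in the send root that covers or
 * precedes @offset and record its end offset in cur_inode_last_extent, so that
 * later hole detection knows where the previously processed extent ended.
 */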
5553static int get_last_extent(struct send_ctx *sctx, u64 offset)
5554{
5555 struct btrfs_path *path;
5556 struct btrfs_root *root = sctx->send_root;
5557 struct btrfs_file_extent_item *fi;
5558 struct btrfs_key key;
5559 u64 extent_end;
5560 u8 type;
5561 int ret;
5562
5563 path = alloc_path_for_send();
5564 if (!path)
5565 return -ENOMEM;
5566
5567 sctx->cur_inode_last_extent = 0;
5568
5569 key.objectid = sctx->cur_ino;
5570 key.type = BTRFS_EXTENT_DATA_KEY;
5571 key.offset = offset;
5572 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
5573 if (ret < 0)
5574 goto out;
5575 ret = 0;
5576 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
5577 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
5578 goto out;
5579
5580 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
5581 struct btrfs_file_extent_item);
5582 type = btrfs_file_extent_type(path->nodes[0], fi);
5583 if (type == BTRFS_FILE_EXTENT_INLINE) {
5584 u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi);
5585 extent_end = ALIGN(key.offset + size,
5586 sctx->send_root->fs_info->sectorsize);
5587 } else {
5588 extent_end = key.offset +
5589 btrfs_file_extent_num_bytes(path->nodes[0], fi);
5590 }
5591 sctx->cur_inode_last_extent = extent_end;
5592out:
5593 btrfs_free_path(path);
5594 return ret;
5595}
5596
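/*
 * Check whether the file range [@start, @end) of the current inode is a hole
 * in the parent snapshot. Returns 1 if the range contains only holes (implicit
 * or extents with a zero disk_bytenr), 0 if some real extent overlaps it, or a
 * negative error code.
 */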
5597static int range_is_hole_in_parent(struct send_ctx *sctx,
5598 const u64 start,
5599 const u64 end)
5600{
5601 struct btrfs_path *path;
5602 struct btrfs_key key;
5603 struct btrfs_root *root = sctx->parent_root;
5604 u64 search_start = start;
5605 int ret;
5606
5607 path = alloc_path_for_send();
5608 if (!path)
5609 return -ENOMEM;
5610
5611 key.objectid = sctx->cur_ino;
5612 key.type = BTRFS_EXTENT_DATA_KEY;
5613 key.offset = search_start;
5614 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5615 if (ret < 0)
5616 goto out;
5617 if (ret > 0 && path->slots[0] > 0)
5618 path->slots[0]--;
5619
5620 while (search_start < end) {
5621 struct extent_buffer *leaf = path->nodes[0];
5622 int slot = path->slots[0];
5623 struct btrfs_file_extent_item *fi;
5624 u64 extent_end;
5625
5626 if (slot >= btrfs_header_nritems(leaf)) {
5627 ret = btrfs_next_leaf(root, path);
5628 if (ret < 0)
5629 goto out;
5630 else if (ret > 0)
5631 break;
5632 continue;
5633 }
5634
5635 btrfs_item_key_to_cpu(leaf, &key, slot);
5636 if (key.objectid < sctx->cur_ino ||
5637 key.type < BTRFS_EXTENT_DATA_KEY)
5638 goto next;
5639 if (key.objectid > sctx->cur_ino ||
5640 key.type > BTRFS_EXTENT_DATA_KEY ||
5641 key.offset >= end)
5642 break;
5643
5644 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5645 if (btrfs_file_extent_type(leaf, fi) ==
5646 BTRFS_FILE_EXTENT_INLINE) {
5647 u64 size = btrfs_file_extent_ram_bytes(leaf, fi);
5648
5649 extent_end = ALIGN(key.offset + size,
5650 root->fs_info->sectorsize);
5651 } else {
5652 extent_end = key.offset +
5653 btrfs_file_extent_num_bytes(leaf, fi);
5654 }
5655 if (extent_end <= start)
5656 goto next;
5657 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
5658 search_start = extent_end;
5659 goto next;
5660 }
5661 ret = 0;
5662 goto out;
5663next:
5664 path->slots[0]++;
5665 }
5666 ret = 1;
5667out:
5668 btrfs_free_path(path);
5669 return ret;
5670}
5671
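/*
 * If there is a gap between the end of the previously processed extent and the
 * start of the extent at @key, and that gap is not already a hole in the
 * parent snapshot, emit writes of zeroes so the receiver ends up with a hole
 * of the same size.
 */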
5672static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
5673 struct btrfs_key *key)
5674{
5675 struct btrfs_file_extent_item *fi;
5676 u64 extent_end;
5677 u8 type;
5678 int ret = 0;
5679
5680 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
5681 return 0;
5682
5683 if (sctx->cur_inode_last_extent == (u64)-1) {
5684 ret = get_last_extent(sctx, key->offset - 1);
5685 if (ret)
5686 return ret;
5687 }
5688
5689 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
5690 struct btrfs_file_extent_item);
5691 type = btrfs_file_extent_type(path->nodes[0], fi);
5692 if (type == BTRFS_FILE_EXTENT_INLINE) {
5693 u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi);
5694 extent_end = ALIGN(key->offset + size,
5695 sctx->send_root->fs_info->sectorsize);
5696 } else {
5697 extent_end = key->offset +
5698 btrfs_file_extent_num_bytes(path->nodes[0], fi);
5699 }
5700
5701 if (path->slots[0] == 0 &&
5702 sctx->cur_inode_last_extent < key->offset) {
5703 /*
		 * We might have skipped entire leaves that contained only
		 * file extent items for our current inode. These leaves have
		 * a generation number smaller (older) than the one in the
		 * current leaf and the leaf our last extent came from, and
		 * are located between these two leaves.
5709 */
5710 ret = get_last_extent(sctx, key->offset - 1);
5711 if (ret)
5712 return ret;
5713 }
5714
5715 if (sctx->cur_inode_last_extent < key->offset) {
5716 ret = range_is_hole_in_parent(sctx,
5717 sctx->cur_inode_last_extent,
5718 key->offset);
5719 if (ret < 0)
5720 return ret;
5721 else if (ret == 0)
5722 ret = send_hole(sctx, key->offset);
5723 else
5724 ret = 0;
5725 }
5726 sctx->cur_inode_last_extent = extent_end;
5727 return ret;
5728}
5729
5730static int process_extent(struct send_ctx *sctx,
5731 struct btrfs_path *path,
5732 struct btrfs_key *key)
5733{
5734 struct clone_root *found_clone = NULL;
5735 int ret = 0;
5736
5737 if (S_ISLNK(sctx->cur_inode_mode))
5738 return 0;
5739
5740 if (sctx->parent_root && !sctx->cur_inode_new) {
5741 ret = is_extent_unchanged(sctx, path, key);
5742 if (ret < 0)
5743 goto out;
5744 if (ret) {
5745 ret = 0;
5746 goto out_hole;
5747 }
5748 } else {
5749 struct btrfs_file_extent_item *ei;
5750 u8 type;
5751
5752 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5753 struct btrfs_file_extent_item);
5754 type = btrfs_file_extent_type(path->nodes[0], ei);
5755 if (type == BTRFS_FILE_EXTENT_PREALLOC ||
5756 type == BTRFS_FILE_EXTENT_REG) {
5757 /*
5758 * The send spec does not have a prealloc command yet,
5759 * so just leave a hole for prealloc'ed extents until
5760 * we have enough commands queued up to justify rev'ing
5761 * the send spec.
5762 */
5763 if (type == BTRFS_FILE_EXTENT_PREALLOC) {
5764 ret = 0;
5765 goto out;
5766 }
5767
5768 /* Have a hole, just skip it. */
5769 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
5770 ret = 0;
5771 goto out;
5772 }
5773 }
5774 }
5775
5776 ret = find_extent_clone(sctx, path, key->objectid, key->offset,
5777 sctx->cur_inode_size, &found_clone);
5778 if (ret != -ENOENT && ret < 0)
5779 goto out;
5780
5781 ret = send_write_or_clone(sctx, path, key, found_clone);
5782 if (ret)
5783 goto out;
5784out_hole:
5785 ret = maybe_send_hole(sctx, path, key);
5786out:
5787 return ret;
5788}
5789
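/*
 * Process all file extent items of the current inode in the send root as if
 * they were new. Used for the new generation case, where the inode was deleted
 * and recreated with the same number.
 */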
5790static int process_all_extents(struct send_ctx *sctx)
5791{
5792 int ret;
5793 struct btrfs_root *root;
5794 struct btrfs_path *path;
5795 struct btrfs_key key;
5796 struct btrfs_key found_key;
5797 struct extent_buffer *eb;
5798 int slot;
5799
5800 root = sctx->send_root;
5801 path = alloc_path_for_send();
5802 if (!path)
5803 return -ENOMEM;
5804
5805 key.objectid = sctx->cmp_key->objectid;
5806 key.type = BTRFS_EXTENT_DATA_KEY;
5807 key.offset = 0;
5808 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5809 if (ret < 0)
5810 goto out;
5811
5812 while (1) {
5813 eb = path->nodes[0];
5814 slot = path->slots[0];
5815
5816 if (slot >= btrfs_header_nritems(eb)) {
5817 ret = btrfs_next_leaf(root, path);
5818 if (ret < 0) {
5819 goto out;
5820 } else if (ret > 0) {
5821 ret = 0;
5822 break;
5823 }
5824 continue;
5825 }
5826
5827 btrfs_item_key_to_cpu(eb, &found_key, slot);
5828
5829 if (found_key.objectid != key.objectid ||
5830 found_key.type != key.type) {
5831 ret = 0;
5832 goto out;
5833 }
5834
5835 ret = process_extent(sctx, path, &found_key);
5836 if (ret < 0)
5837 goto out;
5838
5839 path->slots[0]++;
5840 }
5841
5842out:
5843 btrfs_free_path(path);
5844 return ret;
5845}
5846
5847static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
5848 int *pending_move,
5849 int *refs_processed)
5850{
5851 int ret = 0;
5852
5853 if (sctx->cur_ino == 0)
5854 goto out;
5855 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
5856 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
5857 goto out;
5858 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
5859 goto out;
5860
5861 ret = process_recorded_refs(sctx, pending_move);
5862 if (ret < 0)
5863 goto out;
5864
5865 *refs_processed = 1;
5866out:
5867 return ret;
5868}
5869
5870static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
5871{
5872 int ret = 0;
5873 u64 left_mode;
5874 u64 left_uid;
5875 u64 left_gid;
5876 u64 right_mode;
5877 u64 right_uid;
5878 u64 right_gid;
5879 int need_chmod = 0;
5880 int need_chown = 0;
5881 int need_truncate = 1;
5882 int pending_move = 0;
5883 int refs_processed = 0;
5884
5885 if (sctx->ignore_cur_inode)
5886 return 0;
5887
5888 ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
5889 &refs_processed);
5890 if (ret < 0)
5891 goto out;
5892
5893 /*
5894 * We have processed the refs and thus need to advance send_progress.
5895 * Now, calls to get_cur_xxx will take the updated refs of the current
5896 * inode into account.
5897 *
5898 * On the other hand, if our current inode is a directory and couldn't
5899 * be moved/renamed because its parent was renamed/moved too and it has
5900 * a higher inode number, we can only move/rename our current inode
5901 * after we moved/renamed its parent. Therefore in this case operate on
5902 * the old path (pre move/rename) of our current inode, and the
5903 * move/rename will be performed later.
5904 */
5905 if (refs_processed && !pending_move)
5906 sctx->send_progress = sctx->cur_ino + 1;
5907
5908 if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
5909 goto out;
5910 if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
5911 goto out;
5912
5913 ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
5914 &left_mode, &left_uid, &left_gid, NULL);
5915 if (ret < 0)
5916 goto out;
5917
5918 if (!sctx->parent_root || sctx->cur_inode_new) {
5919 need_chown = 1;
5920 if (!S_ISLNK(sctx->cur_inode_mode))
5921 need_chmod = 1;
5922 if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size)
5923 need_truncate = 0;
5924 } else {
5925 u64 old_size;
5926
5927 ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
5928 &old_size, NULL, &right_mode, &right_uid,
5929 &right_gid, NULL);
5930 if (ret < 0)
5931 goto out;
5932
5933 if (left_uid != right_uid || left_gid != right_gid)
5934 need_chown = 1;
5935 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
5936 need_chmod = 1;
5937 if ((old_size == sctx->cur_inode_size) ||
5938 (sctx->cur_inode_size > old_size &&
5939 sctx->cur_inode_next_write_offset == sctx->cur_inode_size))
5940 need_truncate = 0;
5941 }
5942
5943 if (S_ISREG(sctx->cur_inode_mode)) {
5944 if (need_send_hole(sctx)) {
5945 if (sctx->cur_inode_last_extent == (u64)-1 ||
5946 sctx->cur_inode_last_extent <
5947 sctx->cur_inode_size) {
5948 ret = get_last_extent(sctx, (u64)-1);
5949 if (ret)
5950 goto out;
5951 }
5952 if (sctx->cur_inode_last_extent <
5953 sctx->cur_inode_size) {
5954 ret = send_hole(sctx, sctx->cur_inode_size);
5955 if (ret)
5956 goto out;
5957 }
5958 }
5959 if (need_truncate) {
5960 ret = send_truncate(sctx, sctx->cur_ino,
5961 sctx->cur_inode_gen,
5962 sctx->cur_inode_size);
5963 if (ret < 0)
5964 goto out;
5965 }
5966 }
5967
5968 if (need_chown) {
5969 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
5970 left_uid, left_gid);
5971 if (ret < 0)
5972 goto out;
5973 }
5974 if (need_chmod) {
5975 ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
5976 left_mode);
5977 if (ret < 0)
5978 goto out;
5979 }
5980
5981 /*
5982 * If other directory inodes depended on our current directory
5983 * inode's move/rename, now do their move/rename operations.
5984 */
5985 if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
5986 ret = apply_children_dir_moves(sctx);
5987 if (ret)
5988 goto out;
5989 /*
		 * We need to send the utimes every time, no matter whether
		 * they actually changed between the two trees, as we have made
		 * changes to the inode before. If our inode is a directory and
		 * it's waiting to be moved/renamed, we will send its utimes
		 * when it's moved/renamed, therefore we don't need to do it
		 * here.
5995 */
5996 sctx->send_progress = sctx->cur_ino + 1;
5997 ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
5998 if (ret < 0)
5999 goto out;
6000 }
6001
6002out:
6003 return ret;
6004}
6005
6006struct parent_paths_ctx {
6007 struct list_head *refs;
6008 struct send_ctx *sctx;
6009};
6010
6011static int record_parent_ref(int num, u64 dir, int index, struct fs_path *name,
6012 void *ctx)
6013{
6014 struct parent_paths_ctx *ppctx = ctx;
6015
6016 return record_ref(ppctx->sctx->parent_root, dir, name, ppctx->sctx,
6017 ppctx->refs);
6018}
6019
6020/*
6021 * Issue unlink operations for all paths of the current inode found in the
6022 * parent snapshot.
6023 */
6024static int btrfs_unlink_all_paths(struct send_ctx *sctx)
6025{
6026 LIST_HEAD(deleted_refs);
6027 struct btrfs_path *path;
6028 struct btrfs_key key;
6029 struct parent_paths_ctx ctx;
6030 int ret;
6031
6032 path = alloc_path_for_send();
6033 if (!path)
6034 return -ENOMEM;
6035
6036 key.objectid = sctx->cur_ino;
6037 key.type = BTRFS_INODE_REF_KEY;
6038 key.offset = 0;
6039 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
6040 if (ret < 0)
6041 goto out;
6042
6043 ctx.refs = &deleted_refs;
6044 ctx.sctx = sctx;
6045
6046 while (true) {
6047 struct extent_buffer *eb = path->nodes[0];
6048 int slot = path->slots[0];
6049
6050 if (slot >= btrfs_header_nritems(eb)) {
6051 ret = btrfs_next_leaf(sctx->parent_root, path);
6052 if (ret < 0)
6053 goto out;
6054 else if (ret > 0)
6055 break;
6056 continue;
6057 }
6058
6059 btrfs_item_key_to_cpu(eb, &key, slot);
6060 if (key.objectid != sctx->cur_ino)
6061 break;
6062 if (key.type != BTRFS_INODE_REF_KEY &&
6063 key.type != BTRFS_INODE_EXTREF_KEY)
6064 break;
6065
6066 ret = iterate_inode_ref(sctx->parent_root, path, &key, 1,
6067 record_parent_ref, &ctx);
6068 if (ret < 0)
6069 goto out;
6070
6071 path->slots[0]++;
6072 }
6073
6074 while (!list_empty(&deleted_refs)) {
6075 struct recorded_ref *ref;
6076
6077 ref = list_first_entry(&deleted_refs, struct recorded_ref, list);
6078 ret = send_unlink(sctx, ref->full_path);
6079 if (ret < 0)
6080 goto out;
6081 fs_path_free(ref->full_path);
6082 list_del(&ref->list);
6083 kfree(ref);
6084 }
6085 ret = 0;
6086out:
6087 btrfs_free_path(path);
6088 if (ret)
6089 __free_recorded_refs(&deleted_refs);
6090 return ret;
6091}
6092
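/*
 * Handle an inode item reported by the tree comparison: set up the per-inode
 * state (cur_ino, generation, size, mode, ...) and, depending on whether the
 * inode is new, deleted or reused with a new generation, initiate the
 * appropriate processing.
 */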
6093static int changed_inode(struct send_ctx *sctx,
6094 enum btrfs_compare_tree_result result)
6095{
6096 int ret = 0;
6097 struct btrfs_key *key = sctx->cmp_key;
6098 struct btrfs_inode_item *left_ii = NULL;
6099 struct btrfs_inode_item *right_ii = NULL;
6100 u64 left_gen = 0;
6101 u64 right_gen = 0;
6102
6103 sctx->cur_ino = key->objectid;
6104 sctx->cur_inode_new_gen = 0;
6105 sctx->cur_inode_last_extent = (u64)-1;
6106 sctx->cur_inode_next_write_offset = 0;
6107 sctx->ignore_cur_inode = false;
6108
6109 /*
6110 * Set send_progress to current inode. This will tell all get_cur_xxx
6111 * functions that the current inode's refs are not updated yet. Later,
6112 * when process_recorded_refs is finished, it is set to cur_ino + 1.
6113 */
6114 sctx->send_progress = sctx->cur_ino;
6115
6116 if (result == BTRFS_COMPARE_TREE_NEW ||
6117 result == BTRFS_COMPARE_TREE_CHANGED) {
6118 left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
6119 sctx->left_path->slots[0],
6120 struct btrfs_inode_item);
6121 left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
6122 left_ii);
6123 } else {
6124 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
6125 sctx->right_path->slots[0],
6126 struct btrfs_inode_item);
6127 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
6128 right_ii);
6129 }
6130 if (result == BTRFS_COMPARE_TREE_CHANGED) {
6131 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
6132 sctx->right_path->slots[0],
6133 struct btrfs_inode_item);
6134
6135 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
6136 right_ii);
6137
6138 /*
6139 * The cur_ino = root dir case is special here. We can't treat
6140 * the inode as deleted+reused because it would generate a
6141 * stream that tries to delete/mkdir the root dir.
6142 */
6143 if (left_gen != right_gen &&
6144 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
6145 sctx->cur_inode_new_gen = 1;
6146 }
6147
6148 /*
	 * Normally we do not find inodes with a link count of zero (orphans)
	 * because the most common case is to create a snapshot and use it
	 * for a send operation. However, other less common use cases involve
	 * sending a subvolume after turning it to RO mode just after deleting
	 * all hard links of a file while holding an open file descriptor
	 * against it, or turning a RO snapshot into RW mode, keeping an open
	 * file descriptor against a file, deleting it and then turning the
	 * snapshot back to RO mode before using it for a send operation. So
	 * if we find such cases, ignore the inode completely (all its items)
	 * if it's a new inode, or, if it's a changed inode, make sure all its
	 * previous paths (from the parent snapshot) are unlinked and all the
	 * other inode items are ignored.
6161 */
6162 if (result == BTRFS_COMPARE_TREE_NEW ||
6163 result == BTRFS_COMPARE_TREE_CHANGED) {
6164 u32 nlinks;
6165
6166 nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
6167 if (nlinks == 0) {
6168 sctx->ignore_cur_inode = true;
6169 if (result == BTRFS_COMPARE_TREE_CHANGED)
6170 ret = btrfs_unlink_all_paths(sctx);
6171 goto out;
6172 }
6173 }
6174
6175 if (result == BTRFS_COMPARE_TREE_NEW) {
6176 sctx->cur_inode_gen = left_gen;
6177 sctx->cur_inode_new = 1;
6178 sctx->cur_inode_deleted = 0;
6179 sctx->cur_inode_size = btrfs_inode_size(
6180 sctx->left_path->nodes[0], left_ii);
6181 sctx->cur_inode_mode = btrfs_inode_mode(
6182 sctx->left_path->nodes[0], left_ii);
6183 sctx->cur_inode_rdev = btrfs_inode_rdev(
6184 sctx->left_path->nodes[0], left_ii);
6185 if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
6186 ret = send_create_inode_if_needed(sctx);
6187 } else if (result == BTRFS_COMPARE_TREE_DELETED) {
6188 sctx->cur_inode_gen = right_gen;
6189 sctx->cur_inode_new = 0;
6190 sctx->cur_inode_deleted = 1;
6191 sctx->cur_inode_size = btrfs_inode_size(
6192 sctx->right_path->nodes[0], right_ii);
6193 sctx->cur_inode_mode = btrfs_inode_mode(
6194 sctx->right_path->nodes[0], right_ii);
6195 } else if (result == BTRFS_COMPARE_TREE_CHANGED) {
6196 /*
6197 * We need to do some special handling in case the inode was
		 * We need to do some special handling in case the inode was
		 * reported as changed with a changed generation number. This
		 * means that the original inode was deleted and a new inode
		 * reused the same inode number. So we have to treat the old
		 * inode as deleted and the new one as new.
6203 if (sctx->cur_inode_new_gen) {
6204 /*
6205 * First, process the inode as if it was deleted.
6206 */
6207 sctx->cur_inode_gen = right_gen;
6208 sctx->cur_inode_new = 0;
6209 sctx->cur_inode_deleted = 1;
6210 sctx->cur_inode_size = btrfs_inode_size(
6211 sctx->right_path->nodes[0], right_ii);
6212 sctx->cur_inode_mode = btrfs_inode_mode(
6213 sctx->right_path->nodes[0], right_ii);
6214 ret = process_all_refs(sctx,
6215 BTRFS_COMPARE_TREE_DELETED);
6216 if (ret < 0)
6217 goto out;
6218
6219 /*
6220 * Now process the inode as if it was new.
6221 */
6222 sctx->cur_inode_gen = left_gen;
6223 sctx->cur_inode_new = 1;
6224 sctx->cur_inode_deleted = 0;
6225 sctx->cur_inode_size = btrfs_inode_size(
6226 sctx->left_path->nodes[0], left_ii);
6227 sctx->cur_inode_mode = btrfs_inode_mode(
6228 sctx->left_path->nodes[0], left_ii);
6229 sctx->cur_inode_rdev = btrfs_inode_rdev(
6230 sctx->left_path->nodes[0], left_ii);
6231 ret = send_create_inode_if_needed(sctx);
6232 if (ret < 0)
6233 goto out;
6234
6235 ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
6236 if (ret < 0)
6237 goto out;
6238 /*
6239 * Advance send_progress now as we did not get into
6240 * process_recorded_refs_if_needed in the new_gen case.
6241 */
6242 sctx->send_progress = sctx->cur_ino + 1;
6243
6244 /*
6245 * Now process all extents and xattrs of the inode as if
6246 * they were all new.
6247 */
6248 ret = process_all_extents(sctx);
6249 if (ret < 0)
6250 goto out;
6251 ret = process_all_new_xattrs(sctx);
6252 if (ret < 0)
6253 goto out;
6254 } else {
6255 sctx->cur_inode_gen = left_gen;
6256 sctx->cur_inode_new = 0;
6257 sctx->cur_inode_new_gen = 0;
6258 sctx->cur_inode_deleted = 0;
6259 sctx->cur_inode_size = btrfs_inode_size(
6260 sctx->left_path->nodes[0], left_ii);
6261 sctx->cur_inode_mode = btrfs_inode_mode(
6262 sctx->left_path->nodes[0], left_ii);
6263 }
6264 }
6265
6266out:
6267 return ret;
6268}
6269
6270/*
6271 * We have to process new refs before deleted refs, but compare_trees gives us
6272 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
6273 * first and later process them in process_recorded_refs.
 * For the cur_inode_new_gen case, we skip recording completely because
 * changed_inode has already initiated processing of the refs. The reason is
 * that in this case compare_tree actually compares the refs of two different
 * inodes, so process_all_refs is used in changed_inode to handle all refs of
 * the right tree as deleted and all refs of the left tree as new.
6279 */
6280static int changed_ref(struct send_ctx *sctx,
6281 enum btrfs_compare_tree_result result)
6282{
6283 int ret = 0;
6284
6285 if (sctx->cur_ino != sctx->cmp_key->objectid) {
6286 inconsistent_snapshot_error(sctx, result, "reference");
6287 return -EIO;
6288 }
6289
6290 if (!sctx->cur_inode_new_gen &&
6291 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
6292 if (result == BTRFS_COMPARE_TREE_NEW)
6293 ret = record_new_ref(sctx);
6294 else if (result == BTRFS_COMPARE_TREE_DELETED)
6295 ret = record_deleted_ref(sctx);
6296 else if (result == BTRFS_COMPARE_TREE_CHANGED)
6297 ret = record_changed_ref(sctx);
6298 }
6299
6300 return ret;
6301}
6302
6303/*
 * Process new/deleted/changed xattrs. We skip processing in the
 * cur_inode_new_gen case because changed_inode has already initiated
 * processing of the xattrs. The reason is the same as in changed_ref.
6307 */
6308static int changed_xattr(struct send_ctx *sctx,
6309 enum btrfs_compare_tree_result result)
6310{
6311 int ret = 0;
6312
6313 if (sctx->cur_ino != sctx->cmp_key->objectid) {
6314 inconsistent_snapshot_error(sctx, result, "xattr");
6315 return -EIO;
6316 }
6317
6318 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6319 if (result == BTRFS_COMPARE_TREE_NEW)
6320 ret = process_new_xattr(sctx);
6321 else if (result == BTRFS_COMPARE_TREE_DELETED)
6322 ret = process_deleted_xattr(sctx);
6323 else if (result == BTRFS_COMPARE_TREE_CHANGED)
6324 ret = process_changed_xattr(sctx);
6325 }
6326
6327 return ret;
6328}
6329
6330/*
 * Process new/deleted/changed extents. We skip processing in the
 * cur_inode_new_gen case because changed_inode has already initiated
 * processing of the extents. The reason is the same as in changed_ref.
6334 */
6335static int changed_extent(struct send_ctx *sctx,
6336 enum btrfs_compare_tree_result result)
6337{
6338 int ret = 0;
6339
6340 /*
6341 * We have found an extent item that changed without the inode item
6342 * having changed. This can happen either after relocation (where the
6343 * disk_bytenr of an extent item is replaced at
6344 * relocation.c:replace_file_extents()) or after deduplication into a
6345 * file in both the parent and send snapshots (where an extent item can
6346 * get modified or replaced with a new one). Note that deduplication
6347 * updates the inode item, but it only changes the iversion (sequence
6348 * field in the inode item) of the inode, so if a file is deduplicated
6349 * the same number of times in both the parent and send snapshots, its
6350 * iversion becomes the same in both snapshots, and hence the inode item
6351 * is the same in both snapshots.
6352 */
6353 if (sctx->cur_ino != sctx->cmp_key->objectid)
6354 return 0;
6355
6356 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6357 if (result != BTRFS_COMPARE_TREE_DELETED)
6358 ret = process_extent(sctx, sctx->left_path,
6359 sctx->cmp_key);
6360 }
6361
6362 return ret;
6363}
6364
6365static int dir_changed(struct send_ctx *sctx, u64 dir)
6366{
6367 u64 orig_gen, new_gen;
6368 int ret;
6369
6370 ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
6371 NULL, NULL);
6372 if (ret)
6373 return ret;
6374
6375 ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
6376 NULL, NULL, NULL);
6377 if (ret)
6378 return ret;
6379
6380 return (orig_gen != new_gen) ? 1 : 0;
6381}
6382
6383static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
6384 struct btrfs_key *key)
6385{
6386 struct btrfs_inode_extref *extref;
6387 struct extent_buffer *leaf;
6388 u64 dirid = 0, last_dirid = 0;
6389 unsigned long ptr;
6390 u32 item_size;
6391 u32 cur_offset = 0;
6392 int ref_name_len;
6393 int ret = 0;
6394
6395 /* Easy case, just check this one dirid */
6396 if (key->type == BTRFS_INODE_REF_KEY) {
6397 dirid = key->offset;
6398
6399 ret = dir_changed(sctx, dirid);
6400 goto out;
6401 }
6402
6403 leaf = path->nodes[0];
6404 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
6405 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
6406 while (cur_offset < item_size) {
6407 extref = (struct btrfs_inode_extref *)(ptr +
6408 cur_offset);
6409 dirid = btrfs_inode_extref_parent(leaf, extref);
6410 ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
6411 cur_offset += ref_name_len + sizeof(*extref);
6412 if (dirid == last_dirid)
6413 continue;
6414 ret = dir_changed(sctx, dirid);
6415 if (ret)
6416 break;
6417 last_dirid = dirid;
6418 }
6419out:
6420 return ret;
6421}
6422
6423/*
6424 * Updates compare related fields in sctx and simply forwards to the actual
6425 * changed_xxx functions.
6426 */
6427static int changed_cb(struct btrfs_path *left_path,
6428 struct btrfs_path *right_path,
6429 struct btrfs_key *key,
6430 enum btrfs_compare_tree_result result,
6431 void *ctx)
6432{
6433 int ret = 0;
6434 struct send_ctx *sctx = ctx;
6435
6436 if (result == BTRFS_COMPARE_TREE_SAME) {
6437 if (key->type == BTRFS_INODE_REF_KEY ||
6438 key->type == BTRFS_INODE_EXTREF_KEY) {
6439 ret = compare_refs(sctx, left_path, key);
6440 if (!ret)
6441 return 0;
6442 if (ret < 0)
6443 return ret;
6444 } else if (key->type == BTRFS_EXTENT_DATA_KEY) {
6445 return maybe_send_hole(sctx, left_path, key);
6446 } else {
6447 return 0;
6448 }
6449 result = BTRFS_COMPARE_TREE_CHANGED;
6450 ret = 0;
6451 }
6452
6453 sctx->left_path = left_path;
6454 sctx->right_path = right_path;
6455 sctx->cmp_key = key;
6456
6457 ret = finish_inode_if_needed(sctx, 0);
6458 if (ret < 0)
6459 goto out;
6460
6461 /* Ignore non-FS objects */
6462 if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
6463 key->objectid == BTRFS_FREE_SPACE_OBJECTID)
6464 goto out;
6465
6466 if (key->type == BTRFS_INODE_ITEM_KEY) {
6467 ret = changed_inode(sctx, result);
6468 } else if (!sctx->ignore_cur_inode) {
6469 if (key->type == BTRFS_INODE_REF_KEY ||
6470 key->type == BTRFS_INODE_EXTREF_KEY)
6471 ret = changed_ref(sctx, result);
6472 else if (key->type == BTRFS_XATTR_ITEM_KEY)
6473 ret = changed_xattr(sctx, result);
6474 else if (key->type == BTRFS_EXTENT_DATA_KEY)
6475 ret = changed_extent(sctx, result);
6476 }
6477
6478out:
6479 return ret;
6480}
6481
6482static int full_send_tree(struct send_ctx *sctx)
6483{
6484 int ret;
6485 struct btrfs_root *send_root = sctx->send_root;
6486 struct btrfs_key key;
6487 struct btrfs_path *path;
6488 struct extent_buffer *eb;
6489 int slot;
6490
6491 path = alloc_path_for_send();
6492 if (!path)
6493 return -ENOMEM;
6494
6495 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
6496 key.type = BTRFS_INODE_ITEM_KEY;
6497 key.offset = 0;
6498
6499 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
6500 if (ret < 0)
6501 goto out;
6502 if (ret)
6503 goto out_finish;
6504
6505 while (1) {
6506 eb = path->nodes[0];
6507 slot = path->slots[0];
6508 btrfs_item_key_to_cpu(eb, &key, slot);
6509
6510 ret = changed_cb(path, NULL, &key,
6511 BTRFS_COMPARE_TREE_NEW, sctx);
6512 if (ret < 0)
6513 goto out;
6514
6515 ret = btrfs_next_item(send_root, path);
6516 if (ret < 0)
6517 goto out;
6518 if (ret) {
6519 ret = 0;
6520 break;
6521 }
6522 }
6523
6524out_finish:
6525 ret = finish_inode_if_needed(sctx, 1);
6526
6527out:
6528 btrfs_free_path(path);
6529 return ret;
6530}
6531
6532static int tree_move_down(struct btrfs_path *path, int *level)
6533{
6534 struct extent_buffer *eb;
6535
6536 BUG_ON(*level == 0);
6537 eb = btrfs_read_node_slot(path->nodes[*level], path->slots[*level]);
6538 if (IS_ERR(eb))
6539 return PTR_ERR(eb);
6540
6541 path->nodes[*level - 1] = eb;
6542 path->slots[*level - 1] = 0;
6543 (*level)--;
6544 return 0;
6545}
6546
6547static int tree_move_next_or_upnext(struct btrfs_path *path,
6548 int *level, int root_level)
6549{
6550 int ret = 0;
6551 int nritems;
6552 nritems = btrfs_header_nritems(path->nodes[*level]);
6553
6554 path->slots[*level]++;
6555
6556 while (path->slots[*level] >= nritems) {
6557 if (*level == root_level)
6558 return -1;
6559
6560 /* move upnext */
6561 path->slots[*level] = 0;
6562 free_extent_buffer(path->nodes[*level]);
6563 path->nodes[*level] = NULL;
6564 (*level)++;
6565 path->slots[*level]++;
6566
6567 nritems = btrfs_header_nritems(path->nodes[*level]);
6568 ret = 1;
6569 }
6570 return ret;
6571}
6572
6573/*
6574 * Returns 1 if it had to move up and next, 0 if it moved only next or down,
6575 * -1 when the end of the tree is reached, and a negative errno on a read error.
6576 */
6577static int tree_advance(struct btrfs_path *path,
6578 int *level, int root_level,
6579 int allow_down,
6580 struct btrfs_key *key)
6581{
6582 int ret;
6583
6584 if (*level == 0 || !allow_down) {
6585 ret = tree_move_next_or_upnext(path, level, root_level);
6586 } else {
6587 ret = tree_move_down(path, level);
6588 }
6589 if (ret >= 0) {
6590 if (*level == 0)
6591 btrfs_item_key_to_cpu(path->nodes[*level], key,
6592 path->slots[*level]);
6593 else
6594 btrfs_node_key_to_cpu(path->nodes[*level], key,
6595 path->slots[*level]);
6596 }
6597 return ret;
6598}
6599
6600static int tree_compare_item(struct btrfs_path *left_path,
6601 struct btrfs_path *right_path,
6602 char *tmp_buf)
6603{
6604 int cmp;
6605 int len1, len2;
6606 unsigned long off1, off2;
6607
6608 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
6609 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
6610 if (len1 != len2)
6611 return 1;
6612
6613 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
6614 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
6615 right_path->slots[0]);
6616
6617 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
6618
6619 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
6620 if (cmp)
6621 return 1;
6622 return 0;
6623}
6624
6625/*
6626 * This function compares two trees and calls the provided callback for
6627 * every changed/new/deleted item it finds.
6628 * If shared tree blocks are encountered, whole subtrees are skipped, making
6629 * the compare pretty fast on snapshotted subvolumes.
6630 *
6631 * This currently works on commit roots only. As commit roots are read only,
6632 * we don't do any locking. The commit roots are protected with transactions.
6633 * Transactions are ended and rejoined when a commit is tried in between.
6634 *
6635 * This function checks for modifications done to the trees while comparing.
6636 * If it detects a change, it aborts immediately.
6637 */
6638static int btrfs_compare_trees(struct btrfs_root *left_root,
6639 struct btrfs_root *right_root,
6640 btrfs_changed_cb_t changed_cb, void *ctx)
6641{
6642 struct btrfs_fs_info *fs_info = left_root->fs_info;
6643 int ret;
6644 int cmp;
6645 struct btrfs_path *left_path = NULL;
6646 struct btrfs_path *right_path = NULL;
6647 struct btrfs_key left_key;
6648 struct btrfs_key right_key;
6649 char *tmp_buf = NULL;
6650 int left_root_level;
6651 int right_root_level;
6652 int left_level;
6653 int right_level;
6654 int left_end_reached;
6655 int right_end_reached;
6656 int advance_left;
6657 int advance_right;
6658 u64 left_blockptr;
6659 u64 right_blockptr;
6660 u64 left_gen;
6661 u64 right_gen;
6662
6663 left_path = btrfs_alloc_path();
6664 if (!left_path) {
6665 ret = -ENOMEM;
6666 goto out;
6667 }
6668 right_path = btrfs_alloc_path();
6669 if (!right_path) {
6670 ret = -ENOMEM;
6671 goto out;
6672 }
6673
6674 tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
6675 if (!tmp_buf) {
6676 ret = -ENOMEM;
6677 goto out;
6678 }
6679
6680 left_path->search_commit_root = 1;
6681 left_path->skip_locking = 1;
6682 right_path->search_commit_root = 1;
6683 right_path->skip_locking = 1;
6684
6685 /*
6686 * Strategy: Go to the first items of both trees. Then do
6687 *
6688 * If both trees are at level 0
6689 * Compare keys of current items
6690 * If left < right treat left item as new, advance left tree
6691 * and repeat
6692 * If left > right treat right item as deleted, advance right tree
6693 * and repeat
6694 * If left == right do deep compare of items, treat as changed if
6695 * needed, advance both trees and repeat
6696 * If both trees are at the same level but not at level 0
6697 * Compare keys of current nodes/leaves
6698 * If left < right advance left tree and repeat
6699 * If left > right advance right tree and repeat
6700 * If left == right compare blockptrs of the next nodes/leaves
6701 * If they match advance both trees but stay at the same level
6702 * and repeat
6703 * If they don't match advance both trees while allowing to go
6704 * deeper and repeat
6705 * If tree levels are different
6706 * Advance the tree that needs it and repeat
6707 *
6708 * Advancing a tree means:
6709 * If we are at level 0, try to go to the next slot. If that's not
6710 * possible, go one level up and repeat. Stop when we find a level
6711 * where we can go to the next slot. We may at this point be on a
6712 * node or a leaf.
6713 *
6714 * If we are not at level 0 and not on shared tree blocks, go one
6715 * level deeper.
6716 *
6717 * If we are not at level 0 and on shared tree blocks, go one slot to
6718 * the right if possible or go up and right.
6719 */
6720
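/*
 * A small worked example of the level 0 part of the strategy above, with
 * keys simplified to single integers (purely illustrative, not taken from
 * a real tree dump):
 *
 *   left leaf keys:  1 3 4        right leaf keys: 1 2 4
 *
 *   1 == 1 -> deep compare the items, report SAME or CHANGED, advance both
 *   3 >  2 -> key 2 exists only on the right, report DELETED, advance right
 *   3 <  4 -> key 3 exists only on the left, report NEW, advance left
 *   4 == 4 -> deep compare, advance both, both ends reached, done
 */
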
6721 down_read(&fs_info->commit_root_sem);
6722 left_level = btrfs_header_level(left_root->commit_root);
6723 left_root_level = left_level;
6724 left_path->nodes[left_level] =
6725 btrfs_clone_extent_buffer(left_root->commit_root);
6726 if (!left_path->nodes[left_level]) {
6727 up_read(&fs_info->commit_root_sem);
6728 ret = -ENOMEM;
6729 goto out;
6730 }
6731
6732 right_level = btrfs_header_level(right_root->commit_root);
6733 right_root_level = right_level;
6734 right_path->nodes[right_level] =
6735 btrfs_clone_extent_buffer(right_root->commit_root);
6736 if (!right_path->nodes[right_level]) {
6737 up_read(&fs_info->commit_root_sem);
6738 ret = -ENOMEM;
6739 goto out;
6740 }
6741 up_read(&fs_info->commit_root_sem);
6742
6743 if (left_level == 0)
6744 btrfs_item_key_to_cpu(left_path->nodes[left_level],
6745 &left_key, left_path->slots[left_level]);
6746 else
6747 btrfs_node_key_to_cpu(left_path->nodes[left_level],
6748 &left_key, left_path->slots[left_level]);
6749 if (right_level == 0)
6750 btrfs_item_key_to_cpu(right_path->nodes[right_level],
6751 &right_key, right_path->slots[right_level]);
6752 else
6753 btrfs_node_key_to_cpu(right_path->nodes[right_level],
6754 &right_key, right_path->slots[right_level]);
6755
6756 left_end_reached = right_end_reached = 0;
6757 advance_left = advance_right = 0;
6758
6759 while (1) {
6760 cond_resched();
6761 if (advance_left && !left_end_reached) {
6762 ret = tree_advance(left_path, &left_level,
6763 left_root_level,
6764 advance_left != ADVANCE_ONLY_NEXT,
6765 &left_key);
6766 if (ret == -1)
6767 left_end_reached = ADVANCE;
6768 else if (ret < 0)
6769 goto out;
6770 advance_left = 0;
6771 }
6772 if (advance_right && !right_end_reached) {
6773 ret = tree_advance(right_path, &right_level,
6774 right_root_level,
6775 advance_right != ADVANCE_ONLY_NEXT,
6776 &right_key);
6777 if (ret == -1)
6778 right_end_reached = ADVANCE;
6779 else if (ret < 0)
6780 goto out;
6781 advance_right = 0;
6782 }
6783
6784 if (left_end_reached && right_end_reached) {
6785 ret = 0;
6786 goto out;
6787 } else if (left_end_reached) {
6788 if (right_level == 0) {
6789 ret = changed_cb(left_path, right_path,
6790 &right_key,
6791 BTRFS_COMPARE_TREE_DELETED,
6792 ctx);
6793 if (ret < 0)
6794 goto out;
6795 }
6796 advance_right = ADVANCE;
6797 continue;
6798 } else if (right_end_reached) {
6799 if (left_level == 0) {
6800 ret = changed_cb(left_path, right_path,
6801 &left_key,
6802 BTRFS_COMPARE_TREE_NEW,
6803 ctx);
6804 if (ret < 0)
6805 goto out;
6806 }
6807 advance_left = ADVANCE;
6808 continue;
6809 }
6810
6811 if (left_level == 0 && right_level == 0) {
6812 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
6813 if (cmp < 0) {
6814 ret = changed_cb(left_path, right_path,
6815 &left_key,
6816 BTRFS_COMPARE_TREE_NEW,
6817 ctx);
6818 if (ret < 0)
6819 goto out;
6820 advance_left = ADVANCE;
6821 } else if (cmp > 0) {
6822 ret = changed_cb(left_path, right_path,
6823 &right_key,
6824 BTRFS_COMPARE_TREE_DELETED,
6825 ctx);
6826 if (ret < 0)
6827 goto out;
6828 advance_right = ADVANCE;
6829 } else {
6830 enum btrfs_compare_tree_result result;
6831
6832 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
6833 ret = tree_compare_item(left_path, right_path,
6834 tmp_buf);
6835 if (ret)
6836 result = BTRFS_COMPARE_TREE_CHANGED;
6837 else
6838 result = BTRFS_COMPARE_TREE_SAME;
6839 ret = changed_cb(left_path, right_path,
6840 &left_key, result, ctx);
6841 if (ret < 0)
6842 goto out;
6843 advance_left = ADVANCE;
6844 advance_right = ADVANCE;
6845 }
6846 } else if (left_level == right_level) {
6847 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
6848 if (cmp < 0) {
6849 advance_left = ADVANCE;
6850 } else if (cmp > 0) {
6851 advance_right = ADVANCE;
6852 } else {
6853 left_blockptr = btrfs_node_blockptr(
6854 left_path->nodes[left_level],
6855 left_path->slots[left_level]);
6856 right_blockptr = btrfs_node_blockptr(
6857 right_path->nodes[right_level],
6858 right_path->slots[right_level]);
6859 left_gen = btrfs_node_ptr_generation(
6860 left_path->nodes[left_level],
6861 left_path->slots[left_level]);
6862 right_gen = btrfs_node_ptr_generation(
6863 right_path->nodes[right_level],
6864 right_path->slots[right_level]);
6865 if (left_blockptr == right_blockptr &&
6866 left_gen == right_gen) {
6867 /*
6868 * As we're on a shared block, don't
6869 * descend into it.
6870 */
6871 advance_left = ADVANCE_ONLY_NEXT;
6872 advance_right = ADVANCE_ONLY_NEXT;
6873 } else {
6874 advance_left = ADVANCE;
6875 advance_right = ADVANCE;
6876 }
6877 }
6878 } else if (left_level < right_level) {
6879 advance_right = ADVANCE;
6880 } else {
6881 advance_left = ADVANCE;
6882 }
6883 }
6884
6885out:
6886 btrfs_free_path(left_path);
6887 btrfs_free_path(right_path);
6888 kvfree(tmp_buf);
6889 return ret;
6890}
6891
6892static int send_subvol(struct send_ctx *sctx)
6893{
6894 int ret;
6895
6896 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
6897 ret = send_header(sctx);
6898 if (ret < 0)
6899 goto out;
6900 }
6901
6902 ret = send_subvol_begin(sctx);
6903 if (ret < 0)
6904 goto out;
6905
6906 if (sctx->parent_root) {
6907 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
6908 changed_cb, sctx);
6909 if (ret < 0)
6910 goto out;
6911 ret = finish_inode_if_needed(sctx, 1);
6912 if (ret < 0)
6913 goto out;
6914 } else {
6915 ret = full_send_tree(sctx);
6916 if (ret < 0)
6917 goto out;
6918 }
6919
6920out:
6921 free_recorded_refs(sctx);
6922 return ret;
6923}
6924
6925/*
6926 * If orphan cleanup did remove any orphans from a root, it means the tree
6927 * was modified and therefore the commit root is not the same as the current
6928 * root anymore. This is a problem, because send uses the commit root and
6929 * therefore can see inode items that don't exist in the current root anymore,
6930 * and for example make calls to btrfs_iget, which will do tree lookups based
6931 * on the current root and not on the commit root. Those lookups will fail,
6932 * returning a -ESTALE error, and making send fail with that error. So make
6933 * sure a send does not see any orphans we have just removed, and that it will
6934 * see the same inodes regardless of whether a transaction commit happened
6935 * before it started (meaning that the commit root will be the same as the
6936 * current root) or not.
6937 */
6938static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
6939{
6940 int i;
6941 struct btrfs_trans_handle *trans = NULL;
6942
6943again:
6944 if (sctx->parent_root &&
6945 sctx->parent_root->node != sctx->parent_root->commit_root)
6946 goto commit_trans;
6947
6948 for (i = 0; i < sctx->clone_roots_cnt; i++)
6949 if (sctx->clone_roots[i].root->node !=
6950 sctx->clone_roots[i].root->commit_root)
6951 goto commit_trans;
6952
6953 if (trans)
6954 return btrfs_end_transaction(trans);
6955
6956 return 0;
6957
6958commit_trans:
6959 /* Use any root, all fs roots will get their commit roots updated. */
6960 if (!trans) {
6961 trans = btrfs_join_transaction(sctx->send_root);
6962 if (IS_ERR(trans))
6963 return PTR_ERR(trans);
6964 goto again;
6965 }
6966
6967 return btrfs_commit_transaction(trans);
6968}
6969
6970/*
6971 * Make sure any existing delalloc is flushed for any root used by a send
6972 * operation so that we do not miss any data and we do not race with writeback
6973 * finishing and changing a tree while send is using the tree. This could
6974 * happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and
6975 * a send operation then uses the subvolume.
6976 * After flushing delalloc ensure_commit_roots_uptodate() must be called.
6977 */
6978static int flush_delalloc_roots(struct send_ctx *sctx)
6979{
6980 struct btrfs_root *root = sctx->parent_root;
6981 int ret;
6982 int i;
6983
6984 if (root) {
6985 ret = btrfs_start_delalloc_snapshot(root);
6986 if (ret)
6987 return ret;
6988 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
6989 }
6990
6991 for (i = 0; i < sctx->clone_roots_cnt; i++) {
6992 root = sctx->clone_roots[i].root;
6993 ret = btrfs_start_delalloc_snapshot(root);
6994 if (ret)
6995 return ret;
6996 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
6997 }
6998
6999 return 0;
7000}
7001
7002static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
7003{
7004 spin_lock(&root->root_item_lock);
7005 root->send_in_progress--;
7006 /*
7007 * Not much left to do, we don't know why it's unbalanced and
7008 * can't blindly reset it to 0.
7009 */
7010 if (root->send_in_progress < 0)
7011 btrfs_err(root->fs_info,
7012 "send_in_progress unbalanced %d root %llu",
7013 root->send_in_progress, root->root_key.objectid);
7014 spin_unlock(&root->root_item_lock);
7015}
7016
7017static void dedupe_in_progress_warn(const struct btrfs_root *root)
7018{
7019 btrfs_warn_rl(root->fs_info,
7020"cannot use root %llu for send while deduplications on it are in progress (%d in progress)",
7021 root->root_key.objectid, root->dedupe_in_progress);
7022}
7023
7024long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
7025{
7026 int ret = 0;
7027 struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
7028 struct btrfs_fs_info *fs_info = send_root->fs_info;
7029 struct btrfs_root *clone_root;
7030 struct btrfs_key key;
7031 struct send_ctx *sctx = NULL;
7032 u32 i;
7033 u64 *clone_sources_tmp = NULL;
7034 int clone_sources_to_rollback = 0;
7035 unsigned alloc_size;
7036 int sort_clone_roots = 0;
7037 int index;
7038
7039 if (!capable(CAP_SYS_ADMIN))
7040 return -EPERM;
7041
7042 /*
7043 * The subvolume must remain read-only during send, protect against
7044 * making it RW. This also protects against deletion.
7045 */
7046 spin_lock(&send_root->root_item_lock);
7047 if (btrfs_root_readonly(send_root) && send_root->dedupe_in_progress) {
7048 dedupe_in_progress_warn(send_root);
7049 spin_unlock(&send_root->root_item_lock);
7050 return -EAGAIN;
7051 }
7052 send_root->send_in_progress++;
7053 spin_unlock(&send_root->root_item_lock);
7054
7055 /*
7056 * This is done when we look up the root; it should already be complete
7057 * by the time we get here.
7058 */
7059 WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);
7060
7061 /*
7062 * Userspace tools do the checks and warn the user if it's
7063 * not RO.
7064 */
7065 if (!btrfs_root_readonly(send_root)) {
7066 ret = -EPERM;
7067 goto out;
7068 }
7069
7070 /*
7071 * Check that we don't overflow at later allocations: we request
7072 * clone_sources_count + 1 items, and compare to unsigned long inside
7073 * access_ok.
7074 */
7075 if (arg->clone_sources_count >
7076 ULONG_MAX / sizeof(struct clone_root) - 1) {
7077 ret = -EINVAL;
7078 goto out;
7079 }
7080
7081 if (!access_ok(arg->clone_sources,
7082 sizeof(*arg->clone_sources) *
7083 arg->clone_sources_count)) {
7084 ret = -EFAULT;
7085 goto out;
7086 }
7087
7088 if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
7089 ret = -EINVAL;
7090 goto out;
7091 }
7092
7093 sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
7094 if (!sctx) {
7095 ret = -ENOMEM;
7096 goto out;
7097 }
7098
7099 INIT_LIST_HEAD(&sctx->new_refs);
7100 INIT_LIST_HEAD(&sctx->deleted_refs);
7101 INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
7102 INIT_LIST_HEAD(&sctx->name_cache_list);
7103
7104 sctx->flags = arg->flags;
7105
7106 sctx->send_filp = fget(arg->send_fd);
7107 if (!sctx->send_filp) {
7108 ret = -EBADF;
7109 goto out;
7110 }
7111
7112 sctx->send_root = send_root;
7113 /*
7114 * Unlikely but possible: if the subvolume is marked for deletion but
7115 * is slow to remove the directory entry, send can still be started.
7116 */
7117 if (btrfs_root_dead(sctx->send_root)) {
7118 ret = -EPERM;
7119 goto out;
7120 }
7121
7122 sctx->clone_roots_cnt = arg->clone_sources_count;
7123
7124 sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
7125 sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
7126 if (!sctx->send_buf) {
7127 ret = -ENOMEM;
7128 goto out;
7129 }
7130
7131 sctx->read_buf = kvmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL);
7132 if (!sctx->read_buf) {
7133 ret = -ENOMEM;
7134 goto out;
7135 }
7136
7137 sctx->pending_dir_moves = RB_ROOT;
7138 sctx->waiting_dir_moves = RB_ROOT;
7139 sctx->orphan_dirs = RB_ROOT;
7140
7141 alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1);
7142
7143 sctx->clone_roots = kzalloc(alloc_size, GFP_KERNEL);
7144 if (!sctx->clone_roots) {
7145 ret = -ENOMEM;
7146 goto out;
7147 }
7148
7149 alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources);
7150
7151 if (arg->clone_sources_count) {
7152 clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
7153 if (!clone_sources_tmp) {
7154 ret = -ENOMEM;
7155 goto out;
7156 }
7157
7158 ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
7159 alloc_size);
7160 if (ret) {
7161 ret = -EFAULT;
7162 goto out;
7163 }
7164
7165 for (i = 0; i < arg->clone_sources_count; i++) {
7166 key.objectid = clone_sources_tmp[i];
7167 key.type = BTRFS_ROOT_ITEM_KEY;
7168 key.offset = (u64)-1;
7169
7170 index = srcu_read_lock(&fs_info->subvol_srcu);
7171
7172 clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
7173 if (IS_ERR(clone_root)) {
7174 srcu_read_unlock(&fs_info->subvol_srcu, index);
7175 ret = PTR_ERR(clone_root);
7176 goto out;
7177 }
7178 spin_lock(&clone_root->root_item_lock);
7179 if (!btrfs_root_readonly(clone_root) ||
7180 btrfs_root_dead(clone_root)) {
7181 spin_unlock(&clone_root->root_item_lock);
7182 srcu_read_unlock(&fs_info->subvol_srcu, index);
7183 ret = -EPERM;
7184 goto out;
7185 }
7186 if (clone_root->dedupe_in_progress) {
7187 dedupe_in_progress_warn(clone_root);
7188 spin_unlock(&clone_root->root_item_lock);
7189 srcu_read_unlock(&fs_info->subvol_srcu, index);
7190 ret = -EAGAIN;
7191 goto out;
7192 }
7193 clone_root->send_in_progress++;
7194 spin_unlock(&clone_root->root_item_lock);
7195 srcu_read_unlock(&fs_info->subvol_srcu, index);
7196
7197 sctx->clone_roots[i].root = clone_root;
7198 clone_sources_to_rollback = i + 1;
7199 }
7200 kvfree(clone_sources_tmp);
7201 clone_sources_tmp = NULL;
7202 }
7203
7204 if (arg->parent_root) {
7205 key.objectid = arg->parent_root;
7206 key.type = BTRFS_ROOT_ITEM_KEY;
7207 key.offset = (u64)-1;
7208
7209 index = srcu_read_lock(&fs_info->subvol_srcu);
7210
7211 sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
7212 if (IS_ERR(sctx->parent_root)) {
7213 srcu_read_unlock(&fs_info->subvol_srcu, index);
7214 ret = PTR_ERR(sctx->parent_root);
7215 goto out;
7216 }
7217
7218 spin_lock(&sctx->parent_root->root_item_lock);
7219 sctx->parent_root->send_in_progress++;
7220 if (!btrfs_root_readonly(sctx->parent_root) ||
7221 btrfs_root_dead(sctx->parent_root)) {
7222 spin_unlock(&sctx->parent_root->root_item_lock);
7223 srcu_read_unlock(&fs_info->subvol_srcu, index);
7224 ret = -EPERM;
7225 goto out;
7226 }
7227 if (sctx->parent_root->dedupe_in_progress) {
7228 dedupe_in_progress_warn(sctx->parent_root);
7229 spin_unlock(&sctx->parent_root->root_item_lock);
7230 srcu_read_unlock(&fs_info->subvol_srcu, index);
7231 ret = -EAGAIN;
7232 goto out;
7233 }
7234 spin_unlock(&sctx->parent_root->root_item_lock);
7235
7236 srcu_read_unlock(&fs_info->subvol_srcu, index);
7237 }
7238
7239 /*
7240 * Clones from send_root are allowed, but only if the clone source
7241 * is behind the current send position. This is checked while searching
7242 * for possible clone sources.
7243 */
7244 sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;
7245
7246 /* We do a bsearch later */
7247 sort(sctx->clone_roots, sctx->clone_roots_cnt,
7248 sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
7249 NULL);
7250 sort_clone_roots = 1;
7251
7252 ret = flush_delalloc_roots(sctx);
7253 if (ret)
7254 goto out;
7255
7256 ret = ensure_commit_roots_uptodate(sctx);
7257 if (ret)
7258 goto out;
7259
7260 mutex_lock(&fs_info->balance_mutex);
7261 if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
7262 mutex_unlock(&fs_info->balance_mutex);
7263 btrfs_warn_rl(fs_info,
7264 "cannot run send because a balance operation is in progress");
7265 ret = -EAGAIN;
7266 goto out;
7267 }
7268 fs_info->send_in_progress++;
7269 mutex_unlock(&fs_info->balance_mutex);
7270
7271 current->journal_info = BTRFS_SEND_TRANS_STUB;
7272 ret = send_subvol(sctx);
7273 current->journal_info = NULL;
7274 mutex_lock(&fs_info->balance_mutex);
7275 fs_info->send_in_progress--;
7276 mutex_unlock(&fs_info->balance_mutex);
7277 if (ret < 0)
7278 goto out;
7279
7280 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
7281 ret = begin_cmd(sctx, BTRFS_SEND_C_END);
7282 if (ret < 0)
7283 goto out;
7284 ret = send_cmd(sctx);
7285 if (ret < 0)
7286 goto out;
7287 }
7288
7289out:
7290 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
7291 while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
7292 struct rb_node *n;
7293 struct pending_dir_move *pm;
7294
7295 n = rb_first(&sctx->pending_dir_moves);
7296 pm = rb_entry(n, struct pending_dir_move, node);
7297 while (!list_empty(&pm->list)) {
7298 struct pending_dir_move *pm2;
7299
7300 pm2 = list_first_entry(&pm->list,
7301 struct pending_dir_move, list);
7302 free_pending_move(sctx, pm2);
7303 }
7304 free_pending_move(sctx, pm);
7305 }
7306
7307 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
7308 while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
7309 struct rb_node *n;
7310 struct waiting_dir_move *dm;
7311
7312 n = rb_first(&sctx->waiting_dir_moves);
7313 dm = rb_entry(n, struct waiting_dir_move, node);
7314 rb_erase(&dm->node, &sctx->waiting_dir_moves);
7315 kfree(dm);
7316 }
7317
7318 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
7319 while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
7320 struct rb_node *n;
7321 struct orphan_dir_info *odi;
7322
7323 n = rb_first(&sctx->orphan_dirs);
7324 odi = rb_entry(n, struct orphan_dir_info, node);
7325 free_orphan_dir_info(sctx, odi);
7326 }
7327
7328 if (sort_clone_roots) {
7329 for (i = 0; i < sctx->clone_roots_cnt; i++)
7330 btrfs_root_dec_send_in_progress(
7331 sctx->clone_roots[i].root);
7332 } else {
7333 for (i = 0; sctx && i < clone_sources_to_rollback; i++)
7334 btrfs_root_dec_send_in_progress(
7335 sctx->clone_roots[i].root);
7336
7337 btrfs_root_dec_send_in_progress(send_root);
7338 }
7339 if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
7340 btrfs_root_dec_send_in_progress(sctx->parent_root);
7341
7342 kvfree(clone_sources_tmp);
7343
7344 if (sctx) {
7345 if (sctx->send_filp)
7346 fput(sctx->send_filp);
7347
7348 kvfree(sctx->clone_roots);
7349 kvfree(sctx->send_buf);
7350 kvfree(sctx->read_buf);
7351
7352 name_cache_free(sctx);
7353
7354 kfree(sctx);
7355 }
7356
7357 return ret;
7358}
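
/*
 * Example of how the ioctl above is typically driven from user space. This is
 * an illustrative sketch only: subvol_fd (an fd open on the read-only
 * subvolume to send), stream_fd, parent_subvol_id, clone_ids and nr_clone_ids
 * are placeholders, error handling is trimmed, and BTRFS_IOC_SEND plus
 * struct btrfs_ioctl_send_args come from the uapi header linux/btrfs.h (see
 * btrfs-progs for the real implementation):
 *
 *	struct btrfs_ioctl_send_args args = {0};
 *
 *	args.send_fd = stream_fd;            // the send stream is written here
 *	args.parent_root = parent_subvol_id; // 0 for a full (non-incremental) send
 *	args.clone_sources = clone_ids;      // subvolume ids usable as clone sources
 *	args.clone_sources_count = nr_clone_ids;
 *	args.flags = 0;                      // e.g. BTRFS_SEND_FLAG_OMIT_STREAM_HEADER
 *
 *	if (ioctl(subvol_fd, BTRFS_IOC_SEND, &args) < 0)
 *		perror("BTRFS_IOC_SEND");
 */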
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2012 Alexander Block. All rights reserved.
4 */
5
6#include <linux/bsearch.h>
7#include <linux/fs.h>
8#include <linux/file.h>
9#include <linux/sort.h>
10#include <linux/mount.h>
11#include <linux/xattr.h>
12#include <linux/posix_acl_xattr.h>
13#include <linux/radix-tree.h>
14#include <linux/vmalloc.h>
15#include <linux/string.h>
16#include <linux/compat.h>
17#include <linux/crc32c.h>
18
19#include "send.h"
20#include "backref.h"
21#include "locking.h"
22#include "disk-io.h"
23#include "btrfs_inode.h"
24#include "transaction.h"
25#include "compression.h"
26#include "xattr.h"
27
28/*
29 * Maximum number of references an extent can have in order for us to attempt to
30 * issue clone operations instead of write operations. This currently exists to
31 * avoid hitting limitations of the backreference walking code (taking a lot of
32 * time and using too much memory for extents with large number of references).
33 */
34#define SEND_MAX_EXTENT_REFS 64
35
36/*
37 * A fs_path is a helper to dynamically build path names with unknown size.
38 * It reallocates the internal buffer on demand.
39 * It allows fast adding of path elements on the right side (normal path) and
40 * fast adding to the left side (reversed path). A reversed path can also be
41 * unreversed if needed.
42 */
43struct fs_path {
44 union {
45 struct {
46 char *start;
47 char *end;
48
49 char *buf;
50 unsigned short buf_len:15;
51 unsigned short reversed:1;
52 char inline_buf[];
53 };
54 /*
55 * Average path length does not exceed 200 bytes, we'll have
56 * better packing in the slab and higher chance to satisfy
57 * a allocation later during send.
58 */
59 char pad[256];
60 };
61};
62#define FS_PATH_INLINE_SIZE \
63 (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
64
65
66/* reused for each extent */
67struct clone_root {
68 struct btrfs_root *root;
69 u64 ino;
70 u64 offset;
71
72 u64 found_refs;
73};
74
75#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
76#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
77
78struct send_ctx {
79 struct file *send_filp;
80 loff_t send_off;
81 char *send_buf;
82 u32 send_size;
83 u32 send_max_size;
84 u64 total_send_size;
85 u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
86 u64 flags; /* 'flags' member of btrfs_ioctl_send_args is u64 */
87
88 struct btrfs_root *send_root;
89 struct btrfs_root *parent_root;
90 struct clone_root *clone_roots;
91 int clone_roots_cnt;
92
93 /* current state of the compare_tree call */
94 struct btrfs_path *left_path;
95 struct btrfs_path *right_path;
96 struct btrfs_key *cmp_key;
97
98 /*
99 * infos of the currently processed inode. In case of deleted inodes,
100 * these are the values from the deleted inode.
101 */
102 u64 cur_ino;
103 u64 cur_inode_gen;
104 int cur_inode_new;
105 int cur_inode_new_gen;
106 int cur_inode_deleted;
107 u64 cur_inode_size;
108 u64 cur_inode_mode;
109 u64 cur_inode_rdev;
110 u64 cur_inode_last_extent;
111 u64 cur_inode_next_write_offset;
112 bool ignore_cur_inode;
113
114 u64 send_progress;
115
116 struct list_head new_refs;
117 struct list_head deleted_refs;
118
119 struct radix_tree_root name_cache;
120 struct list_head name_cache_list;
121 int name_cache_size;
122
123 struct file_ra_state ra;
124
125 /*
126 * We process inodes by their increasing order, so if before an
127 * incremental send we reverse the parent/child relationship of
128 * directories such that a directory with a lower inode number was
129 * the parent of a directory with a higher inode number, and the one
130 * becoming the new parent got renamed too, we can't rename/move the
131 * directory with lower inode number when we finish processing it - we
132 * must process the directory with higher inode number first, then
133 * rename/move it and then rename/move the directory with lower inode
134 * number. Example follows.
135 *
136 * Tree state when the first send was performed:
137 *
138 * .
139 * |-- a (ino 257)
140 * |-- b (ino 258)
141 * |
142 * |
143 * |-- c (ino 259)
144 * | |-- d (ino 260)
145 * |
146 * |-- c2 (ino 261)
147 *
148 * Tree state when the second (incremental) send is performed:
149 *
150 * .
151 * |-- a (ino 257)
152 * |-- b (ino 258)
153 * |-- c2 (ino 261)
154 * |-- d2 (ino 260)
155 * |-- cc (ino 259)
156 *
157 * The sequence of steps that lead to the second state was:
158 *
159 * mv /a/b/c/d /a/b/c2/d2
160 * mv /a/b/c /a/b/c2/d2/cc
161 *
162 * "c" has lower inode number, but we can't move it (2nd mv operation)
163 * before we move "d", which has higher inode number.
164 *
165 * So we just memorize which move/rename operations must be performed
166 * later when their respective parent is processed and moved/renamed.
167 */
168
169 /* Indexed by parent directory inode number. */
170 struct rb_root pending_dir_moves;
171
172 /*
173 * Reverse index, indexed by the inode number of a directory that
174 * is waiting for the move/rename of its immediate parent before its
175 * own move/rename can be performed.
176 */
177 struct rb_root waiting_dir_moves;
178
179 /*
180 * A directory that is going to be rm'ed might have a child directory
181 * which is in the pending directory moves index above. In this case,
182 * the directory can only be removed after the move/rename of its child
183 * is performed. Example:
184 *
185 * Parent snapshot:
186 *
187 * . (ino 256)
188 * |-- a/ (ino 257)
189 * |-- b/ (ino 258)
190 * |-- c/ (ino 259)
191 * | |-- x/ (ino 260)
192 * |
193 * |-- y/ (ino 261)
194 *
195 * Send snapshot:
196 *
197 * . (ino 256)
198 * |-- a/ (ino 257)
199 * |-- b/ (ino 258)
200 * |-- YY/ (ino 261)
201 * |-- x/ (ino 260)
202 *
203 * Sequence of steps that lead to the send snapshot:
204 * rm -f /a/b/c/foo.txt
205 * mv /a/b/y /a/b/YY
206 * mv /a/b/c/x /a/b/YY
207 * rmdir /a/b/c
208 *
209 * When the child is processed, its move/rename is delayed until its
210 * parent is processed (as explained above), but all other operations
211 * like update utimes, chown, chgrp, etc, are performed and the paths
212 * that it uses for those operations must use the orphanized name of
213 * its parent (the directory we're going to rm later), so we need to
214 * memorize that name.
215 *
216 * Indexed by the inode number of the directory to be deleted.
217 */
218 struct rb_root orphan_dirs;
219};
220
221struct pending_dir_move {
222 struct rb_node node;
223 struct list_head list;
224 u64 parent_ino;
225 u64 ino;
226 u64 gen;
227 struct list_head update_refs;
228};
229
230struct waiting_dir_move {
231 struct rb_node node;
232 u64 ino;
233 /*
234 * There might be some directory that could not be removed because it
235 * was waiting for this directory inode to be moved first. Therefore
236 * after this directory is moved, we can try to rmdir the ino rmdir_ino.
237 */
238 u64 rmdir_ino;
239 u64 rmdir_gen;
240 bool orphanized;
241};
242
243struct orphan_dir_info {
244 struct rb_node node;
245 u64 ino;
246 u64 gen;
247 u64 last_dir_index_offset;
248};
249
250struct name_cache_entry {
251 struct list_head list;
252 /*
253 * radix_tree has only 32bit entries but we need to handle 64bit inums.
254 * We use the lower 32bit of the 64bit inum to store it in the tree. If
255 * more then one inum would fall into the same entry, we use radix_list
256 * to store the additional entries. radix_list is also used to store
257 * entries where two entries have the same inum but different
258 * generations.
259 */
260 struct list_head radix_list;
261 u64 ino;
262 u64 gen;
263 u64 parent_ino;
264 u64 parent_gen;
265 int ret;
266 int need_later_update;
267 int name_len;
268 char name[];
269};
270
271#define ADVANCE 1
272#define ADVANCE_ONLY_NEXT -1
273
274enum btrfs_compare_tree_result {
275 BTRFS_COMPARE_TREE_NEW,
276 BTRFS_COMPARE_TREE_DELETED,
277 BTRFS_COMPARE_TREE_CHANGED,
278 BTRFS_COMPARE_TREE_SAME,
279};
280
281__cold
282static void inconsistent_snapshot_error(struct send_ctx *sctx,
283 enum btrfs_compare_tree_result result,
284 const char *what)
285{
286 const char *result_string;
287
288 switch (result) {
289 case BTRFS_COMPARE_TREE_NEW:
290 result_string = "new";
291 break;
292 case BTRFS_COMPARE_TREE_DELETED:
293 result_string = "deleted";
294 break;
295 case BTRFS_COMPARE_TREE_CHANGED:
296 result_string = "updated";
297 break;
298 case BTRFS_COMPARE_TREE_SAME:
299 ASSERT(0);
300 result_string = "unchanged";
301 break;
302 default:
303 ASSERT(0);
304 result_string = "unexpected";
305 }
306
307 btrfs_err(sctx->send_root->fs_info,
308 "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
309 result_string, what, sctx->cmp_key->objectid,
310 sctx->send_root->root_key.objectid,
311 (sctx->parent_root ?
312 sctx->parent_root->root_key.objectid : 0));
313}
314
315static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
316
317static struct waiting_dir_move *
318get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
319
320static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen);
321
322static int need_send_hole(struct send_ctx *sctx)
323{
324 return (sctx->parent_root && !sctx->cur_inode_new &&
325 !sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
326 S_ISREG(sctx->cur_inode_mode));
327}
328
329static void fs_path_reset(struct fs_path *p)
330{
331 if (p->reversed) {
332 p->start = p->buf + p->buf_len - 1;
333 p->end = p->start;
334 *p->start = 0;
335 } else {
336 p->start = p->buf;
337 p->end = p->start;
338 *p->start = 0;
339 }
340}
341
342static struct fs_path *fs_path_alloc(void)
343{
344 struct fs_path *p;
345
346 p = kmalloc(sizeof(*p), GFP_KERNEL);
347 if (!p)
348 return NULL;
349 p->reversed = 0;
350 p->buf = p->inline_buf;
351 p->buf_len = FS_PATH_INLINE_SIZE;
352 fs_path_reset(p);
353 return p;
354}
355
356static struct fs_path *fs_path_alloc_reversed(void)
357{
358 struct fs_path *p;
359
360 p = fs_path_alloc();
361 if (!p)
362 return NULL;
363 p->reversed = 1;
364 fs_path_reset(p);
365 return p;
366}
367
368static void fs_path_free(struct fs_path *p)
369{
370 if (!p)
371 return;
372 if (p->buf != p->inline_buf)
373 kfree(p->buf);
374 kfree(p);
375}
376
377static int fs_path_len(struct fs_path *p)
378{
379 return p->end - p->start;
380}
381
382static int fs_path_ensure_buf(struct fs_path *p, int len)
383{
384 char *tmp_buf;
385 int path_len;
386 int old_buf_len;
387
388 len++;
389
390 if (p->buf_len >= len)
391 return 0;
392
393 if (len > PATH_MAX) {
394 WARN_ON(1);
395 return -ENOMEM;
396 }
397
398 path_len = p->end - p->start;
399 old_buf_len = p->buf_len;
400
401 /*
402 * First time the inline_buf does not suffice
403 */
404 if (p->buf == p->inline_buf) {
405 tmp_buf = kmalloc(len, GFP_KERNEL);
406 if (tmp_buf)
407 memcpy(tmp_buf, p->buf, old_buf_len);
408 } else {
409 tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
410 }
411 if (!tmp_buf)
412 return -ENOMEM;
413 p->buf = tmp_buf;
414 /*
415 * The real size of the buffer is bigger, this will let the fast path
416 * happen most of the time
417 */
418 p->buf_len = ksize(p->buf);
419
420 if (p->reversed) {
421 tmp_buf = p->buf + old_buf_len - path_len - 1;
422 p->end = p->buf + p->buf_len - 1;
423 p->start = p->end - path_len;
424 memmove(p->start, tmp_buf, path_len + 1);
425 } else {
426 p->start = p->buf;
427 p->end = p->start + path_len;
428 }
429 return 0;
430}
431
432static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
433 char **prepared)
434{
435 int ret;
436 int new_len;
437
438 new_len = p->end - p->start + name_len;
439 if (p->start != p->end)
440 new_len++;
441 ret = fs_path_ensure_buf(p, new_len);
442 if (ret < 0)
443 goto out;
444
445 if (p->reversed) {
446 if (p->start != p->end)
447 *--p->start = '/';
448 p->start -= name_len;
449 *prepared = p->start;
450 } else {
451 if (p->start != p->end)
452 *p->end++ = '/';
453 *prepared = p->end;
454 p->end += name_len;
455 *p->end = 0;
456 }
457
458out:
459 return ret;
460}
461
462static int fs_path_add(struct fs_path *p, const char *name, int name_len)
463{
464 int ret;
465 char *prepared;
466
467 ret = fs_path_prepare_for_add(p, name_len, &prepared);
468 if (ret < 0)
469 goto out;
470 memcpy(prepared, name, name_len);
471
472out:
473 return ret;
474}
475
476static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
477{
478 int ret;
479 char *prepared;
480
481 ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
482 if (ret < 0)
483 goto out;
484 memcpy(prepared, p2->start, p2->end - p2->start);
485
486out:
487 return ret;
488}
489
490static int fs_path_add_from_extent_buffer(struct fs_path *p,
491 struct extent_buffer *eb,
492 unsigned long off, int len)
493{
494 int ret;
495 char *prepared;
496
497 ret = fs_path_prepare_for_add(p, len, &prepared);
498 if (ret < 0)
499 goto out;
500
501 read_extent_buffer(eb, prepared, off, len);
502
503out:
504 return ret;
505}
506
507static int fs_path_copy(struct fs_path *p, struct fs_path *from)
508{
509 int ret;
510
511 p->reversed = from->reversed;
512 fs_path_reset(p);
513
514 ret = fs_path_add_path(p, from);
515
516 return ret;
517}
518
519
520static void fs_path_unreverse(struct fs_path *p)
521{
522 char *tmp;
523 int len;
524
525 if (!p->reversed)
526 return;
527
528 tmp = p->start;
529 len = p->end - p->start;
530 p->start = p->buf;
531 p->end = p->start + len;
532 memmove(p->start, tmp, len + 1);
533 p->reversed = 0;
534}
535
536static struct btrfs_path *alloc_path_for_send(void)
537{
538 struct btrfs_path *path;
539
540 path = btrfs_alloc_path();
541 if (!path)
542 return NULL;
543 path->search_commit_root = 1;
544 path->skip_locking = 1;
545 path->need_commit_sem = 1;
546 return path;
547}
548
549static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
550{
551 int ret;
552 u32 pos = 0;
553
554 while (pos < len) {
555 ret = kernel_write(filp, buf + pos, len - pos, off);
556 /* TODO handle that correctly */
557 /*if (ret == -ERESTARTSYS) {
558 continue;
559 }*/
560 if (ret < 0)
561 return ret;
562 if (ret == 0) {
563 return -EIO;
564 }
565 pos += ret;
566 }
567
568 return 0;
569}
570
571static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
572{
573 struct btrfs_tlv_header *hdr;
574 int total_len = sizeof(*hdr) + len;
575 int left = sctx->send_max_size - sctx->send_size;
576
577 if (unlikely(left < total_len))
578 return -EOVERFLOW;
579
580 hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
581 put_unaligned_le16(attr, &hdr->tlv_type);
582 put_unaligned_le16(len, &hdr->tlv_len);
583 memcpy(hdr + 1, data, len);
584 sctx->send_size += total_len;
585
586 return 0;
587}
588
589#define TLV_PUT_DEFINE_INT(bits) \
590 static int tlv_put_u##bits(struct send_ctx *sctx, \
591 u##bits attr, u##bits value) \
592 { \
593 __le##bits __tmp = cpu_to_le##bits(value); \
594 return tlv_put(sctx, attr, &__tmp, sizeof(__tmp)); \
595 }
596
597TLV_PUT_DEFINE_INT(64)
598
599static int tlv_put_string(struct send_ctx *sctx, u16 attr,
600 const char *str, int len)
601{
602 if (len == -1)
603 len = strlen(str);
604 return tlv_put(sctx, attr, str, len);
605}
606
607static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
608 const u8 *uuid)
609{
610 return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
611}
612
613static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
614 struct extent_buffer *eb,
615 struct btrfs_timespec *ts)
616{
617 struct btrfs_timespec bts;
618 read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
619 return tlv_put(sctx, attr, &bts, sizeof(bts));
620}
621
622
623#define TLV_PUT(sctx, attrtype, data, attrlen) \
624 do { \
625 ret = tlv_put(sctx, attrtype, data, attrlen); \
626 if (ret < 0) \
627 goto tlv_put_failure; \
628 } while (0)
629
630#define TLV_PUT_INT(sctx, attrtype, bits, value) \
631 do { \
632 ret = tlv_put_u##bits(sctx, attrtype, value); \
633 if (ret < 0) \
634 goto tlv_put_failure; \
635 } while (0)
636
637#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
638#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
639#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
640#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
641#define TLV_PUT_STRING(sctx, attrtype, str, len) \
642 do { \
643 ret = tlv_put_string(sctx, attrtype, str, len); \
644 if (ret < 0) \
645 goto tlv_put_failure; \
646 } while (0)
647#define TLV_PUT_PATH(sctx, attrtype, p) \
648 do { \
649 ret = tlv_put_string(sctx, attrtype, p->start, \
650 p->end - p->start); \
651 if (ret < 0) \
652 goto tlv_put_failure; \
653 } while(0)
654#define TLV_PUT_UUID(sctx, attrtype, uuid) \
655 do { \
656 ret = tlv_put_uuid(sctx, attrtype, uuid); \
657 if (ret < 0) \
658 goto tlv_put_failure; \
659 } while (0)
660#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
661 do { \
662 ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
663 if (ret < 0) \
664 goto tlv_put_failure; \
665 } while (0)
666
667static int send_header(struct send_ctx *sctx)
668{
669 struct btrfs_stream_header hdr;
670
671 strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
672 hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);
673
674 return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
675 &sctx->send_off);
676}
677
678/*
679 * For each command/item we want to send to userspace, we call this function.
680 */
681static int begin_cmd(struct send_ctx *sctx, int cmd)
682{
683 struct btrfs_cmd_header *hdr;
684
685 if (WARN_ON(!sctx->send_buf))
686 return -EINVAL;
687
688 BUG_ON(sctx->send_size);
689
690 sctx->send_size += sizeof(*hdr);
691 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
692 put_unaligned_le16(cmd, &hdr->cmd);
693
694 return 0;
695}
696
697static int send_cmd(struct send_ctx *sctx)
698{
699 int ret;
700 struct btrfs_cmd_header *hdr;
701 u32 crc;
702
703 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
704 put_unaligned_le32(sctx->send_size - sizeof(*hdr), &hdr->len);
705 put_unaligned_le32(0, &hdr->crc);
706
707 crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
708 put_unaligned_le32(crc, &hdr->crc);
709
710 ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
711 &sctx->send_off);
712
713 sctx->total_send_size += sctx->send_size;
714 sctx->cmd_send_size[get_unaligned_le16(&hdr->cmd)] += sctx->send_size;
715 sctx->send_size = 0;
716
717 return ret;
718}
719
720/*
721 * Sends a move instruction to user space
722 */
723static int send_rename(struct send_ctx *sctx,
724 struct fs_path *from, struct fs_path *to)
725{
726 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
727 int ret;
728
729 btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);
730
731 ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
732 if (ret < 0)
733 goto out;
734
735 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
736 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
737
738 ret = send_cmd(sctx);
739
740tlv_put_failure:
741out:
742 return ret;
743}
744
745/*
746 * Sends a link instruction to user space
747 */
748static int send_link(struct send_ctx *sctx,
749 struct fs_path *path, struct fs_path *lnk)
750{
751 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
752 int ret;
753
754 btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);
755
756 ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
757 if (ret < 0)
758 goto out;
759
760 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
761 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);
762
763 ret = send_cmd(sctx);
764
765tlv_put_failure:
766out:
767 return ret;
768}
769
770/*
771 * Sends an unlink instruction to user space
772 */
773static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
774{
775 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
776 int ret;
777
778 btrfs_debug(fs_info, "send_unlink %s", path->start);
779
780 ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
781 if (ret < 0)
782 goto out;
783
784 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
785
786 ret = send_cmd(sctx);
787
788tlv_put_failure:
789out:
790 return ret;
791}
792
793/*
794 * Sends a rmdir instruction to user space
795 */
796static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
797{
798 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
799 int ret;
800
801 btrfs_debug(fs_info, "send_rmdir %s", path->start);
802
803 ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
804 if (ret < 0)
805 goto out;
806
807 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
808
809 ret = send_cmd(sctx);
810
811tlv_put_failure:
812out:
813 return ret;
814}
815
816/*
817 * Helper function to retrieve some fields from an inode item.
818 */
819static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
820 u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
821 u64 *gid, u64 *rdev)
822{
823 int ret;
824 struct btrfs_inode_item *ii;
825 struct btrfs_key key;
826
827 key.objectid = ino;
828 key.type = BTRFS_INODE_ITEM_KEY;
829 key.offset = 0;
830 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
831 if (ret) {
832 if (ret > 0)
833 ret = -ENOENT;
834 return ret;
835 }
836
837 ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
838 struct btrfs_inode_item);
839 if (size)
840 *size = btrfs_inode_size(path->nodes[0], ii);
841 if (gen)
842 *gen = btrfs_inode_generation(path->nodes[0], ii);
843 if (mode)
844 *mode = btrfs_inode_mode(path->nodes[0], ii);
845 if (uid)
846 *uid = btrfs_inode_uid(path->nodes[0], ii);
847 if (gid)
848 *gid = btrfs_inode_gid(path->nodes[0], ii);
849 if (rdev)
850 *rdev = btrfs_inode_rdev(path->nodes[0], ii);
851
852 return ret;
853}
854
855static int get_inode_info(struct btrfs_root *root,
856 u64 ino, u64 *size, u64 *gen,
857 u64 *mode, u64 *uid, u64 *gid,
858 u64 *rdev)
859{
860 struct btrfs_path *path;
861 int ret;
862
863 path = alloc_path_for_send();
864 if (!path)
865 return -ENOMEM;
866 ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
867 rdev);
868 btrfs_free_path(path);
869 return ret;
870}
871
872typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
873 struct fs_path *p,
874 void *ctx);
875
876/*
877 * Helper function to iterate the entries in ONE btrfs_inode_ref or
878 * btrfs_inode_extref.
879 * The iterate callback may return a non zero value to stop iteration. This can
880 * be a negative value for error codes or 1 to simply stop it.
881 *
882 * path must point to the INODE_REF or INODE_EXTREF when called.
883 */
884static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
885 struct btrfs_key *found_key, int resolve,
886 iterate_inode_ref_t iterate, void *ctx)
887{
888 struct extent_buffer *eb = path->nodes[0];
889 struct btrfs_item *item;
890 struct btrfs_inode_ref *iref;
891 struct btrfs_inode_extref *extref;
892 struct btrfs_path *tmp_path;
893 struct fs_path *p;
894 u32 cur = 0;
895 u32 total;
896 int slot = path->slots[0];
897 u32 name_len;
898 char *start;
899 int ret = 0;
900 int num = 0;
901 int index;
902 u64 dir;
903 unsigned long name_off;
904 unsigned long elem_size;
905 unsigned long ptr;
906
907 p = fs_path_alloc_reversed();
908 if (!p)
909 return -ENOMEM;
910
911 tmp_path = alloc_path_for_send();
912 if (!tmp_path) {
913 fs_path_free(p);
914 return -ENOMEM;
915 }
916
917
918 if (found_key->type == BTRFS_INODE_REF_KEY) {
919 ptr = (unsigned long)btrfs_item_ptr(eb, slot,
920 struct btrfs_inode_ref);
921 item = btrfs_item_nr(slot);
922 total = btrfs_item_size(eb, item);
923 elem_size = sizeof(*iref);
924 } else {
925 ptr = btrfs_item_ptr_offset(eb, slot);
926 total = btrfs_item_size_nr(eb, slot);
927 elem_size = sizeof(*extref);
928 }
929
930 while (cur < total) {
931 fs_path_reset(p);
932
933 if (found_key->type == BTRFS_INODE_REF_KEY) {
934 iref = (struct btrfs_inode_ref *)(ptr + cur);
935 name_len = btrfs_inode_ref_name_len(eb, iref);
936 name_off = (unsigned long)(iref + 1);
937 index = btrfs_inode_ref_index(eb, iref);
938 dir = found_key->offset;
939 } else {
940 extref = (struct btrfs_inode_extref *)(ptr + cur);
941 name_len = btrfs_inode_extref_name_len(eb, extref);
942 name_off = (unsigned long)&extref->name;
943 index = btrfs_inode_extref_index(eb, extref);
944 dir = btrfs_inode_extref_parent(eb, extref);
945 }
946
947 if (resolve) {
948 start = btrfs_ref_to_path(root, tmp_path, name_len,
949 name_off, eb, dir,
950 p->buf, p->buf_len);
951 if (IS_ERR(start)) {
952 ret = PTR_ERR(start);
953 goto out;
954 }
955 if (start < p->buf) {
956 /* overflow , try again with larger buffer */
957 ret = fs_path_ensure_buf(p,
958 p->buf_len + p->buf - start);
959 if (ret < 0)
960 goto out;
961 start = btrfs_ref_to_path(root, tmp_path,
962 name_len, name_off,
963 eb, dir,
964 p->buf, p->buf_len);
965 if (IS_ERR(start)) {
966 ret = PTR_ERR(start);
967 goto out;
968 }
969 BUG_ON(start < p->buf);
970 }
971 p->start = start;
972 } else {
973 ret = fs_path_add_from_extent_buffer(p, eb, name_off,
974 name_len);
975 if (ret < 0)
976 goto out;
977 }
978
979 cur += elem_size + name_len;
980 ret = iterate(num, dir, index, p, ctx);
981 if (ret)
982 goto out;
983 num++;
984 }
985
986out:
987 btrfs_free_path(tmp_path);
988 fs_path_free(p);
989 return ret;
990}
991
992typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
993 const char *name, int name_len,
994 const char *data, int data_len,
995 u8 type, void *ctx);
996
997/*
998 * Helper function to iterate the entries in ONE btrfs_dir_item.
999 * The iterate callback may return a non zero value to stop iteration. This can
1000 * be a negative value for error codes or 1 to simply stop it.
1001 *
1002 * path must point to the dir item when called.
1003 */
1004static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
1005 iterate_dir_item_t iterate, void *ctx)
1006{
1007 int ret = 0;
1008 struct extent_buffer *eb;
1009 struct btrfs_item *item;
1010 struct btrfs_dir_item *di;
1011 struct btrfs_key di_key;
1012 char *buf = NULL;
1013 int buf_len;
1014 u32 name_len;
1015 u32 data_len;
1016 u32 cur;
1017 u32 len;
1018 u32 total;
1019 int slot;
1020 int num;
1021 u8 type;
1022
1023 /*
1024 * Start with a small buffer (1 page). If later we end up needing more
1025 * space, which can happen for xattrs on a fs with a leaf size greater
1026 * then the page size, attempt to increase the buffer. Typically xattr
1027 * values are small.
1028 */
1029 buf_len = PATH_MAX;
1030 buf = kmalloc(buf_len, GFP_KERNEL);
1031 if (!buf) {
1032 ret = -ENOMEM;
1033 goto out;
1034 }
1035
1036 eb = path->nodes[0];
1037 slot = path->slots[0];
1038 item = btrfs_item_nr(slot);
1039 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
1040 cur = 0;
1041 len = 0;
1042 total = btrfs_item_size(eb, item);
1043
1044 num = 0;
1045 while (cur < total) {
1046 name_len = btrfs_dir_name_len(eb, di);
1047 data_len = btrfs_dir_data_len(eb, di);
1048 type = btrfs_dir_type(eb, di);
1049 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
1050
1051 if (type == BTRFS_FT_XATTR) {
1052 if (name_len > XATTR_NAME_MAX) {
1053 ret = -ENAMETOOLONG;
1054 goto out;
1055 }
1056 if (name_len + data_len >
1057 BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
1058 ret = -E2BIG;
1059 goto out;
1060 }
1061 } else {
1062 /*
1063 * Path too long
1064 */
1065 if (name_len + data_len > PATH_MAX) {
1066 ret = -ENAMETOOLONG;
1067 goto out;
1068 }
1069 }
1070
1071 if (name_len + data_len > buf_len) {
1072 buf_len = name_len + data_len;
1073 if (is_vmalloc_addr(buf)) {
1074 vfree(buf);
1075 buf = NULL;
1076 } else {
1077 char *tmp = krealloc(buf, buf_len,
1078 GFP_KERNEL | __GFP_NOWARN);
1079
1080 if (!tmp)
1081 kfree(buf);
1082 buf = tmp;
1083 }
1084 if (!buf) {
1085 buf = kvmalloc(buf_len, GFP_KERNEL);
1086 if (!buf) {
1087 ret = -ENOMEM;
1088 goto out;
1089 }
1090 }
1091 }
1092
1093 read_extent_buffer(eb, buf, (unsigned long)(di + 1),
1094 name_len + data_len);
1095
1096 len = sizeof(*di) + name_len + data_len;
1097 di = (struct btrfs_dir_item *)((char *)di + len);
1098 cur += len;
1099
1100 ret = iterate(num, &di_key, buf, name_len, buf + name_len,
1101 data_len, type, ctx);
1102 if (ret < 0)
1103 goto out;
1104 if (ret) {
1105 ret = 0;
1106 goto out;
1107 }
1108
1109 num++;
1110 }
1111
1112out:
1113 kvfree(buf);
1114 return ret;
1115}
1116
1117static int __copy_first_ref(int num, u64 dir, int index,
1118 struct fs_path *p, void *ctx)
1119{
1120 int ret;
1121 struct fs_path *pt = ctx;
1122
1123 ret = fs_path_copy(pt, p);
1124 if (ret < 0)
1125 return ret;
1126
1127 /* we want the first only */
1128 return 1;
1129}
1130
1131/*
1132 * Retrieve the first path of an inode. If an inode has more than one
1133 * ref/hardlink, this is ignored.
1134 */
1135static int get_inode_path(struct btrfs_root *root,
1136 u64 ino, struct fs_path *path)
1137{
1138 int ret;
1139 struct btrfs_key key, found_key;
1140 struct btrfs_path *p;
1141
1142 p = alloc_path_for_send();
1143 if (!p)
1144 return -ENOMEM;
1145
1146 fs_path_reset(path);
1147
1148 key.objectid = ino;
1149 key.type = BTRFS_INODE_REF_KEY;
1150 key.offset = 0;
1151
1152 ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
1153 if (ret < 0)
1154 goto out;
1155 if (ret) {
1156 ret = 1;
1157 goto out;
1158 }
1159 btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
1160 if (found_key.objectid != ino ||
1161 (found_key.type != BTRFS_INODE_REF_KEY &&
1162 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1163 ret = -ENOENT;
1164 goto out;
1165 }
1166
1167 ret = iterate_inode_ref(root, p, &found_key, 1,
1168 __copy_first_ref, path);
1169 if (ret < 0)
1170 goto out;
1171 ret = 0;
1172
1173out:
1174 btrfs_free_path(p);
1175 return ret;
1176}
1177
1178struct backref_ctx {
1179 struct send_ctx *sctx;
1180
1181 /* number of total found references */
1182 u64 found;
1183
1184 /*
1185	 * Used for clones found in send_root. Clones found behind cur_objectid
1186	 * and cur_offset are not considered allowed clones.
1187 */
1188 u64 cur_objectid;
1189 u64 cur_offset;
1190
1191 /* may be truncated in case it's the last extent in a file */
1192 u64 extent_len;
1193
1194 /* Just to check for bugs in backref resolving */
1195 int found_itself;
1196};
1197
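/*
 * Comparators for the clone_roots array. __clone_root_cmp_bsearch() compares
 * a root objectid (passed as the bsearch key) against an array element, while
 * __clone_root_cmp_sort() orders two elements for sort(). Both order by the
 * root's objectid so the array can be binary searched in __iterate_backrefs().
 */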
1198static int __clone_root_cmp_bsearch(const void *key, const void *elt)
1199{
1200 u64 root = (u64)(uintptr_t)key;
1201 struct clone_root *cr = (struct clone_root *)elt;
1202
1203 if (root < cr->root->root_key.objectid)
1204 return -1;
1205 if (root > cr->root->root_key.objectid)
1206 return 1;
1207 return 0;
1208}
1209
1210static int __clone_root_cmp_sort(const void *e1, const void *e2)
1211{
1212 struct clone_root *cr1 = (struct clone_root *)e1;
1213 struct clone_root *cr2 = (struct clone_root *)e2;
1214
1215 if (cr1->root->root_key.objectid < cr2->root->root_key.objectid)
1216 return -1;
1217 if (cr1->root->root_key.objectid > cr2->root->root_key.objectid)
1218 return 1;
1219 return 0;
1220}
1221
1222/*
1223 * Called for every backref that is found for the current extent.
1224 * Results are collected in sctx->clone_roots->ino/offset/found_refs
1225 */
1226static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
1227{
1228 struct backref_ctx *bctx = ctx_;
1229 struct clone_root *found;
1230
1231 /* First check if the root is in the list of accepted clone sources */
1232 found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
1233 bctx->sctx->clone_roots_cnt,
1234 sizeof(struct clone_root),
1235 __clone_root_cmp_bsearch);
1236 if (!found)
1237 return 0;
1238
1239 if (found->root == bctx->sctx->send_root &&
1240 ino == bctx->cur_objectid &&
1241 offset == bctx->cur_offset) {
1242 bctx->found_itself = 1;
1243 }
1244
1245 /*
1246 * Make sure we don't consider clones from send_root that are
1247 * behind the current inode/offset.
1248 */
1249 if (found->root == bctx->sctx->send_root) {
1250 /*
1251 * If the source inode was not yet processed we can't issue a
1252 * clone operation, as the source extent does not exist yet at
1253 * the destination of the stream.
1254 */
1255 if (ino > bctx->cur_objectid)
1256 return 0;
1257 /*
1258 * We clone from the inode currently being sent as long as the
1259 * source extent is already processed, otherwise we could try
1260 * to clone from an extent that does not exist yet at the
1261 * destination of the stream.
1262 */
1263 if (ino == bctx->cur_objectid &&
1264 offset + bctx->extent_len >
1265 bctx->sctx->cur_inode_next_write_offset)
1266 return 0;
1267 }
1268
1269 bctx->found++;
1270 found->found_refs++;
1271 if (ino < found->ino) {
1272 found->ino = ino;
1273 found->offset = offset;
1274 } else if (found->ino == ino) {
1275 /*
1276		 * Same extent found more than once in the same file.
1277 */
1278 if (found->offset > offset + bctx->extent_len)
1279 found->offset = offset;
1280 }
1281
1282 return 0;
1283}
1284
1285/*
1286 * Given an inode, offset and extent item, it finds a good clone for a clone
1287 * instruction. Returns -ENOENT when none could be found. The function makes
1288 * sure that the returned clone is usable at the point where sending is at the
1289 * moment. This means that no clones are accepted which lie behind the current
1290 * inode+offset.
1291 *
1292 * path must point to the extent item when called.
1293 */
1294static int find_extent_clone(struct send_ctx *sctx,
1295 struct btrfs_path *path,
1296 u64 ino, u64 data_offset,
1297 u64 ino_size,
1298 struct clone_root **found)
1299{
1300 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
1301 int ret;
1302 int extent_type;
1303 u64 logical;
1304 u64 disk_byte;
1305 u64 num_bytes;
1306 u64 extent_item_pos;
1307 u64 flags = 0;
1308 struct btrfs_file_extent_item *fi;
1309 struct extent_buffer *eb = path->nodes[0];
1310 struct backref_ctx *backref_ctx = NULL;
1311 struct clone_root *cur_clone_root;
1312 struct btrfs_key found_key;
1313 struct btrfs_path *tmp_path;
1314 struct btrfs_extent_item *ei;
1315 int compressed;
1316 u32 i;
1317
1318 tmp_path = alloc_path_for_send();
1319 if (!tmp_path)
1320 return -ENOMEM;
1321
1322 /* We only use this path under the commit sem */
1323 tmp_path->need_commit_sem = 0;
1324
1325 backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_KERNEL);
1326 if (!backref_ctx) {
1327 ret = -ENOMEM;
1328 goto out;
1329 }
1330
1331 if (data_offset >= ino_size) {
1332 /*
1333		 * There may be extents that lie beyond the file's size.
1334		 * This has been observed at least in combination with
1335		 * snapshotting while writing large files.
1336 */
1337 ret = 0;
1338 goto out;
1339 }
1340
1341 fi = btrfs_item_ptr(eb, path->slots[0],
1342 struct btrfs_file_extent_item);
1343 extent_type = btrfs_file_extent_type(eb, fi);
1344 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1345 ret = -ENOENT;
1346 goto out;
1347 }
1348 compressed = btrfs_file_extent_compression(eb, fi);
1349
1350 num_bytes = btrfs_file_extent_num_bytes(eb, fi);
1351 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
1352 if (disk_byte == 0) {
1353 ret = -ENOENT;
1354 goto out;
1355 }
1356 logical = disk_byte + btrfs_file_extent_offset(eb, fi);
1357
1358 down_read(&fs_info->commit_root_sem);
1359 ret = extent_from_logical(fs_info, disk_byte, tmp_path,
1360 &found_key, &flags);
1361 up_read(&fs_info->commit_root_sem);
1362
1363 if (ret < 0)
1364 goto out;
1365 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1366 ret = -EIO;
1367 goto out;
1368 }
1369
1370 ei = btrfs_item_ptr(tmp_path->nodes[0], tmp_path->slots[0],
1371 struct btrfs_extent_item);
1372 /*
1373 * Backreference walking (iterate_extent_inodes() below) is currently
1374 * too expensive when an extent has a large number of references, both
1375 * in time spent and used memory. So for now just fall back to write
1376 * operations instead of clone operations when an extent has more than
1377 * a certain amount of references.
1378 */
1379 if (btrfs_extent_refs(tmp_path->nodes[0], ei) > SEND_MAX_EXTENT_REFS) {
1380 ret = -ENOENT;
1381 goto out;
1382 }
1383 btrfs_release_path(tmp_path);
1384
1385 /*
1386 * Setup the clone roots.
1387 */
1388 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1389 cur_clone_root = sctx->clone_roots + i;
1390 cur_clone_root->ino = (u64)-1;
1391 cur_clone_root->offset = 0;
1392 cur_clone_root->found_refs = 0;
1393 }
1394
1395 backref_ctx->sctx = sctx;
1396 backref_ctx->found = 0;
1397 backref_ctx->cur_objectid = ino;
1398 backref_ctx->cur_offset = data_offset;
1399 backref_ctx->found_itself = 0;
1400 backref_ctx->extent_len = num_bytes;
1401
1402 /*
1403 * The last extent of a file may be too large due to page alignment.
1404 * We need to adjust extent_len in this case so that the checks in
1405 * __iterate_backrefs work.
1406 */
1407 if (data_offset + num_bytes >= ino_size)
1408 backref_ctx->extent_len = ino_size - data_offset;
1409
1410 /*
1411 * Now collect all backrefs.
1412 */
1413 if (compressed == BTRFS_COMPRESS_NONE)
1414 extent_item_pos = logical - found_key.objectid;
1415 else
1416 extent_item_pos = 0;
1417 ret = iterate_extent_inodes(fs_info, found_key.objectid,
1418 extent_item_pos, 1, __iterate_backrefs,
1419 backref_ctx, false);
1420
1421 if (ret < 0)
1422 goto out;
1423
1424 if (!backref_ctx->found_itself) {
1425 /* found a bug in backref code? */
1426 ret = -EIO;
1427 btrfs_err(fs_info,
1428 "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
1429 ino, data_offset, disk_byte, found_key.objectid);
1430 goto out;
1431 }
1432
1433 btrfs_debug(fs_info,
1434 "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
1435 data_offset, ino, num_bytes, logical);
1436
1437 if (!backref_ctx->found)
1438 btrfs_debug(fs_info, "no clones found");
1439
1440 cur_clone_root = NULL;
1441 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1442 if (sctx->clone_roots[i].found_refs) {
1443 if (!cur_clone_root)
1444 cur_clone_root = sctx->clone_roots + i;
1445 else if (sctx->clone_roots[i].root == sctx->send_root)
1446 /* prefer clones from send_root over others */
1447 cur_clone_root = sctx->clone_roots + i;
1448 }
1449
1450 }
1451
1452 if (cur_clone_root) {
1453 *found = cur_clone_root;
1454 ret = 0;
1455 } else {
1456 ret = -ENOENT;
1457 }
1458
1459out:
1460 btrfs_free_path(tmp_path);
1461 kfree(backref_ctx);
1462 return ret;
1463}
1464
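/*
 * Read the target of a symlink into 'dest'. The target is stored as an
 * inline, uncompressed file extent item at offset 0, so it is copied straight
 * from the extent buffer.
 */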
1465static int read_symlink(struct btrfs_root *root,
1466 u64 ino,
1467 struct fs_path *dest)
1468{
1469 int ret;
1470 struct btrfs_path *path;
1471 struct btrfs_key key;
1472 struct btrfs_file_extent_item *ei;
1473 u8 type;
1474 u8 compression;
1475 unsigned long off;
1476 int len;
1477
1478 path = alloc_path_for_send();
1479 if (!path)
1480 return -ENOMEM;
1481
1482 key.objectid = ino;
1483 key.type = BTRFS_EXTENT_DATA_KEY;
1484 key.offset = 0;
1485 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1486 if (ret < 0)
1487 goto out;
1488 if (ret) {
1489 /*
1490 * An empty symlink inode. Can happen in rare error paths when
1491 * creating a symlink (transaction committed before the inode
1492 * eviction handler removed the symlink inode items and a crash
1493		 * happened in between or the subvol was snapshotted in between).
1494 * Print an informative message to dmesg/syslog so that the user
1495 * can delete the symlink.
1496 */
1497 btrfs_err(root->fs_info,
1498 "Found empty symlink inode %llu at root %llu",
1499 ino, root->root_key.objectid);
1500 ret = -EIO;
1501 goto out;
1502 }
1503
1504 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
1505 struct btrfs_file_extent_item);
1506 type = btrfs_file_extent_type(path->nodes[0], ei);
1507 compression = btrfs_file_extent_compression(path->nodes[0], ei);
1508 BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
1509 BUG_ON(compression);
1510
1511 off = btrfs_file_extent_inline_start(ei);
1512 len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);
1513
1514 ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
1515
1516out:
1517 btrfs_free_path(path);
1518 return ret;
1519}
1520
1521/*
1522 * Helper function to generate a file name that is unique in the root of
1523 * send_root and parent_root. This is used to generate names for orphan inodes.
1524 */
1525static int gen_unique_name(struct send_ctx *sctx,
1526 u64 ino, u64 gen,
1527 struct fs_path *dest)
1528{
1529 int ret = 0;
1530 struct btrfs_path *path;
1531 struct btrfs_dir_item *di;
1532 char tmp[64];
1533 int len;
1534 u64 idx = 0;
1535
1536 path = alloc_path_for_send();
1537 if (!path)
1538 return -ENOMEM;
1539
1540 while (1) {
1541 len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
1542 ino, gen, idx);
1543 ASSERT(len < sizeof(tmp));
1544
1545 di = btrfs_lookup_dir_item(NULL, sctx->send_root,
1546 path, BTRFS_FIRST_FREE_OBJECTID,
1547 tmp, strlen(tmp), 0);
1548 btrfs_release_path(path);
1549 if (IS_ERR(di)) {
1550 ret = PTR_ERR(di);
1551 goto out;
1552 }
1553 if (di) {
1554 /* not unique, try again */
1555 idx++;
1556 continue;
1557 }
1558
1559 if (!sctx->parent_root) {
1560 /* unique */
1561 ret = 0;
1562 break;
1563 }
1564
1565 di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
1566 path, BTRFS_FIRST_FREE_OBJECTID,
1567 tmp, strlen(tmp), 0);
1568 btrfs_release_path(path);
1569 if (IS_ERR(di)) {
1570 ret = PTR_ERR(di);
1571 goto out;
1572 }
1573 if (di) {
1574 /* not unique, try again */
1575 idx++;
1576 continue;
1577 }
1578 /* unique */
1579 break;
1580 }
1581
1582 ret = fs_path_add(dest, tmp, strlen(tmp));
1583
1584out:
1585 btrfs_free_path(path);
1586 return ret;
1587}
1588
1589enum inode_state {
1590 inode_state_no_change,
1591 inode_state_will_create,
1592 inode_state_did_create,
1593 inode_state_will_delete,
1594 inode_state_did_delete,
1595};
1596
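/*
 * Determine the state of an inode at the current point of the send stream by
 * looking it up (and comparing generations) in both the send root and the
 * parent root and by comparing its inode number against send_progress.
 * Returns one of the inode_state values or a negative error code.
 */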
1597static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
1598{
1599 int ret;
1600 int left_ret;
1601 int right_ret;
1602 u64 left_gen;
1603 u64 right_gen;
1604
1605 ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
1606 NULL, NULL);
1607 if (ret < 0 && ret != -ENOENT)
1608 goto out;
1609 left_ret = ret;
1610
1611 if (!sctx->parent_root) {
1612 right_ret = -ENOENT;
1613 } else {
1614 ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
1615 NULL, NULL, NULL, NULL);
1616 if (ret < 0 && ret != -ENOENT)
1617 goto out;
1618 right_ret = ret;
1619 }
1620
1621 if (!left_ret && !right_ret) {
1622 if (left_gen == gen && right_gen == gen) {
1623 ret = inode_state_no_change;
1624 } else if (left_gen == gen) {
1625 if (ino < sctx->send_progress)
1626 ret = inode_state_did_create;
1627 else
1628 ret = inode_state_will_create;
1629 } else if (right_gen == gen) {
1630 if (ino < sctx->send_progress)
1631 ret = inode_state_did_delete;
1632 else
1633 ret = inode_state_will_delete;
1634 } else {
1635 ret = -ENOENT;
1636 }
1637 } else if (!left_ret) {
1638 if (left_gen == gen) {
1639 if (ino < sctx->send_progress)
1640 ret = inode_state_did_create;
1641 else
1642 ret = inode_state_will_create;
1643 } else {
1644 ret = -ENOENT;
1645 }
1646 } else if (!right_ret) {
1647 if (right_gen == gen) {
1648 if (ino < sctx->send_progress)
1649 ret = inode_state_did_delete;
1650 else
1651 ret = inode_state_will_delete;
1652 } else {
1653 ret = -ENOENT;
1654 }
1655 } else {
1656 ret = -ENOENT;
1657 }
1658
1659out:
1660 return ret;
1661}
1662
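/*
 * Returns 1 if the inode exists at the current point in time of the receive
 * stream (it was already created or is not yet deleted), 0 if it does not,
 * and <0 on error. The subvolume root inode always exists.
 */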
1663static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
1664{
1665 int ret;
1666
1667 if (ino == BTRFS_FIRST_FREE_OBJECTID)
1668 return 1;
1669
1670 ret = get_cur_inode_state(sctx, ino, gen);
1671 if (ret < 0)
1672 goto out;
1673
1674 if (ret == inode_state_no_change ||
1675 ret == inode_state_did_create ||
1676 ret == inode_state_will_delete)
1677 ret = 1;
1678 else
1679 ret = 0;
1680
1681out:
1682 return ret;
1683}
1684
1685/*
1686 * Helper function to look up a dir item in a dir.
1687 */
1688static int lookup_dir_item_inode(struct btrfs_root *root,
1689 u64 dir, const char *name, int name_len,
1690 u64 *found_inode,
1691 u8 *found_type)
1692{
1693 int ret = 0;
1694 struct btrfs_dir_item *di;
1695 struct btrfs_key key;
1696 struct btrfs_path *path;
1697
1698 path = alloc_path_for_send();
1699 if (!path)
1700 return -ENOMEM;
1701
1702 di = btrfs_lookup_dir_item(NULL, root, path,
1703 dir, name, name_len, 0);
1704 if (IS_ERR_OR_NULL(di)) {
1705 ret = di ? PTR_ERR(di) : -ENOENT;
1706 goto out;
1707 }
1708 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
1709 if (key.type == BTRFS_ROOT_ITEM_KEY) {
1710 ret = -ENOENT;
1711 goto out;
1712 }
1713 *found_inode = key.objectid;
1714 *found_type = btrfs_dir_type(path->nodes[0], di);
1715
1716out:
1717 btrfs_free_path(path);
1718 return ret;
1719}
1720
1721/*
1722 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
1723 * the generation of the parent dir, and the name of the dir entry.
1724 */
1725static int get_first_ref(struct btrfs_root *root, u64 ino,
1726 u64 *dir, u64 *dir_gen, struct fs_path *name)
1727{
1728 int ret;
1729 struct btrfs_key key;
1730 struct btrfs_key found_key;
1731 struct btrfs_path *path;
1732 int len;
1733 u64 parent_dir;
1734
1735 path = alloc_path_for_send();
1736 if (!path)
1737 return -ENOMEM;
1738
1739 key.objectid = ino;
1740 key.type = BTRFS_INODE_REF_KEY;
1741 key.offset = 0;
1742
1743 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
1744 if (ret < 0)
1745 goto out;
1746 if (!ret)
1747 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1748 path->slots[0]);
1749 if (ret || found_key.objectid != ino ||
1750 (found_key.type != BTRFS_INODE_REF_KEY &&
1751 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1752 ret = -ENOENT;
1753 goto out;
1754 }
1755
1756 if (found_key.type == BTRFS_INODE_REF_KEY) {
1757 struct btrfs_inode_ref *iref;
1758 iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1759 struct btrfs_inode_ref);
1760 len = btrfs_inode_ref_name_len(path->nodes[0], iref);
1761 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1762 (unsigned long)(iref + 1),
1763 len);
1764 parent_dir = found_key.offset;
1765 } else {
1766 struct btrfs_inode_extref *extref;
1767 extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1768 struct btrfs_inode_extref);
1769 len = btrfs_inode_extref_name_len(path->nodes[0], extref);
1770 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1771 (unsigned long)&extref->name, len);
1772 parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
1773 }
1774 if (ret < 0)
1775 goto out;
1776 btrfs_release_path(path);
1777
1778 if (dir_gen) {
1779 ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
1780 NULL, NULL, NULL);
1781 if (ret < 0)
1782 goto out;
1783 }
1784
1785 *dir = parent_dir;
1786
1787out:
1788 btrfs_free_path(path);
1789 return ret;
1790}
1791
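/*
 * Check if (dir, name) is the first ref of inode 'ino' in the given root.
 * Returns 1 if it is, 0 if it is not and <0 on error.
 */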
1792static int is_first_ref(struct btrfs_root *root,
1793 u64 ino, u64 dir,
1794 const char *name, int name_len)
1795{
1796 int ret;
1797 struct fs_path *tmp_name;
1798 u64 tmp_dir;
1799
1800 tmp_name = fs_path_alloc();
1801 if (!tmp_name)
1802 return -ENOMEM;
1803
1804 ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
1805 if (ret < 0)
1806 goto out;
1807
1808 if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
1809 ret = 0;
1810 goto out;
1811 }
1812
1813 ret = !memcmp(tmp_name->start, name, name_len);
1814
1815out:
1816 fs_path_free(tmp_name);
1817 return ret;
1818}
1819
1820/*
1821 * Used by process_recorded_refs to determine if a new ref would overwrite an
1822 * already existing ref. In case it detects an overwrite, it returns the
1823 * inode/gen in who_ino/who_gen.
1824 * When an overwrite is detected, process_recorded_refs does proper orphanizing
1825 * to make sure later references to the overwritten inode are possible.
1826 * Orphanizing is however only required for the first ref of an inode.
1827 * process_recorded_refs does an additional is_first_ref check to see if
1828 * orphanizing is really required.
1829 */
1830static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
1831 const char *name, int name_len,
1832 u64 *who_ino, u64 *who_gen, u64 *who_mode)
1833{
1834 int ret = 0;
1835 u64 gen;
1836 u64 other_inode = 0;
1837 u8 other_type = 0;
1838
1839 if (!sctx->parent_root)
1840 goto out;
1841
1842 ret = is_inode_existent(sctx, dir, dir_gen);
1843 if (ret <= 0)
1844 goto out;
1845
1846 /*
1847 * If we have a parent root we need to verify that the parent dir was
1848 * not deleted and then re-created, if it was then we have no overwrite
1849 * and we can just unlink this entry.
1850 */
1851 if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
1852 ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
1853 NULL, NULL, NULL);
1854 if (ret < 0 && ret != -ENOENT)
1855 goto out;
1856 if (ret) {
1857 ret = 0;
1858 goto out;
1859 }
1860 if (gen != dir_gen)
1861 goto out;
1862 }
1863
1864 ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
1865 &other_inode, &other_type);
1866 if (ret < 0 && ret != -ENOENT)
1867 goto out;
1868 if (ret) {
1869 ret = 0;
1870 goto out;
1871 }
1872
1873 /*
1874 * Check if the overwritten ref was already processed. If yes, the ref
1875 * was already unlinked/moved, so we can safely assume that we will not
1876 * overwrite anything at this point in time.
1877 */
1878 if (other_inode > sctx->send_progress ||
1879 is_waiting_for_move(sctx, other_inode)) {
1880 ret = get_inode_info(sctx->parent_root, other_inode, NULL,
1881 who_gen, who_mode, NULL, NULL, NULL);
1882 if (ret < 0)
1883 goto out;
1884
1885 ret = 1;
1886 *who_ino = other_inode;
1887 } else {
1888 ret = 0;
1889 }
1890
1891out:
1892 return ret;
1893}
1894
1895/*
1896 * Checks if the ref was overwritten by an already processed inode. This is
1897 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
1898 * thus the orphan name needs to be used.
1899 * process_recorded_refs also uses it to avoid unlinking of refs that were
1900 * overwritten.
1901 */
1902static int did_overwrite_ref(struct send_ctx *sctx,
1903 u64 dir, u64 dir_gen,
1904 u64 ino, u64 ino_gen,
1905 const char *name, int name_len)
1906{
1907 int ret = 0;
1908 u64 gen;
1909 u64 ow_inode;
1910 u8 other_type;
1911
1912 if (!sctx->parent_root)
1913 goto out;
1914
1915 ret = is_inode_existent(sctx, dir, dir_gen);
1916 if (ret <= 0)
1917 goto out;
1918
1919 if (dir != BTRFS_FIRST_FREE_OBJECTID) {
1920 ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL,
1921 NULL, NULL, NULL);
1922 if (ret < 0 && ret != -ENOENT)
1923 goto out;
1924 if (ret) {
1925 ret = 0;
1926 goto out;
1927 }
1928 if (gen != dir_gen)
1929 goto out;
1930 }
1931
1932 /* check if the ref was overwritten by another ref */
1933 ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
1934 &ow_inode, &other_type);
1935 if (ret < 0 && ret != -ENOENT)
1936 goto out;
1937 if (ret) {
1938 /* was never and will never be overwritten */
1939 ret = 0;
1940 goto out;
1941 }
1942
1943 ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
1944 NULL, NULL);
1945 if (ret < 0)
1946 goto out;
1947
1948 if (ow_inode == ino && gen == ino_gen) {
1949 ret = 0;
1950 goto out;
1951 }
1952
1953 /*
1954 * We know that it is or will be overwritten. Check this now.
1955 * The current inode being processed might have been the one that caused
1956 * inode 'ino' to be orphanized, therefore check if ow_inode matches
1957 * the current inode being processed.
1958 */
1959 if ((ow_inode < sctx->send_progress) ||
1960 (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
1961 gen == sctx->cur_inode_gen))
1962 ret = 1;
1963 else
1964 ret = 0;
1965
1966out:
1967 return ret;
1968}
1969
1970/*
1971 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
1972 * that got overwritten. This is used by process_recorded_refs to determine
1973 * if it has to use the path as returned by get_cur_path or the orphan name.
1974 */
1975static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
1976{
1977 int ret = 0;
1978 struct fs_path *name = NULL;
1979 u64 dir;
1980 u64 dir_gen;
1981
1982 if (!sctx->parent_root)
1983 goto out;
1984
1985 name = fs_path_alloc();
1986 if (!name)
1987 return -ENOMEM;
1988
1989 ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
1990 if (ret < 0)
1991 goto out;
1992
1993 ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
1994 name->start, fs_path_len(name));
1995
1996out:
1997 fs_path_free(name);
1998 return ret;
1999}
2000
2001/*
2002 * Insert a name cache entry. On 32-bit kernels the radix tree index is 32-bit,
2003 * so we need to do some special handling in case we have clashes. This function
2004 * takes care of this with the help of name_cache_entry::radix_list.
2005 * In case of error, nce is kfreed.
2006 */
2007static int name_cache_insert(struct send_ctx *sctx,
2008 struct name_cache_entry *nce)
2009{
2010 int ret = 0;
2011 struct list_head *nce_head;
2012
2013 nce_head = radix_tree_lookup(&sctx->name_cache,
2014 (unsigned long)nce->ino);
2015 if (!nce_head) {
2016 nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
2017 if (!nce_head) {
2018 kfree(nce);
2019 return -ENOMEM;
2020 }
2021 INIT_LIST_HEAD(nce_head);
2022
2023 ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
2024 if (ret < 0) {
2025 kfree(nce_head);
2026 kfree(nce);
2027 return ret;
2028 }
2029 }
2030 list_add_tail(&nce->radix_list, nce_head);
2031 list_add_tail(&nce->list, &sctx->name_cache_list);
2032 sctx->name_cache_size++;
2033
2034 return ret;
2035}
2036
2037static void name_cache_delete(struct send_ctx *sctx,
2038 struct name_cache_entry *nce)
2039{
2040 struct list_head *nce_head;
2041
2042 nce_head = radix_tree_lookup(&sctx->name_cache,
2043 (unsigned long)nce->ino);
2044 if (!nce_head) {
2045 btrfs_err(sctx->send_root->fs_info,
2046 "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
2047 nce->ino, sctx->name_cache_size);
2048 }
2049
2050 list_del(&nce->radix_list);
2051 list_del(&nce->list);
2052 sctx->name_cache_size--;
2053
2054 /*
2055 * We may not get to the final release of nce_head if the lookup fails
2056 */
2057 if (nce_head && list_empty(nce_head)) {
2058 radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
2059 kfree(nce_head);
2060 }
2061}
2062
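/*
 * Look up a name cache entry for the given ino/gen pair. Several entries may
 * share one radix tree slot (32-bit index clashes), hence the list walk.
 * Returns NULL on a cache miss.
 */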
2063static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
2064 u64 ino, u64 gen)
2065{
2066 struct list_head *nce_head;
2067 struct name_cache_entry *cur;
2068
2069 nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
2070 if (!nce_head)
2071 return NULL;
2072
2073 list_for_each_entry(cur, nce_head, radix_list) {
2074 if (cur->ino == ino && cur->gen == gen)
2075 return cur;
2076 }
2077 return NULL;
2078}
2079
2080/*
2081 * Remove some entries from the beginning of name_cache_list.
2082 */
2083static void name_cache_clean_unused(struct send_ctx *sctx)
2084{
2085 struct name_cache_entry *nce;
2086
2087 if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
2088 return;
2089
2090 while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
2091 nce = list_entry(sctx->name_cache_list.next,
2092 struct name_cache_entry, list);
2093 name_cache_delete(sctx, nce);
2094 kfree(nce);
2095 }
2096}
2097
2098static void name_cache_free(struct send_ctx *sctx)
2099{
2100 struct name_cache_entry *nce;
2101
2102 while (!list_empty(&sctx->name_cache_list)) {
2103 nce = list_entry(sctx->name_cache_list.next,
2104 struct name_cache_entry, list);
2105 name_cache_delete(sctx, nce);
2106 kfree(nce);
2107 }
2108}
2109
2110/*
2111 * Used by get_cur_path for each ref up to the root.
2112 * Returns 0 if it succeeded.
2113 * Returns 1 if the inode does not exist or got overwritten. In that case, the
2114 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
2115 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
2116 * Returns <0 in case of error.
2117 */
2118static int __get_cur_name_and_parent(struct send_ctx *sctx,
2119 u64 ino, u64 gen,
2120 u64 *parent_ino,
2121 u64 *parent_gen,
2122 struct fs_path *dest)
2123{
2124 int ret;
2125 int nce_ret;
2126 struct name_cache_entry *nce = NULL;
2127
2128 /*
2129 * First check if we already did a call to this function with the same
2130 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
2131 * return the cached result.
2132 */
2133 nce = name_cache_search(sctx, ino, gen);
2134 if (nce) {
2135 if (ino < sctx->send_progress && nce->need_later_update) {
2136 name_cache_delete(sctx, nce);
2137 kfree(nce);
2138 nce = NULL;
2139 } else {
2140 /*
2141 * Removes the entry from the list and adds it back to
2142 * the end. This marks the entry as recently used so
2143 * that name_cache_clean_unused does not remove it.
2144 */
2145 list_move_tail(&nce->list, &sctx->name_cache_list);
2146
2147 *parent_ino = nce->parent_ino;
2148 *parent_gen = nce->parent_gen;
2149 ret = fs_path_add(dest, nce->name, nce->name_len);
2150 if (ret < 0)
2151 goto out;
2152 ret = nce->ret;
2153 goto out;
2154 }
2155 }
2156
2157 /*
2158	 * If the inode does not exist yet, add the orphan name and return 1.
2159	 * This should only happen for the parent dir that we determine in
2160	 * __record_new_ref.
2161 */
2162 ret = is_inode_existent(sctx, ino, gen);
2163 if (ret < 0)
2164 goto out;
2165
2166 if (!ret) {
2167 ret = gen_unique_name(sctx, ino, gen, dest);
2168 if (ret < 0)
2169 goto out;
2170 ret = 1;
2171 goto out_cache;
2172 }
2173
2174 /*
2175 * Depending on whether the inode was already processed or not, use
2176 * send_root or parent_root for ref lookup.
2177 */
2178 if (ino < sctx->send_progress)
2179 ret = get_first_ref(sctx->send_root, ino,
2180 parent_ino, parent_gen, dest);
2181 else
2182 ret = get_first_ref(sctx->parent_root, ino,
2183 parent_ino, parent_gen, dest);
2184 if (ret < 0)
2185 goto out;
2186
2187 /*
2188 * Check if the ref was overwritten by an inode's ref that was processed
2189 * earlier. If yes, treat as orphan and return 1.
2190 */
2191 ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
2192 dest->start, dest->end - dest->start);
2193 if (ret < 0)
2194 goto out;
2195 if (ret) {
2196 fs_path_reset(dest);
2197 ret = gen_unique_name(sctx, ino, gen, dest);
2198 if (ret < 0)
2199 goto out;
2200 ret = 1;
2201 }
2202
2203out_cache:
2204 /*
2205 * Store the result of the lookup in the name cache.
2206 */
2207 nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
2208 if (!nce) {
2209 ret = -ENOMEM;
2210 goto out;
2211 }
2212
2213 nce->ino = ino;
2214 nce->gen = gen;
2215 nce->parent_ino = *parent_ino;
2216 nce->parent_gen = *parent_gen;
2217 nce->name_len = fs_path_len(dest);
2218 nce->ret = ret;
2219 strcpy(nce->name, dest->start);
2220
2221 if (ino < sctx->send_progress)
2222 nce->need_later_update = 0;
2223 else
2224 nce->need_later_update = 1;
2225
2226 nce_ret = name_cache_insert(sctx, nce);
2227 if (nce_ret < 0)
2228 ret = nce_ret;
2229 name_cache_clean_unused(sctx);
2230
2231out:
2232 return ret;
2233}
2234
2235/*
2236 * Magic happens here. This function returns the first ref to an inode as it
2237 * would look like while receiving the stream at this point in time.
2238 * We walk the path up to the root. For every inode in between, we check if it
2239 * was already processed/sent. If yes, we continue with the parent as found
2240 * in send_root. If not, we continue with the parent as found in parent_root.
2241 * If we encounter an inode that was deleted at this point in time, we use the
2242 * inode's "orphan" name instead of the real name and stop. Same with new inodes
2243 * that were not created yet and overwritten inodes/refs.
2244 *
2245 * When do we have orphan inodes:
2246 * 1. When an inode is freshly created and thus no valid refs are available yet
2247 * 2. When a directory lost all its refs (deleted) but still has dir items
2248 * inside which were not processed yet (pending for move/delete). If anyone
2249 * tried to get the path to the dir items, it would get a path inside that
2250 * orphan directory.
2251 * 3. When an inode is moved around or gets new links, it may overwrite the ref
2252 * of an unprocessed inode. If in that case the first ref would be
2253 * overwritten, the overwritten inode gets "orphanized". Later when we
2254 * process this overwritten inode, it is restored at a new place by moving
2255 * the orphan inode.
2256 *
2257 * sctx->send_progress tells this function at which point in time receiving
2258 * would be.
2259 */
2260static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
2261 struct fs_path *dest)
2262{
2263 int ret = 0;
2264 struct fs_path *name = NULL;
2265 u64 parent_inode = 0;
2266 u64 parent_gen = 0;
2267 int stop = 0;
2268
2269 name = fs_path_alloc();
2270 if (!name) {
2271 ret = -ENOMEM;
2272 goto out;
2273 }
2274
2275 dest->reversed = 1;
2276 fs_path_reset(dest);
2277
2278 while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
2279 struct waiting_dir_move *wdm;
2280
2281 fs_path_reset(name);
2282
2283 if (is_waiting_for_rm(sctx, ino, gen)) {
2284 ret = gen_unique_name(sctx, ino, gen, name);
2285 if (ret < 0)
2286 goto out;
2287 ret = fs_path_add_path(dest, name);
2288 break;
2289 }
2290
2291 wdm = get_waiting_dir_move(sctx, ino);
2292 if (wdm && wdm->orphanized) {
2293 ret = gen_unique_name(sctx, ino, gen, name);
2294 stop = 1;
2295 } else if (wdm) {
2296 ret = get_first_ref(sctx->parent_root, ino,
2297 &parent_inode, &parent_gen, name);
2298 } else {
2299 ret = __get_cur_name_and_parent(sctx, ino, gen,
2300 &parent_inode,
2301 &parent_gen, name);
2302 if (ret)
2303 stop = 1;
2304 }
2305
2306 if (ret < 0)
2307 goto out;
2308
2309 ret = fs_path_add_path(dest, name);
2310 if (ret < 0)
2311 goto out;
2312
2313 ino = parent_inode;
2314 gen = parent_gen;
2315 }
2316
2317out:
2318 fs_path_free(name);
2319 if (!ret)
2320 fs_path_unreverse(dest);
2321 return ret;
2322}
2323
2324/*
2325 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
2326 */
2327static int send_subvol_begin(struct send_ctx *sctx)
2328{
2329 int ret;
2330 struct btrfs_root *send_root = sctx->send_root;
2331 struct btrfs_root *parent_root = sctx->parent_root;
2332 struct btrfs_path *path;
2333 struct btrfs_key key;
2334 struct btrfs_root_ref *ref;
2335 struct extent_buffer *leaf;
2336 char *name = NULL;
2337 int namelen;
2338
2339 path = btrfs_alloc_path();
2340 if (!path)
2341 return -ENOMEM;
2342
2343 name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
2344 if (!name) {
2345 btrfs_free_path(path);
2346 return -ENOMEM;
2347 }
2348
2349 key.objectid = send_root->root_key.objectid;
2350 key.type = BTRFS_ROOT_BACKREF_KEY;
2351 key.offset = 0;
2352
2353 ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
2354 &key, path, 1, 0);
2355 if (ret < 0)
2356 goto out;
2357 if (ret) {
2358 ret = -ENOENT;
2359 goto out;
2360 }
2361
2362 leaf = path->nodes[0];
2363 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2364 if (key.type != BTRFS_ROOT_BACKREF_KEY ||
2365 key.objectid != send_root->root_key.objectid) {
2366 ret = -ENOENT;
2367 goto out;
2368 }
2369 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
2370 namelen = btrfs_root_ref_name_len(leaf, ref);
2371 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
2372 btrfs_release_path(path);
2373
2374 if (parent_root) {
2375 ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
2376 if (ret < 0)
2377 goto out;
2378 } else {
2379 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
2380 if (ret < 0)
2381 goto out;
2382 }
2383
2384 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
2385
2386 if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
2387 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2388 sctx->send_root->root_item.received_uuid);
2389 else
2390 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2391 sctx->send_root->root_item.uuid);
2392
2393 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
2394 btrfs_root_ctransid(&sctx->send_root->root_item));
2395 if (parent_root) {
2396 if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
2397 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2398 parent_root->root_item.received_uuid);
2399 else
2400 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2401 parent_root->root_item.uuid);
2402 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
2403 btrfs_root_ctransid(&sctx->parent_root->root_item));
2404 }
2405
2406 ret = send_cmd(sctx);
2407
2408tlv_put_failure:
2409out:
2410 btrfs_free_path(path);
2411 kfree(name);
2412 return ret;
2413}
2414
2415static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
2416{
2417 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2418 int ret = 0;
2419 struct fs_path *p;
2420
2421 btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
2422
2423 p = fs_path_alloc();
2424 if (!p)
2425 return -ENOMEM;
2426
2427 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
2428 if (ret < 0)
2429 goto out;
2430
2431 ret = get_cur_path(sctx, ino, gen, p);
2432 if (ret < 0)
2433 goto out;
2434 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2435 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
2436
2437 ret = send_cmd(sctx);
2438
2439tlv_put_failure:
2440out:
2441 fs_path_free(p);
2442 return ret;
2443}
2444
2445static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
2446{
2447 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2448 int ret = 0;
2449 struct fs_path *p;
2450
2451 btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
2452
2453 p = fs_path_alloc();
2454 if (!p)
2455 return -ENOMEM;
2456
2457 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
2458 if (ret < 0)
2459 goto out;
2460
2461 ret = get_cur_path(sctx, ino, gen, p);
2462 if (ret < 0)
2463 goto out;
2464 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2465 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
2466
2467 ret = send_cmd(sctx);
2468
2469tlv_put_failure:
2470out:
2471 fs_path_free(p);
2472 return ret;
2473}
2474
2475static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
2476{
2477 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2478 int ret = 0;
2479 struct fs_path *p;
2480
2481 btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
2482 ino, uid, gid);
2483
2484 p = fs_path_alloc();
2485 if (!p)
2486 return -ENOMEM;
2487
2488 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
2489 if (ret < 0)
2490 goto out;
2491
2492 ret = get_cur_path(sctx, ino, gen, p);
2493 if (ret < 0)
2494 goto out;
2495 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2496 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
2497 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
2498
2499 ret = send_cmd(sctx);
2500
2501tlv_put_failure:
2502out:
2503 fs_path_free(p);
2504 return ret;
2505}
2506
2507static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
2508{
2509 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2510 int ret = 0;
2511 struct fs_path *p = NULL;
2512 struct btrfs_inode_item *ii;
2513 struct btrfs_path *path = NULL;
2514 struct extent_buffer *eb;
2515 struct btrfs_key key;
2516 int slot;
2517
2518 btrfs_debug(fs_info, "send_utimes %llu", ino);
2519
2520 p = fs_path_alloc();
2521 if (!p)
2522 return -ENOMEM;
2523
2524 path = alloc_path_for_send();
2525 if (!path) {
2526 ret = -ENOMEM;
2527 goto out;
2528 }
2529
2530 key.objectid = ino;
2531 key.type = BTRFS_INODE_ITEM_KEY;
2532 key.offset = 0;
2533 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2534 if (ret > 0)
2535 ret = -ENOENT;
2536 if (ret < 0)
2537 goto out;
2538
2539 eb = path->nodes[0];
2540 slot = path->slots[0];
2541 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
2542
2543 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
2544 if (ret < 0)
2545 goto out;
2546
2547 ret = get_cur_path(sctx, ino, gen, p);
2548 if (ret < 0)
2549 goto out;
2550 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2551 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
2552 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
2553 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
2554 /* TODO Add otime support when the otime patches get into upstream */
2555
2556 ret = send_cmd(sctx);
2557
2558tlv_put_failure:
2559out:
2560 fs_path_free(p);
2561 btrfs_free_path(path);
2562 return ret;
2563}
2564
2565/*
2566 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
2567 * a valid path yet because we did not process the refs yet. So, the inode
2568 * is created as an orphan.
2569 */
2570static int send_create_inode(struct send_ctx *sctx, u64 ino)
2571{
2572 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2573 int ret = 0;
2574 struct fs_path *p;
2575 int cmd;
2576 u64 gen;
2577 u64 mode;
2578 u64 rdev;
2579
2580 btrfs_debug(fs_info, "send_create_inode %llu", ino);
2581
2582 p = fs_path_alloc();
2583 if (!p)
2584 return -ENOMEM;
2585
2586 if (ino != sctx->cur_ino) {
2587 ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
2588 NULL, NULL, &rdev);
2589 if (ret < 0)
2590 goto out;
2591 } else {
2592 gen = sctx->cur_inode_gen;
2593 mode = sctx->cur_inode_mode;
2594 rdev = sctx->cur_inode_rdev;
2595 }
2596
2597 if (S_ISREG(mode)) {
2598 cmd = BTRFS_SEND_C_MKFILE;
2599 } else if (S_ISDIR(mode)) {
2600 cmd = BTRFS_SEND_C_MKDIR;
2601 } else if (S_ISLNK(mode)) {
2602 cmd = BTRFS_SEND_C_SYMLINK;
2603 } else if (S_ISCHR(mode) || S_ISBLK(mode)) {
2604 cmd = BTRFS_SEND_C_MKNOD;
2605 } else if (S_ISFIFO(mode)) {
2606 cmd = BTRFS_SEND_C_MKFIFO;
2607 } else if (S_ISSOCK(mode)) {
2608 cmd = BTRFS_SEND_C_MKSOCK;
2609 } else {
2610 btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
2611 (int)(mode & S_IFMT));
2612 ret = -EOPNOTSUPP;
2613 goto out;
2614 }
2615
2616 ret = begin_cmd(sctx, cmd);
2617 if (ret < 0)
2618 goto out;
2619
2620 ret = gen_unique_name(sctx, ino, gen, p);
2621 if (ret < 0)
2622 goto out;
2623
2624 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2625 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
2626
2627 if (S_ISLNK(mode)) {
2628 fs_path_reset(p);
2629 ret = read_symlink(sctx->send_root, ino, p);
2630 if (ret < 0)
2631 goto out;
2632 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
2633 } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
2634 S_ISFIFO(mode) || S_ISSOCK(mode)) {
2635 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
2636 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
2637 }
2638
2639 ret = send_cmd(sctx);
2640 if (ret < 0)
2641 goto out;
2642
2643
2644tlv_put_failure:
2645out:
2646 fs_path_free(p);
2647 return ret;
2648}
2649
2650/*
2651 * We need some special handling for inodes that get processed before the parent
2652 * directory got created. See process_recorded_refs for details.
2653 * This function does the check if we already created the dir out of order.
2654 */
2655static int did_create_dir(struct send_ctx *sctx, u64 dir)
2656{
2657 int ret = 0;
2658 struct btrfs_path *path = NULL;
2659 struct btrfs_key key;
2660 struct btrfs_key found_key;
2661 struct btrfs_key di_key;
2662 struct extent_buffer *eb;
2663 struct btrfs_dir_item *di;
2664 int slot;
2665
2666 path = alloc_path_for_send();
2667 if (!path) {
2668 ret = -ENOMEM;
2669 goto out;
2670 }
2671
2672 key.objectid = dir;
2673 key.type = BTRFS_DIR_INDEX_KEY;
2674 key.offset = 0;
2675 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2676 if (ret < 0)
2677 goto out;
2678
2679 while (1) {
2680 eb = path->nodes[0];
2681 slot = path->slots[0];
2682 if (slot >= btrfs_header_nritems(eb)) {
2683 ret = btrfs_next_leaf(sctx->send_root, path);
2684 if (ret < 0) {
2685 goto out;
2686 } else if (ret > 0) {
2687 ret = 0;
2688 break;
2689 }
2690 continue;
2691 }
2692
2693 btrfs_item_key_to_cpu(eb, &found_key, slot);
2694 if (found_key.objectid != key.objectid ||
2695 found_key.type != key.type) {
2696 ret = 0;
2697 goto out;
2698 }
2699
2700 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
2701 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2702
2703 if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
2704 di_key.objectid < sctx->send_progress) {
2705 ret = 1;
2706 goto out;
2707 }
2708
2709 path->slots[0]++;
2710 }
2711
2712out:
2713 btrfs_free_path(path);
2714 return ret;
2715}
2716
2717/*
2718 * Only creates the inode if it is:
2719 * 1. Not a directory
2720 * 2. Or a directory which was not created already due to out of order
2721 * directories. See did_create_dir and process_recorded_refs for details.
2722 */
2723static int send_create_inode_if_needed(struct send_ctx *sctx)
2724{
2725 int ret;
2726
2727 if (S_ISDIR(sctx->cur_inode_mode)) {
2728 ret = did_create_dir(sctx, sctx->cur_ino);
2729 if (ret < 0)
2730 goto out;
2731 if (ret) {
2732 ret = 0;
2733 goto out;
2734 }
2735 }
2736
2737 ret = send_create_inode(sctx, sctx->cur_ino);
2738 if (ret < 0)
2739 goto out;
2740
2741out:
2742 return ret;
2743}
2744
2745struct recorded_ref {
2746 struct list_head list;
2747 char *name;
2748 struct fs_path *full_path;
2749 u64 dir;
2750 u64 dir_gen;
2751 int name_len;
2752};
2753
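/* Attach a full path to a recorded ref and derive the basename from it. */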
2754static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
2755{
2756 ref->full_path = path;
2757 ref->name = (char *)kbasename(ref->full_path->start);
2758 ref->name_len = ref->full_path->end - ref->name;
2759}
2760
2761/*
2762 * We need to process new refs before deleted refs, but compare_tree gives us
2763 * everything mixed. So we first record all refs and later process them.
2764 * This function is a helper to record one ref.
2765 */
2766static int __record_ref(struct list_head *head, u64 dir,
2767 u64 dir_gen, struct fs_path *path)
2768{
2769 struct recorded_ref *ref;
2770
2771 ref = kmalloc(sizeof(*ref), GFP_KERNEL);
2772 if (!ref)
2773 return -ENOMEM;
2774
2775 ref->dir = dir;
2776 ref->dir_gen = dir_gen;
2777 set_ref_path(ref, path);
2778 list_add_tail(&ref->list, head);
2779 return 0;
2780}
2781
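/*
 * Duplicate only the dir/dir_gen of a recorded ref into a new entry on the
 * given list. The full path is not copied, callers only need the parent
 * directory information (e.g. for later utimes updates).
 */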
2782static int dup_ref(struct recorded_ref *ref, struct list_head *list)
2783{
2784 struct recorded_ref *new;
2785
2786 new = kmalloc(sizeof(*ref), GFP_KERNEL);
2787 if (!new)
2788 return -ENOMEM;
2789
2790 new->dir = ref->dir;
2791 new->dir_gen = ref->dir_gen;
2792 new->full_path = NULL;
2793 INIT_LIST_HEAD(&new->list);
2794 list_add_tail(&new->list, list);
2795 return 0;
2796}
2797
2798static void __free_recorded_refs(struct list_head *head)
2799{
2800 struct recorded_ref *cur;
2801
2802 while (!list_empty(head)) {
2803 cur = list_entry(head->next, struct recorded_ref, list);
2804 fs_path_free(cur->full_path);
2805 list_del(&cur->list);
2806 kfree(cur);
2807 }
2808}
2809
2810static void free_recorded_refs(struct send_ctx *sctx)
2811{
2812 __free_recorded_refs(&sctx->new_refs);
2813 __free_recorded_refs(&sctx->deleted_refs);
2814}
2815
2816/*
2817 * Renames/moves a file/dir to its orphan name. Used when the first
2818 * ref of an unprocessed inode gets overwritten and for all non-empty
2819 * directories.
2820 */
2821static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
2822 struct fs_path *path)
2823{
2824 int ret;
2825 struct fs_path *orphan;
2826
2827 orphan = fs_path_alloc();
2828 if (!orphan)
2829 return -ENOMEM;
2830
2831 ret = gen_unique_name(sctx, ino, gen, orphan);
2832 if (ret < 0)
2833 goto out;
2834
2835 ret = send_rename(sctx, path, orphan);
2836
2837out:
2838 fs_path_free(orphan);
2839 return ret;
2840}
2841
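/*
 * Insert an orphan_dir_info entry into the rb-tree of orphaned directories,
 * keyed by (inode number, generation). If an entry for that directory already
 * exists, it is returned instead of allocating a new one.
 */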
2842static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx,
2843 u64 dir_ino, u64 dir_gen)
2844{
2845 struct rb_node **p = &sctx->orphan_dirs.rb_node;
2846 struct rb_node *parent = NULL;
2847 struct orphan_dir_info *entry, *odi;
2848
2849 while (*p) {
2850 parent = *p;
2851 entry = rb_entry(parent, struct orphan_dir_info, node);
2852 if (dir_ino < entry->ino)
2853 p = &(*p)->rb_left;
2854 else if (dir_ino > entry->ino)
2855 p = &(*p)->rb_right;
2856 else if (dir_gen < entry->gen)
2857 p = &(*p)->rb_left;
2858 else if (dir_gen > entry->gen)
2859 p = &(*p)->rb_right;
2860 else
2861 return entry;
2862 }
2863
2864 odi = kmalloc(sizeof(*odi), GFP_KERNEL);
2865 if (!odi)
2866 return ERR_PTR(-ENOMEM);
2867 odi->ino = dir_ino;
2868 odi->gen = dir_gen;
2869 odi->last_dir_index_offset = 0;
2870
2871 rb_link_node(&odi->node, parent, p);
2872 rb_insert_color(&odi->node, &sctx->orphan_dirs);
2873 return odi;
2874}
2875
2876static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx,
2877 u64 dir_ino, u64 gen)
2878{
2879 struct rb_node *n = sctx->orphan_dirs.rb_node;
2880 struct orphan_dir_info *entry;
2881
2882 while (n) {
2883 entry = rb_entry(n, struct orphan_dir_info, node);
2884 if (dir_ino < entry->ino)
2885 n = n->rb_left;
2886 else if (dir_ino > entry->ino)
2887 n = n->rb_right;
2888 else if (gen < entry->gen)
2889 n = n->rb_left;
2890 else if (gen > entry->gen)
2891 n = n->rb_right;
2892 else
2893 return entry;
2894 }
2895 return NULL;
2896}
2897
2898static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen)
2899{
2900 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen);
2901
2902 return odi != NULL;
2903}
2904
2905static void free_orphan_dir_info(struct send_ctx *sctx,
2906 struct orphan_dir_info *odi)
2907{
2908 if (!odi)
2909 return;
2910 rb_erase(&odi->node, &sctx->orphan_dirs);
2911 kfree(odi);
2912}
2913
2914/*
2915 * Returns 1 if a directory can be removed at this point in time.
2916 * We check this by iterating all dir items and checking if the inode behind
2917 * the dir item was already processed.
2918 */
2919static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
2920 u64 send_progress)
2921{
2922 int ret = 0;
2923 struct btrfs_root *root = sctx->parent_root;
2924 struct btrfs_path *path;
2925 struct btrfs_key key;
2926 struct btrfs_key found_key;
2927 struct btrfs_key loc;
2928 struct btrfs_dir_item *di;
2929 struct orphan_dir_info *odi = NULL;
2930
2931 /*
2932 * Don't try to rmdir the top/root subvolume dir.
2933 */
2934 if (dir == BTRFS_FIRST_FREE_OBJECTID)
2935 return 0;
2936
2937 path = alloc_path_for_send();
2938 if (!path)
2939 return -ENOMEM;
2940
2941 key.objectid = dir;
2942 key.type = BTRFS_DIR_INDEX_KEY;
2943 key.offset = 0;
2944
2945 odi = get_orphan_dir_info(sctx, dir, dir_gen);
2946 if (odi)
2947 key.offset = odi->last_dir_index_offset;
2948
2949 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2950 if (ret < 0)
2951 goto out;
2952
2953 while (1) {
2954 struct waiting_dir_move *dm;
2955
2956 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2957 ret = btrfs_next_leaf(root, path);
2958 if (ret < 0)
2959 goto out;
2960 else if (ret > 0)
2961 break;
2962 continue;
2963 }
2964 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2965 path->slots[0]);
2966 if (found_key.objectid != key.objectid ||
2967 found_key.type != key.type)
2968 break;
2969
2970 di = btrfs_item_ptr(path->nodes[0], path->slots[0],
2971 struct btrfs_dir_item);
2972 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
2973
2974 dm = get_waiting_dir_move(sctx, loc.objectid);
2975 if (dm) {
2976 odi = add_orphan_dir_info(sctx, dir, dir_gen);
2977 if (IS_ERR(odi)) {
2978 ret = PTR_ERR(odi);
2979 goto out;
2980 }
2981 odi->gen = dir_gen;
2982 odi->last_dir_index_offset = found_key.offset;
2983 dm->rmdir_ino = dir;
2984 dm->rmdir_gen = dir_gen;
2985 ret = 0;
2986 goto out;
2987 }
2988
2989 if (loc.objectid > send_progress) {
2990 odi = add_orphan_dir_info(sctx, dir, dir_gen);
2991 if (IS_ERR(odi)) {
2992 ret = PTR_ERR(odi);
2993 goto out;
2994 }
2995 odi->gen = dir_gen;
2996 odi->last_dir_index_offset = found_key.offset;
2997 ret = 0;
2998 goto out;
2999 }
3000
3001 path->slots[0]++;
3002 }
3003 free_orphan_dir_info(sctx, odi);
3004
3005 ret = 1;
3006
3007out:
3008 btrfs_free_path(path);
3009 return ret;
3010}
3011
3012static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
3013{
3014 struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);
3015
3016 return entry != NULL;
3017}
3018
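/*
 * Track a directory inode whose rename/move has to be delayed. Entries are
 * kept in an rb-tree keyed by inode number; -EEXIST is returned if the inode
 * is already being tracked.
 */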
3019static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
3020{
3021 struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
3022 struct rb_node *parent = NULL;
3023 struct waiting_dir_move *entry, *dm;
3024
3025 dm = kmalloc(sizeof(*dm), GFP_KERNEL);
3026 if (!dm)
3027 return -ENOMEM;
3028 dm->ino = ino;
3029 dm->rmdir_ino = 0;
3030 dm->rmdir_gen = 0;
3031 dm->orphanized = orphanized;
3032
3033 while (*p) {
3034 parent = *p;
3035 entry = rb_entry(parent, struct waiting_dir_move, node);
3036 if (ino < entry->ino) {
3037 p = &(*p)->rb_left;
3038 } else if (ino > entry->ino) {
3039 p = &(*p)->rb_right;
3040 } else {
3041 kfree(dm);
3042 return -EEXIST;
3043 }
3044 }
3045
3046 rb_link_node(&dm->node, parent, p);
3047 rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
3048 return 0;
3049}
3050
3051static struct waiting_dir_move *
3052get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
3053{
3054 struct rb_node *n = sctx->waiting_dir_moves.rb_node;
3055 struct waiting_dir_move *entry;
3056
3057 while (n) {
3058 entry = rb_entry(n, struct waiting_dir_move, node);
3059 if (ino < entry->ino)
3060 n = n->rb_left;
3061 else if (ino > entry->ino)
3062 n = n->rb_right;
3063 else
3064 return entry;
3065 }
3066 return NULL;
3067}
3068
3069static void free_waiting_dir_move(struct send_ctx *sctx,
3070 struct waiting_dir_move *dm)
3071{
3072 if (!dm)
3073 return;
3074 rb_erase(&dm->node, &sctx->waiting_dir_moves);
3075 kfree(dm);
3076}
3077
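/*
 * Queue a directory move that can not be applied yet because it depends on
 * the ancestor 'parent_ino' being processed first. Pending moves are kept in
 * an rb-tree keyed by that ancestor, moves waiting for the same ancestor are
 * chained on its list, and the new/deleted refs are duplicated into
 * pm->update_refs so the affected parents can get their utimes updated once
 * the move is finally applied.
 */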
3078static int add_pending_dir_move(struct send_ctx *sctx,
3079 u64 ino,
3080 u64 ino_gen,
3081 u64 parent_ino,
3082 struct list_head *new_refs,
3083 struct list_head *deleted_refs,
3084 const bool is_orphan)
3085{
3086 struct rb_node **p = &sctx->pending_dir_moves.rb_node;
3087 struct rb_node *parent = NULL;
3088 struct pending_dir_move *entry = NULL, *pm;
3089 struct recorded_ref *cur;
3090 int exists = 0;
3091 int ret;
3092
3093 pm = kmalloc(sizeof(*pm), GFP_KERNEL);
3094 if (!pm)
3095 return -ENOMEM;
3096 pm->parent_ino = parent_ino;
3097 pm->ino = ino;
3098 pm->gen = ino_gen;
3099 INIT_LIST_HEAD(&pm->list);
3100 INIT_LIST_HEAD(&pm->update_refs);
3101 RB_CLEAR_NODE(&pm->node);
3102
3103 while (*p) {
3104 parent = *p;
3105 entry = rb_entry(parent, struct pending_dir_move, node);
3106 if (parent_ino < entry->parent_ino) {
3107 p = &(*p)->rb_left;
3108 } else if (parent_ino > entry->parent_ino) {
3109 p = &(*p)->rb_right;
3110 } else {
3111 exists = 1;
3112 break;
3113 }
3114 }
3115
3116 list_for_each_entry(cur, deleted_refs, list) {
3117 ret = dup_ref(cur, &pm->update_refs);
3118 if (ret < 0)
3119 goto out;
3120 }
3121 list_for_each_entry(cur, new_refs, list) {
3122 ret = dup_ref(cur, &pm->update_refs);
3123 if (ret < 0)
3124 goto out;
3125 }
3126
3127 ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
3128 if (ret)
3129 goto out;
3130
3131 if (exists) {
3132 list_add_tail(&pm->list, &entry->list);
3133 } else {
3134 rb_link_node(&pm->node, parent, p);
3135 rb_insert_color(&pm->node, &sctx->pending_dir_moves);
3136 }
3137 ret = 0;
3138out:
3139 if (ret) {
3140 __free_recorded_refs(&pm->update_refs);
3141 kfree(pm);
3142 }
3143 return ret;
3144}
3145
3146static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
3147 u64 parent_ino)
3148{
3149 struct rb_node *n = sctx->pending_dir_moves.rb_node;
3150 struct pending_dir_move *entry;
3151
3152 while (n) {
3153 entry = rb_entry(n, struct pending_dir_move, node);
3154 if (parent_ino < entry->parent_ino)
3155 n = n->rb_left;
3156 else if (parent_ino > entry->parent_ino)
3157 n = n->rb_right;
3158 else
3159 return entry;
3160 }
3161 return NULL;
3162}
3163
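/*
 * Walk up the path from 'ino' towards the subvolume root and check whether we
 * end up back at the inode we started from, which would mean that applying a
 * pending move now creates a path loop. Returns 1 when a loop is detected
 * (with *ancestor_ino set to the inode whose pending move has to be resolved
 * first), 0 if the path is loop free, and <0 on error.
 */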
3164static int path_loop(struct send_ctx *sctx, struct fs_path *name,
3165 u64 ino, u64 gen, u64 *ancestor_ino)
3166{
3167 int ret = 0;
3168 u64 parent_inode = 0;
3169 u64 parent_gen = 0;
3170 u64 start_ino = ino;
3171
3172 *ancestor_ino = 0;
3173 while (ino != BTRFS_FIRST_FREE_OBJECTID) {
3174 fs_path_reset(name);
3175
3176 if (is_waiting_for_rm(sctx, ino, gen))
3177 break;
3178 if (is_waiting_for_move(sctx, ino)) {
3179 if (*ancestor_ino == 0)
3180 *ancestor_ino = ino;
3181 ret = get_first_ref(sctx->parent_root, ino,
3182 &parent_inode, &parent_gen, name);
3183 } else {
3184 ret = __get_cur_name_and_parent(sctx, ino, gen,
3185 &parent_inode,
3186 &parent_gen, name);
3187 if (ret > 0) {
3188 ret = 0;
3189 break;
3190 }
3191 }
3192 if (ret < 0)
3193 break;
3194 if (parent_inode == start_ino) {
3195 ret = 1;
3196 if (*ancestor_ino == 0)
3197 *ancestor_ino = ino;
3198 break;
3199 }
3200 ino = parent_inode;
3201 gen = parent_gen;
3202 }
3203 return ret;
3204}
3205
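/*
 * Apply a previously delayed directory move/rename. If moving the directory
 * now would still create a path loop, the move is re-queued with the detected
 * ancestor as its new dependency. Otherwise the rename is sent, any directory
 * removal that was waiting on this move is retried, and the utimes of the
 * moved directory and of all affected parents are updated.
 */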
3206static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3207{
3208 struct fs_path *from_path = NULL;
3209 struct fs_path *to_path = NULL;
3210 struct fs_path *name = NULL;
3211 u64 orig_progress = sctx->send_progress;
3212 struct recorded_ref *cur;
3213 u64 parent_ino, parent_gen;
3214 struct waiting_dir_move *dm = NULL;
3215 u64 rmdir_ino = 0;
3216 u64 rmdir_gen;
3217 u64 ancestor;
3218 bool is_orphan;
3219 int ret;
3220
3221 name = fs_path_alloc();
3222 from_path = fs_path_alloc();
3223 if (!name || !from_path) {
3224 ret = -ENOMEM;
3225 goto out;
3226 }
3227
3228 dm = get_waiting_dir_move(sctx, pm->ino);
3229 ASSERT(dm);
3230 rmdir_ino = dm->rmdir_ino;
3231 rmdir_gen = dm->rmdir_gen;
3232 is_orphan = dm->orphanized;
3233 free_waiting_dir_move(sctx, dm);
3234
3235 if (is_orphan) {
3236 ret = gen_unique_name(sctx, pm->ino,
3237 pm->gen, from_path);
3238 } else {
3239 ret = get_first_ref(sctx->parent_root, pm->ino,
3240 &parent_ino, &parent_gen, name);
3241 if (ret < 0)
3242 goto out;
3243 ret = get_cur_path(sctx, parent_ino, parent_gen,
3244 from_path);
3245 if (ret < 0)
3246 goto out;
3247 ret = fs_path_add_path(from_path, name);
3248 }
3249 if (ret < 0)
3250 goto out;
3251
3252 sctx->send_progress = sctx->cur_ino + 1;
3253 ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
3254 if (ret < 0)
3255 goto out;
3256 if (ret) {
3257 LIST_HEAD(deleted_refs);
3258 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
3259 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
3260 &pm->update_refs, &deleted_refs,
3261 is_orphan);
3262 if (ret < 0)
3263 goto out;
3264 if (rmdir_ino) {
3265 dm = get_waiting_dir_move(sctx, pm->ino);
3266 ASSERT(dm);
3267 dm->rmdir_ino = rmdir_ino;
3268 dm->rmdir_gen = rmdir_gen;
3269 }
3270 goto out;
3271 }
3272 fs_path_reset(name);
3273 to_path = name;
3274 name = NULL;
3275 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
3276 if (ret < 0)
3277 goto out;
3278
3279 ret = send_rename(sctx, from_path, to_path);
3280 if (ret < 0)
3281 goto out;
3282
3283 if (rmdir_ino) {
3284 struct orphan_dir_info *odi;
3285 u64 gen;
3286
3287 odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen);
3288 if (!odi) {
3289 /* already deleted */
3290 goto finish;
3291 }
3292 gen = odi->gen;
3293
3294 ret = can_rmdir(sctx, rmdir_ino, gen, sctx->cur_ino);
3295 if (ret < 0)
3296 goto out;
3297 if (!ret)
3298 goto finish;
3299
3300 name = fs_path_alloc();
3301 if (!name) {
3302 ret = -ENOMEM;
3303 goto out;
3304 }
3305 ret = get_cur_path(sctx, rmdir_ino, gen, name);
3306 if (ret < 0)
3307 goto out;
3308 ret = send_rmdir(sctx, name);
3309 if (ret < 0)
3310 goto out;
3311 }
3312
3313finish:
3314 ret = send_utimes(sctx, pm->ino, pm->gen);
3315 if (ret < 0)
3316 goto out;
3317
3318 /*
3319 * After rename/move, need to update the utimes of both new parent(s)
3320 * and old parent(s).
3321 */
3322 list_for_each_entry(cur, &pm->update_refs, list) {
3323 /*
3324 * The parent inode might have been deleted in the send snapshot
3325 */
3326 ret = get_inode_info(sctx->send_root, cur->dir, NULL,
3327 NULL, NULL, NULL, NULL, NULL);
3328 if (ret == -ENOENT) {
3329 ret = 0;
3330 continue;
3331 }
3332 if (ret < 0)
3333 goto out;
3334
3335 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3336 if (ret < 0)
3337 goto out;
3338 }
3339
3340out:
3341 fs_path_free(name);
3342 fs_path_free(from_path);
3343 fs_path_free(to_path);
3344 sctx->send_progress = orig_progress;
3345
3346 return ret;
3347}
3348
3349static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
3350{
3351 if (!list_empty(&m->list))
3352 list_del(&m->list);
3353 if (!RB_EMPTY_NODE(&m->node))
3354 rb_erase(&m->node, &sctx->pending_dir_moves);
3355 __free_recorded_refs(&m->update_refs);
3356 kfree(m);
3357}
3358
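/*
 * Append a pending move, together with all moves chained behind it (those
 * waiting on the same parent), to the tail of the processing stack and
 * unlink it from the pending_dir_moves rbtree.
 */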
3359static void tail_append_pending_moves(struct send_ctx *sctx,
3360 struct pending_dir_move *moves,
3361 struct list_head *stack)
3362{
3363 if (list_empty(&moves->list)) {
3364 list_add_tail(&moves->list, stack);
3365 } else {
3366 LIST_HEAD(list);
3367 list_splice_init(&moves->list, &list);
3368 list_add_tail(&moves->list, stack);
3369 list_splice_tail(&list, stack);
3370 }
3371 if (!RB_EMPTY_NODE(&moves->node)) {
3372 rb_erase(&moves->node, &sctx->pending_dir_moves);
3373 RB_CLEAR_NODE(&moves->node);
3374 }
3375}
3376
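/*
 * Apply all rename/move operations that were delayed until the inode we just
 * finished processing (sctx->cur_ino) was processed. Applying one move may
 * unblock moves of its own descendants, so those are appended to the stack
 * and processed as well.
 */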
3377static int apply_children_dir_moves(struct send_ctx *sctx)
3378{
3379 struct pending_dir_move *pm;
3380 struct list_head stack;
3381 u64 parent_ino = sctx->cur_ino;
3382 int ret = 0;
3383
3384 pm = get_pending_dir_moves(sctx, parent_ino);
3385 if (!pm)
3386 return 0;
3387
3388 INIT_LIST_HEAD(&stack);
3389 tail_append_pending_moves(sctx, pm, &stack);
3390
3391 while (!list_empty(&stack)) {
3392 pm = list_first_entry(&stack, struct pending_dir_move, list);
3393 parent_ino = pm->ino;
3394 ret = apply_dir_move(sctx, pm);
3395 free_pending_move(sctx, pm);
3396 if (ret)
3397 goto out;
3398 pm = get_pending_dir_moves(sctx, parent_ino);
3399 if (pm)
3400 tail_append_pending_moves(sctx, pm, &stack);
3401 }
3402 return 0;
3403
3404out:
3405 while (!list_empty(&stack)) {
3406 pm = list_first_entry(&stack, struct pending_dir_move, list);
3407 free_pending_move(sctx, pm);
3408 }
3409 return ret;
3410}
3411
3412/*
3413 * We might need to delay a directory rename even when no ancestor directory
3414 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
3415 * renamed. This happens when we rename a directory to the old name (the name
3416 * in the parent root) of some other unrelated directory that got its rename
3417 * delayed due to some ancestor with higher number that got renamed.
3418 *
3419 * Example:
3420 *
3421 * Parent snapshot:
3422 * . (ino 256)
3423 * |---- a/ (ino 257)
3424 * | |---- file (ino 260)
3425 * |
3426 * |---- b/ (ino 258)
3427 * |---- c/ (ino 259)
3428 *
3429 * Send snapshot:
3430 * . (ino 256)
3431 * |---- a/ (ino 258)
3432 * |---- x/ (ino 259)
3433 * |---- y/ (ino 257)
3434 * |----- file (ino 260)
3435 *
3436 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
3437 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
3438 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
3439 * must issue is:
3440 *
3441 * 1 - rename 259 from 'c' to 'x'
3442 * 2 - rename 257 from 'a' to 'x/y'
3443 * 3 - rename 258 from 'b' to 'a'
3444 *
3445 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
3446 * be done right away and < 0 on error.
3447 */
3448static int wait_for_dest_dir_move(struct send_ctx *sctx,
3449 struct recorded_ref *parent_ref,
3450 const bool is_orphan)
3451{
3452 struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
3453 struct btrfs_path *path;
3454 struct btrfs_key key;
3455 struct btrfs_key di_key;
3456 struct btrfs_dir_item *di;
3457 u64 left_gen;
3458 u64 right_gen;
3459 int ret = 0;
3460 struct waiting_dir_move *wdm;
3461
3462 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
3463 return 0;
3464
3465 path = alloc_path_for_send();
3466 if (!path)
3467 return -ENOMEM;
3468
3469 key.objectid = parent_ref->dir;
3470 key.type = BTRFS_DIR_ITEM_KEY;
3471 key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
3472
3473 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
3474 if (ret < 0) {
3475 goto out;
3476 } else if (ret > 0) {
3477 ret = 0;
3478 goto out;
3479 }
3480
3481 di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
3482 parent_ref->name_len);
3483 if (!di) {
3484 ret = 0;
3485 goto out;
3486 }
3487 /*
3488 * di_key.objectid has the number of the inode that has a dentry in the
3489 * parent directory with the same name as the one sctx->cur_ino is being
3490 * renamed to. We need to check if that inode is in the send root as
3491 * well and if it is currently marked as an inode with a pending rename,
3492 * if it is, we need to delay the rename of sctx->cur_ino as well, so
3493 * that it happens after that other inode is renamed.
3494 */
3495 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
3496 if (di_key.type != BTRFS_INODE_ITEM_KEY) {
3497 ret = 0;
3498 goto out;
3499 }
3500
3501 ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
3502 &left_gen, NULL, NULL, NULL, NULL);
3503 if (ret < 0)
3504 goto out;
3505 ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
3506 &right_gen, NULL, NULL, NULL, NULL);
3507 if (ret < 0) {
3508 if (ret == -ENOENT)
3509 ret = 0;
3510 goto out;
3511 }
3512
3513 /* Different inode, no need to delay the rename of sctx->cur_ino */
3514 if (right_gen != left_gen) {
3515 ret = 0;
3516 goto out;
3517 }
3518
3519 wdm = get_waiting_dir_move(sctx, di_key.objectid);
3520 if (wdm && !wdm->orphanized) {
3521 ret = add_pending_dir_move(sctx,
3522 sctx->cur_ino,
3523 sctx->cur_inode_gen,
3524 di_key.objectid,
3525 &sctx->new_refs,
3526 &sctx->deleted_refs,
3527 is_orphan);
3528 if (!ret)
3529 ret = 1;
3530 }
3531out:
3532 btrfs_free_path(path);
3533 return ret;
3534}
3535
3536/*
3537 * Check if inode ino2, or any of its ancestors, is inode ino1.
3538 * Return 1 if true, 0 if false and < 0 on error.
3539 */
3540static int check_ino_in_path(struct btrfs_root *root,
3541 const u64 ino1,
3542 const u64 ino1_gen,
3543 const u64 ino2,
3544 const u64 ino2_gen,
3545 struct fs_path *fs_path)
3546{
3547 u64 ino = ino2;
3548
3549 if (ino1 == ino2)
3550 return ino1_gen == ino2_gen;
3551
3552 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3553 u64 parent;
3554 u64 parent_gen;
3555 int ret;
3556
3557 fs_path_reset(fs_path);
3558 ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
3559 if (ret < 0)
3560 return ret;
3561 if (parent == ino1)
3562 return parent_gen == ino1_gen;
3563 ino = parent;
3564 }
3565 return 0;
3566}
3567
3568/*
3569 * Check if ino ino1 is an ancestor of inode ino2 in the given root for any
3570 * possible path (in case ino2 is not a directory and has multiple hard links).
3571 * Return 1 if true, 0 if false and < 0 on error.
3572 */
3573static int is_ancestor(struct btrfs_root *root,
3574 const u64 ino1,
3575 const u64 ino1_gen,
3576 const u64 ino2,
3577 struct fs_path *fs_path)
3578{
3579 bool free_fs_path = false;
3580 int ret = 0;
3581 struct btrfs_path *path = NULL;
3582 struct btrfs_key key;
3583
3584 if (!fs_path) {
3585 fs_path = fs_path_alloc();
3586 if (!fs_path)
3587 return -ENOMEM;
3588 free_fs_path = true;
3589 }
3590
3591 path = alloc_path_for_send();
3592 if (!path) {
3593 ret = -ENOMEM;
3594 goto out;
3595 }
3596
3597 key.objectid = ino2;
3598 key.type = BTRFS_INODE_REF_KEY;
3599 key.offset = 0;
3600
3601 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3602 if (ret < 0)
3603 goto out;
3604
3605 while (true) {
3606 struct extent_buffer *leaf = path->nodes[0];
3607 int slot = path->slots[0];
3608 u32 cur_offset = 0;
3609 u32 item_size;
3610
3611 if (slot >= btrfs_header_nritems(leaf)) {
3612 ret = btrfs_next_leaf(root, path);
3613 if (ret < 0)
3614 goto out;
3615 if (ret > 0)
3616 break;
3617 continue;
3618 }
3619
3620 btrfs_item_key_to_cpu(leaf, &key, slot);
3621 if (key.objectid != ino2)
3622 break;
3623 if (key.type != BTRFS_INODE_REF_KEY &&
3624 key.type != BTRFS_INODE_EXTREF_KEY)
3625 break;
3626
3627 item_size = btrfs_item_size_nr(leaf, slot);
3628 while (cur_offset < item_size) {
3629 u64 parent;
3630 u64 parent_gen;
3631
3632 if (key.type == BTRFS_INODE_EXTREF_KEY) {
3633 unsigned long ptr;
3634 struct btrfs_inode_extref *extref;
3635
3636 ptr = btrfs_item_ptr_offset(leaf, slot);
3637 extref = (struct btrfs_inode_extref *)
3638 (ptr + cur_offset);
3639 parent = btrfs_inode_extref_parent(leaf,
3640 extref);
3641 cur_offset += sizeof(*extref);
3642 cur_offset += btrfs_inode_extref_name_len(leaf,
3643 extref);
3644 } else {
3645 parent = key.offset;
3646 cur_offset = item_size;
3647 }
3648
3649 ret = get_inode_info(root, parent, NULL, &parent_gen,
3650 NULL, NULL, NULL, NULL);
3651 if (ret < 0)
3652 goto out;
3653 ret = check_ino_in_path(root, ino1, ino1_gen,
3654 parent, parent_gen, fs_path);
3655 if (ret)
3656 goto out;
3657 }
3658 path->slots[0]++;
3659 }
3660 ret = 0;
3661 out:
3662 btrfs_free_path(path);
3663 if (free_fs_path)
3664 fs_path_free(fs_path);
3665 return ret;
3666}
3667
3668static int wait_for_parent_move(struct send_ctx *sctx,
3669 struct recorded_ref *parent_ref,
3670 const bool is_orphan)
3671{
3672 int ret = 0;
3673 u64 ino = parent_ref->dir;
3674 u64 ino_gen = parent_ref->dir_gen;
3675 u64 parent_ino_before, parent_ino_after;
3676 struct fs_path *path_before = NULL;
3677 struct fs_path *path_after = NULL;
3678 int len1, len2;
3679
3680 path_after = fs_path_alloc();
3681 path_before = fs_path_alloc();
3682 if (!path_after || !path_before) {
3683 ret = -ENOMEM;
3684 goto out;
3685 }
3686
3687 /*
3688 * Our current directory inode may not yet be renamed/moved because some
3689 * ancestor (immediate or not) has to be renamed/moved first. So check if
3690 * such an ancestor exists and make sure our own rename/move happens after
3691 * that ancestor is processed to avoid path build infinite loops (done
3692 * at get_cur_path()).
3693 */
3694 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3695 u64 parent_ino_after_gen;
3696
3697 if (is_waiting_for_move(sctx, ino)) {
3698 /*
3699 * If the current inode is an ancestor of ino in the
3700 * parent root, we need to delay the rename of the
3701 * current inode, otherwise don't delay the rename
3702 * because we can end up with a circular dependency
3703 * of renames, resulting in some directories never
3704 * getting the respective rename operations issued in
3705 * the send stream or getting into infinite path build
3706 * loops.
3707 */
3708 ret = is_ancestor(sctx->parent_root,
3709 sctx->cur_ino, sctx->cur_inode_gen,
3710 ino, path_before);
3711 if (ret)
3712 break;
3713 }
3714
3715 fs_path_reset(path_before);
3716 fs_path_reset(path_after);
3717
3718 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
3719 &parent_ino_after_gen, path_after);
3720 if (ret < 0)
3721 goto out;
3722 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
3723 NULL, path_before);
3724 if (ret < 0 && ret != -ENOENT) {
3725 goto out;
3726 } else if (ret == -ENOENT) {
3727 ret = 0;
3728 break;
3729 }
3730
3731 len1 = fs_path_len(path_before);
3732 len2 = fs_path_len(path_after);
3733 if (ino > sctx->cur_ino &&
3734 (parent_ino_before != parent_ino_after || len1 != len2 ||
3735 memcmp(path_before->start, path_after->start, len1))) {
3736 u64 parent_ino_gen;
3737
3738 ret = get_inode_info(sctx->parent_root, ino, NULL,
3739 &parent_ino_gen, NULL, NULL, NULL,
3740 NULL);
3741 if (ret < 0)
3742 goto out;
3743 if (ino_gen == parent_ino_gen) {
3744 ret = 1;
3745 break;
3746 }
3747 }
3748 ino = parent_ino_after;
3749 ino_gen = parent_ino_after_gen;
3750 }
3751
3752out:
3753 fs_path_free(path_before);
3754 fs_path_free(path_after);
3755
3756 if (ret == 1) {
3757 ret = add_pending_dir_move(sctx,
3758 sctx->cur_ino,
3759 sctx->cur_inode_gen,
3760 ino,
3761 &sctx->new_refs,
3762 &sctx->deleted_refs,
3763 is_orphan);
3764 if (!ret)
3765 ret = 1;
3766 }
3767
3768 return ret;
3769}
3770
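/*
 * Recompute the full path of a recorded reference. Needed when an ancestor
 * directory was orphanized or renamed after the reference was collected, in
 * which case the stored path no longer matches what the receiver has.
 */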
3771static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
3772{
3773 int ret;
3774 struct fs_path *new_path;
3775
3776 /*
3777 * Our reference's name member points to its full_path member string, so
3778 * we use a new path here.
3779 */
3780 new_path = fs_path_alloc();
3781 if (!new_path)
3782 return -ENOMEM;
3783
3784 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
3785 if (ret < 0) {
3786 fs_path_free(new_path);
3787 return ret;
3788 }
3789 ret = fs_path_add(new_path, ref->name, ref->name_len);
3790 if (ret < 0) {
3791 fs_path_free(new_path);
3792 return ret;
3793 }
3794
3795 fs_path_free(ref->full_path);
3796 set_ref_path(ref, new_path);
3797
3798 return 0;
3799}
3800
3801/*
3802 * When processing the new references for an inode we may orphanize an existing
3803 * directory inode because its old name conflicts with one of the new references
3804 * of the current inode. Later, when processing another new reference of our
3805 * inode, we might need to orphanize another inode, but the path we have in the
3806 * reference reflects the pre-orphanization name of the directory we previously
3807 * orphanized. For example:
3808 *
3809 * parent snapshot looks like:
3810 *
3811 * . (ino 256)
3812 * |----- f1 (ino 257)
3813 * |----- f2 (ino 258)
3814 * |----- d1/ (ino 259)
3815 * |----- d2/ (ino 260)
3816 *
3817 * send snapshot looks like:
3818 *
3819 * . (ino 256)
3820 * |----- d1 (ino 258)
3821 * |----- f2/ (ino 259)
3822 * |----- f2_link/ (ino 260)
3823 * | |----- f1 (ino 257)
3824 * |
3825 * |----- d2 (ino 258)
3826 *
3827 * When processing inode 257 we compute the name for inode 259 as "d1", and we
3828 * cache it in the name cache. Later when we start processing inode 258, when
3829 * collecting all its new references we set a full path of "d1/d2" for its new
3830 * reference with name "d2". When we start processing the new references we
3831 * start by processing the new reference with name "d1", and this results in
3832 * orphanizing inode 259, since its old reference causes a conflict. Then we
3833 * move on to the next new reference, with name "d2", and we find out we must
3834 * orphanize inode 260, as its old reference conflicts with ours - but for the
3835 * orphanization we use a source path corresponding to the path we stored in the
3836 * new reference, which is "d1/d2" and not "o259-6-0/d2" - this makes the
3837 * receiver fail since the path component "d1/" no longer exists, it was renamed
3838 * to "o259-6-0/" when processing the previous new reference. So in this case we
3839 * must recompute the path in the new reference and use it for the new
3840 * orphanization operation.
3841 */
3842static int refresh_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
3843{
3844 char *name;
3845 int ret;
3846
3847 name = kmemdup(ref->name, ref->name_len, GFP_KERNEL);
3848 if (!name)
3849 return -ENOMEM;
3850
3851 fs_path_reset(ref->full_path);
3852 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, ref->full_path);
3853 if (ret < 0)
3854 goto out;
3855
3856 ret = fs_path_add(ref->full_path, name, ref->name_len);
3857 if (ret < 0)
3858 goto out;
3859
3860 /* Update the reference's base name pointer. */
3861 set_ref_path(ref, ref->full_path);
3862out:
3863 kfree(name);
3864 return ret;
3865}
3866
3867/*
3868 * This does all the move/link/unlink/rmdir magic.
3869 */
3870static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3871{
3872 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
3873 int ret = 0;
3874 struct recorded_ref *cur;
3875 struct recorded_ref *cur2;
3876 struct list_head check_dirs;
3877 struct fs_path *valid_path = NULL;
3878 u64 ow_inode = 0;
3879 u64 ow_gen;
3880 u64 ow_mode;
3881 int did_overwrite = 0;
3882 int is_orphan = 0;
3883 u64 last_dir_ino_rm = 0;
3884 bool can_rename = true;
3885 bool orphanized_dir = false;
3886 bool orphanized_ancestor = false;
3887
3888 btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
3889
3890 /*
3891 * This should never happen as the root dir always has the same ref
3892 * which is always '..'
3893 */
3894 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
3895 INIT_LIST_HEAD(&check_dirs);
3896
3897 valid_path = fs_path_alloc();
3898 if (!valid_path) {
3899 ret = -ENOMEM;
3900 goto out;
3901 }
3902
3903 /*
3904 * First, check if the first ref of the current inode was overwritten
3905 * before. If yes, we know that the current inode was already orphanized
3906 * and thus use the orphan name. If not, we can use get_cur_path to
3907 * get the path of the first ref as it would look like while receiving at
3908 * this point in time.
3909 * New inodes are always orphan at the beginning, so force to use the
3910 * orphan name in this case.
3911 * The first ref is stored in valid_path and will be updated if it
3912 * gets moved around.
3913 */
3914 if (!sctx->cur_inode_new) {
3915 ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
3916 sctx->cur_inode_gen);
3917 if (ret < 0)
3918 goto out;
3919 if (ret)
3920 did_overwrite = 1;
3921 }
3922 if (sctx->cur_inode_new || did_overwrite) {
3923 ret = gen_unique_name(sctx, sctx->cur_ino,
3924 sctx->cur_inode_gen, valid_path);
3925 if (ret < 0)
3926 goto out;
3927 is_orphan = 1;
3928 } else {
3929 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3930 valid_path);
3931 if (ret < 0)
3932 goto out;
3933 }
3934
3935 /*
3936 * Before doing any rename and link operations, do a first pass on the
3937 * new references to orphanize any unprocessed inodes that may have a
3938 * reference that conflicts with one of the new references of the current
3939 * inode. This needs to happen first because a new reference may conflict
3940 * with the old reference of a parent directory, so we must make sure
3941 * that the paths used for link and rename commands don't use an
3942 * orphanized name when an ancestor was not yet orphanized.
3943 *
3944 * Example:
3945 *
3946 * Parent snapshot:
3947 *
3948 * . (ino 256)
3949 * |----- testdir/ (ino 259)
3950 * | |----- a (ino 257)
3951 * |
3952 * |----- b (ino 258)
3953 *
3954 * Send snapshot:
3955 *
3956 * . (ino 256)
3957 * |----- testdir_2/ (ino 259)
3958 * | |----- a (ino 260)
3959 * |
3960 * |----- testdir (ino 257)
3961 * |----- b (ino 257)
3962 * |----- b2 (ino 258)
3963 *
3964 * Processing the new reference for inode 257 with name "b" may happen
3965 * before processing the new reference with name "testdir". If so, we
3966 * must make sure that by the time we send a link command to create the
3967 * hard link "b", inode 259 was already orphanized, since the generated
3968 * path in "valid_path" already contains the orphanized name for 259.
3969 * We are processing inode 257, so only later when processing 259 we do
3970 * the rename operation to change its temporary (orphanized) name to
3971 * "testdir_2".
3972 */
3973 list_for_each_entry(cur, &sctx->new_refs, list) {
3974 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
3975 if (ret < 0)
3976 goto out;
3977 if (ret == inode_state_will_create)
3978 continue;
3979
3980 /*
3981 * Check if this new ref would overwrite the first ref of another
3982 * unprocessed inode. If yes, orphanize the overwritten inode.
3983 * If we find an overwritten ref that is not the first ref,
3984 * simply unlink it.
3985 */
3986 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
3987 cur->name, cur->name_len,
3988 &ow_inode, &ow_gen, &ow_mode);
3989 if (ret < 0)
3990 goto out;
3991 if (ret) {
3992 ret = is_first_ref(sctx->parent_root,
3993 ow_inode, cur->dir, cur->name,
3994 cur->name_len);
3995 if (ret < 0)
3996 goto out;
3997 if (ret) {
3998 struct name_cache_entry *nce;
3999 struct waiting_dir_move *wdm;
4000
4001 if (orphanized_dir) {
4002 ret = refresh_ref_path(sctx, cur);
4003 if (ret < 0)
4004 goto out;
4005 }
4006
4007 ret = orphanize_inode(sctx, ow_inode, ow_gen,
4008 cur->full_path);
4009 if (ret < 0)
4010 goto out;
4011 if (S_ISDIR(ow_mode))
4012 orphanized_dir = true;
4013
4014 /*
4015 * If ow_inode has its rename operation delayed
4016 * make sure that its orphanized name is used in
4017 * the source path when performing its rename
4018 * operation.
4019 */
4020 if (is_waiting_for_move(sctx, ow_inode)) {
4021 wdm = get_waiting_dir_move(sctx,
4022 ow_inode);
4023 ASSERT(wdm);
4024 wdm->orphanized = true;
4025 }
4026
4027 /*
4028 * Make sure we clear our orphanized inode's
4029 * name from the name cache. This is because the
4030 * inode ow_inode might be an ancestor of some
4031 * other inode that will be orphanized as well
4032 * later and has an inode number greater than
4033 * sctx->send_progress. We need to prevent
4034 * future name lookups from using the old name
4035 * and get instead the orphan name.
4036 */
4037 nce = name_cache_search(sctx, ow_inode, ow_gen);
4038 if (nce) {
4039 name_cache_delete(sctx, nce);
4040 kfree(nce);
4041 }
4042
4043 /*
4044 * ow_inode might currently be an ancestor of
4045 * cur_ino, therefore compute valid_path (the
4046 * current path of cur_ino) again because it
4047 * might contain the pre-orphanization name of
4048 * ow_inode, which is no longer valid.
4049 */
4050 ret = is_ancestor(sctx->parent_root,
4051 ow_inode, ow_gen,
4052 sctx->cur_ino, NULL);
4053 if (ret > 0) {
4054 orphanized_ancestor = true;
4055 fs_path_reset(valid_path);
4056 ret = get_cur_path(sctx, sctx->cur_ino,
4057 sctx->cur_inode_gen,
4058 valid_path);
4059 }
4060 if (ret < 0)
4061 goto out;
4062 } else {
4063 /*
4064 * If we previously orphanized a directory that
4065 * collided with a new reference that we already
4066 * processed, recompute the current path because
4067 * that directory may be part of the path.
4068 */
4069 if (orphanized_dir) {
4070 ret = refresh_ref_path(sctx, cur);
4071 if (ret < 0)
4072 goto out;
4073 }
4074 ret = send_unlink(sctx, cur->full_path);
4075 if (ret < 0)
4076 goto out;
4077 }
4078 }
4079
4080 }
4081
4082 list_for_each_entry(cur, &sctx->new_refs, list) {
4083 /*
4084 * We may have refs where the parent directory does not exist
4085 * yet. This happens if the parent directory's inum is higher
4086 * than the current inum. To handle this case, we create the
4087 * parent directory out of order. But we need to check if this
4088 * did already happen before due to other refs in the same dir.
4089 */
4090 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4091 if (ret < 0)
4092 goto out;
4093 if (ret == inode_state_will_create) {
4094 ret = 0;
4095 /*
4096 * First check if any of the current inodes refs did
4097 * already create the dir.
4098 */
4099 list_for_each_entry(cur2, &sctx->new_refs, list) {
4100 if (cur == cur2)
4101 break;
4102 if (cur2->dir == cur->dir) {
4103 ret = 1;
4104 break;
4105 }
4106 }
4107
4108 /*
4109 * If that did not happen, check if a previous inode
4110 * did already create the dir.
4111 */
4112 if (!ret)
4113 ret = did_create_dir(sctx, cur->dir);
4114 if (ret < 0)
4115 goto out;
4116 if (!ret) {
4117 ret = send_create_inode(sctx, cur->dir);
4118 if (ret < 0)
4119 goto out;
4120 }
4121 }
4122
4123 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
4124 ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
4125 if (ret < 0)
4126 goto out;
4127 if (ret == 1) {
4128 can_rename = false;
4129 *pending_move = 1;
4130 }
4131 }
4132
4133 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
4134 can_rename) {
4135 ret = wait_for_parent_move(sctx, cur, is_orphan);
4136 if (ret < 0)
4137 goto out;
4138 if (ret == 1) {
4139 can_rename = false;
4140 *pending_move = 1;
4141 }
4142 }
4143
4144 /*
4145 * link/move the ref to the new place. If we have an orphan
4146 * inode, move it and update valid_path. If not, link or move
4147 * it depending on the inode mode.
4148 */
4149 if (is_orphan && can_rename) {
4150 ret = send_rename(sctx, valid_path, cur->full_path);
4151 if (ret < 0)
4152 goto out;
4153 is_orphan = 0;
4154 ret = fs_path_copy(valid_path, cur->full_path);
4155 if (ret < 0)
4156 goto out;
4157 } else if (can_rename) {
4158 if (S_ISDIR(sctx->cur_inode_mode)) {
4159 /*
4160 * Dirs can't be linked, so move it. For moved
4161 * dirs, we always have one new and one deleted
4162 * ref. The deleted ref is ignored later.
4163 */
4164 ret = send_rename(sctx, valid_path,
4165 cur->full_path);
4166 if (!ret)
4167 ret = fs_path_copy(valid_path,
4168 cur->full_path);
4169 if (ret < 0)
4170 goto out;
4171 } else {
4172 /*
4173 * We might have previously orphanized an inode
4174 * which is an ancestor of our current inode,
4175 * so our reference's full path, which was
4176 * computed before any such orphanizations, must
4177 * be updated.
4178 */
4179 if (orphanized_dir) {
4180 ret = update_ref_path(sctx, cur);
4181 if (ret < 0)
4182 goto out;
4183 }
4184 ret = send_link(sctx, cur->full_path,
4185 valid_path);
4186 if (ret < 0)
4187 goto out;
4188 }
4189 }
4190 ret = dup_ref(cur, &check_dirs);
4191 if (ret < 0)
4192 goto out;
4193 }
4194
4195 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
4196 /*
4197 * Check if we can already rmdir the directory. If not,
4198 * orphanize it. For every dir item inside that gets deleted
4199 * later, we do this check again and rmdir it then if possible.
4200 * See the use of check_dirs for more details.
4201 */
4202 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
4203 sctx->cur_ino);
4204 if (ret < 0)
4205 goto out;
4206 if (ret) {
4207 ret = send_rmdir(sctx, valid_path);
4208 if (ret < 0)
4209 goto out;
4210 } else if (!is_orphan) {
4211 ret = orphanize_inode(sctx, sctx->cur_ino,
4212 sctx->cur_inode_gen, valid_path);
4213 if (ret < 0)
4214 goto out;
4215 is_orphan = 1;
4216 }
4217
4218 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4219 ret = dup_ref(cur, &check_dirs);
4220 if (ret < 0)
4221 goto out;
4222 }
4223 } else if (S_ISDIR(sctx->cur_inode_mode) &&
4224 !list_empty(&sctx->deleted_refs)) {
4225 /*
4226 * We have a moved dir. Add the old parent to check_dirs
4227 */
4228 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
4229 list);
4230 ret = dup_ref(cur, &check_dirs);
4231 if (ret < 0)
4232 goto out;
4233 } else if (!S_ISDIR(sctx->cur_inode_mode)) {
4234 /*
4235 * We have a non dir inode. Go through all deleted refs and
4236 * unlink them if they were not already overwritten by other
4237 * inodes.
4238 */
4239 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4240 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4241 sctx->cur_ino, sctx->cur_inode_gen,
4242 cur->name, cur->name_len);
4243 if (ret < 0)
4244 goto out;
4245 if (!ret) {
4246 /*
4247 * If we orphanized any ancestor before, we need
4248 * to recompute the full path for deleted names,
4249 * since any such path was computed before we
4250 * processed any references and orphanized any
4251 * ancestor inode.
4252 */
4253 if (orphanized_ancestor) {
4254 ret = update_ref_path(sctx, cur);
4255 if (ret < 0)
4256 goto out;
4257 }
4258 ret = send_unlink(sctx, cur->full_path);
4259 if (ret < 0)
4260 goto out;
4261 }
4262 ret = dup_ref(cur, &check_dirs);
4263 if (ret < 0)
4264 goto out;
4265 }
4266 /*
4267 * If the inode is still orphan, unlink the orphan. This may
4268 * happen when a previous inode did overwrite the first ref
4269 * of this inode and no new refs were added for the current
4270 * inode. Unlinking does not mean that the inode is deleted in
4271 * all cases. There may still be links to this inode in other
4272 * places.
4273 */
4274 if (is_orphan) {
4275 ret = send_unlink(sctx, valid_path);
4276 if (ret < 0)
4277 goto out;
4278 }
4279 }
4280
4281 /*
4282 * We did collect all parent dirs where cur_inode was once located. We
4283 * now go through all these dirs and check if they are pending for
4284 * deletion and if it's finally possible to perform the rmdir now.
4285 * We also update the inode stats of the parent dirs here.
4286 */
4287 list_for_each_entry(cur, &check_dirs, list) {
4288 /*
4289 * In case we had refs into dirs that were not processed yet,
4290 * we don't need to do the utime and rmdir logic for these dirs.
4291 * The dir will be processed later.
4292 */
4293 if (cur->dir > sctx->cur_ino)
4294 continue;
4295
4296 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4297 if (ret < 0)
4298 goto out;
4299
4300 if (ret == inode_state_did_create ||
4301 ret == inode_state_no_change) {
4302 /* TODO delayed utimes */
4303 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
4304 if (ret < 0)
4305 goto out;
4306 } else if (ret == inode_state_did_delete &&
4307 cur->dir != last_dir_ino_rm) {
4308 ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
4309 sctx->cur_ino);
4310 if (ret < 0)
4311 goto out;
4312 if (ret) {
4313 ret = get_cur_path(sctx, cur->dir,
4314 cur->dir_gen, valid_path);
4315 if (ret < 0)
4316 goto out;
4317 ret = send_rmdir(sctx, valid_path);
4318 if (ret < 0)
4319 goto out;
4320 last_dir_ino_rm = cur->dir;
4321 }
4322 }
4323 }
4324
4325 ret = 0;
4326
4327out:
4328 __free_recorded_refs(&check_dirs);
4329 free_recorded_refs(sctx);
4330 fs_path_free(valid_path);
4331 return ret;
4332}
4333
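/*
 * Helper for the new/deleted reference iterators: resolve the current path of
 * the parent directory 'dir', append 'name' to it and add the result to the
 * given list of recorded references. The allocated fs_path is owned by the
 * recorded reference and is only freed here on failure.
 */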
4334static int record_ref(struct btrfs_root *root, u64 dir, struct fs_path *name,
4335 void *ctx, struct list_head *refs)
4336{
4337 int ret = 0;
4338 struct send_ctx *sctx = ctx;
4339 struct fs_path *p;
4340 u64 gen;
4341
4342 p = fs_path_alloc();
4343 if (!p)
4344 return -ENOMEM;
4345
4346 ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
4347 NULL, NULL);
4348 if (ret < 0)
4349 goto out;
4350
4351 ret = get_cur_path(sctx, dir, gen, p);
4352 if (ret < 0)
4353 goto out;
4354 ret = fs_path_add_path(p, name);
4355 if (ret < 0)
4356 goto out;
4357
4358 ret = __record_ref(refs, dir, gen, p);
4359
4360out:
4361 if (ret)
4362 fs_path_free(p);
4363 return ret;
4364}
4365
4366static int __record_new_ref(int num, u64 dir, int index,
4367 struct fs_path *name,
4368 void *ctx)
4369{
4370 struct send_ctx *sctx = ctx;
4371 return record_ref(sctx->send_root, dir, name, ctx, &sctx->new_refs);
4372}
4373
4374
4375static int __record_deleted_ref(int num, u64 dir, int index,
4376 struct fs_path *name,
4377 void *ctx)
4378{
4379 struct send_ctx *sctx = ctx;
4380 return record_ref(sctx->parent_root, dir, name, ctx,
4381 &sctx->deleted_refs);
4382}
4383
4384static int record_new_ref(struct send_ctx *sctx)
4385{
4386 int ret;
4387
4388 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4389 sctx->cmp_key, 0, __record_new_ref, sctx);
4390 if (ret < 0)
4391 goto out;
4392 ret = 0;
4393
4394out:
4395 return ret;
4396}
4397
4398static int record_deleted_ref(struct send_ctx *sctx)
4399{
4400 int ret;
4401
4402 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4403 sctx->cmp_key, 0, __record_deleted_ref, sctx);
4404 if (ret < 0)
4405 goto out;
4406 ret = 0;
4407
4408out:
4409 return ret;
4410}
4411
4412struct find_ref_ctx {
4413 u64 dir;
4414 u64 dir_gen;
4415 struct btrfs_root *root;
4416 struct fs_path *name;
4417 int found_idx;
4418};
4419
4420static int __find_iref(int num, u64 dir, int index,
4421 struct fs_path *name,
4422 void *ctx_)
4423{
4424 struct find_ref_ctx *ctx = ctx_;
4425 u64 dir_gen;
4426 int ret;
4427
4428 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
4429 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
4430 /*
4431 * To avoid doing extra lookups we'll only do this if everything
4432 * else matches.
4433 */
4434 ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
4435 NULL, NULL, NULL);
4436 if (ret)
4437 return ret;
4438 if (dir_gen != ctx->dir_gen)
4439 return 0;
4440 ctx->found_idx = num;
4441 return 1;
4442 }
4443 return 0;
4444}
4445
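/*
 * Search the inode reference item at 'path'/'key' for a reference matching
 * the given directory, directory generation and name. Returns the index of
 * the matching reference, -ENOENT if none is found, or < 0 on error.
 */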
4446static int find_iref(struct btrfs_root *root,
4447 struct btrfs_path *path,
4448 struct btrfs_key *key,
4449 u64 dir, u64 dir_gen, struct fs_path *name)
4450{
4451 int ret;
4452 struct find_ref_ctx ctx;
4453
4454 ctx.dir = dir;
4455 ctx.name = name;
4456 ctx.dir_gen = dir_gen;
4457 ctx.found_idx = -1;
4458 ctx.root = root;
4459
4460 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
4461 if (ret < 0)
4462 return ret;
4463
4464 if (ctx.found_idx == -1)
4465 return -ENOENT;
4466
4467 return ctx.found_idx;
4468}
4469
4470static int __record_changed_new_ref(int num, u64 dir, int index,
4471 struct fs_path *name,
4472 void *ctx)
4473{
4474 u64 dir_gen;
4475 int ret;
4476 struct send_ctx *sctx = ctx;
4477
4478 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
4479 NULL, NULL, NULL);
4480 if (ret)
4481 return ret;
4482
4483 ret = find_iref(sctx->parent_root, sctx->right_path,
4484 sctx->cmp_key, dir, dir_gen, name);
4485 if (ret == -ENOENT)
4486 ret = __record_new_ref(num, dir, index, name, sctx);
4487 else if (ret > 0)
4488 ret = 0;
4489
4490 return ret;
4491}
4492
4493static int __record_changed_deleted_ref(int num, u64 dir, int index,
4494 struct fs_path *name,
4495 void *ctx)
4496{
4497 u64 dir_gen;
4498 int ret;
4499 struct send_ctx *sctx = ctx;
4500
4501 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
4502 NULL, NULL, NULL);
4503 if (ret)
4504 return ret;
4505
4506 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
4507 dir, dir_gen, name);
4508 if (ret == -ENOENT)
4509 ret = __record_deleted_ref(num, dir, index, name, sctx);
4510 else if (ret > 0)
4511 ret = 0;
4512
4513 return ret;
4514}
4515
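/*
 * For an inode whose reference item exists in both snapshots, record as new
 * the references that only exist in the send root and as deleted the ones
 * that only exist in the parent root.
 */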
4516static int record_changed_ref(struct send_ctx *sctx)
4517{
4518 int ret = 0;
4519
4520 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4521 sctx->cmp_key, 0, __record_changed_new_ref, sctx);
4522 if (ret < 0)
4523 goto out;
4524 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4525 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
4526 if (ret < 0)
4527 goto out;
4528 ret = 0;
4529
4530out:
4531 return ret;
4532}
4533
4534/*
4535 * Record and process all refs at once. Needed when an inode changes the
4536 * generation number, which means that it was deleted and recreated.
4537 */
4538static int process_all_refs(struct send_ctx *sctx,
4539 enum btrfs_compare_tree_result cmd)
4540{
4541 int ret;
4542 struct btrfs_root *root;
4543 struct btrfs_path *path;
4544 struct btrfs_key key;
4545 struct btrfs_key found_key;
4546 struct extent_buffer *eb;
4547 int slot;
4548 iterate_inode_ref_t cb;
4549 int pending_move = 0;
4550
4551 path = alloc_path_for_send();
4552 if (!path)
4553 return -ENOMEM;
4554
4555 if (cmd == BTRFS_COMPARE_TREE_NEW) {
4556 root = sctx->send_root;
4557 cb = __record_new_ref;
4558 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
4559 root = sctx->parent_root;
4560 cb = __record_deleted_ref;
4561 } else {
4562 btrfs_err(sctx->send_root->fs_info,
4563 "Wrong command %d in process_all_refs", cmd);
4564 ret = -EINVAL;
4565 goto out;
4566 }
4567
4568 key.objectid = sctx->cmp_key->objectid;
4569 key.type = BTRFS_INODE_REF_KEY;
4570 key.offset = 0;
4571 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4572 if (ret < 0)
4573 goto out;
4574
4575 while (1) {
4576 eb = path->nodes[0];
4577 slot = path->slots[0];
4578 if (slot >= btrfs_header_nritems(eb)) {
4579 ret = btrfs_next_leaf(root, path);
4580 if (ret < 0)
4581 goto out;
4582 else if (ret > 0)
4583 break;
4584 continue;
4585 }
4586
4587 btrfs_item_key_to_cpu(eb, &found_key, slot);
4588
4589 if (found_key.objectid != key.objectid ||
4590 (found_key.type != BTRFS_INODE_REF_KEY &&
4591 found_key.type != BTRFS_INODE_EXTREF_KEY))
4592 break;
4593
4594 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
4595 if (ret < 0)
4596 goto out;
4597
4598 path->slots[0]++;
4599 }
4600 btrfs_release_path(path);
4601
4602 /*
4603 * We don't actually care about pending_move as we are simply
4604 * re-creating this inode and will be renaming it into place once we
4605 * rename the parent directory.
4606 */
4607 ret = process_recorded_refs(sctx, &pending_move);
4608out:
4609 btrfs_free_path(path);
4610 return ret;
4611}
4612
4613static int send_set_xattr(struct send_ctx *sctx,
4614 struct fs_path *path,
4615 const char *name, int name_len,
4616 const char *data, int data_len)
4617{
4618 int ret = 0;
4619
4620 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
4621 if (ret < 0)
4622 goto out;
4623
4624 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4625 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4626 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
4627
4628 ret = send_cmd(sctx);
4629
4630tlv_put_failure:
4631out:
4632 return ret;
4633}
4634
4635static int send_remove_xattr(struct send_ctx *sctx,
4636 struct fs_path *path,
4637 const char *name, int name_len)
4638{
4639 int ret = 0;
4640
4641 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
4642 if (ret < 0)
4643 goto out;
4644
4645 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4646 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4647
4648 ret = send_cmd(sctx);
4649
4650tlv_put_failure:
4651out:
4652 return ret;
4653}
4654
4655static int __process_new_xattr(int num, struct btrfs_key *di_key,
4656 const char *name, int name_len,
4657 const char *data, int data_len,
4658 u8 type, void *ctx)
4659{
4660 int ret;
4661 struct send_ctx *sctx = ctx;
4662 struct fs_path *p;
4663 struct posix_acl_xattr_header dummy_acl;
4664
4665 /* Capabilities are emitted by finish_inode_if_needed */
4666 if (!strncmp(name, XATTR_NAME_CAPS, name_len))
4667 return 0;
4668
4669 p = fs_path_alloc();
4670 if (!p)
4671 return -ENOMEM;
4672
4673 /*
4674 * This hack is needed because empty acls are stored as zero byte
4675 * data in xattrs. The problem is that receiving these zero byte
4676 * acls will fail later. To fix this, we send a dummy acl list that
4677 * only contains the version number and no entries.
4678 */
4679 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
4680 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
4681 if (data_len == 0) {
4682 dummy_acl.a_version =
4683 cpu_to_le32(POSIX_ACL_XATTR_VERSION);
4684 data = (char *)&dummy_acl;
4685 data_len = sizeof(dummy_acl);
4686 }
4687 }
4688
4689 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4690 if (ret < 0)
4691 goto out;
4692
4693 ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
4694
4695out:
4696 fs_path_free(p);
4697 return ret;
4698}
4699
4700static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
4701 const char *name, int name_len,
4702 const char *data, int data_len,
4703 u8 type, void *ctx)
4704{
4705 int ret;
4706 struct send_ctx *sctx = ctx;
4707 struct fs_path *p;
4708
4709 p = fs_path_alloc();
4710 if (!p)
4711 return -ENOMEM;
4712
4713 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4714 if (ret < 0)
4715 goto out;
4716
4717 ret = send_remove_xattr(sctx, p, name, name_len);
4718
4719out:
4720 fs_path_free(p);
4721 return ret;
4722}
4723
4724static int process_new_xattr(struct send_ctx *sctx)
4725{
4726 int ret = 0;
4727
4728 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4729 __process_new_xattr, sctx);
4730
4731 return ret;
4732}
4733
4734static int process_deleted_xattr(struct send_ctx *sctx)
4735{
4736 return iterate_dir_item(sctx->parent_root, sctx->right_path,
4737 __process_deleted_xattr, sctx);
4738}
4739
4740struct find_xattr_ctx {
4741 const char *name;
4742 int name_len;
4743 int found_idx;
4744 char *found_data;
4745 int found_data_len;
4746};
4747
4748static int __find_xattr(int num, struct btrfs_key *di_key,
4749 const char *name, int name_len,
4750 const char *data, int data_len,
4751 u8 type, void *vctx)
4752{
4753 struct find_xattr_ctx *ctx = vctx;
4754
4755 if (name_len == ctx->name_len &&
4756 strncmp(name, ctx->name, name_len) == 0) {
4757 ctx->found_idx = num;
4758 ctx->found_data_len = data_len;
4759 ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
4760 if (!ctx->found_data)
4761 return -ENOMEM;
4762 return 1;
4763 }
4764 return 0;
4765}
4766
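/*
 * Look up an xattr with the given name in the dir item at 'path'/'key'.
 * Returns the index of the xattr and, if 'data' is not NULL, a copy of its
 * value which the caller must kfree(). Returns -ENOENT if the xattr does
 * not exist.
 */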
4767static int find_xattr(struct btrfs_root *root,
4768 struct btrfs_path *path,
4769 struct btrfs_key *key,
4770 const char *name, int name_len,
4771 char **data, int *data_len)
4772{
4773 int ret;
4774 struct find_xattr_ctx ctx;
4775
4776 ctx.name = name;
4777 ctx.name_len = name_len;
4778 ctx.found_idx = -1;
4779 ctx.found_data = NULL;
4780 ctx.found_data_len = 0;
4781
4782 ret = iterate_dir_item(root, path, __find_xattr, &ctx);
4783 if (ret < 0)
4784 return ret;
4785
4786 if (ctx.found_idx == -1)
4787 return -ENOENT;
4788 if (data) {
4789 *data = ctx.found_data;
4790 *data_len = ctx.found_data_len;
4791 } else {
4792 kfree(ctx.found_data);
4793 }
4794 return ctx.found_idx;
4795}
4796
4797
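/*
 * Emit a set xattr command only if the xattr does not exist in the parent
 * snapshot or if its data changed.
 */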
4798static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
4799 const char *name, int name_len,
4800 const char *data, int data_len,
4801 u8 type, void *ctx)
4802{
4803 int ret;
4804 struct send_ctx *sctx = ctx;
4805 char *found_data = NULL;
4806 int found_data_len = 0;
4807
4808 ret = find_xattr(sctx->parent_root, sctx->right_path,
4809 sctx->cmp_key, name, name_len, &found_data,
4810 &found_data_len);
4811 if (ret == -ENOENT) {
4812 ret = __process_new_xattr(num, di_key, name, name_len, data,
4813 data_len, type, ctx);
4814 } else if (ret >= 0) {
4815 if (data_len != found_data_len ||
4816 memcmp(data, found_data, data_len)) {
4817 ret = __process_new_xattr(num, di_key, name, name_len,
4818 data, data_len, type, ctx);
4819 } else {
4820 ret = 0;
4821 }
4822 }
4823
4824 kfree(found_data);
4825 return ret;
4826}
4827
4828static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
4829 const char *name, int name_len,
4830 const char *data, int data_len,
4831 u8 type, void *ctx)
4832{
4833 int ret;
4834 struct send_ctx *sctx = ctx;
4835
4836 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
4837 name, name_len, NULL, NULL);
4838 if (ret == -ENOENT)
4839 ret = __process_deleted_xattr(num, di_key, name, name_len, data,
4840 data_len, type, ctx);
4841 else if (ret >= 0)
4842 ret = 0;
4843
4844 return ret;
4845}
4846
4847static int process_changed_xattr(struct send_ctx *sctx)
4848{
4849 int ret = 0;
4850
4851 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4852 __process_changed_new_xattr, sctx);
4853 if (ret < 0)
4854 goto out;
4855 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
4856 __process_changed_deleted_xattr, sctx);
4857
4858out:
4859 return ret;
4860}
4861
4862static int process_all_new_xattrs(struct send_ctx *sctx)
4863{
4864 int ret;
4865 struct btrfs_root *root;
4866 struct btrfs_path *path;
4867 struct btrfs_key key;
4868 struct btrfs_key found_key;
4869 struct extent_buffer *eb;
4870 int slot;
4871
4872 path = alloc_path_for_send();
4873 if (!path)
4874 return -ENOMEM;
4875
4876 root = sctx->send_root;
4877
4878 key.objectid = sctx->cmp_key->objectid;
4879 key.type = BTRFS_XATTR_ITEM_KEY;
4880 key.offset = 0;
4881 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4882 if (ret < 0)
4883 goto out;
4884
4885 while (1) {
4886 eb = path->nodes[0];
4887 slot = path->slots[0];
4888 if (slot >= btrfs_header_nritems(eb)) {
4889 ret = btrfs_next_leaf(root, path);
4890 if (ret < 0) {
4891 goto out;
4892 } else if (ret > 0) {
4893 ret = 0;
4894 break;
4895 }
4896 continue;
4897 }
4898
4899 btrfs_item_key_to_cpu(eb, &found_key, slot);
4900 if (found_key.objectid != key.objectid ||
4901 found_key.type != key.type) {
4902 ret = 0;
4903 goto out;
4904 }
4905
4906 ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
4907 if (ret < 0)
4908 goto out;
4909
4910 path->slots[0]++;
4911 }
4912
4913out:
4914 btrfs_free_path(path);
4915 return ret;
4916}
4917
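/*
 * Maximum amount of file data we put in a single write command, leaving room
 * in the send buffer for the command header and the other attributes (path,
 * file offset).
 */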
4918static inline u64 max_send_read_size(const struct send_ctx *sctx)
4919{
4920 return sctx->send_max_size - SZ_16K;
4921}
4922
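/*
 * Write the TLV header for a data attribute directly into the send buffer.
 * The payload is copied right after it by the caller. Returns -EOVERFLOW if
 * the buffer does not have enough room left.
 */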
4923static int put_data_header(struct send_ctx *sctx, u32 len)
4924{
4925 struct btrfs_tlv_header *hdr;
4926
4927 if (sctx->send_max_size - sctx->send_size < sizeof(*hdr) + len)
4928 return -EOVERFLOW;
4929 hdr = (struct btrfs_tlv_header *)(sctx->send_buf + sctx->send_size);
4930 put_unaligned_le16(BTRFS_SEND_A_DATA, &hdr->tlv_type);
4931 put_unaligned_le16(len, &hdr->tlv_len);
4932 sctx->send_size += sizeof(*hdr);
4933 return 0;
4934}
4935
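/*
 * Copy 'len' bytes starting at 'offset' of the inode currently being
 * processed from its page cache into the send buffer, right after a data
 * attribute header. Pages that are not yet up to date are read in, with
 * readahead issued for the remaining range.
 */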
4936static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
4937{
4938 struct btrfs_root *root = sctx->send_root;
4939 struct btrfs_fs_info *fs_info = root->fs_info;
4940 struct inode *inode;
4941 struct page *page;
4942 pgoff_t index = offset >> PAGE_SHIFT;
4943 pgoff_t last_index;
4944 unsigned pg_offset = offset_in_page(offset);
4945 int ret;
4946
4947 ret = put_data_header(sctx, len);
4948 if (ret)
4949 return ret;
4950
4951 inode = btrfs_iget(fs_info->sb, sctx->cur_ino, root);
4952 if (IS_ERR(inode))
4953 return PTR_ERR(inode);
4954
4955 last_index = (offset + len - 1) >> PAGE_SHIFT;
4956
4957 /* initial readahead */
4958 memset(&sctx->ra, 0, sizeof(struct file_ra_state));
4959 file_ra_state_init(&sctx->ra, inode->i_mapping);
4960
4961 while (index <= last_index) {
4962 unsigned cur_len = min_t(unsigned, len,
4963 PAGE_SIZE - pg_offset);
4964
4965 page = find_lock_page(inode->i_mapping, index);
4966 if (!page) {
4967 page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
4968 NULL, index, last_index + 1 - index);
4969
4970 page = find_or_create_page(inode->i_mapping, index,
4971 GFP_KERNEL);
4972 if (!page) {
4973 ret = -ENOMEM;
4974 break;
4975 }
4976 }
4977
4978 if (PageReadahead(page)) {
4979 page_cache_async_readahead(inode->i_mapping, &sctx->ra,
4980 NULL, page, index, last_index + 1 - index);
4981 }
4982
4983 if (!PageUptodate(page)) {
4984 btrfs_readpage(NULL, page);
4985 lock_page(page);
4986 if (!PageUptodate(page)) {
4987 unlock_page(page);
4988 put_page(page);
4989 ret = -EIO;
4990 break;
4991 }
4992 }
4993
4994 memcpy_from_page(sctx->send_buf + sctx->send_size, page,
4995 pg_offset, cur_len);
4996 unlock_page(page);
4997 put_page(page);
4998 index++;
4999 pg_offset = 0;
5000 len -= cur_len;
5001 sctx->send_size += cur_len;
5002 }
5003 iput(inode);
5004 return ret;
5005}
5006
5007/*
5008 * Read some bytes from the current inode/file and send a write command to
5009 * user space.
5010 */
5011static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
5012{
5013 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
5014 int ret = 0;
5015 struct fs_path *p;
5016
5017 p = fs_path_alloc();
5018 if (!p)
5019 return -ENOMEM;
5020
5021 btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
5022
5023 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5024 if (ret < 0)
5025 goto out;
5026
5027 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5028 if (ret < 0)
5029 goto out;
5030
5031 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5032 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5033 ret = put_file_data(sctx, offset, len);
5034 if (ret < 0)
5035 goto out;
5036
5037 ret = send_cmd(sctx);
5038
5039tlv_put_failure:
5040out:
5041 fs_path_free(p);
5042 return ret;
5043}
5044
5045/*
5046 * Send a clone command to user space.
5047 */
5048static int send_clone(struct send_ctx *sctx,
5049 u64 offset, u32 len,
5050 struct clone_root *clone_root)
5051{
5052 int ret = 0;
5053 struct fs_path *p;
5054 u64 gen;
5055
5056 btrfs_debug(sctx->send_root->fs_info,
5057 "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
5058 offset, len, clone_root->root->root_key.objectid,
5059 clone_root->ino, clone_root->offset);
5060
5061 p = fs_path_alloc();
5062 if (!p)
5063 return -ENOMEM;
5064
5065 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
5066 if (ret < 0)
5067 goto out;
5068
5069 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5070 if (ret < 0)
5071 goto out;
5072
5073 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5074 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
5075 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5076
5077 if (clone_root->root == sctx->send_root) {
5078 ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
5079 &gen, NULL, NULL, NULL, NULL);
5080 if (ret < 0)
5081 goto out;
5082 ret = get_cur_path(sctx, clone_root->ino, gen, p);
5083 } else {
5084 ret = get_inode_path(clone_root->root, clone_root->ino, p);
5085 }
5086 if (ret < 0)
5087 goto out;
5088
5089 /*
5090 * If the parent we're using has a received_uuid set then use that as
5091 * our clone source as that is what we will look for when doing a
5092 * receive.
5093 *
5094 * This covers the case that we create a snapshot off of a received
5095 * subvolume and then use that as the parent and try to receive on a
5096 * different host.
5097 */
5098 if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
5099 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
5100 clone_root->root->root_item.received_uuid);
5101 else
5102 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
5103 clone_root->root->root_item.uuid);
5104 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
5105 btrfs_root_ctransid(&clone_root->root->root_item));
5106 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
5107 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
5108 clone_root->offset);
5109
5110 ret = send_cmd(sctx);
5111
5112tlv_put_failure:
5113out:
5114 fs_path_free(p);
5115 return ret;
5116}
5117
5118/*
5119 * Send an update extent command to user space.
5120 */
5121static int send_update_extent(struct send_ctx *sctx,
5122 u64 offset, u32 len)
5123{
5124 int ret = 0;
5125 struct fs_path *p;
5126
5127 p = fs_path_alloc();
5128 if (!p)
5129 return -ENOMEM;
5130
5131 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
5132 if (ret < 0)
5133 goto out;
5134
5135 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5136 if (ret < 0)
5137 goto out;
5138
5139 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5140 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5141 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
5142
5143 ret = send_cmd(sctx);
5144
5145tlv_put_failure:
5146out:
5147 fs_path_free(p);
5148 return ret;
5149}
5150
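/*
 * Represent the hole from sctx->cur_inode_last_extent up to 'end' (clamped to
 * the inode's size) in the send stream, either as write commands filled with
 * zeroes or as a single update extent command when file data is not sent.
 */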
5151static int send_hole(struct send_ctx *sctx, u64 end)
5152{
5153 struct fs_path *p = NULL;
5154 u64 read_size = max_send_read_size(sctx);
5155 u64 offset = sctx->cur_inode_last_extent;
5156 int ret = 0;
5157
5158 /*
5159 * A hole that starts at EOF or beyond it. Since we do not yet support
5160 * fallocate (for extent preallocation and hole punching), sending a
5161 * write of zeroes starting at EOF or beyond would later require issuing
5162 * a truncate operation which would undo the write and achieve nothing.
5163 */
5164 if (offset >= sctx->cur_inode_size)
5165 return 0;
5166
5167 /*
5168 * Don't go beyond the inode's i_size due to prealloc extents that start
5169 * after the i_size.
5170 */
5171 end = min_t(u64, end, sctx->cur_inode_size);
5172
5173 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5174 return send_update_extent(sctx, offset, end - offset);
5175
5176 p = fs_path_alloc();
5177 if (!p)
5178 return -ENOMEM;
5179 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
5180 if (ret < 0)
5181 goto tlv_put_failure;
5182 while (offset < end) {
5183 u64 len = min(end - offset, read_size);
5184
5185 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
5186 if (ret < 0)
5187 break;
5188 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
5189 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
5190 ret = put_data_header(sctx, len);
5191 if (ret < 0)
5192 break;
5193 memset(sctx->send_buf + sctx->send_size, 0, len);
5194 sctx->send_size += len;
5195 ret = send_cmd(sctx);
5196 if (ret < 0)
5197 break;
5198 offset += len;
5199 }
5200 sctx->cur_inode_next_write_offset = offset;
5201tlv_put_failure:
5202 fs_path_free(p);
5203 return ret;
5204}
5205
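/*
 * Send the data of the file range [offset, offset + len) as a sequence of
 * write commands, each carrying at most max_send_read_size() bytes. When file
 * data is not sent, a single update extent command is used instead.
 */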
5206static int send_extent_data(struct send_ctx *sctx,
5207 const u64 offset,
5208 const u64 len)
5209{
5210 u64 read_size = max_send_read_size(sctx);
5211 u64 sent = 0;
5212
5213 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
5214 return send_update_extent(sctx, offset, len);
5215
5216 while (sent < len) {
5217 u64 size = min(len - sent, read_size);
5218 int ret;
5219
5220 ret = send_write(sctx, offset + sent, size);
5221 if (ret < 0)
5222 return ret;
5223 sent += size;
5224 }
5225 return 0;
5226}
5227
5228/*
5229 * Search for a capability xattr related to sctx->cur_ino. If the capability is
5230 * found, call send_set_xattr function to emit it.
5231 *
5232 * Return 0 if there isn't a capability, or when the capability was emitted
5233 * successfully, or < 0 if an error occurred.
5234 */
5235static int send_capabilities(struct send_ctx *sctx)
5236{
5237 struct fs_path *fspath = NULL;
5238 struct btrfs_path *path;
5239 struct btrfs_dir_item *di;
5240 struct extent_buffer *leaf;
5241 unsigned long data_ptr;
5242 char *buf = NULL;
5243 int buf_len;
5244 int ret = 0;
5245
5246 path = alloc_path_for_send();
5247 if (!path)
5248 return -ENOMEM;
5249
5250 di = btrfs_lookup_xattr(NULL, sctx->send_root, path, sctx->cur_ino,
5251 XATTR_NAME_CAPS, strlen(XATTR_NAME_CAPS), 0);
5252 if (!di) {
5253 /* There is no xattr for this inode */
5254 goto out;
5255 } else if (IS_ERR(di)) {
5256 ret = PTR_ERR(di);
5257 goto out;
5258 }
5259
5260 leaf = path->nodes[0];
5261 buf_len = btrfs_dir_data_len(leaf, di);
5262
5263 fspath = fs_path_alloc();
5264 buf = kmalloc(buf_len, GFP_KERNEL);
5265 if (!fspath || !buf) {
5266 ret = -ENOMEM;
5267 goto out;
5268 }
5269
5270 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, fspath);
5271 if (ret < 0)
5272 goto out;
5273
5274 data_ptr = (unsigned long)(di + 1) + btrfs_dir_name_len(leaf, di);
5275 read_extent_buffer(leaf, buf, data_ptr, buf_len);
5276
5277 ret = send_set_xattr(sctx, fspath, XATTR_NAME_CAPS,
5278 strlen(XATTR_NAME_CAPS), buf, buf_len);
5279out:
5280 kfree(buf);
5281 fs_path_free(fspath);
5282 btrfs_free_path(path);
5283 return ret;
5284}
5285
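/*
 * Try to satisfy the file range [offset, offset + len) of the inode being
 * processed with clone operations from the given clone source, walking its
 * extent items. Parts of the range that refer to different extents, that are
 * holes, or that lie beyond the source inode's size are sent as regular
 * writes instead.
 */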
5286static int clone_range(struct send_ctx *sctx,
5287 struct clone_root *clone_root,
5288 const u64 disk_byte,
5289 u64 data_offset,
5290 u64 offset,
5291 u64 len)
5292{
5293 struct btrfs_path *path;
5294 struct btrfs_key key;
5295 int ret;
5296 u64 clone_src_i_size = 0;
5297
5298 /*
5299 * Prevent cloning from a zero offset with a length matching the sector
5300 * size because in some scenarios this will make the receiver fail.
5301 *
5302 * For example, if in the source filesystem the extent at offset 0
5303 * has a length of sectorsize and it was written using direct IO, then
5304 * it can never be an inline extent (even if compression is enabled).
5305 * Then this extent can be cloned in the original filesystem to a
5306 * non-zero file offset, but it may not be possible to clone it in the
5307 * destination filesystem because it can be inlined due to compression
5308 * on the destination filesystem (as the receiver's write operations are
5309 * always done using buffered IO). The same happens when the original
5310 * filesystem does not have compression enabled but the destination
5311 * filesystem has.
5312 */
5313 if (clone_root->offset == 0 &&
5314 len == sctx->send_root->fs_info->sectorsize)
5315 return send_extent_data(sctx, offset, len);
5316
5317 path = alloc_path_for_send();
5318 if (!path)
5319 return -ENOMEM;
5320
5321 /*
5322 * There are inodes that have extents lying beyond their i_size. Don't
5323 * accept clones from these extents.
5324 */
5325 ret = __get_inode_info(clone_root->root, path, clone_root->ino,
5326 &clone_src_i_size, NULL, NULL, NULL, NULL, NULL);
5327 btrfs_release_path(path);
5328 if (ret < 0)
5329 goto out;
5330
5331 /*
5332 * We can't send a clone operation for the entire range if we find
5333 * extent items in the respective range in the source file that
5334 * refer to different extents or if we find holes.
5335 * So check for that and do a mix of clone and regular write/copy
5336 * operations if needed.
5337 *
5338 * Example:
5339 *
5340 * mkfs.btrfs -f /dev/sda
5341 * mount /dev/sda /mnt
5342 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
5343 * cp --reflink=always /mnt/foo /mnt/bar
5344 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
5345 * btrfs subvolume snapshot -r /mnt /mnt/snap
5346 *
5347 * If when we send the snapshot and we are processing file bar (which
5348 * has a higher inode number than foo) we blindly send a clone operation
5349 * for the [0, 100K[ range from foo to bar, the receiver ends up getting
5350 * a file bar that matches the content of file foo - iow, doesn't match
5351 * the content from bar in the original filesystem.
5352 */
5353 key.objectid = clone_root->ino;
5354 key.type = BTRFS_EXTENT_DATA_KEY;
5355 key.offset = clone_root->offset;
5356 ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
5357 if (ret < 0)
5358 goto out;
5359 if (ret > 0 && path->slots[0] > 0) {
5360 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
5361 if (key.objectid == clone_root->ino &&
5362 key.type == BTRFS_EXTENT_DATA_KEY)
5363 path->slots[0]--;
5364 }
5365
5366 while (true) {
5367 struct extent_buffer *leaf = path->nodes[0];
5368 int slot = path->slots[0];
5369 struct btrfs_file_extent_item *ei;
5370 u8 type;
5371 u64 ext_len;
5372 u64 clone_len;
5373 u64 clone_data_offset;
5374
5375 if (slot >= btrfs_header_nritems(leaf)) {
5376 ret = btrfs_next_leaf(clone_root->root, path);
5377 if (ret < 0)
5378 goto out;
5379 else if (ret > 0)
5380 break;
5381 continue;
5382 }
5383
5384 btrfs_item_key_to_cpu(leaf, &key, slot);
5385
5386 /*
5387 * We might have an implicit trailing hole (NO_HOLES feature
5388 * enabled). We deal with it after leaving this loop.
5389 */
5390 if (key.objectid != clone_root->ino ||
5391 key.type != BTRFS_EXTENT_DATA_KEY)
5392 break;
5393
5394 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5395 type = btrfs_file_extent_type(leaf, ei);
5396 if (type == BTRFS_FILE_EXTENT_INLINE) {
5397 ext_len = btrfs_file_extent_ram_bytes(leaf, ei);
5398 ext_len = PAGE_ALIGN(ext_len);
5399 } else {
5400 ext_len = btrfs_file_extent_num_bytes(leaf, ei);
5401 }
5402
5403 if (key.offset + ext_len <= clone_root->offset)
5404 goto next;
5405
5406 if (key.offset > clone_root->offset) {
5407 /* Implicit hole, NO_HOLES feature enabled. */
5408 u64 hole_len = key.offset - clone_root->offset;
5409
5410 if (hole_len > len)
5411 hole_len = len;
5412 ret = send_extent_data(sctx, offset, hole_len);
5413 if (ret < 0)
5414 goto out;
5415
5416 len -= hole_len;
5417 if (len == 0)
5418 break;
5419 offset += hole_len;
5420 clone_root->offset += hole_len;
5421 data_offset += hole_len;
5422 }
5423
5424 if (key.offset >= clone_root->offset + len)
5425 break;
5426
5427 if (key.offset >= clone_src_i_size)
5428 break;
5429
5430 if (key.offset + ext_len > clone_src_i_size)
5431 ext_len = clone_src_i_size - key.offset;
5432
5433 clone_data_offset = btrfs_file_extent_offset(leaf, ei);
5434 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte) {
5435 clone_root->offset = key.offset;
5436 if (clone_data_offset < data_offset &&
5437 clone_data_offset + ext_len > data_offset) {
5438 u64 extent_offset;
5439
5440 extent_offset = data_offset - clone_data_offset;
5441 ext_len -= extent_offset;
5442 clone_data_offset += extent_offset;
5443 clone_root->offset += extent_offset;
5444 }
5445 }
5446
5447 clone_len = min_t(u64, ext_len, len);
5448
5449 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
5450 clone_data_offset == data_offset) {
5451 const u64 src_end = clone_root->offset + clone_len;
5452 const u64 sectorsize = SZ_64K;
5453
5454 /*
5455 * We can't clone the last block, when its size is not
5456 * sector size aligned, into the middle of a file. If we
5457 * do so, the receiver will get a failure (-EINVAL) when
5458 * trying to clone or will silently corrupt the data in
5459 * the destination file if it's on a kernel without the
5460 * fix introduced by commit ac765f83f1397646
5461 * ("Btrfs: fix data corruption due to cloning of eof
5462 * block").
5463 *
5464 * So issue a clone of the aligned down range plus a
5465 * regular write for the eof block, if we hit that case.
5466 *
5467 * Also, we use the maximum possible sector size, 64K,
5468 * because we don't know the sector size of the
5469 * filesystem that receives the stream, so we have to
5470 * assume the largest possible sector size.
5471 */
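/*
 * Worked example (illustrative numbers only, assuming the destination
 * range does not end at the file's current eof): with
 * clone_root->offset == 0, clone_len == 70K and
 * src_end == clone_src_i_size == 70K, src_end is not 64K aligned, so
 * slen = ALIGN_DOWN(70K, 64K) = 64K: the first 64K are cloned and the
 * remaining 6K are sent as a regular write.
 */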
5472 if (src_end == clone_src_i_size &&
5473 !IS_ALIGNED(src_end, sectorsize) &&
5474 offset + clone_len < sctx->cur_inode_size) {
5475 u64 slen;
5476
5477 slen = ALIGN_DOWN(src_end - clone_root->offset,
5478 sectorsize);
5479 if (slen > 0) {
5480 ret = send_clone(sctx, offset, slen,
5481 clone_root);
5482 if (ret < 0)
5483 goto out;
5484 }
5485 ret = send_extent_data(sctx, offset + slen,
5486 clone_len - slen);
5487 } else {
5488 ret = send_clone(sctx, offset, clone_len,
5489 clone_root);
5490 }
5491 } else {
5492 ret = send_extent_data(sctx, offset, clone_len);
5493 }
5494
5495 if (ret < 0)
5496 goto out;
5497
5498 len -= clone_len;
5499 if (len == 0)
5500 break;
5501 offset += clone_len;
5502 clone_root->offset += clone_len;
5503
5504 /*
5505 * If we are cloning from the file we are currently processing,
5506 * and using the send root as the clone root, we must stop once
5507 * the current clone offset reaches the current eof of the file
5508 * at the receiver, otherwise we would issue an invalid clone
5509 * operation (source range going beyond eof) and cause the
5510 * receiver to fail. So if we reach the current eof, bail out
5511 * and fall back to a regular write.
5512 */
5513 if (clone_root->root == sctx->send_root &&
5514 clone_root->ino == sctx->cur_ino &&
5515 clone_root->offset >= sctx->cur_inode_next_write_offset)
5516 break;
5517
5518 data_offset += clone_len;
5519next:
5520 path->slots[0]++;
5521 }
5522
5523 if (len > 0)
5524 ret = send_extent_data(sctx, offset, len);
5525 else
5526 ret = 0;
5527out:
5528 btrfs_free_path(path);
5529 return ret;
5530}
5531
5532static int send_write_or_clone(struct send_ctx *sctx,
5533 struct btrfs_path *path,
5534 struct btrfs_key *key,
5535 struct clone_root *clone_root)
5536{
5537 int ret = 0;
5538 u64 offset = key->offset;
5539 u64 end;
5540 u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
5541
5542 end = min_t(u64, btrfs_file_extent_end(path), sctx->cur_inode_size);
5543 if (offset >= end)
5544 return 0;
5545
5546 if (clone_root && IS_ALIGNED(end, bs)) {
5547 struct btrfs_file_extent_item *ei;
5548 u64 disk_byte;
5549 u64 data_offset;
5550
5551 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5552 struct btrfs_file_extent_item);
5553 disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
5554 data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
5555 ret = clone_range(sctx, clone_root, disk_byte, data_offset,
5556 offset, end - offset);
5557 } else {
5558 ret = send_extent_data(sctx, offset, end - offset);
5559 }
5560 sctx->cur_inode_next_write_offset = end;
5561 return ret;
5562}
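/*
 * Illustrative note (assumed block size, not from the original source):
 * the IS_ALIGNED(end, bs) check above decides between cloning and plain
 * writes. With a 4K block size, an extent whose relevant end is 100K
 * may be cloned, while a final extent cut short by an i_size of e.g.
 * 10000 bytes ends at an unaligned offset and is sent with
 * send_extent_data() instead.
 */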
5563
5564static int is_extent_unchanged(struct send_ctx *sctx,
5565 struct btrfs_path *left_path,
5566 struct btrfs_key *ekey)
5567{
5568 int ret = 0;
5569 struct btrfs_key key;
5570 struct btrfs_path *path = NULL;
5571 struct extent_buffer *eb;
5572 int slot;
5573 struct btrfs_key found_key;
5574 struct btrfs_file_extent_item *ei;
5575 u64 left_disknr;
5576 u64 right_disknr;
5577 u64 left_offset;
5578 u64 right_offset;
5579 u64 left_offset_fixed;
5580 u64 left_len;
5581 u64 right_len;
5582 u64 left_gen;
5583 u64 right_gen;
5584 u8 left_type;
5585 u8 right_type;
5586
5587 path = alloc_path_for_send();
5588 if (!path)
5589 return -ENOMEM;
5590
5591 eb = left_path->nodes[0];
5592 slot = left_path->slots[0];
5593 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5594 left_type = btrfs_file_extent_type(eb, ei);
5595
5596 if (left_type != BTRFS_FILE_EXTENT_REG) {
5597 ret = 0;
5598 goto out;
5599 }
5600 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5601 left_len = btrfs_file_extent_num_bytes(eb, ei);
5602 left_offset = btrfs_file_extent_offset(eb, ei);
5603 left_gen = btrfs_file_extent_generation(eb, ei);
5604
5605 /*
5606 * Following comments will refer to these graphics. L is the left
5607 * extents which we are checking at the moment. 1-8 are the right
5608 * extents that we iterate.
5609 *
5610 * |-----L-----|
5611 * |-1-|-2a-|-3-|-4-|-5-|-6-|
5612 *
5613 * |-----L-----|
5614 * |--1--|-2b-|...(same as above)
5615 *
5616 * Alternative situation. Happens on files where extents got split.
5617 * |-----L-----|
5618 * |-----------7-----------|-6-|
5619 *
5620 * Alternative situation. Happens on files which got larger.
5621 * |-----L-----|
5622 * |-8-|
5623 * Nothing follows after 8.
5624 */
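/*
 * Worked example for case 2a (illustrative numbers only): if L starts
 * at file offset 100K and the right extent 2a starts at file offset
 * 96K, the loop below adds 100K - 96K = 4K to right_offset so that
 * both sides are compared at the same position within the shared
 * on-disk extent.
 */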
5625
5626 key.objectid = ekey->objectid;
5627 key.type = BTRFS_EXTENT_DATA_KEY;
5628 key.offset = ekey->offset;
5629 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
5630 if (ret < 0)
5631 goto out;
5632 if (ret) {
5633 ret = 0;
5634 goto out;
5635 }
5636
5637 /*
5638 * Handle special case where the right side has no extents at all.
5639 */
5640 eb = path->nodes[0];
5641 slot = path->slots[0];
5642 btrfs_item_key_to_cpu(eb, &found_key, slot);
5643 if (found_key.objectid != key.objectid ||
5644 found_key.type != key.type) {
5645 /* If we're a hole then just pretend nothing changed */
5646 ret = (left_disknr) ? 0 : 1;
5647 goto out;
5648 }
5649
5650 /*
5651 * We're now on 2a, 2b or 7.
5652 */
5653 key = found_key;
5654 while (key.offset < ekey->offset + left_len) {
5655 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5656 right_type = btrfs_file_extent_type(eb, ei);
5657 if (right_type != BTRFS_FILE_EXTENT_REG &&
5658 right_type != BTRFS_FILE_EXTENT_INLINE) {
5659 ret = 0;
5660 goto out;
5661 }
5662
5663 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5664 right_len = btrfs_file_extent_ram_bytes(eb, ei);
5665 right_len = PAGE_ALIGN(right_len);
5666 } else {
5667 right_len = btrfs_file_extent_num_bytes(eb, ei);
5668 }
5669
5670 /*
5671 * Are we at extent 8? If yes, we know the extent is changed.
5672 * This may only happen on the first iteration.
5673 */
5674 if (found_key.offset + right_len <= ekey->offset) {
5675 /* If we're a hole just pretend nothing changed */
5676 ret = (left_disknr) ? 0 : 1;
5677 goto out;
5678 }
5679
5680 /*
5681 * We only wanted to check whether an inline extent is followed by
5682 * a regular extent (i.e. to apply the above condition to inline
5683 * extents too). This should normally not
5684 * happen but it's possible for example when we have an inline
5685 * compressed extent representing data with a size matching
5686 * the page size (currently the same as sector size).
5687 */
5688 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5689 ret = 0;
5690 goto out;
5691 }
5692
5693 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5694 right_offset = btrfs_file_extent_offset(eb, ei);
5695 right_gen = btrfs_file_extent_generation(eb, ei);
5696
5697 left_offset_fixed = left_offset;
5698 if (key.offset < ekey->offset) {
5699 /* Fix the right offset for 2a and 7. */
5700 right_offset += ekey->offset - key.offset;
5701 } else {
5702 /* Fix the left offset for all behind 2a and 2b */
5703 left_offset_fixed += key.offset - ekey->offset;
5704 }
5705
5706 /*
5707 * Check if we have the same extent.
5708 */
5709 if (left_disknr != right_disknr ||
5710 left_offset_fixed != right_offset ||
5711 left_gen != right_gen) {
5712 ret = 0;
5713 goto out;
5714 }
5715
5716 /*
5717 * Go to the next extent.
5718 */
5719 ret = btrfs_next_item(sctx->parent_root, path);
5720 if (ret < 0)
5721 goto out;
5722 if (!ret) {
5723 eb = path->nodes[0];
5724 slot = path->slots[0];
5725 btrfs_item_key_to_cpu(eb, &found_key, slot);
5726 }
5727 if (ret || found_key.objectid != key.objectid ||
5728 found_key.type != key.type) {
5729 key.offset += right_len;
5730 break;
5731 }
5732 if (found_key.offset != key.offset + right_len) {
5733 ret = 0;
5734 goto out;
5735 }
5736 key = found_key;
5737 }
5738
5739 /*
5740 * We're now past the left extent (treat as unchanged) or at the end
5741 * of the right side (treat as changed).
5742 */
5743 if (key.offset >= ekey->offset + left_len)
5744 ret = 1;
5745 else
5746 ret = 0;
5747
5748
5749out:
5750 btrfs_free_path(path);
5751 return ret;
5752}
5753
5754static int get_last_extent(struct send_ctx *sctx, u64 offset)
5755{
5756 struct btrfs_path *path;
5757 struct btrfs_root *root = sctx->send_root;
5758 struct btrfs_key key;
5759 int ret;
5760
5761 path = alloc_path_for_send();
5762 if (!path)
5763 return -ENOMEM;
5764
5765 sctx->cur_inode_last_extent = 0;
5766
5767 key.objectid = sctx->cur_ino;
5768 key.type = BTRFS_EXTENT_DATA_KEY;
5769 key.offset = offset;
5770 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
5771 if (ret < 0)
5772 goto out;
5773 ret = 0;
5774 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
5775 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
5776 goto out;
5777
5778 sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
5779out:
5780 btrfs_free_path(path);
5781 return ret;
5782}
5783
5784static int range_is_hole_in_parent(struct send_ctx *sctx,
5785 const u64 start,
5786 const u64 end)
5787{
5788 struct btrfs_path *path;
5789 struct btrfs_key key;
5790 struct btrfs_root *root = sctx->parent_root;
5791 u64 search_start = start;
5792 int ret;
5793
5794 path = alloc_path_for_send();
5795 if (!path)
5796 return -ENOMEM;
5797
5798 key.objectid = sctx->cur_ino;
5799 key.type = BTRFS_EXTENT_DATA_KEY;
5800 key.offset = search_start;
5801 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5802 if (ret < 0)
5803 goto out;
5804 if (ret > 0 && path->slots[0] > 0)
5805 path->slots[0]--;
5806
5807 while (search_start < end) {
5808 struct extent_buffer *leaf = path->nodes[0];
5809 int slot = path->slots[0];
5810 struct btrfs_file_extent_item *fi;
5811 u64 extent_end;
5812
5813 if (slot >= btrfs_header_nritems(leaf)) {
5814 ret = btrfs_next_leaf(root, path);
5815 if (ret < 0)
5816 goto out;
5817 else if (ret > 0)
5818 break;
5819 continue;
5820 }
5821
5822 btrfs_item_key_to_cpu(leaf, &key, slot);
5823 if (key.objectid < sctx->cur_ino ||
5824 key.type < BTRFS_EXTENT_DATA_KEY)
5825 goto next;
5826 if (key.objectid > sctx->cur_ino ||
5827 key.type > BTRFS_EXTENT_DATA_KEY ||
5828 key.offset >= end)
5829 break;
5830
5831 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5832 extent_end = btrfs_file_extent_end(path);
5833 if (extent_end <= start)
5834 goto next;
5835 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
5836 search_start = extent_end;
5837 goto next;
5838 }
5839 ret = 0;
5840 goto out;
5841next:
5842 path->slots[0]++;
5843 }
5844 ret = 1;
5845out:
5846 btrfs_free_path(path);
5847 return ret;
5848}
5849
5850static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
5851 struct btrfs_key *key)
5852{
5853 int ret = 0;
5854
5855 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
5856 return 0;
5857
5858 if (sctx->cur_inode_last_extent == (u64)-1) {
5859 ret = get_last_extent(sctx, key->offset - 1);
5860 if (ret)
5861 return ret;
5862 }
5863
5864 if (path->slots[0] == 0 &&
5865 sctx->cur_inode_last_extent < key->offset) {
5866 /*
5867 * We might have skipped entire leaves that contained only
5868 * file extent items for our current inode. These leafs have
5869 * a generation number smaller (older) than the one in the
5870 * current leaf and the leaf our last extent came from, and
5871 * are located between these 2 leaves.
5872 */
5873 ret = get_last_extent(sctx, key->offset - 1);
5874 if (ret)
5875 return ret;
5876 }
5877
5878 if (sctx->cur_inode_last_extent < key->offset) {
5879 ret = range_is_hole_in_parent(sctx,
5880 sctx->cur_inode_last_extent,
5881 key->offset);
5882 if (ret < 0)
5883 return ret;
5884 else if (ret == 0)
5885 ret = send_hole(sctx, key->offset);
5886 else
5887 ret = 0;
5888 }
5889 sctx->cur_inode_last_extent = btrfs_file_extent_end(path);
5890 return ret;
5891}
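/*
 * Illustrative scenario (hypothetical paths, not from the original
 * source) in which the hole detection above ends up sending zeroes
 * during an incremental send:
 *
 *   xfs_io -f -c "pwrite -S 0xaa 0 128K" /mnt/foo
 *   btrfs subvolume snapshot -r /mnt /mnt/snap1
 *   xfs_io -c "fpunch 32K 32K" /mnt/foo
 *   btrfs subvolume snapshot -r /mnt /mnt/snap2
 *   btrfs send -p /mnt/snap1 /mnt/snap2 | btrfs receive /mnt2
 *
 * The [32K, 64K) range has data in the parent snapshot but is a hole
 * in the send snapshot, so it is expected to be re-sent as zeroes.
 */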
5892
5893static int process_extent(struct send_ctx *sctx,
5894 struct btrfs_path *path,
5895 struct btrfs_key *key)
5896{
5897 struct clone_root *found_clone = NULL;
5898 int ret = 0;
5899
5900 if (S_ISLNK(sctx->cur_inode_mode))
5901 return 0;
5902
5903 if (sctx->parent_root && !sctx->cur_inode_new) {
5904 ret = is_extent_unchanged(sctx, path, key);
5905 if (ret < 0)
5906 goto out;
5907 if (ret) {
5908 ret = 0;
5909 goto out_hole;
5910 }
5911 } else {
5912 struct btrfs_file_extent_item *ei;
5913 u8 type;
5914
5915 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5916 struct btrfs_file_extent_item);
5917 type = btrfs_file_extent_type(path->nodes[0], ei);
5918 if (type == BTRFS_FILE_EXTENT_PREALLOC ||
5919 type == BTRFS_FILE_EXTENT_REG) {
5920 /*
5921 * The send spec does not have a prealloc command yet,
5922 * so just leave a hole for prealloc'ed extents until
5923 * we have enough commands queued up to justify rev'ing
5924 * the send spec.
5925 */
5926 if (type == BTRFS_FILE_EXTENT_PREALLOC) {
5927 ret = 0;
5928 goto out;
5929 }
5930
5931 /* Have a hole, just skip it. */
5932 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
5933 ret = 0;
5934 goto out;
5935 }
5936 }
5937 }
5938
5939 ret = find_extent_clone(sctx, path, key->objectid, key->offset,
5940 sctx->cur_inode_size, &found_clone);
5941 if (ret != -ENOENT && ret < 0)
5942 goto out;
5943
5944 ret = send_write_or_clone(sctx, path, key, found_clone);
5945 if (ret)
5946 goto out;
5947out_hole:
5948 ret = maybe_send_hole(sctx, path, key);
5949out:
5950 return ret;
5951}
5952
5953static int process_all_extents(struct send_ctx *sctx)
5954{
5955 int ret;
5956 struct btrfs_root *root;
5957 struct btrfs_path *path;
5958 struct btrfs_key key;
5959 struct btrfs_key found_key;
5960 struct extent_buffer *eb;
5961 int slot;
5962
5963 root = sctx->send_root;
5964 path = alloc_path_for_send();
5965 if (!path)
5966 return -ENOMEM;
5967
5968 key.objectid = sctx->cmp_key->objectid;
5969 key.type = BTRFS_EXTENT_DATA_KEY;
5970 key.offset = 0;
5971 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5972 if (ret < 0)
5973 goto out;
5974
5975 while (1) {
5976 eb = path->nodes[0];
5977 slot = path->slots[0];
5978
5979 if (slot >= btrfs_header_nritems(eb)) {
5980 ret = btrfs_next_leaf(root, path);
5981 if (ret < 0) {
5982 goto out;
5983 } else if (ret > 0) {
5984 ret = 0;
5985 break;
5986 }
5987 continue;
5988 }
5989
5990 btrfs_item_key_to_cpu(eb, &found_key, slot);
5991
5992 if (found_key.objectid != key.objectid ||
5993 found_key.type != key.type) {
5994 ret = 0;
5995 goto out;
5996 }
5997
5998 ret = process_extent(sctx, path, &found_key);
5999 if (ret < 0)
6000 goto out;
6001
6002 path->slots[0]++;
6003 }
6004
6005out:
6006 btrfs_free_path(path);
6007 return ret;
6008}
6009
6010static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
6011 int *pending_move,
6012 int *refs_processed)
6013{
6014 int ret = 0;
6015
6016 if (sctx->cur_ino == 0)
6017 goto out;
6018 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
6019 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
6020 goto out;
6021 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
6022 goto out;
6023
6024 ret = process_recorded_refs(sctx, pending_move);
6025 if (ret < 0)
6026 goto out;
6027
6028 *refs_processed = 1;
6029out:
6030 return ret;
6031}
6032
6033static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
6034{
6035 int ret = 0;
6036 u64 left_mode;
6037 u64 left_uid;
6038 u64 left_gid;
6039 u64 right_mode;
6040 u64 right_uid;
6041 u64 right_gid;
6042 int need_chmod = 0;
6043 int need_chown = 0;
6044 int need_truncate = 1;
6045 int pending_move = 0;
6046 int refs_processed = 0;
6047
6048 if (sctx->ignore_cur_inode)
6049 return 0;
6050
6051 ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
6052 &refs_processed);
6053 if (ret < 0)
6054 goto out;
6055
6056 /*
6057 * We have processed the refs and thus need to advance send_progress.
6058 * Now, calls to get_cur_xxx will take the updated refs of the current
6059 * inode into account.
6060 *
6061 * On the other hand, if our current inode is a directory and couldn't
6062 * be moved/renamed because its parent was renamed/moved too and it has
6063 * a higher inode number, we can only move/rename our current inode
6064 * after we moved/renamed its parent. Therefore in this case operate on
6065 * the old path (pre move/rename) of our current inode, and the
6066 * move/rename will be performed later.
6067 */
6068 if (refs_processed && !pending_move)
6069 sctx->send_progress = sctx->cur_ino + 1;
6070
6071 if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
6072 goto out;
6073 if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
6074 goto out;
6075
6076 ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
6077 &left_mode, &left_uid, &left_gid, NULL);
6078 if (ret < 0)
6079 goto out;
6080
6081 if (!sctx->parent_root || sctx->cur_inode_new) {
6082 need_chown = 1;
6083 if (!S_ISLNK(sctx->cur_inode_mode))
6084 need_chmod = 1;
6085 if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size)
6086 need_truncate = 0;
6087 } else {
6088 u64 old_size;
6089
6090 ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
6091 &old_size, NULL, &right_mode, &right_uid,
6092 &right_gid, NULL);
6093 if (ret < 0)
6094 goto out;
6095
6096 if (left_uid != right_uid || left_gid != right_gid)
6097 need_chown = 1;
6098 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
6099 need_chmod = 1;
6100 if ((old_size == sctx->cur_inode_size) ||
6101 (sctx->cur_inode_size > old_size &&
6102 sctx->cur_inode_next_write_offset == sctx->cur_inode_size))
6103 need_truncate = 0;
6104 }
6105
6106 if (S_ISREG(sctx->cur_inode_mode)) {
6107 if (need_send_hole(sctx)) {
6108 if (sctx->cur_inode_last_extent == (u64)-1 ||
6109 sctx->cur_inode_last_extent <
6110 sctx->cur_inode_size) {
6111 ret = get_last_extent(sctx, (u64)-1);
6112 if (ret)
6113 goto out;
6114 }
6115 if (sctx->cur_inode_last_extent <
6116 sctx->cur_inode_size) {
6117 ret = send_hole(sctx, sctx->cur_inode_size);
6118 if (ret)
6119 goto out;
6120 }
6121 }
6122 if (need_truncate) {
6123 ret = send_truncate(sctx, sctx->cur_ino,
6124 sctx->cur_inode_gen,
6125 sctx->cur_inode_size);
6126 if (ret < 0)
6127 goto out;
6128 }
6129 }
6130
6131 if (need_chown) {
6132 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
6133 left_uid, left_gid);
6134 if (ret < 0)
6135 goto out;
6136 }
6137 if (need_chmod) {
6138 ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
6139 left_mode);
6140 if (ret < 0)
6141 goto out;
6142 }
6143
6144 ret = send_capabilities(sctx);
6145 if (ret < 0)
6146 goto out;
6147
6148 /*
6149 * If other directory inodes depended on our current directory
6150 * inode's move/rename, now do their move/rename operations.
6151 */
6152 if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
6153 ret = apply_children_dir_moves(sctx);
6154 if (ret)
6155 goto out;
6156 /*
6157 * We always need to send the utimes, no matter whether the inode
6158 * actually changed between the two trees, as we may have made
6159 * changes to the inode before. If our inode is a directory and it's
6160 * waiting to be moved/renamed, we will send its utimes when
6161 * it's moved/renamed, therefore we don't need to do it here.
6162 */
6163 sctx->send_progress = sctx->cur_ino + 1;
6164 ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
6165 if (ret < 0)
6166 goto out;
6167 }
6168
6169out:
6170 return ret;
6171}
6172
6173struct parent_paths_ctx {
6174 struct list_head *refs;
6175 struct send_ctx *sctx;
6176};
6177
6178static int record_parent_ref(int num, u64 dir, int index, struct fs_path *name,
6179 void *ctx)
6180{
6181 struct parent_paths_ctx *ppctx = ctx;
6182
6183 return record_ref(ppctx->sctx->parent_root, dir, name, ppctx->sctx,
6184 ppctx->refs);
6185}
6186
6187/*
6188 * Issue unlink operations for all paths of the current inode found in the
6189 * parent snapshot.
6190 */
6191static int btrfs_unlink_all_paths(struct send_ctx *sctx)
6192{
6193 LIST_HEAD(deleted_refs);
6194 struct btrfs_path *path;
6195 struct btrfs_key key;
6196 struct parent_paths_ctx ctx;
6197 int ret;
6198
6199 path = alloc_path_for_send();
6200 if (!path)
6201 return -ENOMEM;
6202
6203 key.objectid = sctx->cur_ino;
6204 key.type = BTRFS_INODE_REF_KEY;
6205 key.offset = 0;
6206 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
6207 if (ret < 0)
6208 goto out;
6209
6210 ctx.refs = &deleted_refs;
6211 ctx.sctx = sctx;
6212
6213 while (true) {
6214 struct extent_buffer *eb = path->nodes[0];
6215 int slot = path->slots[0];
6216
6217 if (slot >= btrfs_header_nritems(eb)) {
6218 ret = btrfs_next_leaf(sctx->parent_root, path);
6219 if (ret < 0)
6220 goto out;
6221 else if (ret > 0)
6222 break;
6223 continue;
6224 }
6225
6226 btrfs_item_key_to_cpu(eb, &key, slot);
6227 if (key.objectid != sctx->cur_ino)
6228 break;
6229 if (key.type != BTRFS_INODE_REF_KEY &&
6230 key.type != BTRFS_INODE_EXTREF_KEY)
6231 break;
6232
6233 ret = iterate_inode_ref(sctx->parent_root, path, &key, 1,
6234 record_parent_ref, &ctx);
6235 if (ret < 0)
6236 goto out;
6237
6238 path->slots[0]++;
6239 }
6240
6241 while (!list_empty(&deleted_refs)) {
6242 struct recorded_ref *ref;
6243
6244 ref = list_first_entry(&deleted_refs, struct recorded_ref, list);
6245 ret = send_unlink(sctx, ref->full_path);
6246 if (ret < 0)
6247 goto out;
6248 fs_path_free(ref->full_path);
6249 list_del(&ref->list);
6250 kfree(ref);
6251 }
6252 ret = 0;
6253out:
6254 btrfs_free_path(path);
6255 if (ret)
6256 __free_recorded_refs(&deleted_refs);
6257 return ret;
6258}
6259
6260static int changed_inode(struct send_ctx *sctx,
6261 enum btrfs_compare_tree_result result)
6262{
6263 int ret = 0;
6264 struct btrfs_key *key = sctx->cmp_key;
6265 struct btrfs_inode_item *left_ii = NULL;
6266 struct btrfs_inode_item *right_ii = NULL;
6267 u64 left_gen = 0;
6268 u64 right_gen = 0;
6269
6270 sctx->cur_ino = key->objectid;
6271 sctx->cur_inode_new_gen = 0;
6272 sctx->cur_inode_last_extent = (u64)-1;
6273 sctx->cur_inode_next_write_offset = 0;
6274 sctx->ignore_cur_inode = false;
6275
6276 /*
6277 * Set send_progress to current inode. This will tell all get_cur_xxx
6278 * functions that the current inode's refs are not updated yet. Later,
6279 * when process_recorded_refs is finished, it is set to cur_ino + 1.
6280 */
6281 sctx->send_progress = sctx->cur_ino;
6282
6283 if (result == BTRFS_COMPARE_TREE_NEW ||
6284 result == BTRFS_COMPARE_TREE_CHANGED) {
6285 left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
6286 sctx->left_path->slots[0],
6287 struct btrfs_inode_item);
6288 left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
6289 left_ii);
6290 } else {
6291 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
6292 sctx->right_path->slots[0],
6293 struct btrfs_inode_item);
6294 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
6295 right_ii);
6296 }
6297 if (result == BTRFS_COMPARE_TREE_CHANGED) {
6298 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
6299 sctx->right_path->slots[0],
6300 struct btrfs_inode_item);
6301
6302 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
6303 right_ii);
6304
6305 /*
6306 * The cur_ino = root dir case is special here. We can't treat
6307 * the inode as deleted+reused because it would generate a
6308 * stream that tries to delete/mkdir the root dir.
6309 */
6310 if (left_gen != right_gen &&
6311 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
6312 sctx->cur_inode_new_gen = 1;
6313 }
6314
6315 /*
6316 * Normally we do not find inodes with a link count of zero (orphans)
6317 * because the most common case is to create a snapshot and use it
6318 * for a send operation. However other less common use cases involve
6319 * using a subvolume and sending it after turning it to RO mode just
6320 * after deleting all hard links of a file while holding an open
6321 * file descriptor against it, or turning a RO snapshot into RW mode,
6322 * keeping an open file descriptor against a file, deleting it and then
6323 * turning the snapshot back to RO mode before using it for a send
6324 * operation. So if we find such cases, ignore the inode and all its
6325 * items completely if it's a new inode, or if it's a changed inode,
6326 * make sure all its previous paths (from the parent snapshot) are
6327 * unlinked and all the other inode items are ignored.
6328 */
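/*
 * Illustrative reproducer for the orphan case described above
 * (hypothetical paths, not from the original source):
 *
 *   exec 3</mnt/subvol/foo             # keep a file descriptor open
 *   rm /mnt/subvol/foo                 # nlink drops to 0
 *   btrfs property set /mnt/subvol ro true
 *   btrfs send /mnt/subvol >/dev/null
 *
 * The send root then contains an inode item with nlink == 0 and no
 * paths, which is skipped via ignore_cur_inode below.
 */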
6329 if (result == BTRFS_COMPARE_TREE_NEW ||
6330 result == BTRFS_COMPARE_TREE_CHANGED) {
6331 u32 nlinks;
6332
6333 nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii);
6334 if (nlinks == 0) {
6335 sctx->ignore_cur_inode = true;
6336 if (result == BTRFS_COMPARE_TREE_CHANGED)
6337 ret = btrfs_unlink_all_paths(sctx);
6338 goto out;
6339 }
6340 }
6341
6342 if (result == BTRFS_COMPARE_TREE_NEW) {
6343 sctx->cur_inode_gen = left_gen;
6344 sctx->cur_inode_new = 1;
6345 sctx->cur_inode_deleted = 0;
6346 sctx->cur_inode_size = btrfs_inode_size(
6347 sctx->left_path->nodes[0], left_ii);
6348 sctx->cur_inode_mode = btrfs_inode_mode(
6349 sctx->left_path->nodes[0], left_ii);
6350 sctx->cur_inode_rdev = btrfs_inode_rdev(
6351 sctx->left_path->nodes[0], left_ii);
6352 if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
6353 ret = send_create_inode_if_needed(sctx);
6354 } else if (result == BTRFS_COMPARE_TREE_DELETED) {
6355 sctx->cur_inode_gen = right_gen;
6356 sctx->cur_inode_new = 0;
6357 sctx->cur_inode_deleted = 1;
6358 sctx->cur_inode_size = btrfs_inode_size(
6359 sctx->right_path->nodes[0], right_ii);
6360 sctx->cur_inode_mode = btrfs_inode_mode(
6361 sctx->right_path->nodes[0], right_ii);
6362 } else if (result == BTRFS_COMPARE_TREE_CHANGED) {
6363 /*
6364 * We need to do some special handling in case the inode was
6365 * reported as changed with a changed generation number. This
6366 * means that the original inode was deleted and new inode
6367 * reused the same inum. So we have to treat the old inode as
6368 * deleted and the new one as new.
6369 */
6370 if (sctx->cur_inode_new_gen) {
6371 /*
6372 * First, process the inode as if it was deleted.
6373 */
6374 sctx->cur_inode_gen = right_gen;
6375 sctx->cur_inode_new = 0;
6376 sctx->cur_inode_deleted = 1;
6377 sctx->cur_inode_size = btrfs_inode_size(
6378 sctx->right_path->nodes[0], right_ii);
6379 sctx->cur_inode_mode = btrfs_inode_mode(
6380 sctx->right_path->nodes[0], right_ii);
6381 ret = process_all_refs(sctx,
6382 BTRFS_COMPARE_TREE_DELETED);
6383 if (ret < 0)
6384 goto out;
6385
6386 /*
6387 * Now process the inode as if it was new.
6388 */
6389 sctx->cur_inode_gen = left_gen;
6390 sctx->cur_inode_new = 1;
6391 sctx->cur_inode_deleted = 0;
6392 sctx->cur_inode_size = btrfs_inode_size(
6393 sctx->left_path->nodes[0], left_ii);
6394 sctx->cur_inode_mode = btrfs_inode_mode(
6395 sctx->left_path->nodes[0], left_ii);
6396 sctx->cur_inode_rdev = btrfs_inode_rdev(
6397 sctx->left_path->nodes[0], left_ii);
6398 ret = send_create_inode_if_needed(sctx);
6399 if (ret < 0)
6400 goto out;
6401
6402 ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
6403 if (ret < 0)
6404 goto out;
6405 /*
6406 * Advance send_progress now as we did not get into
6407 * process_recorded_refs_if_needed in the new_gen case.
6408 */
6409 sctx->send_progress = sctx->cur_ino + 1;
6410
6411 /*
6412 * Now process all extents and xattrs of the inode as if
6413 * they were all new.
6414 */
6415 ret = process_all_extents(sctx);
6416 if (ret < 0)
6417 goto out;
6418 ret = process_all_new_xattrs(sctx);
6419 if (ret < 0)
6420 goto out;
6421 } else {
6422 sctx->cur_inode_gen = left_gen;
6423 sctx->cur_inode_new = 0;
6424 sctx->cur_inode_new_gen = 0;
6425 sctx->cur_inode_deleted = 0;
6426 sctx->cur_inode_size = btrfs_inode_size(
6427 sctx->left_path->nodes[0], left_ii);
6428 sctx->cur_inode_mode = btrfs_inode_mode(
6429 sctx->left_path->nodes[0], left_ii);
6430 }
6431 }
6432
6433out:
6434 return ret;
6435}
6436
6437/*
6438 * We have to process new refs before deleted refs, but compare_trees gives us
6439 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
6440 * first and later process them in process_recorded_refs.
6441 * For the cur_inode_new_gen case, we skip recording completely because
6442 * changed_inode already initiated processing of refs. The reason for this is
6443 * that in this case, compare_tree actually compares the refs of 2 different
6444 * inodes. To fix this, process_all_refs is used in changed_inode to handle all
6445 * refs of the right tree as deleted and all refs of the left tree as new.
6446 */
6447static int changed_ref(struct send_ctx *sctx,
6448 enum btrfs_compare_tree_result result)
6449{
6450 int ret = 0;
6451
6452 if (sctx->cur_ino != sctx->cmp_key->objectid) {
6453 inconsistent_snapshot_error(sctx, result, "reference");
6454 return -EIO;
6455 }
6456
6457 if (!sctx->cur_inode_new_gen &&
6458 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
6459 if (result == BTRFS_COMPARE_TREE_NEW)
6460 ret = record_new_ref(sctx);
6461 else if (result == BTRFS_COMPARE_TREE_DELETED)
6462 ret = record_deleted_ref(sctx);
6463 else if (result == BTRFS_COMPARE_TREE_CHANGED)
6464 ret = record_changed_ref(sctx);
6465 }
6466
6467 return ret;
6468}
6469
6470/*
6471 * Process new/deleted/changed xattrs. We skip processing in the
6472 * cur_inode_new_gen case because changed_inode already initiated processing
6473 * of xattrs. The reason is the same as in changed_ref.
6474 */
6475static int changed_xattr(struct send_ctx *sctx,
6476 enum btrfs_compare_tree_result result)
6477{
6478 int ret = 0;
6479
6480 if (sctx->cur_ino != sctx->cmp_key->objectid) {
6481 inconsistent_snapshot_error(sctx, result, "xattr");
6482 return -EIO;
6483 }
6484
6485 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6486 if (result == BTRFS_COMPARE_TREE_NEW)
6487 ret = process_new_xattr(sctx);
6488 else if (result == BTRFS_COMPARE_TREE_DELETED)
6489 ret = process_deleted_xattr(sctx);
6490 else if (result == BTRFS_COMPARE_TREE_CHANGED)
6491 ret = process_changed_xattr(sctx);
6492 }
6493
6494 return ret;
6495}
6496
6497/*
6498 * cur_inode_new_gen case because changed_inode already initiated processing
6499 * of extents. The reason is the same as in changed_ref.
6500 * of extents. The reason is the same as in changed_ref
6501 */
6502static int changed_extent(struct send_ctx *sctx,
6503 enum btrfs_compare_tree_result result)
6504{
6505 int ret = 0;
6506
6507 /*
6508 * We have found an extent item that changed without the inode item
6509 * having changed. This can happen either after relocation (where the
6510 * disk_bytenr of an extent item is replaced at
6511 * relocation.c:replace_file_extents()) or after deduplication into a
6512 * file in both the parent and send snapshots (where an extent item can
6513 * get modified or replaced with a new one). Note that deduplication
6514 * updates the inode item, but it only changes the iversion (sequence
6515 * field in the inode item) of the inode, so if a file is deduplicated
6516 * the same number of times in both the parent and send snapshots, its
6517 * iversion becomes the same in both snapshots, hence the inode item is
6518 * the same on both snapshots.
6519 */
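/*
 * Illustrative note (commands are examples, not from the original
 * source): the situations described above can result from running e.g.
 * "btrfs balance start <mnt>" (relocation) or a deduplication tool
 * (duperemove, or xfs_io's dedupe command) between the creation of the
 * parent and send snapshots, so that an incremental send done
 * afterwards reaches this callback for extents whose inode item did
 * not change.
 */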
6520 if (sctx->cur_ino != sctx->cmp_key->objectid)
6521 return 0;
6522
6523 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6524 if (result != BTRFS_COMPARE_TREE_DELETED)
6525 ret = process_extent(sctx, sctx->left_path,
6526 sctx->cmp_key);
6527 }
6528
6529 return ret;
6530}
6531
6532static int dir_changed(struct send_ctx *sctx, u64 dir)
6533{
6534 u64 orig_gen, new_gen;
6535 int ret;
6536
6537 ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
6538 NULL, NULL);
6539 if (ret)
6540 return ret;
6541
6542 ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
6543 NULL, NULL, NULL);
6544 if (ret)
6545 return ret;
6546
6547 return (orig_gen != new_gen) ? 1 : 0;
6548}
6549
6550static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
6551 struct btrfs_key *key)
6552{
6553 struct btrfs_inode_extref *extref;
6554 struct extent_buffer *leaf;
6555 u64 dirid = 0, last_dirid = 0;
6556 unsigned long ptr;
6557 u32 item_size;
6558 u32 cur_offset = 0;
6559 int ref_name_len;
6560 int ret = 0;
6561
6562 /* Easy case, just check this one dirid */
6563 if (key->type == BTRFS_INODE_REF_KEY) {
6564 dirid = key->offset;
6565
6566 ret = dir_changed(sctx, dirid);
6567 goto out;
6568 }
6569
6570 leaf = path->nodes[0];
6571 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
6572 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
6573 while (cur_offset < item_size) {
6574 extref = (struct btrfs_inode_extref *)(ptr +
6575 cur_offset);
6576 dirid = btrfs_inode_extref_parent(leaf, extref);
6577 ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
6578 cur_offset += ref_name_len + sizeof(*extref);
6579 if (dirid == last_dirid)
6580 continue;
6581 ret = dir_changed(sctx, dirid);
6582 if (ret)
6583 break;
6584 last_dirid = dirid;
6585 }
6586out:
6587 return ret;
6588}
6589
6590/*
6591 * Updates compare related fields in sctx and simply forwards to the actual
6592 * changed_xxx functions.
6593 */
6594static int changed_cb(struct btrfs_path *left_path,
6595 struct btrfs_path *right_path,
6596 struct btrfs_key *key,
6597 enum btrfs_compare_tree_result result,
6598 struct send_ctx *sctx)
6599{
6600 int ret = 0;
6601
6602 if (result == BTRFS_COMPARE_TREE_SAME) {
6603 if (key->type == BTRFS_INODE_REF_KEY ||
6604 key->type == BTRFS_INODE_EXTREF_KEY) {
6605 ret = compare_refs(sctx, left_path, key);
6606 if (!ret)
6607 return 0;
6608 if (ret < 0)
6609 return ret;
6610 } else if (key->type == BTRFS_EXTENT_DATA_KEY) {
6611 return maybe_send_hole(sctx, left_path, key);
6612 } else {
6613 return 0;
6614 }
6615 result = BTRFS_COMPARE_TREE_CHANGED;
6616 ret = 0;
6617 }
6618
6619 sctx->left_path = left_path;
6620 sctx->right_path = right_path;
6621 sctx->cmp_key = key;
6622
6623 ret = finish_inode_if_needed(sctx, 0);
6624 if (ret < 0)
6625 goto out;
6626
6627 /* Ignore non-FS objects */
6628 if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
6629 key->objectid == BTRFS_FREE_SPACE_OBJECTID)
6630 goto out;
6631
6632 if (key->type == BTRFS_INODE_ITEM_KEY) {
6633 ret = changed_inode(sctx, result);
6634 } else if (!sctx->ignore_cur_inode) {
6635 if (key->type == BTRFS_INODE_REF_KEY ||
6636 key->type == BTRFS_INODE_EXTREF_KEY)
6637 ret = changed_ref(sctx, result);
6638 else if (key->type == BTRFS_XATTR_ITEM_KEY)
6639 ret = changed_xattr(sctx, result);
6640 else if (key->type == BTRFS_EXTENT_DATA_KEY)
6641 ret = changed_extent(sctx, result);
6642 }
6643
6644out:
6645 return ret;
6646}
6647
6648static int full_send_tree(struct send_ctx *sctx)
6649{
6650 int ret;
6651 struct btrfs_root *send_root = sctx->send_root;
6652 struct btrfs_key key;
6653 struct btrfs_path *path;
6654 struct extent_buffer *eb;
6655 int slot;
6656
6657 path = alloc_path_for_send();
6658 if (!path)
6659 return -ENOMEM;
6660 path->reada = READA_FORWARD_ALWAYS;
6661
6662 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
6663 key.type = BTRFS_INODE_ITEM_KEY;
6664 key.offset = 0;
6665
6666 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
6667 if (ret < 0)
6668 goto out;
6669 if (ret)
6670 goto out_finish;
6671
6672 while (1) {
6673 eb = path->nodes[0];
6674 slot = path->slots[0];
6675 btrfs_item_key_to_cpu(eb, &key, slot);
6676
6677 ret = changed_cb(path, NULL, &key,
6678 BTRFS_COMPARE_TREE_NEW, sctx);
6679 if (ret < 0)
6680 goto out;
6681
6682 ret = btrfs_next_item(send_root, path);
6683 if (ret < 0)
6684 goto out;
6685 if (ret) {
6686 ret = 0;
6687 break;
6688 }
6689 }
6690
6691out_finish:
6692 ret = finish_inode_if_needed(sctx, 1);
6693
6694out:
6695 btrfs_free_path(path);
6696 return ret;
6697}
6698
6699static int tree_move_down(struct btrfs_path *path, int *level, u64 reada_min_gen)
6700{
6701 struct extent_buffer *eb;
6702 struct extent_buffer *parent = path->nodes[*level];
6703 int slot = path->slots[*level];
6704 const int nritems = btrfs_header_nritems(parent);
6705 u64 reada_max;
6706 u64 reada_done = 0;
6707
6708 BUG_ON(*level == 0);
6709 eb = btrfs_read_node_slot(parent, slot);
6710 if (IS_ERR(eb))
6711 return PTR_ERR(eb);
6712
6713 /*
6714 * Trigger readahead for the next leaves we will process, so that it is
6715 * very likely that when we need them they are already in memory and we
6716 * will not block on disk IO. For nodes we only do readahead for one,
6717 * since the time window between processing nodes is typically larger.
6718 */
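/*
 * Worked example (assuming a 16K nodesize, purely illustrative): when
 * moving down to level 0 the readahead budget below is SZ_128K, so up
 * to 8 of the following leaves get readahead triggered; when moving
 * down to an intermediate node the budget equals one nodesize, i.e. a
 * single node.
 */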
6719 reada_max = (*level == 1 ? SZ_128K : eb->fs_info->nodesize);
6720
6721 for (slot++; slot < nritems && reada_done < reada_max; slot++) {
6722 if (btrfs_node_ptr_generation(parent, slot) > reada_min_gen) {
6723 btrfs_readahead_node_child(parent, slot);
6724 reada_done += eb->fs_info->nodesize;
6725 }
6726 }
6727
6728 path->nodes[*level - 1] = eb;
6729 path->slots[*level - 1] = 0;
6730 (*level)--;
6731 return 0;
6732}
6733
6734static int tree_move_next_or_upnext(struct btrfs_path *path,
6735 int *level, int root_level)
6736{
6737 int ret = 0;
6738 int nritems;
6739 nritems = btrfs_header_nritems(path->nodes[*level]);
6740
6741 path->slots[*level]++;
6742
6743 while (path->slots[*level] >= nritems) {
6744 if (*level == root_level)
6745 return -1;
6746
6747 /* move upnext */
6748 path->slots[*level] = 0;
6749 free_extent_buffer(path->nodes[*level]);
6750 path->nodes[*level] = NULL;
6751 (*level)++;
6752 path->slots[*level]++;
6753
6754 nritems = btrfs_header_nritems(path->nodes[*level]);
6755 ret = 1;
6756 }
6757 return ret;
6758}
6759
6760/*
6761 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
6762 * or down.
6763 */
6764static int tree_advance(struct btrfs_path *path,
6765 int *level, int root_level,
6766 int allow_down,
6767 struct btrfs_key *key,
6768 u64 reada_min_gen)
6769{
6770 int ret;
6771
6772 if (*level == 0 || !allow_down) {
6773 ret = tree_move_next_or_upnext(path, level, root_level);
6774 } else {
6775 ret = tree_move_down(path, level, reada_min_gen);
6776 }
6777 if (ret >= 0) {
6778 if (*level == 0)
6779 btrfs_item_key_to_cpu(path->nodes[*level], key,
6780 path->slots[*level]);
6781 else
6782 btrfs_node_key_to_cpu(path->nodes[*level], key,
6783 path->slots[*level]);
6784 }
6785 return ret;
6786}
6787
6788static int tree_compare_item(struct btrfs_path *left_path,
6789 struct btrfs_path *right_path,
6790 char *tmp_buf)
6791{
6792 int cmp;
6793 int len1, len2;
6794 unsigned long off1, off2;
6795
6796 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
6797 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
6798 if (len1 != len2)
6799 return 1;
6800
6801 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
6802 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
6803 right_path->slots[0]);
6804
6805 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
6806
6807 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
6808 if (cmp)
6809 return 1;
6810 return 0;
6811}
6812
6813/*
6814 * This function compares two trees and calls the provided callback for
6815 * every changed/new/deleted item it finds.
6816 * If shared tree blocks are encountered, whole subtrees are skipped, making
6817 * the compare pretty fast on snapshotted subvolumes.
6818 *
6819 * This currently works on commit roots only. As commit roots are read only,
6820 * we don't do any locking. The commit roots are protected with transactions.
6821 * Transactions are ended and rejoined when a commit is tried in between.
6822 *
6823 * This function checks for modifications done to the trees while comparing.
6824 * If it detects a change, it aborts immediately.
6825 */
6826static int btrfs_compare_trees(struct btrfs_root *left_root,
6827 struct btrfs_root *right_root, struct send_ctx *sctx)
6828{
6829 struct btrfs_fs_info *fs_info = left_root->fs_info;
6830 int ret;
6831 int cmp;
6832 struct btrfs_path *left_path = NULL;
6833 struct btrfs_path *right_path = NULL;
6834 struct btrfs_key left_key;
6835 struct btrfs_key right_key;
6836 char *tmp_buf = NULL;
6837 int left_root_level;
6838 int right_root_level;
6839 int left_level;
6840 int right_level;
6841 int left_end_reached;
6842 int right_end_reached;
6843 int advance_left;
6844 int advance_right;
6845 u64 left_blockptr;
6846 u64 right_blockptr;
6847 u64 left_gen;
6848 u64 right_gen;
6849 u64 reada_min_gen;
6850
6851 left_path = btrfs_alloc_path();
6852 if (!left_path) {
6853 ret = -ENOMEM;
6854 goto out;
6855 }
6856 right_path = btrfs_alloc_path();
6857 if (!right_path) {
6858 ret = -ENOMEM;
6859 goto out;
6860 }
6861
6862 tmp_buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
6863 if (!tmp_buf) {
6864 ret = -ENOMEM;
6865 goto out;
6866 }
6867
6868 left_path->search_commit_root = 1;
6869 left_path->skip_locking = 1;
6870 right_path->search_commit_root = 1;
6871 right_path->skip_locking = 1;
6872
6873 /*
6874 * Strategy: Go to the first items of both trees. Then do
6875 *
6876 * If both trees are at level 0
6877 * Compare keys of current items
6878 * If left < right treat left item as new, advance left tree
6879 * and repeat
6880 * If left > right treat right item as deleted, advance right tree
6881 * and repeat
6882 * If left == right do deep compare of items, treat as changed if
6883 * needed, advance both trees and repeat
6884 * If both trees are at the same level but not at level 0
6885 * Compare keys of current nodes/leaves
6886 * If left < right advance left tree and repeat
6887 * If left > right advance right tree and repeat
6888 * If left == right compare blockptrs of the next nodes/leaves
6889 * If they match advance both trees but stay at the same level
6890 * and repeat
6891 * If they don't match advance both trees while allowing to go
6892 * deeper and repeat
6893 * If tree levels are different
6894 * Advance the tree that needs it and repeat
6895 *
6896 * Advancing a tree means:
6897 * If we are at level 0, try to go to the next slot. If that's not
6898 * possible, go one level up and repeat. Stop when we find a level
6899 * where we can go to the next slot. We may at this point be on a
6900 * node or a leaf.
6901 *
6902 * If we are not at level 0 and not on shared tree blocks, go one
6903 * level deeper.
6904 *
6905 * If we are not at level 0 and on shared tree blocks, go one slot to
6906 * the right if possible or go up and right.
6907 */
6908
6909 down_read(&fs_info->commit_root_sem);
6910 left_level = btrfs_header_level(left_root->commit_root);
6911 left_root_level = left_level;
6912 left_path->nodes[left_level] =
6913 btrfs_clone_extent_buffer(left_root->commit_root);
6914 if (!left_path->nodes[left_level]) {
6915 up_read(&fs_info->commit_root_sem);
6916 ret = -ENOMEM;
6917 goto out;
6918 }
6919
6920 right_level = btrfs_header_level(right_root->commit_root);
6921 right_root_level = right_level;
6922 right_path->nodes[right_level] =
6923 btrfs_clone_extent_buffer(right_root->commit_root);
6924 if (!right_path->nodes[right_level]) {
6925 up_read(&fs_info->commit_root_sem);
6926 ret = -ENOMEM;
6927 goto out;
6928 }
6929 /*
6930 * Our right root is the parent root, while the left root is the "send"
6931 * root. We know that all new nodes/leaves in the left root must have
6932 * a generation greater than the right root's generation, so we trigger
6933 * readahead for those nodes and leaves of the left root, as we know we
6934 * will need to read them at some point.
6935 */
6936 reada_min_gen = btrfs_header_generation(right_root->commit_root);
6937 up_read(&fs_info->commit_root_sem);
6938
6939 if (left_level == 0)
6940 btrfs_item_key_to_cpu(left_path->nodes[left_level],
6941 &left_key, left_path->slots[left_level]);
6942 else
6943 btrfs_node_key_to_cpu(left_path->nodes[left_level],
6944 &left_key, left_path->slots[left_level]);
6945 if (right_level == 0)
6946 btrfs_item_key_to_cpu(right_path->nodes[right_level],
6947 &right_key, right_path->slots[right_level]);
6948 else
6949 btrfs_node_key_to_cpu(right_path->nodes[right_level],
6950 &right_key, right_path->slots[right_level]);
6951
6952 left_end_reached = right_end_reached = 0;
6953 advance_left = advance_right = 0;
6954
6955 while (1) {
6956 cond_resched();
6957 if (advance_left && !left_end_reached) {
6958 ret = tree_advance(left_path, &left_level,
6959 left_root_level,
6960 advance_left != ADVANCE_ONLY_NEXT,
6961 &left_key, reada_min_gen);
6962 if (ret == -1)
6963 left_end_reached = ADVANCE;
6964 else if (ret < 0)
6965 goto out;
6966 advance_left = 0;
6967 }
6968 if (advance_right && !right_end_reached) {
6969 ret = tree_advance(right_path, &right_level,
6970 right_root_level,
6971 advance_right != ADVANCE_ONLY_NEXT,
6972 &right_key, reada_min_gen);
6973 if (ret == -1)
6974 right_end_reached = ADVANCE;
6975 else if (ret < 0)
6976 goto out;
6977 advance_right = 0;
6978 }
6979
6980 if (left_end_reached && right_end_reached) {
6981 ret = 0;
6982 goto out;
6983 } else if (left_end_reached) {
6984 if (right_level == 0) {
6985 ret = changed_cb(left_path, right_path,
6986 &right_key,
6987 BTRFS_COMPARE_TREE_DELETED,
6988 sctx);
6989 if (ret < 0)
6990 goto out;
6991 }
6992 advance_right = ADVANCE;
6993 continue;
6994 } else if (right_end_reached) {
6995 if (left_level == 0) {
6996 ret = changed_cb(left_path, right_path,
6997 &left_key,
6998 BTRFS_COMPARE_TREE_NEW,
6999 sctx);
7000 if (ret < 0)
7001 goto out;
7002 }
7003 advance_left = ADVANCE;
7004 continue;
7005 }
7006
7007 if (left_level == 0 && right_level == 0) {
7008 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
7009 if (cmp < 0) {
7010 ret = changed_cb(left_path, right_path,
7011 &left_key,
7012 BTRFS_COMPARE_TREE_NEW,
7013 sctx);
7014 if (ret < 0)
7015 goto out;
7016 advance_left = ADVANCE;
7017 } else if (cmp > 0) {
7018 ret = changed_cb(left_path, right_path,
7019 &right_key,
7020 BTRFS_COMPARE_TREE_DELETED,
7021 sctx);
7022 if (ret < 0)
7023 goto out;
7024 advance_right = ADVANCE;
7025 } else {
7026 enum btrfs_compare_tree_result result;
7027
7028 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
7029 ret = tree_compare_item(left_path, right_path,
7030 tmp_buf);
7031 if (ret)
7032 result = BTRFS_COMPARE_TREE_CHANGED;
7033 else
7034 result = BTRFS_COMPARE_TREE_SAME;
7035 ret = changed_cb(left_path, right_path,
7036 &left_key, result, sctx);
7037 if (ret < 0)
7038 goto out;
7039 advance_left = ADVANCE;
7040 advance_right = ADVANCE;
7041 }
7042 } else if (left_level == right_level) {
7043 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
7044 if (cmp < 0) {
7045 advance_left = ADVANCE;
7046 } else if (cmp > 0) {
7047 advance_right = ADVANCE;
7048 } else {
7049 left_blockptr = btrfs_node_blockptr(
7050 left_path->nodes[left_level],
7051 left_path->slots[left_level]);
7052 right_blockptr = btrfs_node_blockptr(
7053 right_path->nodes[right_level],
7054 right_path->slots[right_level]);
7055 left_gen = btrfs_node_ptr_generation(
7056 left_path->nodes[left_level],
7057 left_path->slots[left_level]);
7058 right_gen = btrfs_node_ptr_generation(
7059 right_path->nodes[right_level],
7060 right_path->slots[right_level]);
7061 if (left_blockptr == right_blockptr &&
7062 left_gen == right_gen) {
7063 /*
7064 * As we're on a shared block, don't
7065 * allow to go deeper.
7066 */
7067 advance_left = ADVANCE_ONLY_NEXT;
7068 advance_right = ADVANCE_ONLY_NEXT;
7069 } else {
7070 advance_left = ADVANCE;
7071 advance_right = ADVANCE;
7072 }
7073 }
7074 } else if (left_level < right_level) {
7075 advance_right = ADVANCE;
7076 } else {
7077 advance_left = ADVANCE;
7078 }
7079 }
7080
7081out:
7082 btrfs_free_path(left_path);
7083 btrfs_free_path(right_path);
7084 kvfree(tmp_buf);
7085 return ret;
7086}
7087
7088static int send_subvol(struct send_ctx *sctx)
7089{
7090 int ret;
7091
7092 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
7093 ret = send_header(sctx);
7094 if (ret < 0)
7095 goto out;
7096 }
7097
7098 ret = send_subvol_begin(sctx);
7099 if (ret < 0)
7100 goto out;
7101
7102 if (sctx->parent_root) {
7103 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root, sctx);
7104 if (ret < 0)
7105 goto out;
7106 ret = finish_inode_if_needed(sctx, 1);
7107 if (ret < 0)
7108 goto out;
7109 } else {
7110 ret = full_send_tree(sctx);
7111 if (ret < 0)
7112 goto out;
7113 }
7114
7115out:
7116 free_recorded_refs(sctx);
7117 return ret;
7118}
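/*
 * Illustrative userspace view of the two paths above (hypothetical
 * paths, not from the original source): a full send walks the whole
 * tree, an incremental send compares against a parent snapshot:
 *
 *   btrfs send /mnt/snap1 | btrfs receive /backup
 *   btrfs send -p /mnt/snap1 /mnt/snap2 | btrfs receive /backup
 *
 * The first invocation ends up in full_send_tree(), the second in
 * btrfs_compare_trees().
 */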
7119
7120/*
7121 * If orphan cleanup did remove any orphans from a root, it means the tree
7122 * was modified and therefore the commit root is not the same as the current
7123 * root anymore. This is a problem, because send uses the commit root and
7124 * therefore can see inode items that don't exist in the current root anymore,
7125 * and for example make calls to btrfs_iget, which will do tree lookups based
7126 * on the current root and not on the commit root. Those lookups will fail,
7127 * returning a -ESTALE error, and making send fail with that error. So make
7128 * sure a send does not see any orphans we have just removed, and that it will
7129 * see the same inodes regardless of whether a transaction commit happened
7130 * before it started (meaning that the commit root will be the same as the
7131 * current root) or not.
7132 */
7133static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
7134{
7135 int i;
7136 struct btrfs_trans_handle *trans = NULL;
7137
7138again:
7139 if (sctx->parent_root &&
7140 sctx->parent_root->node != sctx->parent_root->commit_root)
7141 goto commit_trans;
7142
7143 for (i = 0; i < sctx->clone_roots_cnt; i++)
7144 if (sctx->clone_roots[i].root->node !=
7145 sctx->clone_roots[i].root->commit_root)
7146 goto commit_trans;
7147
7148 if (trans)
7149 return btrfs_end_transaction(trans);
7150
7151 return 0;
7152
7153commit_trans:
7154 /* Use any root, all fs roots will get their commit roots updated. */
7155 if (!trans) {
7156 trans = btrfs_join_transaction(sctx->send_root);
7157 if (IS_ERR(trans))
7158 return PTR_ERR(trans);
7159 goto again;
7160 }
7161
7162 return btrfs_commit_transaction(trans);
7163}
7164
7165/*
7166 * Make sure any existing delalloc is flushed for any root used by a send
7167 * operation so that we do not miss any data and we do not race with writeback
7168 * finishing and changing a tree while send is using the tree. This could
7169 * happen if a subvolume is in RW mode, has delalloc, is turned to RO mode and
7170 * a send operation then uses the subvolume.
7171 * After flushing delalloc ensure_commit_roots_uptodate() must be called.
7172 */
7173static int flush_delalloc_roots(struct send_ctx *sctx)
7174{
7175 struct btrfs_root *root = sctx->parent_root;
7176 int ret;
7177 int i;
7178
7179 if (root) {
7180 ret = btrfs_start_delalloc_snapshot(root, false);
7181 if (ret)
7182 return ret;
7183 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
7184 }
7185
7186 for (i = 0; i < sctx->clone_roots_cnt; i++) {
7187 root = sctx->clone_roots[i].root;
7188 ret = btrfs_start_delalloc_snapshot(root, false);
7189 if (ret)
7190 return ret;
7191 btrfs_wait_ordered_extents(root, U64_MAX, 0, U64_MAX);
7192 }
7193
7194 return 0;
7195}
7196
7197static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
7198{
7199 spin_lock(&root->root_item_lock);
7200 root->send_in_progress--;
7201 /*
7202 * Not much left to do; we don't know why it's unbalanced and
7203 * can't blindly reset it to 0.
7204 */
7205 if (root->send_in_progress < 0)
7206 btrfs_err(root->fs_info,
7207 "send_in_progress unbalanced %d root %llu",
7208 root->send_in_progress, root->root_key.objectid);
7209 spin_unlock(&root->root_item_lock);
7210}
7211
static void dedupe_in_progress_warn(const struct btrfs_root *root)
{
	btrfs_warn_rl(root->fs_info,
"cannot use root %llu for send while deduplications on it are in progress (%d in progress)",
		      root->root_key.objectid, root->dedupe_in_progress);
}

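/*
 * Rough illustration of how this entry point is normally driven from user
 * space (a hedged sketch, not taken from this file; the exact sequence lives
 * in btrfs-progs, and the variable names below are placeholders): the caller
 * opens the read-only subvolume, sets up a file or pipe to receive the stream
 * and issues the BTRFS_IOC_SEND ioctl, e.g.:
 *
 *	struct btrfs_ioctl_send_args args = {
 *		.send_fd = out_fd,			// stream destination
 *		.parent_root = parent_subvol_id,	// 0 for a full send
 *		.clone_sources = ids,			// array of subvol ids
 *		.clone_sources_count = nr_ids,
 *		.flags = 0,
 *	};
 *	ret = ioctl(subvol_fd, BTRFS_IOC_SEND, &args);
 */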
long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
{
	int ret = 0;
	struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
	struct btrfs_fs_info *fs_info = send_root->fs_info;
	struct btrfs_root *clone_root;
	struct send_ctx *sctx = NULL;
	u32 i;
	u64 *clone_sources_tmp = NULL;
	int clone_sources_to_rollback = 0;
	size_t alloc_size;
	int sort_clone_roots = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * The subvolume must remain read-only during send, protect against
	 * making it RW. This also protects against deletion.
	 */
	spin_lock(&send_root->root_item_lock);
	if (btrfs_root_readonly(send_root) && send_root->dedupe_in_progress) {
		dedupe_in_progress_warn(send_root);
		spin_unlock(&send_root->root_item_lock);
		return -EAGAIN;
	}
	send_root->send_in_progress++;
	spin_unlock(&send_root->root_item_lock);

	/*
	 * Userspace tools do the checks and warn the user if it's
	 * not RO.
	 */
	if (!btrfs_root_readonly(send_root)) {
		ret = -EPERM;
		goto out;
	}

	/*
	 * Check that we don't overflow at later allocations: we request
	 * clone_sources_count + 1 items, and compare to unsigned long inside
	 * access_ok().
	 */
	if (arg->clone_sources_count >
	    ULONG_MAX / sizeof(struct clone_root) - 1) {
		ret = -EINVAL;
		goto out;
	}

	if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
		ret = -EINVAL;
		goto out;
	}

	sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
	if (!sctx) {
		ret = -ENOMEM;
		goto out;
	}

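	/*
	 * Lists of new/deleted refs for the inode currently being processed
	 * and the cache used to memoize inode-to-name lookups. The cache is
	 * released through name_cache_free() in the cleanup path below.
	 */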
	INIT_LIST_HEAD(&sctx->new_refs);
	INIT_LIST_HEAD(&sctx->deleted_refs);
	INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
	INIT_LIST_HEAD(&sctx->name_cache_list);

	sctx->flags = arg->flags;

	sctx->send_filp = fget(arg->send_fd);
	if (!sctx->send_filp) {
		ret = -EBADF;
		goto out;
	}

	sctx->send_root = send_root;
	/*
	 * Unlikely but possible: if the subvolume is marked for deletion but
	 * removal of its directory entry is slow, send can still be started.
	 */
	if (btrfs_root_dead(sctx->send_root)) {
		ret = -EPERM;
		goto out;
	}

	sctx->clone_roots_cnt = arg->clone_sources_count;

	sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
	sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
	if (!sctx->send_buf) {
		ret = -ENOMEM;
		goto out;
	}

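	/*
	 * Rb trees tracking directory moves that have to be delayed until a
	 * later point of the send stream, and orphan directories. On a
	 * successful send they are empty again by the time we reach the
	 * 'out' label below (see the WARN_ONs there).
	 */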
	sctx->pending_dir_moves = RB_ROOT;
	sctx->waiting_dir_moves = RB_ROOT;
	sctx->orphan_dirs = RB_ROOT;

	/* kvcalloc() takes the number of elements first, then the size. */
	sctx->clone_roots = kvcalloc(arg->clone_sources_count + 1,
				     sizeof(*sctx->clone_roots),
				     GFP_KERNEL);
	if (!sctx->clone_roots) {
		ret = -ENOMEM;
		goto out;
	}

	alloc_size = array_size(sizeof(*arg->clone_sources),
				arg->clone_sources_count);

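	/*
	 * Copy the clone source root ids from user space, then look up each
	 * root, check that it is read-only, alive and not being deduplicated,
	 * and pin it by bumping its send_in_progress counter.
	 */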
	if (arg->clone_sources_count) {
		clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
		if (!clone_sources_tmp) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
				     alloc_size);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		for (i = 0; i < arg->clone_sources_count; i++) {
			clone_root = btrfs_get_fs_root(fs_info,
						clone_sources_tmp[i], true);
			if (IS_ERR(clone_root)) {
				ret = PTR_ERR(clone_root);
				goto out;
			}
			spin_lock(&clone_root->root_item_lock);
			if (!btrfs_root_readonly(clone_root) ||
			    btrfs_root_dead(clone_root)) {
				spin_unlock(&clone_root->root_item_lock);
				btrfs_put_root(clone_root);
				ret = -EPERM;
				goto out;
			}
			if (clone_root->dedupe_in_progress) {
				dedupe_in_progress_warn(clone_root);
				spin_unlock(&clone_root->root_item_lock);
				btrfs_put_root(clone_root);
				ret = -EAGAIN;
				goto out;
			}
			clone_root->send_in_progress++;
			spin_unlock(&clone_root->root_item_lock);

			sctx->clone_roots[i].root = clone_root;
			clone_sources_to_rollback = i + 1;
		}
		kvfree(clone_sources_tmp);
		clone_sources_tmp = NULL;
	}

	if (arg->parent_root) {
		sctx->parent_root = btrfs_get_fs_root(fs_info, arg->parent_root,
						      true);
		if (IS_ERR(sctx->parent_root)) {
			ret = PTR_ERR(sctx->parent_root);
			goto out;
		}

		spin_lock(&sctx->parent_root->root_item_lock);
		sctx->parent_root->send_in_progress++;
		if (!btrfs_root_readonly(sctx->parent_root) ||
		    btrfs_root_dead(sctx->parent_root)) {
			spin_unlock(&sctx->parent_root->root_item_lock);
			ret = -EPERM;
			goto out;
		}
		if (sctx->parent_root->dedupe_in_progress) {
			dedupe_in_progress_warn(sctx->parent_root);
			spin_unlock(&sctx->parent_root->root_item_lock);
			ret = -EAGAIN;
			goto out;
		}
		spin_unlock(&sctx->parent_root->root_item_lock);
	}

	/*
	 * Clones from send_root are allowed, but only if the clone source
	 * is behind the current send position. This is checked while searching
	 * for possible clone sources.
	 */
	sctx->clone_roots[sctx->clone_roots_cnt++].root =
		btrfs_grab_root(sctx->send_root);

	/* We do a bsearch later, so the clone roots array must stay sorted. */
	sort(sctx->clone_roots, sctx->clone_roots_cnt,
	     sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
	     NULL);
	sort_clone_roots = 1;

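	/*
	 * Stabilize every root involved in this send: flush delalloc and make
	 * sure the commit roots match the current roots before we start
	 * walking the trees.
	 */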
	ret = flush_delalloc_roots(sctx);
	if (ret)
		goto out;

	ret = ensure_commit_roots_uptodate(sctx);
	if (ret)
		goto out;

	spin_lock(&fs_info->send_reloc_lock);
	if (test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags)) {
		spin_unlock(&fs_info->send_reloc_lock);
		btrfs_warn_rl(fs_info,
		"cannot run send because a relocation operation is in progress");
		ret = -EAGAIN;
		goto out;
	}
	fs_info->send_in_progress++;
	spin_unlock(&fs_info->send_reloc_lock);

	ret = send_subvol(sctx);
	spin_lock(&fs_info->send_reloc_lock);
	fs_info->send_in_progress--;
	spin_unlock(&fs_info->send_reloc_lock);
	if (ret < 0)
		goto out;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_END);
		if (ret < 0)
			goto out;
		ret = send_cmd(sctx);
		if (ret < 0)
			goto out;
	}

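	/*
	 * Error path and final cleanup. On a successful send the three rb
	 * trees below must already be empty (hence the WARN_ONs); on error we
	 * drain whatever deferred directory moves, waiting moves and orphan
	 * dir records are left over.
	 */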
out:
	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
		struct rb_node *n;
		struct pending_dir_move *pm;

		n = rb_first(&sctx->pending_dir_moves);
		pm = rb_entry(n, struct pending_dir_move, node);
		while (!list_empty(&pm->list)) {
			struct pending_dir_move *pm2;

			pm2 = list_first_entry(&pm->list,
					       struct pending_dir_move, list);
			free_pending_move(sctx, pm2);
		}
		free_pending_move(sctx, pm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
		struct rb_node *n;
		struct waiting_dir_move *dm;

		n = rb_first(&sctx->waiting_dir_moves);
		dm = rb_entry(n, struct waiting_dir_move, node);
		rb_erase(&dm->node, &sctx->waiting_dir_moves);
		kfree(dm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
	while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
		struct rb_node *n;
		struct orphan_dir_info *odi;

		n = rb_first(&sctx->orphan_dirs);
		odi = rb_entry(n, struct orphan_dir_info, node);
		free_orphan_dir_info(sctx, odi);
	}

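	/*
	 * Once the array has been sorted it holds every root we pinned,
	 * including send_root itself, so drop all of them here. If we failed
	 * before sorting, only roll back the clone sources that were actually
	 * pinned and drop send_root's send_in_progress separately.
	 */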
	if (sort_clone_roots) {
		for (i = 0; i < sctx->clone_roots_cnt; i++) {
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);
			btrfs_put_root(sctx->clone_roots[i].root);
		}
	} else {
		for (i = 0; sctx && i < clone_sources_to_rollback; i++) {
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);
			btrfs_put_root(sctx->clone_roots[i].root);
		}

		btrfs_root_dec_send_in_progress(send_root);
	}
	if (sctx && !IS_ERR_OR_NULL(sctx->parent_root)) {
		btrfs_root_dec_send_in_progress(sctx->parent_root);
		btrfs_put_root(sctx->parent_root);
	}

	kvfree(clone_sources_tmp);

	if (sctx) {
		if (sctx->send_filp)
			fput(sctx->send_filp);

		kvfree(sctx->clone_roots);
		kvfree(sctx->send_buf);

		name_cache_free(sctx);

		kfree(sctx);
	}

	return ret;
}