// SPDX-License-Identifier: GPL-2.0

#include <linux/blkdev.h>
#include <linux/iversion.h>
#include "compression.h"
#include "ctree.h"
#include "delalloc-space.h"
#include "reflink.h"
#include "transaction.h"
#include "subpage.h"

#define BTRFS_MAX_DEDUPE_LEN	SZ_16M
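
/*
 * The 16 MiB cap above bounds how much file range a single dedupe pass
 * locks at once. For example (see btrfs_extent_same() below), a 40 MiB
 * deduplication request is carried out as two full 16 MiB chunks followed
 * by an 8 MiB tail, each handled by a separate call to
 * btrfs_extent_same_range().
 */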

static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
                                     struct inode *inode,
                                     u64 endoff,
                                     const u64 destoff,
                                     const u64 olen,
                                     int no_time_update)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        int ret;

        inode_inc_iversion(inode);
        if (!no_time_update)
                inode->i_mtime = inode->i_ctime = current_time(inode);
        /*
         * We round up to the block size at eof when determining which
         * extents to clone above, but shouldn't round up the file size.
         */
        if (endoff > destoff + olen)
                endoff = destoff + olen;
        if (endoff > inode->i_size) {
                i_size_write(inode, endoff);
                btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
        }

        ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
        if (ret) {
                btrfs_abort_transaction(trans, ret);
                btrfs_end_transaction(trans);
                goto out;
        }
        ret = btrfs_end_transaction(trans);
out:
        return ret;
}
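
/*
 * The clamping above matters when the clone range ends at eof: with a
 * 4 KiB block size (an example value), cloning olen == 500 bytes to
 * destoff == 0 copies a full block-aligned extent, so the caller passes
 * endoff == 4096, but the destination's i_size must become 500, not 4096.
 */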

static int copy_inline_to_page(struct btrfs_inode *inode,
                               const u64 file_offset,
                               char *inline_data,
                               const u64 size,
                               const u64 datal,
                               const u8 comp_type)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        const u32 block_size = fs_info->sectorsize;
        const u64 range_end = file_offset + block_size - 1;
        const size_t inline_size = size - btrfs_file_extent_calc_inline_size(0);
        char *data_start = inline_data + btrfs_file_extent_calc_inline_size(0);
        struct extent_changeset *data_reserved = NULL;
        struct page *page = NULL;
        struct address_space *mapping = inode->vfs_inode.i_mapping;
        int ret;

        ASSERT(IS_ALIGNED(file_offset, block_size));

        /*
         * We have flushed and locked the ranges of the source and destination
         * inodes, and we have also locked the inodes themselves, so we are
         * safe to do a reservation here. Also we must not do the reservation
         * while holding a transaction open, otherwise we would deadlock.
         */
        ret = btrfs_delalloc_reserve_space(inode, &data_reserved, file_offset,
                                           block_size);
        if (ret)
                goto out;

        page = find_or_create_page(mapping, file_offset >> PAGE_SHIFT,
                                   btrfs_alloc_write_mask(mapping));
        if (!page) {
                ret = -ENOMEM;
                goto out_unlock;
        }

        ret = set_page_extent_mapped(page);
        if (ret < 0)
                goto out_unlock;

        clear_extent_bit(&inode->io_tree, file_offset, range_end,
                         EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                         0, 0, NULL);
        ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL);
        if (ret)
                goto out_unlock;

        /*
         * After dirtying the page our caller will need to start a transaction,
         * and if we are low on metadata free space, that can cause flushing of
         * delalloc for all inodes in order to get metadata space released.
         * However we are holding the range locked for the whole duration of
         * the clone/dedupe operation, so we may deadlock if that happens and no
         * other task releases enough space. So mark this inode as not being
         * possible to flush to avoid such deadlock. We will clear that flag
         * when we finish cloning all extents, since a transaction is started
         * after finding each extent to clone.
         */
        set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);

        if (comp_type == BTRFS_COMPRESS_NONE) {
                memcpy_to_page(page, offset_in_page(file_offset), data_start,
                               datal);
                flush_dcache_page(page);
        } else {
                ret = btrfs_decompress(comp_type, data_start, page,
                                       offset_in_page(file_offset),
                                       inline_size, datal);
                if (ret)
                        goto out_unlock;
                flush_dcache_page(page);
        }

        /*
         * If our inline data is smaller than the block/page size, then the
         * remainder of the block/page is equivalent to zeroes. We had something
         * like the following done:
         *
         * $ xfs_io -f -c "pwrite -S 0xab 0 500" file
         * $ sync  # (or fsync)
         * $ xfs_io -c "falloc 0 4K" file
         * $ xfs_io -c "pwrite -S 0xcd 4K 4K"
         *
         * So what's in the range [500, 4095] corresponds to zeroes.
         */
        if (datal < block_size) {
                memzero_page(page, datal, block_size - datal);
                flush_dcache_page(page);
        }

        btrfs_page_set_uptodate(fs_info, page, file_offset, block_size);
        ClearPageChecked(page);
        btrfs_page_set_dirty(fs_info, page, file_offset, block_size);
out_unlock:
        if (page) {
                unlock_page(page);
                put_page(page);
        }
        if (ret)
                btrfs_delalloc_release_space(inode, data_reserved, file_offset,
                                             block_size, true);
        btrfs_delalloc_release_extents(inode, block_size);
out:
        extent_changeset_free(data_reserved);

        return ret;
}
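
/*
 * Layout of the inline_data buffer handled above, as implied by the
 * inline_size/data_start arithmetic: the buffer is a verbatim copy of a
 * file extent item, i.e. a btrfs_file_extent_item header of
 * btrfs_file_extent_calc_inline_size(0) bytes followed by the inline
 * payload (compressed on disk when comp_type != BTRFS_COMPRESS_NONE,
 * expanding to datal bytes of file data):
 *
 *   inline_data                           data_start
 *   |<-- extent item header -->|<-- inline payload (inline_size) -->|
 */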

/*
 * Deal with cloning of inline extents. We try to copy the inline extent from
 * the source inode to destination inode when possible. When not possible we
 * copy the inline extent's data into the respective page of the inode.
 */
static int clone_copy_inline_extent(struct inode *dst,
                                    struct btrfs_path *path,
                                    struct btrfs_key *new_key,
                                    const u64 drop_start,
                                    const u64 datal,
                                    const u64 size,
                                    const u8 comp_type,
                                    char *inline_data,
                                    struct btrfs_trans_handle **trans_out)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
        struct btrfs_root *root = BTRFS_I(dst)->root;
        const u64 aligned_end = ALIGN(new_key->offset + datal,
                                      fs_info->sectorsize);
        struct btrfs_trans_handle *trans = NULL;
        struct btrfs_drop_extents_args drop_args = { 0 };
        int ret;
        struct btrfs_key key;

        if (new_key->offset > 0) {
                ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
                                          inline_data, size, datal, comp_type);
                goto out;
        }

        key.objectid = btrfs_ino(BTRFS_I(dst));
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = 0;
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0) {
                return ret;
        } else if (ret > 0) {
                if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                return ret;
                        else if (ret > 0)
                                goto copy_inline_extent;
                }
                btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
                if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
                    key.type == BTRFS_EXTENT_DATA_KEY) {
                        /*
                         * There's an implicit hole at file offset 0, copy the
                         * inline extent's data to the page.
                         */
                        ASSERT(key.offset > 0);
                        goto copy_to_page;
                }
        } else if (i_size_read(dst) <= datal) {
                struct btrfs_file_extent_item *ei;

                ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                    struct btrfs_file_extent_item);
                /*
                 * If it's an inline extent replace it with the source inline
                 * extent, otherwise copy the source inline extent data into
                 * the respective page at the destination inode.
                 */
                if (btrfs_file_extent_type(path->nodes[0], ei) ==
                    BTRFS_FILE_EXTENT_INLINE)
                        goto copy_inline_extent;

                goto copy_to_page;
        }

copy_inline_extent:
        /*
         * We have no extent items, or we have an extent at offset 0 which may
         * or may not be inlined. All these cases are dealt with the same way.
         */
        if (i_size_read(dst) > datal) {
                /*
                 * At the destination offset 0 we have either a hole, a regular
                 * extent or an inline extent larger than the one we want to
                 * clone. Deal with all these cases by copying the inline extent
                 * data into the respective page at the destination inode.
                 */
                goto copy_to_page;
        }

        /*
         * Release path before starting a new transaction so we don't hold locks
         * that would confuse lockdep.
         */
        btrfs_release_path(path);
        /*
         * If we end up here it means we are copying the inline extent into a
         * leaf of the destination inode. We know we will drop or adjust at
         * most one extent item in the destination root.
         *
         * 1 unit - adjusting old extent (we may have to split it)
         * 1 unit - add new extent
         * 1 unit - inode update
         */
        trans = btrfs_start_transaction(root, 3);
        if (IS_ERR(trans)) {
                ret = PTR_ERR(trans);
                trans = NULL;
                goto out;
        }
        drop_args.path = path;
        drop_args.start = drop_start;
        drop_args.end = aligned_end;
        drop_args.drop_cache = true;
        ret = btrfs_drop_extents(trans, root, BTRFS_I(dst), &drop_args);
        if (ret)
                goto out;
        ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
        if (ret)
                goto out;

        write_extent_buffer(path->nodes[0], inline_data,
                            btrfs_item_ptr_offset(path->nodes[0],
                                                  path->slots[0]),
                            size);
        btrfs_update_inode_bytes(BTRFS_I(dst), datal, drop_args.bytes_found);
        set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(dst)->runtime_flags);
        ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
out:
        if (!ret && !trans) {
                /*
                 * No transaction here means we copied the inline extent into a
                 * page of the destination inode.
                 *
                 * 1 unit to update inode item
                 */
                trans = btrfs_start_transaction(root, 1);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
                        trans = NULL;
                }
        }
        if (ret && trans) {
                btrfs_abort_transaction(trans, ret);
                btrfs_end_transaction(trans);
        }
        if (!ret)
                *trans_out = trans;

        return ret;

copy_to_page:
        /*
         * Release our path because we don't need it anymore and also because
         * copy_inline_to_page() needs to reserve data and metadata, which may
         * need to flush delalloc when we are low on available space and
         * therefore cause a deadlock if writeback of an inline extent needs to
         * write to the same leaf or an ordered extent completion needs to write
         * to the same leaf.
         */
        btrfs_release_path(path);

        ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
                                  inline_data, size, datal, comp_type);
        goto out;
}
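
/*
 * Example of the decision above, assuming a 4 KiB sector size: cloning a
 * 500 byte inline extent to offset 0 of an empty destination file inserts
 * the inline extent into the destination's subvolume tree, while cloning
 * it to offset 0 of a destination whose i_size is 8192 (or to any non-zero
 * offset) goes through copy_inline_to_page() instead, since an inline
 * extent must be the only extent of a file and must start at file offset 0.
 */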

/**
 * btrfs_clone() - clone a range from inode file to another
 *
 * @src: Inode to clone from
 * @inode: Inode to clone to
 * @off: Offset within source to start clone from
 * @olen: Original length, passed by user, of range to clone
 * @olen_aligned: Block-aligned value of olen
 * @destoff: Offset within @inode to start clone
 * @no_time_update: Whether to update mtime/ctime on the target inode
 */
static int btrfs_clone(struct inode *src, struct inode *inode,
                       const u64 off, const u64 olen, const u64 olen_aligned,
                       const u64 destoff, int no_time_update)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_path *path = NULL;
        struct extent_buffer *leaf;
        struct btrfs_trans_handle *trans;
        char *buf = NULL;
        struct btrfs_key key;
        u32 nritems;
        int slot;
        int ret;
        const u64 len = olen_aligned;
        u64 last_dest_end = destoff;

        ret = -ENOMEM;
        buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
        if (!buf)
                return ret;

        path = btrfs_alloc_path();
        if (!path) {
                kvfree(buf);
                return ret;
        }

        path->reada = READA_FORWARD;
        /* Clone data */
        key.objectid = btrfs_ino(BTRFS_I(src));
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = off;

        while (1) {
                u64 next_key_min_offset = key.offset + 1;
                struct btrfs_file_extent_item *extent;
                u64 extent_gen;
                int type;
                u32 size;
                struct btrfs_key new_key;
                u64 disko = 0, diskl = 0;
                u64 datao = 0, datal = 0;
                u8 comp;
                u64 drop_start;

                /* Note the key will change type as we walk through the tree */
                ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
                                        0, 0);
                if (ret < 0)
                        goto out;
                /*
                 * On the first search, if no extent item that starts at offset
                 * off was found but the previous item is an extent item, it
                 * might overlap our target range, therefore process it.
                 */
                if (key.offset == off && ret > 0 && path->slots[0] > 0) {
                        btrfs_item_key_to_cpu(path->nodes[0], &key,
                                              path->slots[0] - 1);
                        if (key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }

                nritems = btrfs_header_nritems(path->nodes[0]);
process_slot:
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
                        if (ret < 0)
                                goto out;
                        if (ret > 0)
                                break;
                        nritems = btrfs_header_nritems(path->nodes[0]);
                }
                leaf = path->nodes[0];
                slot = path->slots[0];

                btrfs_item_key_to_cpu(leaf, &key, slot);
                if (key.type > BTRFS_EXTENT_DATA_KEY ||
                    key.objectid != btrfs_ino(BTRFS_I(src)))
                        break;

                ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);

                extent = btrfs_item_ptr(leaf, slot,
                                        struct btrfs_file_extent_item);
                extent_gen = btrfs_file_extent_generation(leaf, extent);
                comp = btrfs_file_extent_compression(leaf, extent);
                type = btrfs_file_extent_type(leaf, extent);
                if (type == BTRFS_FILE_EXTENT_REG ||
                    type == BTRFS_FILE_EXTENT_PREALLOC) {
                        disko = btrfs_file_extent_disk_bytenr(leaf, extent);
                        diskl = btrfs_file_extent_disk_num_bytes(leaf, extent);
                        datao = btrfs_file_extent_offset(leaf, extent);
                        datal = btrfs_file_extent_num_bytes(leaf, extent);
                } else if (type == BTRFS_FILE_EXTENT_INLINE) {
                        /* Take upper bound, may be compressed */
                        datal = btrfs_file_extent_ram_bytes(leaf, extent);
                }

                /*
                 * The first search might have left us at an extent item that
                 * ends before our target range's start, which can happen if we
                 * have holes and the NO_HOLES feature enabled.
                 */
                if (key.offset + datal <= off) {
                        path->slots[0]++;
                        goto process_slot;
                } else if (key.offset >= off + len) {
                        break;
                }
                next_key_min_offset = key.offset + datal;
                size = btrfs_item_size_nr(leaf, slot);
                read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot),
                                   size);

                btrfs_release_path(path);

                memcpy(&new_key, &key, sizeof(new_key));
                new_key.objectid = btrfs_ino(BTRFS_I(inode));
                if (off <= key.offset)
                        new_key.offset = key.offset + destoff - off;
                else
                        new_key.offset = destoff;

                /*
                 * Deal with a hole that doesn't have an extent item that
                 * represents it (NO_HOLES feature enabled).
                 * This hole is either in the middle of the cloning range or at
                 * the beginning (fully overlaps it or partially overlaps it).
                 */
                if (new_key.offset != last_dest_end)
                        drop_start = last_dest_end;
                else
                        drop_start = new_key.offset;
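
                /*
                 * Example: with NO_HOLES, cloning [0, 16K) from a source that
                 * has extents at [0, 4K) and [8K, 16K) finds new_key.offset ==
                 * destoff + 8K while last_dest_end is still destoff + 4K, so
                 * drop_start backs up to destoff + 4K and the replace operation
                 * below also clears the destination range corresponding to the
                 * source hole [4K, 8K).
                 */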

                if (type == BTRFS_FILE_EXTENT_REG ||
                    type == BTRFS_FILE_EXTENT_PREALLOC) {
                        struct btrfs_replace_extent_info clone_info;

                        /*
                         *    a  | --- range to clone ---|  b
                         * | ------------- extent ------------- |
                         */

                        /* Subtract range b */
                        if (key.offset + datal > off + len)
                                datal = off + len - key.offset;

                        /* Subtract range a */
                        if (off > key.offset) {
                                datao += off - key.offset;
                                datal -= off - key.offset;
                        }

                        clone_info.disk_offset = disko;
                        clone_info.disk_len = diskl;
                        clone_info.data_offset = datao;
                        clone_info.data_len = datal;
                        clone_info.file_offset = new_key.offset;
                        clone_info.extent_buf = buf;
                        clone_info.is_new_extent = false;
                        ret = btrfs_replace_file_extents(BTRFS_I(inode), path,
                                        drop_start, new_key.offset + datal - 1,
                                        &clone_info, &trans);
                        if (ret)
                                goto out;
                } else if (type == BTRFS_FILE_EXTENT_INLINE) {
                        /*
                         * Inline extents always have to start at file offset 0
                         * and can never be bigger than the sector size. We can
                         * never clone only parts of an inline extent, since all
                         * reflink operations must start at a sector size aligned
                         * offset, and the length must be aligned too or end at
                         * the i_size (which implies the whole inlined data).
                         */
                        ASSERT(key.offset == 0);
                        ASSERT(datal <= fs_info->sectorsize);
                        if (key.offset != 0 || datal > fs_info->sectorsize)
                                return -EUCLEAN;

                        ret = clone_copy_inline_extent(inode, path, &new_key,
                                                       drop_start, datal, size,
                                                       comp, buf, &trans);
                        if (ret)
                                goto out;
                }

                btrfs_release_path(path);

                /*
                 * If this is a new extent update the last_reflink_trans of both
                 * inodes. This is used by fsync to make sure it does not log
                 * multiple checksum items with overlapping ranges. For older
                 * extents we don't need to do it since inode logging skips the
                 * checksums for older extents. Also ignore holes and inline
                 * extents because they don't have checksums in the csum tree.
                 */
                if (extent_gen == trans->transid && disko > 0) {
                        BTRFS_I(src)->last_reflink_trans = trans->transid;
                        BTRFS_I(inode)->last_reflink_trans = trans->transid;
                }

                last_dest_end = ALIGN(new_key.offset + datal,
                                      fs_info->sectorsize);
                ret = clone_finish_inode_update(trans, inode, last_dest_end,
                                                destoff, olen, no_time_update);
                if (ret)
                        goto out;
                if (new_key.offset + datal >= destoff + len)
                        break;

                btrfs_release_path(path);
                key.offset = next_key_min_offset;

                if (fatal_signal_pending(current)) {
                        ret = -EINTR;
                        goto out;
                }

                cond_resched();
        }
        ret = 0;

        if (last_dest_end < destoff + len) {
                /*
                 * We have an implicit hole that fully or partially overlaps our
                 * cloning range at its end. This means that we either have the
                 * NO_HOLES feature enabled or the implicit hole happened due to
                 * mixing buffered and direct IO writes against this file.
                 */
                btrfs_release_path(path);

                /*
                 * When using NO_HOLES and we are cloning a range that covers
                 * only a hole (no extents) into a range beyond the current
                 * i_size, punching a hole in the target range will not create
                 * an extent map defining a hole, because the range starts at or
                 * beyond current i_size. If the file previously had an i_size
                 * greater than the new i_size set by this clone operation, we
                 * need to make sure the next fsync is a full fsync, so that it
                 * detects and logs a hole covering a range from the current
                 * i_size to the new i_size. If the clone range covers extents,
                 * besides a hole, then we know the full sync flag was already
                 * set by previous calls to btrfs_replace_file_extents() that
                 * replaced file extent items.
                 */
                if (last_dest_end >= i_size_read(inode))
                        set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
                                &BTRFS_I(inode)->runtime_flags);

                ret = btrfs_replace_file_extents(BTRFS_I(inode), path,
                                last_dest_end, destoff + len - 1, NULL, &trans);
                if (ret)
                        goto out;

                ret = clone_finish_inode_update(trans, inode, destoff + len,
                                                destoff, olen, no_time_update);
        }

out:
        btrfs_free_path(path);
        kvfree(buf);
        clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);

        return ret;
}

static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
                                       struct inode *inode2, u64 loff2, u64 len)
{
        unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
        unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
}

static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
                                     struct inode *inode2, u64 loff2, u64 len)
{
        if (inode1 < inode2) {
                swap(inode1, inode2);
                swap(loff1, loff2);
        } else if (inode1 == inode2 && loff2 < loff1) {
                swap(loff1, loff2);
        }
        lock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
        lock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
}
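
/*
 * The swaps above impose a single global lock order: the inode with the
 * higher address is always locked first and, within one inode, the lower
 * offset is locked first. Two tasks reflinking between the same pair of
 * files in opposite directions therefore acquire the two extent ranges in
 * the same order and cannot deadlock against each other.
 */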

static void btrfs_double_mmap_lock(struct inode *inode1, struct inode *inode2)
{
        if (inode1 < inode2)
                swap(inode1, inode2);
        down_write(&BTRFS_I(inode1)->i_mmap_lock);
        down_write_nested(&BTRFS_I(inode2)->i_mmap_lock, SINGLE_DEPTH_NESTING);
}

static void btrfs_double_mmap_unlock(struct inode *inode1, struct inode *inode2)
{
        up_write(&BTRFS_I(inode1)->i_mmap_lock);
        up_write(&BTRFS_I(inode2)->i_mmap_lock);
}

static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
                                   struct inode *dst, u64 dst_loff)
{
        const u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
        int ret;

        /*
         * Lock destination range to serialize with concurrent readpages() and
         * source range to serialize with relocation.
         */
        btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
        ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1);
        btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);

        return ret;
}

static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
                             struct inode *dst, u64 dst_loff)
{
        int ret;
        u64 i, tail_len, chunk_count;
        struct btrfs_root *root_dst = BTRFS_I(dst)->root;

        spin_lock(&root_dst->root_item_lock);
        if (root_dst->send_in_progress) {
                btrfs_warn_rl(root_dst->fs_info,
"cannot deduplicate to root %llu while send operations are using it (%d in progress)",
                              root_dst->root_key.objectid,
                              root_dst->send_in_progress);
                spin_unlock(&root_dst->root_item_lock);
                return -EAGAIN;
        }
        root_dst->dedupe_in_progress++;
        spin_unlock(&root_dst->root_item_lock);

        tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
        chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);

        for (i = 0; i < chunk_count; i++) {
                ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
                                              dst, dst_loff);
                if (ret)
                        goto out;

                loff += BTRFS_MAX_DEDUPE_LEN;
                dst_loff += BTRFS_MAX_DEDUPE_LEN;
        }

        if (tail_len > 0)
                ret = btrfs_extent_same_range(src, loff, tail_len, dst, dst_loff);
out:
        spin_lock(&root_dst->root_item_lock);
        root_dst->dedupe_in_progress--;
        spin_unlock(&root_dst->root_item_lock);

        return ret;
}

static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
                                      u64 off, u64 olen, u64 destoff)
{
        struct inode *inode = file_inode(file);
        struct inode *src = file_inode(file_src);
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        int ret;
        int wb_ret;
        u64 len = olen;
        u64 bs = fs_info->sb->s_blocksize;

        /*
         * VFS's generic_remap_file_range_prep() protects us from cloning the
         * eof block into the middle of a file, which would result in corruption
         * if the file size is not blocksize aligned. So we don't need to check
         * for that case here.
         */
        if (off + len == src->i_size)
                len = ALIGN(src->i_size, bs) - off;
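
        /*
         * Example of the rounding above, with a 4 KiB block size: for a source
         * file of i_size 10000, a clone with off == 8192 and olen == 1808
         * reaches eof, so len becomes 12288 - 8192 == 4096 and the whole last
         * (partial) block is cloned; clone_finish_inode_update() later clamps
         * the destination i_size back to destoff + olen.
         */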

        if (destoff > inode->i_size) {
                const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);

                ret = btrfs_cont_expand(BTRFS_I(inode), inode->i_size, destoff);
                if (ret)
                        return ret;
                /*
                 * We may have truncated the last block if the inode's size is
                 * not sector size aligned, so we need to wait for writeback to
                 * complete before proceeding further, otherwise we can race
                 * with cloning and attempt to increment a reference to an
                 * extent that no longer exists (writeback completed right after
                 * we found the previous extent covering eof and before we
                 * attempted to increment its reference count).
                 */
                ret = btrfs_wait_ordered_range(inode, wb_start,
                                               destoff - wb_start);
                if (ret)
                        return ret;
        }

        /*
         * Lock destination range to serialize with concurrent readpages() and
         * source range to serialize with relocation.
         */
        btrfs_double_extent_lock(src, off, inode, destoff, len);
        ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
        btrfs_double_extent_unlock(src, off, inode, destoff, len);

        /*
         * We may have copied an inline extent into a page of the destination
         * range, so wait for writeback to complete before truncating pages
         * from the page cache. This is a rare case.
         */
        wb_ret = btrfs_wait_ordered_range(inode, destoff, len);
        ret = ret ? ret : wb_ret;
        /*
         * Truncate page cache pages so that future reads will see the cloned
         * data immediately and not the previous data.
         */
        truncate_inode_pages_range(&inode->i_data,
                                   round_down(destoff, PAGE_SIZE),
                                   round_up(destoff + len, PAGE_SIZE) - 1);

        return ret;
}

static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
                                       struct file *file_out, loff_t pos_out,
                                       loff_t *len, unsigned int remap_flags)
{
        struct inode *inode_in = file_inode(file_in);
        struct inode *inode_out = file_inode(file_out);
        u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize;
        bool same_inode = inode_out == inode_in;
        u64 wb_len;
        int ret;

        if (!(remap_flags & REMAP_FILE_DEDUP)) {
                struct btrfs_root *root_out = BTRFS_I(inode_out)->root;

                if (btrfs_root_readonly(root_out))
                        return -EROFS;

                if (file_in->f_path.mnt != file_out->f_path.mnt ||
                    inode_in->i_sb != inode_out->i_sb)
                        return -EXDEV;
        }

        /* Don't make the dst file partly checksummed */
        if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) !=
            (BTRFS_I(inode_out)->flags & BTRFS_INODE_NODATASUM)) {
                return -EINVAL;
        }

        /*
         * Now that the inodes are locked, we need to start writeback ourselves
         * and can not rely on the writeback from the VFS's generic helper
         * generic_remap_file_range_prep() because:
         *
         * 1) For compression we must call filemap_fdatawrite_range() twice
         *    (btrfs_fdatawrite_range() does it for us), and the generic helper
         *    only calls it once;
         *
         * 2) filemap_fdatawrite_range(), called by the generic helper, only
         *    waits for the writeback to complete, i.e. for IO to be done, and
         *    not for the ordered extents to complete. We need to wait for them
         *    to complete so that new file extent items are in the fs tree.
         */
        if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP))
                wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs);
        else
                wb_len = ALIGN(*len, bs);
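
        /*
         * Example: for a clone request extending to eof (*len == 0) on a file
         * of i_size 10000, with pos_in == 6000 and a 4 KiB block size, wb_len
         * is 12288 - 4096 == 8192, i.e. writeback is started and waited for
         * from the block containing pos_in up to the block-aligned eof.
         */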

        /*
         * Since we don't lock ranges, wait for ongoing lockless dio writes (as
         * any in progress could create its ordered extents after we wait for
         * existing ordered extents below).
         */
        inode_dio_wait(inode_in);
        if (!same_inode)
                inode_dio_wait(inode_out);

        /*
         * Workaround to make sure NOCOW buffered writes reach disk as NOCOW.
         *
         * Btrfs' back references do not have a block level granularity, they
         * work at the whole extent level.
         * NOCOW buffered writes without data space reserved may not be able
         * to fall back to CoW due to lack of data space, thus could cause
         * data loss.
         *
         * Here we take a shortcut by flushing the whole inode, so that all
         * NOCOW writes should reach disk as NOCOW before we increase the
         * reference count of the extent. We could do better by only flushing
         * NOCOW data, but that needs extra accounting.
         *
         * Also we don't need to check ASYNC_EXTENT, as async extents will be
         * CoWed anyway, not affecting the NOCOW part.
         */
        ret = filemap_flush(inode_in->i_mapping);
        if (ret < 0)
                return ret;

        ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs),
                                       wb_len);
        if (ret < 0)
                return ret;
        ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs),
                                       wb_len);
        if (ret < 0)
                return ret;

        return generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
                                             len, remap_flags);
}

static bool file_sync_write(const struct file *file)
{
        if (file->f_flags & (__O_SYNC | O_DSYNC))
                return true;
        if (IS_SYNC(file_inode(file)))
                return true;

        return false;
}
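
/*
 * Example: if either file was opened with O_SYNC or O_DSYNC, or carries the
 * S_SYNC inode attribute (chattr +S), file_sync_write() returns true and
 * btrfs_remap_file_range() below fsyncs both the source and the destination
 * ranges after a successful clone or dedupe.
 */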

loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
                              struct file *dst_file, loff_t destoff, loff_t len,
                              unsigned int remap_flags)
{
        struct inode *src_inode = file_inode(src_file);
        struct inode *dst_inode = file_inode(dst_file);
        bool same_inode = dst_inode == src_inode;
        int ret;

        if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
                return -EINVAL;

        if (same_inode) {
                btrfs_inode_lock(src_inode, BTRFS_ILOCK_MMAP);
        } else {
                lock_two_nondirectories(src_inode, dst_inode);
                btrfs_double_mmap_lock(src_inode, dst_inode);
        }

        ret = btrfs_remap_file_range_prep(src_file, off, dst_file, destoff,
                                          &len, remap_flags);
        if (ret < 0 || len == 0)
                goto out_unlock;

        if (remap_flags & REMAP_FILE_DEDUP)
                ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff);
        else
                ret = btrfs_clone_files(dst_file, src_file, off, len, destoff);

out_unlock:
        if (same_inode) {
                btrfs_inode_unlock(src_inode, BTRFS_ILOCK_MMAP);
        } else {
                btrfs_double_mmap_unlock(src_inode, dst_inode);
                unlock_two_nondirectories(src_inode, dst_inode);
        }

        /*
         * If either the source or the destination file was opened with O_SYNC,
         * O_DSYNC or has the S_SYNC attribute, fsync both the destination and
         * source files/ranges, so that a successful return (0) followed by a
         * power failure results in the reflinked data being readable from both
         * files/ranges.
         */
        if (ret == 0 && len > 0 &&
            (file_sync_write(src_file) || file_sync_write(dst_file))) {
                ret = btrfs_sync_file(src_file, off, off + len - 1, 0);
                if (ret == 0)
                        ret = btrfs_sync_file(dst_file, destoff,
                                              destoff + len - 1, 0);
        }

        return ret < 0 ? ret : len;
}
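
/*
 * btrfs_remap_file_range() is btrfs' ->remap_file_range callback, i.e. the
 * entry point the VFS uses for the FICLONE/FICLONERANGE and FIDEDUPERANGE
 * ioctls and for reflink-based copy_file_range(). On success it returns the
 * number of bytes remapped, otherwise a negative errno.
 */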