// SPDX-License-Identifier: GPL-2.0-only
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sched/xacct.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/fsnotify.h>
#include <linux/security.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/overflow.h>
#include "internal.h"

#include <linux/uaccess.h>
#include <asm/unistd.h>

/*
 * Performs necessary checks before doing a clone.
 *
 * Can adjust amount of bytes to clone via @req_count argument.
 * Returns appropriate error code that caller should return or
 * zero in case the clone should be allowed.
 */
static int generic_remap_checks(struct file *file_in, loff_t pos_in,
				struct file *file_out, loff_t pos_out,
				loff_t *req_count, unsigned int remap_flags)
{
	struct inode *inode_in = file_in->f_mapping->host;
	struct inode *inode_out = file_out->f_mapping->host;
	uint64_t count = *req_count;
	uint64_t bcount;
	loff_t size_in, size_out;
	loff_t bs = inode_out->i_sb->s_blocksize;
	int ret;

	/* The start of both ranges must be aligned to an fs block. */
	if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs))
		return -EINVAL;

	/* Ensure offsets don't wrap. */
	if (pos_in + count < pos_in || pos_out + count < pos_out)
		return -EINVAL;

	size_in = i_size_read(inode_in);
	size_out = i_size_read(inode_out);

	/* Dedupe requires both ranges to be within EOF. */
	if ((remap_flags & REMAP_FILE_DEDUP) &&
	    (pos_in >= size_in || pos_in + count > size_in ||
	     pos_out >= size_out || pos_out + count > size_out))
		return -EINVAL;

	/* Ensure the infile range is within the infile. */
	if (pos_in >= size_in)
		return -EINVAL;
	count = min(count, size_in - (uint64_t)pos_in);

	ret = generic_write_check_limits(file_out, pos_out, &count);
	if (ret)
		return ret;

	/*
	 * If the user wanted us to link to the infile's EOF, round up to the
	 * next block boundary for this check.
	 *
	 * Otherwise, make sure the count is also block-aligned, having
	 * already confirmed the starting offsets' block alignment.
	 */
	if (pos_in + count == size_in &&
	    (!(remap_flags & REMAP_FILE_DEDUP) || pos_out + count == size_out)) {
		bcount = ALIGN(size_in, bs) - pos_in;
	} else {
		if (!IS_ALIGNED(count, bs))
			count = ALIGN_DOWN(count, bs);
		bcount = count;
	}

	/* Don't allow overlapped cloning within the same file. */
	if (inode_in == inode_out &&
	    pos_out + bcount > pos_in &&
	    pos_out < pos_in + bcount)
		return -EINVAL;

	/*
	 * We shortened the request but the caller can't deal with that, so
	 * bounce the request back to userspace.
	 */
	if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
		return -EINVAL;

	*req_count = count;
	return 0;
}
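
/*
 * Worked example (illustrative, not from the original source): with a
 * 4096-byte filesystem block size, cloning bytes 0..9999 of a 10000-byte
 * file reaches EOF, so the overlap check above uses the block-rounded
 * length ALIGN(10000, 4096) - 0 = 12288 while the returned *req_count
 * stays 10000.  A request for only 9000 bytes does not reach EOF, so it
 * is rounded down to ALIGN_DOWN(9000, 4096) = 8192; that shortened count
 * is accepted only if the caller passed REMAP_FILE_CAN_SHORTEN, otherwise
 * -EINVAL is returned.
 */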

static int remap_verify_area(struct file *file, loff_t pos, loff_t len,
			     bool write)
{
	int mask = write ? MAY_WRITE : MAY_READ;
	loff_t tmp;
	int ret;

	if (unlikely(pos < 0 || len < 0))
		return -EINVAL;

	if (unlikely(check_add_overflow(pos, len, &tmp)))
		return -EINVAL;

	ret = security_file_permission(file, mask);
	if (ret)
		return ret;

	return fsnotify_file_area_perm(file, mask, &pos, len);
}

/*
 * Ensure that we don't remap a partial EOF block in the middle of something
 * else.  Assume that the offsets have already been checked for block
 * alignment.
 *
 * For clone we only link a partial EOF block above or at the destination file's
 * EOF.  For deduplication we accept a partial EOF block only if it ends at the
 * destination file's EOF (can not link it into the middle of a file).
 *
 * Shorten the request if possible.
 */
static int generic_remap_check_len(struct inode *inode_in,
				   struct inode *inode_out,
				   loff_t pos_out,
				   loff_t *len,
				   unsigned int remap_flags)
{
	u64 blkmask = i_blocksize(inode_in) - 1;
	loff_t new_len = *len;

	if ((*len & blkmask) == 0)
		return 0;

	if (pos_out + *len < i_size_read(inode_out))
		new_len &= ~blkmask;

	if (new_len == *len)
		return 0;

	if (remap_flags & REMAP_FILE_CAN_SHORTEN) {
		*len = new_len;
		return 0;
	}

	return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
}
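
/*
 * Worked example (illustrative, not from the original source): with a
 * 4096-byte block size, a 6000-byte request whose tail would land in the
 * middle of the destination file (pos_out + 6000 < i_size) is trimmed to
 * 4096 bytes when REMAP_FILE_CAN_SHORTEN is set; without that flag the
 * caller gets -EBADE for dedupe or -EINVAL for clone.  A 6000-byte request
 * that ends at or beyond the destination EOF is left untouched.
 */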

/* Read a page's worth of file data into the page cache. */
static struct folio *vfs_dedupe_get_folio(struct file *file, loff_t pos)
{
	return read_mapping_folio(file->f_mapping, pos >> PAGE_SHIFT, file);
}

/*
 * Lock two folios, ensuring that we lock in offset order if the folios
 * are from the same file.
 */
static void vfs_lock_two_folios(struct folio *folio1, struct folio *folio2)
{
	/* Always lock in order of increasing index. */
	if (folio1->index > folio2->index)
		swap(folio1, folio2);

	folio_lock(folio1);
	if (folio1 != folio2)
		folio_lock(folio2);
}

/* Unlock two folios, being careful not to unlock the same folio twice. */
static void vfs_unlock_two_folios(struct folio *folio1, struct folio *folio2)
{
	folio_unlock(folio1);
	if (folio1 != folio2)
		folio_unlock(folio2);
}

/*
 * Compare extents of two files to see if they are the same.
 * Caller must have locked both inodes to prevent write races.
 */
static int vfs_dedupe_file_range_compare(struct file *src, loff_t srcoff,
					 struct file *dest, loff_t dstoff,
					 loff_t len, bool *is_same)
{
	bool same = true;
	int error = -EINVAL;

	while (len) {
		struct folio *src_folio, *dst_folio;
		void *src_addr, *dst_addr;
		loff_t cmp_len = min(PAGE_SIZE - offset_in_page(srcoff),
				     PAGE_SIZE - offset_in_page(dstoff));

		cmp_len = min(cmp_len, len);
		if (cmp_len <= 0)
			goto out_error;

		src_folio = vfs_dedupe_get_folio(src, srcoff);
		if (IS_ERR(src_folio)) {
			error = PTR_ERR(src_folio);
			goto out_error;
		}
		dst_folio = vfs_dedupe_get_folio(dest, dstoff);
		if (IS_ERR(dst_folio)) {
			error = PTR_ERR(dst_folio);
			folio_put(src_folio);
			goto out_error;
		}

		vfs_lock_two_folios(src_folio, dst_folio);

		/*
		 * Now that we've locked both folios, make sure they're still
		 * mapped to the file data we're interested in.  If not,
		 * someone is invalidating pages on us and we lose.
		 */
		if (!folio_test_uptodate(src_folio) || !folio_test_uptodate(dst_folio) ||
		    src_folio->mapping != src->f_mapping ||
		    dst_folio->mapping != dest->f_mapping) {
			same = false;
			goto unlock;
		}

		src_addr = kmap_local_folio(src_folio,
					offset_in_folio(src_folio, srcoff));
		dst_addr = kmap_local_folio(dst_folio,
					offset_in_folio(dst_folio, dstoff));

		flush_dcache_folio(src_folio);
		flush_dcache_folio(dst_folio);

		if (memcmp(src_addr, dst_addr, cmp_len))
			same = false;

		kunmap_local(dst_addr);
		kunmap_local(src_addr);
unlock:
		vfs_unlock_two_folios(src_folio, dst_folio);
		folio_put(dst_folio);
		folio_put(src_folio);

		if (!same)
			break;

		srcoff += cmp_len;
		dstoff += cmp_len;
		len -= cmp_len;
	}

	*is_same = same;
	return 0;

out_error:
	return error;
}

/*
 * Check that the two inodes are eligible for cloning, the ranges make
 * sense, and then flush all dirty data.  Caller must ensure that the
 * inodes have been locked against any other modifications.
 *
 * If there's an error, then the usual negative error code is returned.
 * Otherwise returns 0 with *len set to the request length.
 */
int
__generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
				struct file *file_out, loff_t pos_out,
				loff_t *len, unsigned int remap_flags,
				const struct iomap_ops *dax_read_ops)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	bool same_inode = (inode_in == inode_out);
	int ret;

	/* Don't touch certain kinds of inodes */
	if (IS_IMMUTABLE(inode_out))
		return -EPERM;

	if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
		return -ETXTBSY;

	/* Don't reflink dirs, pipes, sockets... */
	if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
		return -EISDIR;
	if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
		return -EINVAL;

	/* Zero length dedupe exits immediately; reflink goes to EOF. */
	if (*len == 0) {
		loff_t isize = i_size_read(inode_in);

		if ((remap_flags & REMAP_FILE_DEDUP) || pos_in == isize)
			return 0;
		if (pos_in > isize)
			return -EINVAL;
		*len = isize - pos_in;
		if (*len == 0)
			return 0;
	}

	/* Check that we don't violate system file offset limits. */
	ret = generic_remap_checks(file_in, pos_in, file_out, pos_out, len,
			remap_flags);
	if (ret || *len == 0)
		return ret;

	/* Wait for the completion of any pending IOs on both files */
	inode_dio_wait(inode_in);
	if (!same_inode)
		inode_dio_wait(inode_out);

	ret = filemap_write_and_wait_range(inode_in->i_mapping,
			pos_in, pos_in + *len - 1);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(inode_out->i_mapping,
			pos_out, pos_out + *len - 1);
	if (ret)
		return ret;

	/*
	 * Check that the extents are the same.
	 */
	if (remap_flags & REMAP_FILE_DEDUP) {
		bool is_same = false;

		if (!IS_DAX(inode_in))
			ret = vfs_dedupe_file_range_compare(file_in, pos_in,
					file_out, pos_out, *len, &is_same);
		else if (dax_read_ops)
			ret = dax_dedupe_file_range_compare(inode_in, pos_in,
					inode_out, pos_out, *len, &is_same,
					dax_read_ops);
		else
			return -EINVAL;
		if (ret)
			return ret;
		if (!is_same)
			return -EBADE;
	}

	ret = generic_remap_check_len(inode_in, inode_out, pos_out, len,
			remap_flags);
	if (ret || *len == 0)
		return ret;

	/* If can't alter the file contents, we're done. */
	if (!(remap_flags & REMAP_FILE_DEDUP))
		ret = file_modified(file_out);

	return ret;
}

int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
				  struct file *file_out, loff_t pos_out,
				  loff_t *len, unsigned int remap_flags)
{
	return __generic_remap_file_range_prep(file_in, pos_in, file_out,
					       pos_out, len, remap_flags, NULL);
}
EXPORT_SYMBOL(generic_remap_file_range_prep);
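
/*
 * Usage sketch (illustrative, hypothetical filesystem): a ->remap_file_range
 * implementation typically locks both inodes, runs the prep helper above,
 * and only then remaps blocks.  examplefs_remap_blocks() is a stand-in for
 * the filesystem-specific work, not a real kernel API:
 *
 *	loff_t examplefs_remap_file_range(struct file *file_in, loff_t pos_in,
 *					  struct file *file_out, loff_t pos_out,
 *					  loff_t len, unsigned int remap_flags)
 *	{
 *		struct inode *inode_in = file_inode(file_in);
 *		struct inode *inode_out = file_inode(file_out);
 *		int ret;
 *
 *		if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
 *			return -EINVAL;
 *
 *		lock_two_nondirectories(inode_in, inode_out);
 *		ret = generic_remap_file_range_prep(file_in, pos_in, file_out,
 *						    pos_out, &len, remap_flags);
 *		if (ret < 0 || len == 0)
 *			goto out_unlock;
 *
 *		// filesystem-specific extent sharing goes here
 *		ret = examplefs_remap_blocks(inode_in, pos_in, inode_out,
 *					     pos_out, len);
 *	out_unlock:
 *		unlock_two_nondirectories(inode_in, inode_out);
 *		return ret < 0 ? ret : len;
 *	}
 */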

loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
			    struct file *file_out, loff_t pos_out,
			    loff_t len, unsigned int remap_flags)
{
	loff_t ret;

	WARN_ON_ONCE(remap_flags & REMAP_FILE_DEDUP);

	if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
		return -EXDEV;

	ret = generic_file_rw_checks(file_in, file_out);
	if (ret < 0)
		return ret;

	if (!file_in->f_op->remap_file_range)
		return -EOPNOTSUPP;

	ret = remap_verify_area(file_in, pos_in, len, false);
	if (ret)
		return ret;

	ret = remap_verify_area(file_out, pos_out, len, true);
	if (ret)
		return ret;

	file_start_write(file_out);
	ret = file_in->f_op->remap_file_range(file_in, pos_in,
			file_out, pos_out, len, remap_flags);
	file_end_write(file_out);
	if (ret < 0)
		return ret;

	fsnotify_access(file_in);
	fsnotify_modify(file_out);
	return ret;
}
EXPORT_SYMBOL(vfs_clone_file_range);
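
/*
 * Userspace view (illustrative): vfs_clone_file_range() is what backs the
 * FICLONE/FICLONERANGE ioctls.  A minimal caller, assuming src_fd and
 * dst_fd are open files on the same reflink-capable filesystem, looks
 * roughly like:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	struct file_clone_range fcr = {
 *		.src_fd      = src_fd,
 *		.src_offset  = 0,
 *		.src_length  = 0,	// 0 means "clone to EOF"
 *		.dest_offset = 0,
 *	};
 *	if (ioctl(dst_fd, FICLONERANGE, &fcr) < 0)
 *		perror("FICLONERANGE");
 */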

/* Check whether we are allowed to dedupe the destination file */
static bool may_dedupe_file(struct file *file)
{
	struct mnt_idmap *idmap = file_mnt_idmap(file);
	struct inode *inode = file_inode(file);

	if (capable(CAP_SYS_ADMIN))
		return true;
	if (file->f_mode & FMODE_WRITE)
		return true;
	if (vfsuid_eq_kuid(i_uid_into_vfsuid(idmap, inode), current_fsuid()))
		return true;
	if (!inode_permission(idmap, inode, MAY_WRITE))
		return true;
	return false;
}

loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
				 struct file *dst_file, loff_t dst_pos,
				 loff_t len, unsigned int remap_flags)
{
	loff_t ret;

	WARN_ON_ONCE(remap_flags & ~(REMAP_FILE_DEDUP |
				     REMAP_FILE_CAN_SHORTEN));

	/*
	 * This is redundant if called from vfs_dedupe_file_range(), but other
	 * callers need it and it's not performance sensitive...
	 */
	ret = remap_verify_area(src_file, src_pos, len, false);
	if (ret)
		return ret;

	ret = remap_verify_area(dst_file, dst_pos, len, true);
	if (ret)
		return ret;

	/*
	 * This needs to be called after remap_verify_area() because of
	 * sb_start_write() and before may_dedupe_file() because the mount's
	 * MAY_WRITE needs to be checked with mnt_get_write_access_file() held.
	 */
	ret = mnt_want_write_file(dst_file);
	if (ret)
		return ret;

	ret = -EPERM;
	if (!may_dedupe_file(dst_file))
		goto out_drop_write;

	ret = -EXDEV;
	if (file_inode(src_file)->i_sb != file_inode(dst_file)->i_sb)
		goto out_drop_write;

	ret = -EISDIR;
	if (S_ISDIR(file_inode(dst_file)->i_mode))
		goto out_drop_write;

	ret = -EINVAL;
	if (!dst_file->f_op->remap_file_range)
		goto out_drop_write;

	if (len == 0) {
		ret = 0;
		goto out_drop_write;
	}

	ret = dst_file->f_op->remap_file_range(src_file, src_pos, dst_file,
			dst_pos, len, remap_flags | REMAP_FILE_DEDUP);
out_drop_write:
	mnt_drop_write_file(dst_file);

	return ret;
}
EXPORT_SYMBOL(vfs_dedupe_file_range_one);

int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
{
	struct file_dedupe_range_info *info;
	struct inode *src = file_inode(file);
	u64 off;
	u64 len;
	int i;
	int ret;
	u16 count = same->dest_count;
	loff_t deduped;

	if (!(file->f_mode & FMODE_READ))
		return -EINVAL;

	if (same->reserved1 || same->reserved2)
		return -EINVAL;

	off = same->src_offset;
	len = same->src_length;

	if (S_ISDIR(src->i_mode))
		return -EISDIR;

	if (!S_ISREG(src->i_mode))
		return -EINVAL;

	if (!file->f_op->remap_file_range)
		return -EOPNOTSUPP;

	ret = remap_verify_area(file, off, len, false);
	if (ret < 0)
		return ret;
	ret = 0;

	if (off + len > i_size_read(src))
		return -EINVAL;

	/* Arbitrary 1G limit on a single dedupe request, can be raised. */
	len = min_t(u64, len, 1 << 30);

	/* pre-format output fields to sane values */
	for (i = 0; i < count; i++) {
		same->info[i].bytes_deduped = 0ULL;
		same->info[i].status = FILE_DEDUPE_RANGE_SAME;
	}

	for (i = 0, info = same->info; i < count; i++, info++) {
		struct fd dst_fd = fdget(info->dest_fd);
		struct file *dst_file = dst_fd.file;

		if (!dst_file) {
			info->status = -EBADF;
			goto next_loop;
		}

		if (info->reserved) {
			info->status = -EINVAL;
			goto next_fdput;
		}

		deduped = vfs_dedupe_file_range_one(file, off, dst_file,
						    info->dest_offset, len,
						    REMAP_FILE_CAN_SHORTEN);
		if (deduped == -EBADE)
			info->status = FILE_DEDUPE_RANGE_DIFFERS;
		else if (deduped < 0)
			info->status = deduped;
		else
			info->bytes_deduped = len;

next_fdput:
		fdput(dst_fd);
next_loop:
		if (fatal_signal_pending(current))
			break;
	}
	return ret;
}
EXPORT_SYMBOL(vfs_dedupe_file_range);
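
/*
 * Userspace view (illustrative): vfs_dedupe_file_range() is what backs the
 * FIDEDUPERANGE ioctl.  A minimal one-destination caller, with fd names
 * chosen here only for the example, looks roughly like:
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	struct file_dedupe_range *range;
 *
 *	range = calloc(1, sizeof(*range) + sizeof(range->info[0]));
 *	range->src_offset = 0;
 *	range->src_length = 65536;
 *	range->dest_count = 1;
 *	range->info[0].dest_fd = dst_fd;
 *	range->info[0].dest_offset = 0;
 *
 *	if (ioctl(src_fd, FIDEDUPERANGE, range) < 0)
 *		perror("FIDEDUPERANGE");
 *	else if (range->info[0].status == FILE_DEDUPE_RANGE_SAME)
 *		printf("deduped %llu bytes\n",
 *		       (unsigned long long)range->info[0].bytes_deduped);
 *	else if (range->info[0].status == FILE_DEDUPE_RANGE_DIFFERS)
 *		printf("contents differ, nothing shared\n");
 */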