// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ioctl.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/falloc.h>
#include <linux/sched/signal.h>
#include <linux/fiemap.h>
#include <linux/mount.h>
#include <linux/fscrypt.h>
#include <linux/fileattr.h>

#include "internal.h"

#include <asm/ioctls.h>

/* So that the fiemap access checks can't overflow on 32 bit machines. */
#define FIEMAP_MAX_EXTENTS (UINT_MAX / sizeof(struct fiemap_extent))

/**
 * vfs_ioctl - call filesystem specific ioctl methods
 * @filp: open file to invoke ioctl method on
 * @cmd: ioctl command to execute
 * @arg: command-specific argument for ioctl
 *
 * Invokes filesystem specific ->unlocked_ioctl, if one exists; otherwise
 * returns -ENOTTY.
 *
 * Returns 0 on success, -errno on error.
 */
long vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int error = -ENOTTY;

	if (!filp->f_op->unlocked_ioctl)
		goto out;

	error = filp->f_op->unlocked_ioctl(filp, cmd, arg);
	if (error == -ENOIOCTLCMD)
		error = -ENOTTY;
 out:
	return error;
}
EXPORT_SYMBOL(vfs_ioctl);

static int ioctl_fibmap(struct file *filp, int __user *p)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	int error, ur_block;
	sector_t block;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	error = get_user(ur_block, p);
	if (error)
		return error;

	if (ur_block < 0)
		return -EINVAL;

	block = ur_block;
	error = bmap(inode, &block);

	if (block > INT_MAX) {
		error = -ERANGE;
		pr_warn_ratelimited("[%s/%d] FS: %s File: %pD4 would truncate fibmap result\n",
				    current->comm, task_pid_nr(current),
				    sb->s_id, filp);
	}

	if (error)
		ur_block = 0;
	else
		ur_block = block;

	if (put_user(ur_block, p))
		error = -EFAULT;

	return error;
}

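/*
 * Illustrative sketch (userspace, not part of this file): how a privileged
 * tool might use FIBMAP to translate a logical block index of an open file
 * into a filesystem block number. CAP_SYS_RAWIO is required, and error
 * handling is minimal.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int fibmap_lookup(int fd, int logical_block)
 *	{
 *		int blk = logical_block;
 *
 *		// On success blk is replaced with the physical block number
 *		// (0 if the block is unmapped or the lookup failed).
 *		if (ioctl(fd, FIBMAP, &blk) < 0)
 *			return -1;
 *		return blk;
 *	}
 */
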
/**
 * fiemap_fill_next_extent - Fiemap helper function
 * @fieinfo: Fiemap context passed into ->fiemap
 * @logical: Extent logical start offset, in bytes
 * @phys: Extent physical start offset, in bytes
 * @len: Extent length, in bytes
 * @flags: FIEMAP_EXTENT flags that describe this extent
 *
 * Called from file system ->fiemap callback. Will populate extent
 * info as passed in via arguments and copy to user memory. On
 * success, extent count on fieinfo is incremented.
 *
 * Returns 0 on success, -errno on error, 1 if this was the last
 * extent that will fit in user array.
 */
int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
			    u64 phys, u64 len, u32 flags)
{
	struct fiemap_extent extent;
	struct fiemap_extent __user *dest = fieinfo->fi_extents_start;

	/* only count the extents */
	if (fieinfo->fi_extents_max == 0) {
		fieinfo->fi_extents_mapped++;
		return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
	}

	if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max)
		return 1;

#define SET_UNKNOWN_FLAGS (FIEMAP_EXTENT_DELALLOC)
#define SET_NO_UNMOUNTED_IO_FLAGS (FIEMAP_EXTENT_DATA_ENCRYPTED)
#define SET_NOT_ALIGNED_FLAGS (FIEMAP_EXTENT_DATA_TAIL|FIEMAP_EXTENT_DATA_INLINE)

	if (flags & SET_UNKNOWN_FLAGS)
		flags |= FIEMAP_EXTENT_UNKNOWN;
	if (flags & SET_NO_UNMOUNTED_IO_FLAGS)
		flags |= FIEMAP_EXTENT_ENCODED;
	if (flags & SET_NOT_ALIGNED_FLAGS)
		flags |= FIEMAP_EXTENT_NOT_ALIGNED;

	memset(&extent, 0, sizeof(extent));
	extent.fe_logical = logical;
	extent.fe_physical = phys;
	extent.fe_length = len;
	extent.fe_flags = flags;

	dest += fieinfo->fi_extents_mapped;
	if (copy_to_user(dest, &extent, sizeof(extent)))
		return -EFAULT;

	fieinfo->fi_extents_mapped++;
	if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max)
		return 1;
	return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
}
EXPORT_SYMBOL(fiemap_fill_next_extent);

/**
 * fiemap_prep - check validity of requested flags for fiemap
 * @inode: Inode to operate on
 * @fieinfo: Fiemap context passed into ->fiemap
 * @start: Start of the mapped range
 * @len: Length of the mapped range, can be truncated by this function.
 * @supported_flags: Set of fiemap flags that the file system understands
 *
 * This function must be called from each ->fiemap instance to validate the
 * fiemap request against the file system parameters.
 *
 * Returns 0 on success, or a negative error on failure.
 */
int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 *len, u32 supported_flags)
{
	u64 maxbytes = inode->i_sb->s_maxbytes;
	u32 incompat_flags;
	int ret = 0;

	if (*len == 0)
		return -EINVAL;
	if (start >= maxbytes)
		return -EFBIG;

	/*
	 * Shrink request scope to what the fs can actually handle.
	 */
	if (*len > maxbytes || (maxbytes - *len) < start)
		*len = maxbytes - start;

	supported_flags |= FIEMAP_FLAG_SYNC;
	supported_flags &= FIEMAP_FLAGS_COMPAT;
	incompat_flags = fieinfo->fi_flags & ~supported_flags;
	if (incompat_flags) {
		fieinfo->fi_flags = incompat_flags;
		return -EBADR;
	}

	if (fieinfo->fi_flags & FIEMAP_FLAG_SYNC)
		ret = filemap_write_and_wait(inode->i_mapping);
	return ret;
}
EXPORT_SYMBOL(fiemap_prep);

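/*
 * Illustrative sketch (not from any real filesystem): the shape of a minimal
 * ->fiemap implementation built on fiemap_prep() and
 * fiemap_fill_next_extent(). The lookup helper foo_next_extent() and its
 * foo_extent structure are hypothetical placeholders; assume the helper
 * returns 0 and fills *ext, 1 when there are no more extents, or -errno.
 *
 *	static int foo_fiemap(struct inode *inode,
 *			      struct fiemap_extent_info *fieinfo,
 *			      u64 start, u64 len)
 *	{
 *		struct foo_extent ext;
 *		int ret;
 *
 *		// Validate flags and clamp the range; this also performs the
 *		// writeback requested by FIEMAP_FLAG_SYNC.
 *		ret = fiemap_prep(inode, fieinfo, start, &len, 0);
 *		if (ret)
 *			return ret;
 *
 *		while (len && !(ret = foo_next_extent(inode, start, len, &ext))) {
 *			ret = fiemap_fill_next_extent(fieinfo, ext.logical,
 *						      ext.physical, ext.length,
 *						      ext.last ? FIEMAP_EXTENT_LAST : 0);
 *			// 1 means the user extent array is full or this was
 *			// the last extent; both end the walk successfully.
 *			if (ret)
 *				break;
 *			start = ext.logical + ext.length;
 *			len -= min(len, ext.length);
 *		}
 *		return ret < 0 ? ret : 0;
 *	}
 */
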
static int ioctl_fiemap(struct file *filp, struct fiemap __user *ufiemap)
{
	struct fiemap fiemap;
	struct fiemap_extent_info fieinfo = { 0, };
	struct inode *inode = file_inode(filp);
	int error;

	if (!inode->i_op->fiemap)
		return -EOPNOTSUPP;

	if (copy_from_user(&fiemap, ufiemap, sizeof(fiemap)))
		return -EFAULT;

	if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS)
		return -EINVAL;

	fieinfo.fi_flags = fiemap.fm_flags;
	fieinfo.fi_extents_max = fiemap.fm_extent_count;
	fieinfo.fi_extents_start = ufiemap->fm_extents;

	error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start,
				    fiemap.fm_length);

	fiemap.fm_flags = fieinfo.fi_flags;
	fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
	if (copy_to_user(ufiemap, &fiemap, sizeof(fiemap)))
		error = -EFAULT;

	return error;
}

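/*
 * Illustrative sketch (userspace, not part of this file): calling
 * FS_IOC_FIEMAP with room for a handful of extents. The buffer layout --
 * a struct fiemap immediately followed by fm_extent_count extents -- is
 * what fi_extents_start above points at. Error handling is minimal.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *	#include <linux/fiemap.h>
 *
 *	#define NUM_EXTENTS 32
 *
 *	static int dump_extents(int fd)
 *	{
 *		struct fiemap *fm;
 *		unsigned int i;
 *
 *		fm = calloc(1, sizeof(*fm) +
 *			       NUM_EXTENTS * sizeof(struct fiemap_extent));
 *		if (!fm)
 *			return -1;
 *		fm->fm_start = 0;
 *		fm->fm_length = FIEMAP_MAX_OFFSET;	// whole file
 *		fm->fm_flags = FIEMAP_FLAG_SYNC;	// flush dirty data first
 *		fm->fm_extent_count = NUM_EXTENTS;
 *
 *		if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
 *			free(fm);
 *			return -1;
 *		}
 *		for (i = 0; i < fm->fm_mapped_extents; i++)
 *			printf("%llu -> %llu (%llu bytes)\n",
 *			       (unsigned long long)fm->fm_extents[i].fe_logical,
 *			       (unsigned long long)fm->fm_extents[i].fe_physical,
 *			       (unsigned long long)fm->fm_extents[i].fe_length);
 *		free(fm);
 *		return 0;
 *	}
 */
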
static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
			     u64 off, u64 olen, u64 destoff)
{
	struct fd src_file = fdget(srcfd);
	loff_t cloned;
	int ret;

	if (!src_file.file)
		return -EBADF;
	cloned = vfs_clone_file_range(src_file.file, off, dst_file, destoff,
				      olen, 0);
	if (cloned < 0)
		ret = cloned;
	else if (olen && cloned != olen)
		ret = -EINVAL;
	else
		ret = 0;
	fdput(src_file);
	return ret;
}

static long ioctl_file_clone_range(struct file *file,
				   struct file_clone_range __user *argp)
{
	struct file_clone_range args;

	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;
	return ioctl_file_clone(file, args.src_fd, args.src_offset,
				args.src_length, args.dest_offset);
}

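/*
 * Illustrative sketch (userspace, not part of this file): reflinking with
 * FICLONE and FICLONERANGE. FICLONE takes the source fd directly as the
 * argument; FICLONERANGE takes a struct file_clone_range where a zero
 * src_length means "clone to EOF". Error handling is minimal.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	// Share all of src_fd's data with dst_fd (like "cp --reflink").
 *	int clone_whole_file(int dst_fd, int src_fd)
 *	{
 *		return ioctl(dst_fd, FICLONE, src_fd);
 *	}
 *
 *	// Share one block-aligned range of src_fd into dst_fd at dst_off.
 *	int clone_range(int dst_fd, int src_fd, __u64 src_off, __u64 len,
 *			__u64 dst_off)
 *	{
 *		struct file_clone_range cr = {
 *			.src_fd = src_fd,
 *			.src_offset = src_off,
 *			.src_length = len,
 *			.dest_offset = dst_off,
 *		};
 *
 *		return ioctl(dst_fd, FICLONERANGE, &cr);
 *	}
 */
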
/*
 * This provides compatibility with legacy XFS pre-allocation ioctls
 * which predate the fallocate syscall.
 *
 * Only the l_start, l_len and l_whence fields of the 'struct space_resv'
 * are used here; the rest are ignored.
 */
static int ioctl_preallocate(struct file *filp, int mode, void __user *argp)
{
	struct inode *inode = file_inode(filp);
	struct space_resv sr;

	if (copy_from_user(&sr, argp, sizeof(sr)))
		return -EFAULT;

	switch (sr.l_whence) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		sr.l_start += filp->f_pos;
		break;
	case SEEK_END:
		sr.l_start += i_size_read(inode);
		break;
	default:
		return -EINVAL;
	}

	return vfs_fallocate(filp, mode | FALLOC_FL_KEEP_SIZE, sr.l_start,
			     sr.l_len);
}

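/*
 * Illustrative sketch (userspace, not part of this file), assuming the uapi
 * struct space_resv and FS_IOC_RESVSP64 definitions from <linux/fs.h>: the
 * legacy XFS preallocation request next to its modern fallocate()
 * equivalent. Both allocate "len" bytes at "off" without changing the file
 * size (note the unconditional FALLOC_FL_KEEP_SIZE above). Error handling
 * is minimal.
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int reserve_space(int fd, long long off, long long len)
 *	{
 *		struct space_resv sr = {
 *			.l_whence = SEEK_SET,
 *			.l_start = off,
 *			.l_len = len,
 *		};
 *
 *		// Legacy interface, handled by ioctl_preallocate() above.
 *		if (ioctl(fd, FS_IOC_RESVSP64, &sr) == 0)
 *			return 0;
 *
 *		// Preferred modern interface.
 *		return fallocate(fd, FALLOC_FL_KEEP_SIZE, off, len);
 *	}
 */
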
/* on ia32 l_start is on a 32-bit boundary */
#if defined CONFIG_COMPAT && defined(CONFIG_X86_64)
/* just account for different alignment */
static int compat_ioctl_preallocate(struct file *file, int mode,
				    struct space_resv_32 __user *argp)
{
	struct inode *inode = file_inode(file);
	struct space_resv_32 sr;

	if (copy_from_user(&sr, argp, sizeof(sr)))
		return -EFAULT;

	switch (sr.l_whence) {
	case SEEK_SET:
		break;
	case SEEK_CUR:
		sr.l_start += file->f_pos;
		break;
	case SEEK_END:
		sr.l_start += i_size_read(inode);
		break;
	default:
		return -EINVAL;
	}

	return vfs_fallocate(file, mode | FALLOC_FL_KEEP_SIZE, sr.l_start, sr.l_len);
}
#endif

static int file_ioctl(struct file *filp, unsigned int cmd, int __user *p)
{
	switch (cmd) {
	case FIBMAP:
		return ioctl_fibmap(filp, p);
	case FS_IOC_RESVSP:
	case FS_IOC_RESVSP64:
		return ioctl_preallocate(filp, 0, p);
	case FS_IOC_UNRESVSP:
	case FS_IOC_UNRESVSP64:
		return ioctl_preallocate(filp, FALLOC_FL_PUNCH_HOLE, p);
	case FS_IOC_ZERO_RANGE:
		return ioctl_preallocate(filp, FALLOC_FL_ZERO_RANGE, p);
	}

	return -ENOIOCTLCMD;
}

static int ioctl_fionbio(struct file *filp, int __user *argp)
{
	unsigned int flag;
	int on, error;

	error = get_user(on, argp);
	if (error)
		return error;
	flag = O_NONBLOCK;
#ifdef __sparc__
	/* SunOS compatibility item. */
	if (O_NONBLOCK != O_NDELAY)
		flag |= O_NDELAY;
#endif
	spin_lock(&filp->f_lock);
	if (on)
		filp->f_flags |= flag;
	else
		filp->f_flags &= ~flag;
	spin_unlock(&filp->f_lock);
	return error;
}

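/*
 * Illustrative sketch (userspace, not part of this file): FIONBIO toggles
 * O_NONBLOCK on the open file description, much like fcntl(F_SETFL) with
 * the flag added or removed. Error handling is minimal.
 *
 *	#include <sys/ioctl.h>
 *
 *	int set_nonblocking(int fd, int on)
 *	{
 *		return ioctl(fd, FIONBIO, &on);
 *	}
 */
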
static int ioctl_fioasync(unsigned int fd, struct file *filp,
			  int __user *argp)
{
	unsigned int flag;
	int on, error;

	error = get_user(on, argp);
	if (error)
		return error;
	flag = on ? FASYNC : 0;

	/* Did FASYNC state change ? */
	if ((flag ^ filp->f_flags) & FASYNC) {
		if (filp->f_op->fasync)
			/* fasync() adjusts filp->f_flags */
			error = filp->f_op->fasync(fd, filp, on);
		else
			error = -ENOTTY;
	}
	return error < 0 ? error : 0;
}

static int ioctl_fsfreeze(struct file *filp)
{
	struct super_block *sb = file_inode(filp)->i_sb;

	if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	/* If filesystem doesn't support freeze feature, return. */
	if (sb->s_op->freeze_fs == NULL && sb->s_op->freeze_super == NULL)
		return -EOPNOTSUPP;

	/* Freeze */
	if (sb->s_op->freeze_super)
		return sb->s_op->freeze_super(sb, FREEZE_HOLDER_USERSPACE);
	return freeze_super(sb, FREEZE_HOLDER_USERSPACE);
}

static int ioctl_fsthaw(struct file *filp)
{
	struct super_block *sb = file_inode(filp)->i_sb;

	if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	/* Thaw */
	if (sb->s_op->thaw_super)
		return sb->s_op->thaw_super(sb, FREEZE_HOLDER_USERSPACE);
	return thaw_super(sb, FREEZE_HOLDER_USERSPACE);
}

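/*
 * Illustrative sketch (userspace, not part of this file): freezing and
 * thawing a filesystem for a consistent snapshot, as fsfreeze(8) does. Any
 * open fd on the filesystem (commonly the mount point directory) works;
 * CAP_SYS_ADMIN in the superblock's user namespace is required.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int freeze_fs(int mountpoint_fd)
 *	{
 *		return ioctl(mountpoint_fd, FIFREEZE, 0);
 *	}
 *
 *	int thaw_fs(int mountpoint_fd)
 *	{
 *		return ioctl(mountpoint_fd, FITHAW, 0);
 *	}
 */
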
static int ioctl_file_dedupe_range(struct file *file,
				   struct file_dedupe_range __user *argp)
{
	struct file_dedupe_range *same = NULL;
	int ret;
	unsigned long size;
	u16 count;

	if (get_user(count, &argp->dest_count)) {
		ret = -EFAULT;
		goto out;
	}

	size = offsetof(struct file_dedupe_range, info[count]);
	if (size > PAGE_SIZE) {
		ret = -ENOMEM;
		goto out;
	}

	same = memdup_user(argp, size);
	if (IS_ERR(same)) {
		ret = PTR_ERR(same);
		same = NULL;
		goto out;
	}

	same->dest_count = count;
	ret = vfs_dedupe_file_range(file, same);
	if (ret)
		goto out;

	ret = copy_to_user(argp, same, size);
	if (ret)
		ret = -EFAULT;

out:
	kfree(same);
	return ret;
}

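/*
 * Illustrative sketch (userspace, not part of this file): deduplicating one
 * range of src_fd against a single destination file with FIDEDUPERANGE.
 * The variable-length buffer mirrors the dest_count handling above. Error
 * handling is minimal.
 *
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int dedupe_one(int src_fd, __u64 src_off, __u64 len,
 *		       int dst_fd, __u64 dst_off)
 *	{
 *		struct file_dedupe_range *arg;
 *		int ret, status;
 *
 *		arg = calloc(1, sizeof(*arg) +
 *				sizeof(struct file_dedupe_range_info));
 *		if (!arg)
 *			return -1;
 *		arg->src_offset = src_off;
 *		arg->src_length = len;
 *		arg->dest_count = 1;
 *		arg->info[0].dest_fd = dst_fd;
 *		arg->info[0].dest_offset = dst_off;
 *
 *		ret = ioctl(src_fd, FIDEDUPERANGE, arg);
 *		// Per-destination result: bytes_deduped plus a status that is
 *		// FILE_DEDUPE_RANGE_SAME, FILE_DEDUPE_RANGE_DIFFERS or -errno.
 *		status = arg->info[0].status;
 *		free(arg);
 *		return ret < 0 ? ret : status;
 *	}
 */
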
/**
 * fileattr_fill_xflags - initialize fileattr with xflags
 * @fa: fileattr pointer
 * @xflags: FS_XFLAG_* flags
 *
 * Set ->fsx_xflags, ->fsx_valid and ->flags (translated xflags). All
 * other fields are zeroed.
 */
void fileattr_fill_xflags(struct fileattr *fa, u32 xflags)
{
	memset(fa, 0, sizeof(*fa));
	fa->fsx_valid = true;
	fa->fsx_xflags = xflags;
	if (fa->fsx_xflags & FS_XFLAG_IMMUTABLE)
		fa->flags |= FS_IMMUTABLE_FL;
	if (fa->fsx_xflags & FS_XFLAG_APPEND)
		fa->flags |= FS_APPEND_FL;
	if (fa->fsx_xflags & FS_XFLAG_SYNC)
		fa->flags |= FS_SYNC_FL;
	if (fa->fsx_xflags & FS_XFLAG_NOATIME)
		fa->flags |= FS_NOATIME_FL;
	if (fa->fsx_xflags & FS_XFLAG_NODUMP)
		fa->flags |= FS_NODUMP_FL;
	if (fa->fsx_xflags & FS_XFLAG_DAX)
		fa->flags |= FS_DAX_FL;
	if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
		fa->flags |= FS_PROJINHERIT_FL;
}
EXPORT_SYMBOL(fileattr_fill_xflags);

/**
 * fileattr_fill_flags - initialize fileattr with flags
 * @fa: fileattr pointer
 * @flags: FS_*_FL flags
 *
 * Set ->flags, ->flags_valid and ->fsx_xflags (translated flags).
 * All other fields are zeroed.
 */
void fileattr_fill_flags(struct fileattr *fa, u32 flags)
{
	memset(fa, 0, sizeof(*fa));
	fa->flags_valid = true;
	fa->flags = flags;
	if (fa->flags & FS_SYNC_FL)
		fa->fsx_xflags |= FS_XFLAG_SYNC;
	if (fa->flags & FS_IMMUTABLE_FL)
		fa->fsx_xflags |= FS_XFLAG_IMMUTABLE;
	if (fa->flags & FS_APPEND_FL)
		fa->fsx_xflags |= FS_XFLAG_APPEND;
	if (fa->flags & FS_NODUMP_FL)
		fa->fsx_xflags |= FS_XFLAG_NODUMP;
	if (fa->flags & FS_NOATIME_FL)
		fa->fsx_xflags |= FS_XFLAG_NOATIME;
	if (fa->flags & FS_DAX_FL)
		fa->fsx_xflags |= FS_XFLAG_DAX;
	if (fa->flags & FS_PROJINHERIT_FL)
		fa->fsx_xflags |= FS_XFLAG_PROJINHERIT;
}
EXPORT_SYMBOL(fileattr_fill_flags);

/**
 * vfs_fileattr_get - retrieve miscellaneous file attributes
 * @dentry: the object to retrieve from
 * @fa: fileattr pointer
 *
 * Call the i_op->fileattr_get() callback, if it exists.
 *
 * Return: 0 on success, or a negative error on failure.
 */
int vfs_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);

	if (!inode->i_op->fileattr_get)
		return -ENOIOCTLCMD;

	return inode->i_op->fileattr_get(dentry, fa);
}
EXPORT_SYMBOL(vfs_fileattr_get);

/**
 * copy_fsxattr_to_user - copy fsxattr to userspace.
 * @fa: fileattr pointer
 * @ufa: fsxattr user pointer
 *
 * Return: 0 on success, or -EFAULT on failure.
 */
int copy_fsxattr_to_user(const struct fileattr *fa, struct fsxattr __user *ufa)
{
	struct fsxattr xfa;

	memset(&xfa, 0, sizeof(xfa));
	xfa.fsx_xflags = fa->fsx_xflags;
	xfa.fsx_extsize = fa->fsx_extsize;
	xfa.fsx_nextents = fa->fsx_nextents;
	xfa.fsx_projid = fa->fsx_projid;
	xfa.fsx_cowextsize = fa->fsx_cowextsize;

	if (copy_to_user(ufa, &xfa, sizeof(xfa)))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL(copy_fsxattr_to_user);

static int copy_fsxattr_from_user(struct fileattr *fa,
				  struct fsxattr __user *ufa)
{
	struct fsxattr xfa;

	if (copy_from_user(&xfa, ufa, sizeof(xfa)))
		return -EFAULT;

	fileattr_fill_xflags(fa, xfa.fsx_xflags);
	fa->fsx_extsize = xfa.fsx_extsize;
	fa->fsx_nextents = xfa.fsx_nextents;
	fa->fsx_projid = xfa.fsx_projid;
	fa->fsx_cowextsize = xfa.fsx_cowextsize;

	return 0;
}

/*
 * Generic function to check FS_IOC_FSSETXATTR/FS_IOC_SETFLAGS values and reject
 * any invalid configurations.
 *
 * Note: must be called with inode lock held.
 */
static int fileattr_set_prepare(struct inode *inode,
				const struct fileattr *old_ma,
				struct fileattr *fa)
{
	int err;

	/*
	 * The IMMUTABLE and APPEND_ONLY flags can only be changed with
	 * the relevant capability (CAP_LINUX_IMMUTABLE).
	 */
	if ((fa->flags ^ old_ma->flags) & (FS_APPEND_FL | FS_IMMUTABLE_FL) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		return -EPERM;

	err = fscrypt_prepare_setflags(inode, old_ma->flags, fa->flags);
	if (err)
		return err;

	/*
	 * Project Quota ID state is only allowed to change from within the init
	 * namespace. Enforce that restriction only if we are trying to change
	 * the quota ID state. Everything else is allowed in user namespaces.
	 */
	if (current_user_ns() != &init_user_ns) {
		if (old_ma->fsx_projid != fa->fsx_projid)
			return -EINVAL;
		if ((old_ma->fsx_xflags ^ fa->fsx_xflags) &
		    FS_XFLAG_PROJINHERIT)
			return -EINVAL;
	} else {
		/*
		 * Caller is allowed to change the project ID. If it is being
		 * changed, make sure that the new value is valid.
		 */
		if (old_ma->fsx_projid != fa->fsx_projid &&
		    !projid_valid(make_kprojid(&init_user_ns, fa->fsx_projid)))
			return -EINVAL;
	}

	/* Check extent size hints. */
	if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(inode->i_mode))
		return -EINVAL;

	if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) &&
	    !S_ISDIR(inode->i_mode))
		return -EINVAL;

	if ((fa->fsx_xflags & FS_XFLAG_COWEXTSIZE) &&
	    !S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
		return -EINVAL;

	/*
	 * It is only valid to set the DAX flag on regular files and
	 * directories.
	 */
	if ((fa->fsx_xflags & FS_XFLAG_DAX) &&
	    !(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)))
		return -EINVAL;

	/* Extent size hints of zero turn off the flags. */
	if (fa->fsx_extsize == 0)
		fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE | FS_XFLAG_EXTSZINHERIT);
	if (fa->fsx_cowextsize == 0)
		fa->fsx_xflags &= ~FS_XFLAG_COWEXTSIZE;

	return 0;
}

/**
 * vfs_fileattr_set - change miscellaneous file attributes
 * @idmap: idmap of the mount
 * @dentry: the object to change
 * @fa: fileattr pointer
 *
 * After verifying permissions, call the i_op->fileattr_set() callback, if
 * it exists.
 *
 * Verifying attributes involves retrieving the current attributes with
 * i_op->fileattr_get(), which also allows initializing attributes that have
 * not been set by the caller to their current values. The inode lock is held
 * throughout to prevent racing with another instance.
 *
 * Return: 0 on success, or a negative error on failure.
 */
int vfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
		     struct fileattr *fa)
{
	struct inode *inode = d_inode(dentry);
	struct fileattr old_ma = {};
	int err;

	if (!inode->i_op->fileattr_set)
		return -ENOIOCTLCMD;

	if (!inode_owner_or_capable(idmap, inode))
		return -EPERM;

	inode_lock(inode);
	err = vfs_fileattr_get(dentry, &old_ma);
	if (!err) {
		/* initialize missing bits from old_ma */
		if (fa->flags_valid) {
			fa->fsx_xflags |= old_ma.fsx_xflags & ~FS_XFLAG_COMMON;
			fa->fsx_extsize = old_ma.fsx_extsize;
			fa->fsx_nextents = old_ma.fsx_nextents;
			fa->fsx_projid = old_ma.fsx_projid;
			fa->fsx_cowextsize = old_ma.fsx_cowextsize;
		} else {
			fa->flags |= old_ma.flags & ~FS_COMMON_FL;
		}
		err = fileattr_set_prepare(inode, &old_ma, fa);
		if (!err)
			err = inode->i_op->fileattr_set(idmap, dentry, fa);
	}
	inode_unlock(inode);

	return err;
}
EXPORT_SYMBOL(vfs_fileattr_set);

static int ioctl_getflags(struct file *file, unsigned int __user *argp)
{
	struct fileattr fa = { .flags_valid = true }; /* hint only */
	int err;

	err = vfs_fileattr_get(file->f_path.dentry, &fa);
	if (!err)
		err = put_user(fa.flags, argp);
	return err;
}

static int ioctl_setflags(struct file *file, unsigned int __user *argp)
{
	struct mnt_idmap *idmap = file_mnt_idmap(file);
	struct dentry *dentry = file->f_path.dentry;
	struct fileattr fa;
	unsigned int flags;
	int err;

	err = get_user(flags, argp);
	if (!err) {
		err = mnt_want_write_file(file);
		if (!err) {
			fileattr_fill_flags(&fa, flags);
			err = vfs_fileattr_set(idmap, dentry, &fa);
			mnt_drop_write_file(file);
		}
	}
	return err;
}

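/*
 * Illustrative sketch (userspace, not part of this file): setting the
 * append-only attribute with FS_IOC_GETFLAGS/FS_IOC_SETFLAGS, the interface
 * behind lsattr(1)/chattr(1). Note that the argument is a pointer to an
 * int-sized flags word. Error handling is minimal.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int make_append_only(int fd)
 *	{
 *		int flags;
 *
 *		if (ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
 *			return -1;
 *		flags |= FS_APPEND_FL;
 *		return ioctl(fd, FS_IOC_SETFLAGS, &flags);
 *	}
 */
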
static int ioctl_fsgetxattr(struct file *file, void __user *argp)
{
	struct fileattr fa = { .fsx_valid = true }; /* hint only */
	int err;

	err = vfs_fileattr_get(file->f_path.dentry, &fa);
	if (!err)
		err = copy_fsxattr_to_user(&fa, argp);

	return err;
}

static int ioctl_fssetxattr(struct file *file, void __user *argp)
{
	struct mnt_idmap *idmap = file_mnt_idmap(file);
	struct dentry *dentry = file->f_path.dentry;
	struct fileattr fa;
	int err;

	err = copy_fsxattr_from_user(&fa, argp);
	if (!err) {
		err = mnt_want_write_file(file);
		if (!err) {
			err = vfs_fileattr_set(idmap, dentry, &fa);
			mnt_drop_write_file(file);
		}
	}
	return err;
}

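/*
 * Illustrative sketch (userspace, not part of this file): changing the
 * project ID via FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR, the "xflags"
 * interface used by tools such as xfs_io. A read-modify-write keeps the
 * other fsxattr fields intact. Error handling is minimal.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int set_project_id(int fd, __u32 projid)
 *	{
 *		struct fsxattr fsx;
 *
 *		if (ioctl(fd, FS_IOC_FSGETXATTR, &fsx) < 0)
 *			return -1;
 *		fsx.fsx_projid = projid;
 *		fsx.fsx_xflags |= FS_XFLAG_PROJINHERIT;
 *		return ioctl(fd, FS_IOC_FSSETXATTR, &fsx);
 *	}
 */
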
/*
 * do_vfs_ioctl() is not for drivers and not intended to be EXPORT_SYMBOL()'d.
 * It's just a simple helper for sys_ioctl and compat_sys_ioctl.
 *
 * When you add any new common ioctls to the switches above and below,
 * please ensure they have compatible arguments in compat mode.
 */
static int do_vfs_ioctl(struct file *filp, unsigned int fd,
			unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct inode *inode = file_inode(filp);

	switch (cmd) {
	case FIOCLEX:
		set_close_on_exec(fd, 1);
		return 0;

	case FIONCLEX:
		set_close_on_exec(fd, 0);
		return 0;

	case FIONBIO:
		return ioctl_fionbio(filp, argp);

	case FIOASYNC:
		return ioctl_fioasync(fd, filp, argp);

	case FIOQSIZE:
		if (S_ISDIR(inode->i_mode) || S_ISREG(inode->i_mode) ||
		    S_ISLNK(inode->i_mode)) {
			loff_t res = inode_get_bytes(inode);
			return copy_to_user(argp, &res, sizeof(res)) ?
					    -EFAULT : 0;
		}

		return -ENOTTY;

	case FIFREEZE:
		return ioctl_fsfreeze(filp);

	case FITHAW:
		return ioctl_fsthaw(filp);

	case FS_IOC_FIEMAP:
		return ioctl_fiemap(filp, argp);

	case FIGETBSZ:
		/* anon_bdev filesystems may not have a block size */
		if (!inode->i_sb->s_blocksize)
			return -EINVAL;

		return put_user(inode->i_sb->s_blocksize, (int __user *)argp);

	case FICLONE:
		return ioctl_file_clone(filp, arg, 0, 0, 0);

	case FICLONERANGE:
		return ioctl_file_clone_range(filp, argp);

	case FIDEDUPERANGE:
		return ioctl_file_dedupe_range(filp, argp);

	case FIONREAD:
		if (!S_ISREG(inode->i_mode))
			return vfs_ioctl(filp, cmd, arg);

		return put_user(i_size_read(inode) - filp->f_pos,
				(int __user *)argp);

	case FS_IOC_GETFLAGS:
		return ioctl_getflags(filp, argp);

	case FS_IOC_SETFLAGS:
		return ioctl_setflags(filp, argp);

	case FS_IOC_FSGETXATTR:
		return ioctl_fsgetxattr(filp, argp);

	case FS_IOC_FSSETXATTR:
		return ioctl_fssetxattr(filp, argp);

	default:
		if (S_ISREG(inode->i_mode))
			return file_ioctl(filp, cmd, argp);
		break;
	}

	return -ENOIOCTLCMD;
}

SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
	struct fd f = fdget(fd);
	int error;

	if (!f.file)
		return -EBADF;

	error = security_file_ioctl(f.file, cmd, arg);
	if (error)
		goto out;

	error = do_vfs_ioctl(f.file, fd, cmd, arg);
	if (error == -ENOIOCTLCMD)
		error = vfs_ioctl(f.file, cmd, arg);

out:
	fdput(f);
	return error;
}

#ifdef CONFIG_COMPAT
/**
 * compat_ptr_ioctl - generic implementation of .compat_ioctl file operation
 * @file: The file to operate on.
 * @cmd: The ioctl command number.
 * @arg: The argument to the ioctl.
 *
 * This is not normally called as a function, but instead set in struct
 * file_operations as
 *
 *     .compat_ioctl = compat_ptr_ioctl,
 *
 * On most architectures, compat_ptr_ioctl() just passes all arguments to
 * the corresponding ->unlocked_ioctl handler. The exception is arch/s390, where
 * compat_ptr() clears the top bit of a 32-bit pointer value, so user space
 * pointers to the second 2GB alias the first 2GB, as is the case for
 * native 32-bit s390 user space.
 *
 * The compat_ptr_ioctl() function must therefore be used only with ioctl
 * functions that either ignore the argument or pass a pointer to a
 * compatible data type.
 *
 * If any ioctl command handled by fops->unlocked_ioctl passes a plain
 * integer instead of a pointer, or any of the passed data types
 * is incompatible between 32-bit and 64-bit architectures, a proper
 * handler is required instead of compat_ptr_ioctl.
 */
long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (!file->f_op->unlocked_ioctl)
		return -ENOIOCTLCMD;

	return file->f_op->unlocked_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
EXPORT_SYMBOL(compat_ptr_ioctl);

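/*
 * Illustrative sketch (not from any real driver): how a driver whose ioctl
 * commands only pass pointers to layout-compatible structures would wire up
 * compat_ptr_ioctl. "foo_ioctl" and "foo_fops" are hypothetical names.
 *
 *	static long foo_ioctl(struct file *file, unsigned int cmd,
 *			      unsigned long arg)
 *	{
 *		// ... handle commands, treating arg as a user pointer ...
 *		return -ENOTTY;
 *	}
 *
 *	static const struct file_operations foo_fops = {
 *		.owner		= THIS_MODULE,
 *		.unlocked_ioctl	= foo_ioctl,
 *		// 32-bit user space gets its pointer fixed up where needed
 *		// (s390) and is then routed to the same handler.
 *		.compat_ioctl	= compat_ptr_ioctl,
 *	};
 */
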
COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
		       compat_ulong_t, arg)
{
	struct fd f = fdget(fd);
	int error;

	if (!f.file)
		return -EBADF;

	error = security_file_ioctl_compat(f.file, cmd, arg);
	if (error)
		goto out;

	switch (cmd) {
	/* FICLONE takes an int argument, so don't use compat_ptr() */
	case FICLONE:
		error = ioctl_file_clone(f.file, arg, 0, 0, 0);
		break;

#if defined(CONFIG_X86_64)
	/* these get messy on amd64 due to alignment differences */
	case FS_IOC_RESVSP_32:
	case FS_IOC_RESVSP64_32:
		error = compat_ioctl_preallocate(f.file, 0, compat_ptr(arg));
		break;
	case FS_IOC_UNRESVSP_32:
	case FS_IOC_UNRESVSP64_32:
		error = compat_ioctl_preallocate(f.file, FALLOC_FL_PUNCH_HOLE,
						 compat_ptr(arg));
		break;
	case FS_IOC_ZERO_RANGE_32:
		error = compat_ioctl_preallocate(f.file, FALLOC_FL_ZERO_RANGE,
						 compat_ptr(arg));
		break;
#endif

	/*
	 * These access 32-bit values anyway so no further handling is
	 * necessary.
	 */
	case FS_IOC32_GETFLAGS:
	case FS_IOC32_SETFLAGS:
		cmd = (cmd == FS_IOC32_GETFLAGS) ?
			FS_IOC_GETFLAGS : FS_IOC_SETFLAGS;
		fallthrough;
	/*
	 * everything else in do_vfs_ioctl() takes either a compatible
	 * pointer argument or no argument -- call it with a modified
	 * argument.
	 */
	default:
		error = do_vfs_ioctl(f.file, fd, cmd,
				     (unsigned long)compat_ptr(arg));
		if (error != -ENOIOCTLCMD)
			break;

		if (f.file->f_op->compat_ioctl)
			error = f.file->f_op->compat_ioctl(f.file, cmd, arg);
		if (error == -ENOIOCTLCMD)
			error = -ENOTTY;
		break;
	}

 out:
	fdput(f);

	return error;
}
#endif