fs/stat.c (Linux v6.2)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/fs/stat.c
  4 *
  5 *  Copyright (C) 1991, 1992  Linus Torvalds
  6 */
  7
  8#include <linux/blkdev.h>
  9#include <linux/export.h>
 10#include <linux/mm.h>
 11#include <linux/errno.h>
 12#include <linux/file.h>
 13#include <linux/highuid.h>
 14#include <linux/fs.h>
 15#include <linux/namei.h>
 16#include <linux/security.h>
 17#include <linux/cred.h>
 18#include <linux/syscalls.h>
 19#include <linux/pagemap.h>
 20#include <linux/compat.h>
 21
 22#include <linux/uaccess.h>
 23#include <asm/unistd.h>
 24
 25#include "internal.h"
 26#include "mount.h"
 27
 28/**
 29 * generic_fillattr - Fill in the basic attributes from the inode struct
 30 * @mnt_userns:	user namespace of the mount the inode was found from
 31 * @inode:	Inode to use as the source
 32 * @stat:	Where to fill in the attributes
 33 *
 34 * Fill in the basic attributes in the kstat structure from data that's to be
 35 * found on the VFS inode structure.  This is the default if no getattr inode
 36 * operation is supplied.
 37 *
 38 * If the inode has been found through an idmapped mount the user namespace of
 39 * the vfsmount must be passed through @mnt_userns. This function will then
 40 * take care to map the inode according to @mnt_userns before filling in the
  41 * uid and gid fields. On non-idmapped mounts or if permission checking is to be
  42 * performed on the raw inode simply pass init_user_ns.
 43 */
 44void generic_fillattr(struct user_namespace *mnt_userns, struct inode *inode,
 45		      struct kstat *stat)
 46{
 47	vfsuid_t vfsuid = i_uid_into_vfsuid(mnt_userns, inode);
 48	vfsgid_t vfsgid = i_gid_into_vfsgid(mnt_userns, inode);
 49
 50	stat->dev = inode->i_sb->s_dev;
 51	stat->ino = inode->i_ino;
 52	stat->mode = inode->i_mode;
 53	stat->nlink = inode->i_nlink;
 54	stat->uid = vfsuid_into_kuid(vfsuid);
 55	stat->gid = vfsgid_into_kgid(vfsgid);
 56	stat->rdev = inode->i_rdev;
 57	stat->size = i_size_read(inode);
 58	stat->atime = inode->i_atime;
 59	stat->mtime = inode->i_mtime;
 60	stat->ctime = inode->i_ctime;
 61	stat->blksize = i_blocksize(inode);
 62	stat->blocks = inode->i_blocks;
 63}
 64EXPORT_SYMBOL(generic_fillattr);
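/*
 * Illustrative sketch (not part of fs/stat.c): a filesystem ->getattr
 * implementation typically calls generic_fillattr() for the common fields
 * and then overrides whatever it tracks itself.  The "examplefs" name and
 * the 64KiB preferred I/O size below are made up for the example.
 *
 *	static int examplefs_getattr(struct user_namespace *mnt_userns,
 *				     const struct path *path, struct kstat *stat,
 *				     u32 request_mask, unsigned int query_flags)
 *	{
 *		struct inode *inode = d_inode(path->dentry);
 *
 *		generic_fillattr(mnt_userns, inode, stat);
 *		stat->blksize = 64 * 1024;
 *		return 0;
 *	}
 */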
 65
 66/**
 67 * generic_fill_statx_attr - Fill in the statx attributes from the inode flags
 68 * @inode:	Inode to use as the source
 69 * @stat:	Where to fill in the attribute flags
 70 *
 71 * Fill in the STATX_ATTR_* flags in the kstat structure for properties of the
 72 * inode that are published on i_flags and enforced by the VFS.
 73 */
 74void generic_fill_statx_attr(struct inode *inode, struct kstat *stat)
 75{
 76	if (inode->i_flags & S_IMMUTABLE)
 77		stat->attributes |= STATX_ATTR_IMMUTABLE;
 78	if (inode->i_flags & S_APPEND)
 79		stat->attributes |= STATX_ATTR_APPEND;
 80	stat->attributes_mask |= KSTAT_ATTR_VFS_FLAGS;
 81}
 82EXPORT_SYMBOL(generic_fill_statx_attr);
 83
 84/**
 85 * vfs_getattr_nosec - getattr without security checks
 86 * @path: file to get attributes from
 87 * @stat: structure to return attributes in
 88 * @request_mask: STATX_xxx flags indicating what the caller wants
 89 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 90 *
 91 * Get attributes without calling security_inode_getattr.
 92 *
 93 * Currently the only caller other than vfs_getattr is internal to the
 94 * filehandle lookup code, which uses only the inode number and returns no
 95 * attributes to any user.  Any other code probably wants vfs_getattr.
 96 */
 97int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
 98		      u32 request_mask, unsigned int query_flags)
 99{
100	struct user_namespace *mnt_userns;
101	struct inode *inode = d_backing_inode(path->dentry);
102
103	memset(stat, 0, sizeof(*stat));
104	stat->result_mask |= STATX_BASIC_STATS;
105	query_flags &= AT_STATX_SYNC_TYPE;
106
107	/* allow the fs to override these if it really wants to */
108	/* SB_NOATIME means filesystem supplies dummy atime value */
109	if (inode->i_sb->s_flags & SB_NOATIME)
110		stat->result_mask &= ~STATX_ATIME;
111
112	/*
113	 * Note: If you add another clause to set an attribute flag, please
114	 * update attributes_mask below.
115	 */
116	if (IS_AUTOMOUNT(inode))
117		stat->attributes |= STATX_ATTR_AUTOMOUNT;
118
119	if (IS_DAX(inode))
120		stat->attributes |= STATX_ATTR_DAX;
121
122	stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
123				  STATX_ATTR_DAX);
124
125	mnt_userns = mnt_user_ns(path->mnt);
126	if (inode->i_op->getattr)
127		return inode->i_op->getattr(mnt_userns, path, stat,
128					    request_mask, query_flags);
129
130	generic_fillattr(mnt_userns, inode, stat);
131	return 0;
132}
133EXPORT_SYMBOL(vfs_getattr_nosec);
134
135/*
136 * vfs_getattr - Get the enhanced basic attributes of a file
137 * @path: The file of interest
138 * @stat: Where to return the statistics
139 * @request_mask: STATX_xxx flags indicating what the caller wants
140 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
141 *
 142 * Ask the filesystem for a file's attributes.  The caller must use
143 * request_mask and query_flags to indicate what they want.
144 *
145 * If the file is remote, the filesystem can be forced to update the attributes
146 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
147 * suppress the update by passing AT_STATX_DONT_SYNC.
148 *
149 * Bits must have been set in request_mask to indicate which attributes the
150 * caller wants retrieving.  Any such attribute not requested may be returned
151 * anyway, but the value may be approximate, and, if remote, may not have been
152 * synchronised with the server.
153 *
154 * 0 will be returned on success, and a -ve error code if unsuccessful.
155 */
156int vfs_getattr(const struct path *path, struct kstat *stat,
157		u32 request_mask, unsigned int query_flags)
158{
159	int retval;
160
161	retval = security_inode_getattr(path);
162	if (retval)
163		return retval;
164	return vfs_getattr_nosec(path, stat, request_mask, query_flags);
165}
166EXPORT_SYMBOL(vfs_getattr);
167
168/**
169 * vfs_fstat - Get the basic attributes by file descriptor
170 * @fd: The file descriptor referring to the file of interest
171 * @stat: The result structure to fill in.
172 *
173 * This function is a wrapper around vfs_getattr().  The main difference is
174 * that it uses a file descriptor to determine the file location.
175 *
176 * 0 will be returned on success, and a -ve error code if unsuccessful.
177 */
178int vfs_fstat(int fd, struct kstat *stat)
179{
180	struct fd f;
181	int error;
182
183	f = fdget_raw(fd);
184	if (!f.file)
185		return -EBADF;
186	error = vfs_getattr(&f.file->f_path, stat, STATX_BASIC_STATS, 0);
187	fdput(f);
188	return error;
189}
190
191int getname_statx_lookup_flags(int flags)
192{
193	int lookup_flags = 0;
194
195	if (!(flags & AT_SYMLINK_NOFOLLOW))
196		lookup_flags |= LOOKUP_FOLLOW;
197	if (!(flags & AT_NO_AUTOMOUNT))
198		lookup_flags |= LOOKUP_AUTOMOUNT;
199	if (flags & AT_EMPTY_PATH)
200		lookup_flags |= LOOKUP_EMPTY;
201
202	return lookup_flags;
203}
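/*
 * Worked example (illustrative only): for flags == AT_SYMLINK_NOFOLLOW |
 * AT_EMPTY_PATH the helper above returns LOOKUP_AUTOMOUNT | LOOKUP_EMPTY:
 * LOOKUP_FOLLOW is left out because AT_SYMLINK_NOFOLLOW is set, and
 * LOOKUP_AUTOMOUNT is added because AT_NO_AUTOMOUNT is not set.
 */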
204
205/**
206 * vfs_statx - Get basic and extra attributes by filename
207 * @dfd: A file descriptor representing the base dir for a relative filename
208 * @filename: The name of the file of interest
209 * @flags: Flags to control the query
210 * @stat: The result structure to fill in.
211 * @request_mask: STATX_xxx flags indicating what the caller wants
212 *
213 * This function is a wrapper around vfs_getattr().  The main difference is
214 * that it uses a filename and base directory to determine the file location.
215 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
216 * at the given name from being referenced.
217 *
218 * 0 will be returned on success, and a -ve error code if unsuccessful.
219 */
220static int vfs_statx(int dfd, struct filename *filename, int flags,
221	      struct kstat *stat, u32 request_mask)
222{
223	struct path path;
224	unsigned int lookup_flags = getname_statx_lookup_flags(flags);
225	int error;
226
227	if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH |
228		      AT_STATX_SYNC_TYPE))
229		return -EINVAL;
230
231retry:
232	error = filename_lookup(dfd, filename, lookup_flags, &path, NULL);
233	if (error)
234		goto out;
235
236	error = vfs_getattr(&path, stat, request_mask, flags);
237
238	stat->mnt_id = real_mount(path.mnt)->mnt_id;
239	stat->result_mask |= STATX_MNT_ID;
240
241	if (path.mnt->mnt_root == path.dentry)
242		stat->attributes |= STATX_ATTR_MOUNT_ROOT;
243	stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;
244
245	/* Handle STATX_DIOALIGN for block devices. */
246	if (request_mask & STATX_DIOALIGN) {
247		struct inode *inode = d_backing_inode(path.dentry);
248
249		if (S_ISBLK(inode->i_mode))
250			bdev_statx_dioalign(inode, stat);
251	}
252
253	path_put(&path);
254	if (retry_estale(error, lookup_flags)) {
255		lookup_flags |= LOOKUP_REVAL;
256		goto retry;
257	}
258out:
259	return error;
260}
261
262int vfs_fstatat(int dfd, const char __user *filename,
263			      struct kstat *stat, int flags)
264{
265	int ret;
266	int statx_flags = flags | AT_NO_AUTOMOUNT;
267	struct filename *name;
268
269	name = getname_flags(filename, getname_statx_lookup_flags(statx_flags), NULL);
270	ret = vfs_statx(dfd, name, statx_flags, stat, STATX_BASIC_STATS);
271	putname(name);
272
273	return ret;
274}
275
276#ifdef __ARCH_WANT_OLD_STAT
277
278/*
279 * For backward compatibility?  Maybe this should be moved
280 * into arch/i386 instead?
281 */
282static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
283{
284	static int warncount = 5;
285	struct __old_kernel_stat tmp;
286
287	if (warncount > 0) {
288		warncount--;
289		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
290			current->comm);
291	} else if (warncount < 0) {
292		/* it's laughable, but... */
293		warncount = 0;
294	}
295
296	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
297	tmp.st_dev = old_encode_dev(stat->dev);
298	tmp.st_ino = stat->ino;
299	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
300		return -EOVERFLOW;
301	tmp.st_mode = stat->mode;
302	tmp.st_nlink = stat->nlink;
303	if (tmp.st_nlink != stat->nlink)
304		return -EOVERFLOW;
305	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
306	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
307	tmp.st_rdev = old_encode_dev(stat->rdev);
308#if BITS_PER_LONG == 32
309	if (stat->size > MAX_NON_LFS)
310		return -EOVERFLOW;
311#endif
312	tmp.st_size = stat->size;
313	tmp.st_atime = stat->atime.tv_sec;
314	tmp.st_mtime = stat->mtime.tv_sec;
315	tmp.st_ctime = stat->ctime.tv_sec;
316	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
317}
318
319SYSCALL_DEFINE2(stat, const char __user *, filename,
320		struct __old_kernel_stat __user *, statbuf)
321{
322	struct kstat stat;
323	int error;
324
325	error = vfs_stat(filename, &stat);
326	if (error)
327		return error;
328
329	return cp_old_stat(&stat, statbuf);
330}
331
332SYSCALL_DEFINE2(lstat, const char __user *, filename,
333		struct __old_kernel_stat __user *, statbuf)
334{
335	struct kstat stat;
336	int error;
337
338	error = vfs_lstat(filename, &stat);
339	if (error)
340		return error;
341
342	return cp_old_stat(&stat, statbuf);
343}
344
345SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
346{
347	struct kstat stat;
348	int error = vfs_fstat(fd, &stat);
349
350	if (!error)
351		error = cp_old_stat(&stat, statbuf);
352
353	return error;
354}
355
356#endif /* __ARCH_WANT_OLD_STAT */
357
358#ifdef __ARCH_WANT_NEW_STAT
359
360#if BITS_PER_LONG == 32
361#  define choose_32_64(a,b) a
362#else
363#  define choose_32_64(a,b) b
364#endif
365
366#ifndef INIT_STRUCT_STAT_PADDING
367#  define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
368#endif
369
370static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
371{
372	struct stat tmp;
373
374	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
375		return -EOVERFLOW;
376	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
377		return -EOVERFLOW;
378#if BITS_PER_LONG == 32
379	if (stat->size > MAX_NON_LFS)
380		return -EOVERFLOW;
381#endif
382
383	INIT_STRUCT_STAT_PADDING(tmp);
384	tmp.st_dev = new_encode_dev(stat->dev);
385	tmp.st_ino = stat->ino;
386	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
387		return -EOVERFLOW;
388	tmp.st_mode = stat->mode;
389	tmp.st_nlink = stat->nlink;
390	if (tmp.st_nlink != stat->nlink)
391		return -EOVERFLOW;
392	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
393	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
394	tmp.st_rdev = new_encode_dev(stat->rdev);
395	tmp.st_size = stat->size;
396	tmp.st_atime = stat->atime.tv_sec;
397	tmp.st_mtime = stat->mtime.tv_sec;
398	tmp.st_ctime = stat->ctime.tv_sec;
399#ifdef STAT_HAVE_NSEC
400	tmp.st_atime_nsec = stat->atime.tv_nsec;
401	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
402	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
403#endif
404	tmp.st_blocks = stat->blocks;
405	tmp.st_blksize = stat->blksize;
406	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
407}
408
409SYSCALL_DEFINE2(newstat, const char __user *, filename,
410		struct stat __user *, statbuf)
411{
412	struct kstat stat;
413	int error = vfs_stat(filename, &stat);
414
415	if (error)
416		return error;
417	return cp_new_stat(&stat, statbuf);
418}
419
420SYSCALL_DEFINE2(newlstat, const char __user *, filename,
421		struct stat __user *, statbuf)
422{
423	struct kstat stat;
424	int error;
425
426	error = vfs_lstat(filename, &stat);
427	if (error)
428		return error;
429
430	return cp_new_stat(&stat, statbuf);
431}
432
433#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
434SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
435		struct stat __user *, statbuf, int, flag)
436{
437	struct kstat stat;
438	int error;
439
440	error = vfs_fstatat(dfd, filename, &stat, flag);
441	if (error)
442		return error;
443	return cp_new_stat(&stat, statbuf);
444}
445#endif
446
447SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
448{
449	struct kstat stat;
450	int error = vfs_fstat(fd, &stat);
451
452	if (!error)
453		error = cp_new_stat(&stat, statbuf);
454
455	return error;
456}
457#endif
458
459static int do_readlinkat(int dfd, const char __user *pathname,
460			 char __user *buf, int bufsiz)
461{
462	struct path path;
463	int error;
464	int empty = 0;
465	unsigned int lookup_flags = LOOKUP_EMPTY;
466
467	if (bufsiz <= 0)
468		return -EINVAL;
469
470retry:
471	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
472	if (!error) {
473		struct inode *inode = d_backing_inode(path.dentry);
474
475		error = empty ? -ENOENT : -EINVAL;
476		/*
477		 * AFS mountpoints allow readlink(2) but are not symlinks
478		 */
479		if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
480			error = security_inode_readlink(path.dentry);
481			if (!error) {
482				touch_atime(&path);
483				error = vfs_readlink(path.dentry, buf, bufsiz);
484			}
485		}
486		path_put(&path);
487		if (retry_estale(error, lookup_flags)) {
488			lookup_flags |= LOOKUP_REVAL;
489			goto retry;
490		}
491	}
492	return error;
493}
494
495SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
496		char __user *, buf, int, bufsiz)
497{
498	return do_readlinkat(dfd, pathname, buf, bufsiz);
499}
500
501SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
502		int, bufsiz)
503{
504	return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
505}
506
507
508/* ---------- LFS-64 ----------- */
509#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
510
511#ifndef INIT_STRUCT_STAT64_PADDING
512#  define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
513#endif
514
515static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
516{
517	struct stat64 tmp;
518
519	INIT_STRUCT_STAT64_PADDING(tmp);
520#ifdef CONFIG_MIPS
521	/* mips has weird padding, so we don't get 64 bits there */
522	tmp.st_dev = new_encode_dev(stat->dev);
523	tmp.st_rdev = new_encode_dev(stat->rdev);
524#else
525	tmp.st_dev = huge_encode_dev(stat->dev);
526	tmp.st_rdev = huge_encode_dev(stat->rdev);
527#endif
528	tmp.st_ino = stat->ino;
529	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
530		return -EOVERFLOW;
531#ifdef STAT64_HAS_BROKEN_ST_INO
532	tmp.__st_ino = stat->ino;
533#endif
534	tmp.st_mode = stat->mode;
535	tmp.st_nlink = stat->nlink;
536	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
537	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
538	tmp.st_atime = stat->atime.tv_sec;
539	tmp.st_atime_nsec = stat->atime.tv_nsec;
540	tmp.st_mtime = stat->mtime.tv_sec;
541	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
542	tmp.st_ctime = stat->ctime.tv_sec;
543	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
544	tmp.st_size = stat->size;
545	tmp.st_blocks = stat->blocks;
546	tmp.st_blksize = stat->blksize;
547	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
548}
549
550SYSCALL_DEFINE2(stat64, const char __user *, filename,
551		struct stat64 __user *, statbuf)
552{
553	struct kstat stat;
554	int error = vfs_stat(filename, &stat);
555
556	if (!error)
557		error = cp_new_stat64(&stat, statbuf);
558
559	return error;
560}
561
562SYSCALL_DEFINE2(lstat64, const char __user *, filename,
563		struct stat64 __user *, statbuf)
564{
565	struct kstat stat;
566	int error = vfs_lstat(filename, &stat);
567
568	if (!error)
569		error = cp_new_stat64(&stat, statbuf);
570
571	return error;
572}
573
574SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
575{
576	struct kstat stat;
577	int error = vfs_fstat(fd, &stat);
578
579	if (!error)
580		error = cp_new_stat64(&stat, statbuf);
581
582	return error;
583}
584
585SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
586		struct stat64 __user *, statbuf, int, flag)
587{
588	struct kstat stat;
589	int error;
590
591	error = vfs_fstatat(dfd, filename, &stat, flag);
592	if (error)
593		return error;
594	return cp_new_stat64(&stat, statbuf);
595}
596#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
597
598static noinline_for_stack int
599cp_statx(const struct kstat *stat, struct statx __user *buffer)
600{
601	struct statx tmp;
602
603	memset(&tmp, 0, sizeof(tmp));
604
605	tmp.stx_mask = stat->result_mask;
606	tmp.stx_blksize = stat->blksize;
607	tmp.stx_attributes = stat->attributes;
608	tmp.stx_nlink = stat->nlink;
609	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
610	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
611	tmp.stx_mode = stat->mode;
612	tmp.stx_ino = stat->ino;
613	tmp.stx_size = stat->size;
614	tmp.stx_blocks = stat->blocks;
615	tmp.stx_attributes_mask = stat->attributes_mask;
616	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
617	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
618	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
619	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
620	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
621	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
622	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
623	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
624	tmp.stx_rdev_major = MAJOR(stat->rdev);
625	tmp.stx_rdev_minor = MINOR(stat->rdev);
626	tmp.stx_dev_major = MAJOR(stat->dev);
627	tmp.stx_dev_minor = MINOR(stat->dev);
628	tmp.stx_mnt_id = stat->mnt_id;
629	tmp.stx_dio_mem_align = stat->dio_mem_align;
630	tmp.stx_dio_offset_align = stat->dio_offset_align;
631
632	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
633}
634
635int do_statx(int dfd, struct filename *filename, unsigned int flags,
636	     unsigned int mask, struct statx __user *buffer)
637{
638	struct kstat stat;
639	int error;
640
641	if (mask & STATX__RESERVED)
642		return -EINVAL;
643	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
644		return -EINVAL;
645
646	error = vfs_statx(dfd, filename, flags, &stat, mask);
647	if (error)
648		return error;
649
650	return cp_statx(&stat, buffer);
651}
652
653/**
654 * sys_statx - System call to get enhanced stats
655 * @dfd: Base directory to pathwalk from *or* fd to stat.
656 * @filename: File to stat or "" with AT_EMPTY_PATH
657 * @flags: AT_* flags to control pathwalk.
658 * @mask: Parts of statx struct actually required.
659 * @buffer: Result buffer.
660 *
661 * Note that fstat() can be emulated by setting dfd to the fd of interest,
662 * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
663 */
664SYSCALL_DEFINE5(statx,
665		int, dfd, const char __user *, filename, unsigned, flags,
666		unsigned int, mask,
667		struct statx __user *, buffer)
668{
669	int ret;
670	struct filename *name;
671
672	name = getname_flags(filename, getname_statx_lookup_flags(flags), NULL);
673	ret = do_statx(dfd, name, flags, mask, buffer);
674	putname(name);
675
676	return ret;
677}
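/*
 * Usage sketch (userspace, illustrative only): the fstat()-style call
 * described in the comment above, via the statx(2) wrapper declared in
 * <sys/stat.h> (glibc 2.28+, with AT_EMPTY_PATH from <fcntl.h> under
 * _GNU_SOURCE).
 *
 *	struct statx stx;
 *
 *	if (statx(fd, "", AT_EMPTY_PATH, STATX_BASIC_STATS, &stx) == 0)
 *		printf("size=%llu\n", (unsigned long long)stx.stx_size);
 */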
678
679#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_STAT)
680static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
681{
682	struct compat_stat tmp;
683
684	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
685		return -EOVERFLOW;
686	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
687		return -EOVERFLOW;
688
689	memset(&tmp, 0, sizeof(tmp));
690	tmp.st_dev = new_encode_dev(stat->dev);
691	tmp.st_ino = stat->ino;
692	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
693		return -EOVERFLOW;
694	tmp.st_mode = stat->mode;
695	tmp.st_nlink = stat->nlink;
696	if (tmp.st_nlink != stat->nlink)
697		return -EOVERFLOW;
698	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
699	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
700	tmp.st_rdev = new_encode_dev(stat->rdev);
701	if ((u64) stat->size > MAX_NON_LFS)
702		return -EOVERFLOW;
703	tmp.st_size = stat->size;
704	tmp.st_atime = stat->atime.tv_sec;
705	tmp.st_atime_nsec = stat->atime.tv_nsec;
706	tmp.st_mtime = stat->mtime.tv_sec;
707	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
708	tmp.st_ctime = stat->ctime.tv_sec;
709	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
710	tmp.st_blocks = stat->blocks;
711	tmp.st_blksize = stat->blksize;
712	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
713}
714
715COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
716		       struct compat_stat __user *, statbuf)
717{
718	struct kstat stat;
719	int error;
720
721	error = vfs_stat(filename, &stat);
722	if (error)
723		return error;
724	return cp_compat_stat(&stat, statbuf);
725}
726
727COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
728		       struct compat_stat __user *, statbuf)
729{
730	struct kstat stat;
731	int error;
732
733	error = vfs_lstat(filename, &stat);
734	if (error)
735		return error;
736	return cp_compat_stat(&stat, statbuf);
737}
738
739#ifndef __ARCH_WANT_STAT64
740COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
741		       const char __user *, filename,
742		       struct compat_stat __user *, statbuf, int, flag)
743{
744	struct kstat stat;
745	int error;
746
747	error = vfs_fstatat(dfd, filename, &stat, flag);
748	if (error)
749		return error;
750	return cp_compat_stat(&stat, statbuf);
751}
752#endif
753
754COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
755		       struct compat_stat __user *, statbuf)
756{
757	struct kstat stat;
758	int error = vfs_fstat(fd, &stat);
759
760	if (!error)
761		error = cp_compat_stat(&stat, statbuf);
762	return error;
763}
764#endif
765
766/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
767void __inode_add_bytes(struct inode *inode, loff_t bytes)
768{
769	inode->i_blocks += bytes >> 9;
770	bytes &= 511;
771	inode->i_bytes += bytes;
772	if (inode->i_bytes >= 512) {
773		inode->i_blocks++;
774		inode->i_bytes -= 512;
775	}
776}
777EXPORT_SYMBOL(__inode_add_bytes);
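/*
 * Worked example: with i_bytes == 300, __inode_add_bytes(inode, 1000) adds
 * 1000 >> 9 == 1 to i_blocks and 1000 & 511 == 488 to i_bytes; i_bytes is
 * then 788 >= 512, so one more block is carried and i_bytes ends up at 276,
 * i.e. the original 300 bytes plus the added 1000 are accounted as two whole
 * 512-byte blocks plus 276 bytes.
 */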
778
779void inode_add_bytes(struct inode *inode, loff_t bytes)
780{
781	spin_lock(&inode->i_lock);
782	__inode_add_bytes(inode, bytes);
783	spin_unlock(&inode->i_lock);
784}
785
786EXPORT_SYMBOL(inode_add_bytes);
787
788void __inode_sub_bytes(struct inode *inode, loff_t bytes)
789{
790	inode->i_blocks -= bytes >> 9;
791	bytes &= 511;
792	if (inode->i_bytes < bytes) {
793		inode->i_blocks--;
794		inode->i_bytes += 512;
795	}
796	inode->i_bytes -= bytes;
797}
798
799EXPORT_SYMBOL(__inode_sub_bytes);
800
801void inode_sub_bytes(struct inode *inode, loff_t bytes)
802{
803	spin_lock(&inode->i_lock);
804	__inode_sub_bytes(inode, bytes);
805	spin_unlock(&inode->i_lock);
806}
807
808EXPORT_SYMBOL(inode_sub_bytes);
809
810loff_t inode_get_bytes(struct inode *inode)
811{
812	loff_t ret;
813
814	spin_lock(&inode->i_lock);
815	ret = __inode_get_bytes(inode);
816	spin_unlock(&inode->i_lock);
817	return ret;
818}
819
820EXPORT_SYMBOL(inode_get_bytes);
821
822void inode_set_bytes(struct inode *inode, loff_t bytes)
823{
824	/* Caller is here responsible for sufficient locking
825	 * (ie. inode->i_lock) */
826	inode->i_blocks = bytes >> 9;
827	inode->i_bytes = bytes & 511;
828}
829
830EXPORT_SYMBOL(inode_set_bytes);
fs/stat.c (Linux v6.8)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/fs/stat.c
  4 *
  5 *  Copyright (C) 1991, 1992  Linus Torvalds
  6 */
  7
  8#include <linux/blkdev.h>
  9#include <linux/export.h>
 10#include <linux/mm.h>
 11#include <linux/errno.h>
 12#include <linux/file.h>
 13#include <linux/highuid.h>
 14#include <linux/fs.h>
 15#include <linux/namei.h>
 16#include <linux/security.h>
 17#include <linux/cred.h>
 18#include <linux/syscalls.h>
 19#include <linux/pagemap.h>
 20#include <linux/compat.h>
 21#include <linux/iversion.h>
 22
 23#include <linux/uaccess.h>
 24#include <asm/unistd.h>
 25
 26#include "internal.h"
 27#include "mount.h"
 28
 29/**
 30 * generic_fillattr - Fill in the basic attributes from the inode struct
 31 * @idmap:		idmap of the mount the inode was found from
 32 * @request_mask:	statx request_mask
 33 * @inode:		Inode to use as the source
 34 * @stat:		Where to fill in the attributes
 35 *
 36 * Fill in the basic attributes in the kstat structure from data that's to be
 37 * found on the VFS inode structure.  This is the default if no getattr inode
 38 * operation is supplied.
 39 *
 40 * If the inode has been found through an idmapped mount the idmap of
 41 * the vfsmount must be passed through @idmap. This function will then
 42 * take care to map the inode according to @idmap before filling in the
  43 * uid and gid fields. On non-idmapped mounts or if permission checking is to be
 44 * performed on the raw inode simply pass @nop_mnt_idmap.
 45 */
 46void generic_fillattr(struct mnt_idmap *idmap, u32 request_mask,
 47		      struct inode *inode, struct kstat *stat)
 48{
 49	vfsuid_t vfsuid = i_uid_into_vfsuid(idmap, inode);
 50	vfsgid_t vfsgid = i_gid_into_vfsgid(idmap, inode);
 51
 52	stat->dev = inode->i_sb->s_dev;
 53	stat->ino = inode->i_ino;
 54	stat->mode = inode->i_mode;
 55	stat->nlink = inode->i_nlink;
 56	stat->uid = vfsuid_into_kuid(vfsuid);
 57	stat->gid = vfsgid_into_kgid(vfsgid);
 58	stat->rdev = inode->i_rdev;
 59	stat->size = i_size_read(inode);
 60	stat->atime = inode_get_atime(inode);
 61	stat->mtime = inode_get_mtime(inode);
 62	stat->ctime = inode_get_ctime(inode);
 63	stat->blksize = i_blocksize(inode);
 64	stat->blocks = inode->i_blocks;
 65
 66	if ((request_mask & STATX_CHANGE_COOKIE) && IS_I_VERSION(inode)) {
 67		stat->result_mask |= STATX_CHANGE_COOKIE;
 68		stat->change_cookie = inode_query_iversion(inode);
 69	}
 70
 71}
 72EXPORT_SYMBOL(generic_fillattr);
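/*
 * Illustrative sketch (not part of fs/stat.c): compared with v6.2 the
 * prototype now takes a struct mnt_idmap plus the statx request_mask, so a
 * filesystem ->getattr forwards both.  The "examplefs" name is made up for
 * the example.
 *
 *	static int examplefs_getattr(struct mnt_idmap *idmap,
 *				     const struct path *path, struct kstat *stat,
 *				     u32 request_mask, unsigned int query_flags)
 *	{
 *		generic_fillattr(idmap, request_mask, d_inode(path->dentry), stat);
 *		return 0;
 *	}
 */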
 73
 74/**
 75 * generic_fill_statx_attr - Fill in the statx attributes from the inode flags
 76 * @inode:	Inode to use as the source
 77 * @stat:	Where to fill in the attribute flags
 78 *
 79 * Fill in the STATX_ATTR_* flags in the kstat structure for properties of the
 80 * inode that are published on i_flags and enforced by the VFS.
 81 */
 82void generic_fill_statx_attr(struct inode *inode, struct kstat *stat)
 83{
 84	if (inode->i_flags & S_IMMUTABLE)
 85		stat->attributes |= STATX_ATTR_IMMUTABLE;
 86	if (inode->i_flags & S_APPEND)
 87		stat->attributes |= STATX_ATTR_APPEND;
 88	stat->attributes_mask |= KSTAT_ATTR_VFS_FLAGS;
 89}
 90EXPORT_SYMBOL(generic_fill_statx_attr);
 91
 92/**
 93 * vfs_getattr_nosec - getattr without security checks
 94 * @path: file to get attributes from
 95 * @stat: structure to return attributes in
 96 * @request_mask: STATX_xxx flags indicating what the caller wants
 97 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 98 *
 99 * Get attributes without calling security_inode_getattr.
100 *
101 * Currently the only caller other than vfs_getattr is internal to the
102 * filehandle lookup code, which uses only the inode number and returns no
103 * attributes to any user.  Any other code probably wants vfs_getattr.
104 */
105int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
106		      u32 request_mask, unsigned int query_flags)
107{
108	struct mnt_idmap *idmap;
109	struct inode *inode = d_backing_inode(path->dentry);
110
111	memset(stat, 0, sizeof(*stat));
112	stat->result_mask |= STATX_BASIC_STATS;
113	query_flags &= AT_STATX_SYNC_TYPE;
114
115	/* allow the fs to override these if it really wants to */
116	/* SB_NOATIME means filesystem supplies dummy atime value */
117	if (inode->i_sb->s_flags & SB_NOATIME)
118		stat->result_mask &= ~STATX_ATIME;
119
120	/*
121	 * Note: If you add another clause to set an attribute flag, please
122	 * update attributes_mask below.
123	 */
124	if (IS_AUTOMOUNT(inode))
125		stat->attributes |= STATX_ATTR_AUTOMOUNT;
126
127	if (IS_DAX(inode))
128		stat->attributes |= STATX_ATTR_DAX;
129
130	stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
131				  STATX_ATTR_DAX);
132
133	idmap = mnt_idmap(path->mnt);
134	if (inode->i_op->getattr)
135		return inode->i_op->getattr(idmap, path, stat,
136					    request_mask,
137					    query_flags | AT_GETATTR_NOSEC);
138
139	generic_fillattr(idmap, request_mask, inode, stat);
140	return 0;
141}
142EXPORT_SYMBOL(vfs_getattr_nosec);
143
144/*
145 * vfs_getattr - Get the enhanced basic attributes of a file
146 * @path: The file of interest
147 * @stat: Where to return the statistics
148 * @request_mask: STATX_xxx flags indicating what the caller wants
149 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
150 *
 151 * Ask the filesystem for a file's attributes.  The caller must use
152 * request_mask and query_flags to indicate what they want.
153 *
154 * If the file is remote, the filesystem can be forced to update the attributes
155 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
156 * suppress the update by passing AT_STATX_DONT_SYNC.
157 *
158 * Bits must have been set in request_mask to indicate which attributes the
159 * caller wants retrieving.  Any such attribute not requested may be returned
160 * anyway, but the value may be approximate, and, if remote, may not have been
161 * synchronised with the server.
162 *
163 * 0 will be returned on success, and a -ve error code if unsuccessful.
164 */
165int vfs_getattr(const struct path *path, struct kstat *stat,
166		u32 request_mask, unsigned int query_flags)
167{
168	int retval;
169
170	if (WARN_ON_ONCE(query_flags & AT_GETATTR_NOSEC))
171		return -EPERM;
172
173	retval = security_inode_getattr(path);
174	if (retval)
175		return retval;
176	return vfs_getattr_nosec(path, stat, request_mask, query_flags);
177}
178EXPORT_SYMBOL(vfs_getattr);
179
180/**
181 * vfs_fstat - Get the basic attributes by file descriptor
182 * @fd: The file descriptor referring to the file of interest
183 * @stat: The result structure to fill in.
184 *
185 * This function is a wrapper around vfs_getattr().  The main difference is
186 * that it uses a file descriptor to determine the file location.
187 *
188 * 0 will be returned on success, and a -ve error code if unsuccessful.
189 */
190int vfs_fstat(int fd, struct kstat *stat)
191{
192	struct fd f;
193	int error;
194
195	f = fdget_raw(fd);
196	if (!f.file)
197		return -EBADF;
198	error = vfs_getattr(&f.file->f_path, stat, STATX_BASIC_STATS, 0);
199	fdput(f);
200	return error;
201}
202
203int getname_statx_lookup_flags(int flags)
204{
205	int lookup_flags = 0;
206
207	if (!(flags & AT_SYMLINK_NOFOLLOW))
208		lookup_flags |= LOOKUP_FOLLOW;
209	if (!(flags & AT_NO_AUTOMOUNT))
210		lookup_flags |= LOOKUP_AUTOMOUNT;
211	if (flags & AT_EMPTY_PATH)
212		lookup_flags |= LOOKUP_EMPTY;
213
214	return lookup_flags;
215}
216
217/**
218 * vfs_statx - Get basic and extra attributes by filename
219 * @dfd: A file descriptor representing the base dir for a relative filename
220 * @filename: The name of the file of interest
221 * @flags: Flags to control the query
222 * @stat: The result structure to fill in.
223 * @request_mask: STATX_xxx flags indicating what the caller wants
224 *
225 * This function is a wrapper around vfs_getattr().  The main difference is
226 * that it uses a filename and base directory to determine the file location.
227 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
228 * at the given name from being referenced.
229 *
230 * 0 will be returned on success, and a -ve error code if unsuccessful.
231 */
232static int vfs_statx(int dfd, struct filename *filename, int flags,
233	      struct kstat *stat, u32 request_mask)
234{
235	struct path path;
236	unsigned int lookup_flags = getname_statx_lookup_flags(flags);
237	int error;
238
239	if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH |
240		      AT_STATX_SYNC_TYPE))
241		return -EINVAL;
242
243retry:
244	error = filename_lookup(dfd, filename, lookup_flags, &path, NULL);
245	if (error)
246		goto out;
247
248	error = vfs_getattr(&path, stat, request_mask, flags);
249
250	if (request_mask & STATX_MNT_ID_UNIQUE) {
251		stat->mnt_id = real_mount(path.mnt)->mnt_id_unique;
252		stat->result_mask |= STATX_MNT_ID_UNIQUE;
253	} else {
254		stat->mnt_id = real_mount(path.mnt)->mnt_id;
255		stat->result_mask |= STATX_MNT_ID;
256	}
257
258	if (path.mnt->mnt_root == path.dentry)
259		stat->attributes |= STATX_ATTR_MOUNT_ROOT;
260	stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;
261
262	/* Handle STATX_DIOALIGN for block devices. */
263	if (request_mask & STATX_DIOALIGN) {
264		struct inode *inode = d_backing_inode(path.dentry);
265
266		if (S_ISBLK(inode->i_mode))
267			bdev_statx_dioalign(inode, stat);
268	}
269
270	path_put(&path);
271	if (retry_estale(error, lookup_flags)) {
272		lookup_flags |= LOOKUP_REVAL;
273		goto retry;
274	}
275out:
276	return error;
277}
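/*
 * Usage sketch (userspace, illustrative only): requesting the 64-bit unique
 * mount ID handled above.  STATX_MNT_ID_UNIQUE is exposed in linux/stat.h
 * from v6.8 onwards; older kernels simply leave the bit out of stx_mask.
 *
 *	struct statx stx;
 *
 *	if (statx(AT_FDCWD, "/etc/hostname", 0, STATX_MNT_ID_UNIQUE, &stx) == 0 &&
 *	    (stx.stx_mask & STATX_MNT_ID_UNIQUE))
 *		printf("unique mnt_id=%llu\n", (unsigned long long)stx.stx_mnt_id);
 */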
278
279int vfs_fstatat(int dfd, const char __user *filename,
280			      struct kstat *stat, int flags)
281{
282	int ret;
283	int statx_flags = flags | AT_NO_AUTOMOUNT;
284	struct filename *name;
285
286	/*
287	 * Work around glibc turning fstat() into fstatat(AT_EMPTY_PATH)
288	 *
289	 * If AT_EMPTY_PATH is set, we expect the common case to be that
290	 * empty path, and avoid doing all the extra pathname work.
291	 */
292	if (dfd >= 0 && flags == AT_EMPTY_PATH) {
293		char c;
294
295		ret = get_user(c, filename);
296		if (unlikely(ret))
297			return ret;
298
299		if (likely(!c))
300			return vfs_fstat(dfd, stat);
301	}
302
303	name = getname_flags(filename, getname_statx_lookup_flags(statx_flags), NULL);
304	ret = vfs_statx(dfd, name, statx_flags, stat, STATX_BASIC_STATS);
305	putname(name);
306
307	return ret;
308}
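/*
 * For reference (illustrative only): the fast path above matches the call
 * some glibc builds make when an application invokes fstat(fd, &st),
 * roughly
 *
 *	fstatat(fd, "", &st, AT_EMPTY_PATH);
 *
 * so an empty filename with AT_EMPTY_PATH skips the pathname copy and lookup
 * and goes straight to vfs_fstat().
 */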
309
310#ifdef __ARCH_WANT_OLD_STAT
311
312/*
313 * For backward compatibility?  Maybe this should be moved
314 * into arch/i386 instead?
315 */
316static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
317{
318	static int warncount = 5;
319	struct __old_kernel_stat tmp;
320
321	if (warncount > 0) {
322		warncount--;
323		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
324			current->comm);
325	} else if (warncount < 0) {
326		/* it's laughable, but... */
327		warncount = 0;
328	}
329
330	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
331	tmp.st_dev = old_encode_dev(stat->dev);
332	tmp.st_ino = stat->ino;
333	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
334		return -EOVERFLOW;
335	tmp.st_mode = stat->mode;
336	tmp.st_nlink = stat->nlink;
337	if (tmp.st_nlink != stat->nlink)
338		return -EOVERFLOW;
339	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
340	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
341	tmp.st_rdev = old_encode_dev(stat->rdev);
342#if BITS_PER_LONG == 32
343	if (stat->size > MAX_NON_LFS)
344		return -EOVERFLOW;
345#endif
346	tmp.st_size = stat->size;
347	tmp.st_atime = stat->atime.tv_sec;
348	tmp.st_mtime = stat->mtime.tv_sec;
349	tmp.st_ctime = stat->ctime.tv_sec;
350	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
351}
352
353SYSCALL_DEFINE2(stat, const char __user *, filename,
354		struct __old_kernel_stat __user *, statbuf)
355{
356	struct kstat stat;
357	int error;
358
359	error = vfs_stat(filename, &stat);
360	if (error)
361		return error;
362
363	return cp_old_stat(&stat, statbuf);
364}
365
366SYSCALL_DEFINE2(lstat, const char __user *, filename,
367		struct __old_kernel_stat __user *, statbuf)
368{
369	struct kstat stat;
370	int error;
371
372	error = vfs_lstat(filename, &stat);
373	if (error)
374		return error;
375
376	return cp_old_stat(&stat, statbuf);
377}
378
379SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
380{
381	struct kstat stat;
382	int error = vfs_fstat(fd, &stat);
383
384	if (!error)
385		error = cp_old_stat(&stat, statbuf);
386
387	return error;
388}
389
390#endif /* __ARCH_WANT_OLD_STAT */
391
392#ifdef __ARCH_WANT_NEW_STAT
393
394#ifndef INIT_STRUCT_STAT_PADDING
395#  define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
396#endif
397
398static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
399{
400	struct stat tmp;
401
402	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
403		return -EOVERFLOW;
404	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
405		return -EOVERFLOW;
406#if BITS_PER_LONG == 32
407	if (stat->size > MAX_NON_LFS)
408		return -EOVERFLOW;
409#endif
410
411	INIT_STRUCT_STAT_PADDING(tmp);
412	tmp.st_dev = new_encode_dev(stat->dev);
413	tmp.st_ino = stat->ino;
414	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
415		return -EOVERFLOW;
416	tmp.st_mode = stat->mode;
417	tmp.st_nlink = stat->nlink;
418	if (tmp.st_nlink != stat->nlink)
419		return -EOVERFLOW;
420	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
421	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
422	tmp.st_rdev = new_encode_dev(stat->rdev);
423	tmp.st_size = stat->size;
424	tmp.st_atime = stat->atime.tv_sec;
425	tmp.st_mtime = stat->mtime.tv_sec;
426	tmp.st_ctime = stat->ctime.tv_sec;
427#ifdef STAT_HAVE_NSEC
428	tmp.st_atime_nsec = stat->atime.tv_nsec;
429	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
430	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
431#endif
432	tmp.st_blocks = stat->blocks;
433	tmp.st_blksize = stat->blksize;
434	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
435}
436
437SYSCALL_DEFINE2(newstat, const char __user *, filename,
438		struct stat __user *, statbuf)
439{
440	struct kstat stat;
441	int error = vfs_stat(filename, &stat);
442
443	if (error)
444		return error;
445	return cp_new_stat(&stat, statbuf);
446}
447
448SYSCALL_DEFINE2(newlstat, const char __user *, filename,
449		struct stat __user *, statbuf)
450{
451	struct kstat stat;
452	int error;
453
454	error = vfs_lstat(filename, &stat);
455	if (error)
456		return error;
457
458	return cp_new_stat(&stat, statbuf);
459}
460
461#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
462SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
463		struct stat __user *, statbuf, int, flag)
464{
465	struct kstat stat;
466	int error;
467
468	error = vfs_fstatat(dfd, filename, &stat, flag);
469	if (error)
470		return error;
471	return cp_new_stat(&stat, statbuf);
472}
473#endif
474
475SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
476{
477	struct kstat stat;
478	int error = vfs_fstat(fd, &stat);
479
480	if (!error)
481		error = cp_new_stat(&stat, statbuf);
482
483	return error;
484}
485#endif
486
487static int do_readlinkat(int dfd, const char __user *pathname,
488			 char __user *buf, int bufsiz)
489{
490	struct path path;
491	int error;
492	int empty = 0;
493	unsigned int lookup_flags = LOOKUP_EMPTY;
494
495	if (bufsiz <= 0)
496		return -EINVAL;
497
498retry:
499	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
500	if (!error) {
501		struct inode *inode = d_backing_inode(path.dentry);
502
503		error = empty ? -ENOENT : -EINVAL;
504		/*
505		 * AFS mountpoints allow readlink(2) but are not symlinks
506		 */
507		if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
508			error = security_inode_readlink(path.dentry);
509			if (!error) {
510				touch_atime(&path);
511				error = vfs_readlink(path.dentry, buf, bufsiz);
512			}
513		}
514		path_put(&path);
515		if (retry_estale(error, lookup_flags)) {
516			lookup_flags |= LOOKUP_REVAL;
517			goto retry;
518		}
519	}
520	return error;
521}
522
523SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
524		char __user *, buf, int, bufsiz)
525{
526	return do_readlinkat(dfd, pathname, buf, bufsiz);
527}
528
529SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
530		int, bufsiz)
531{
532	return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
533}
534
535
536/* ---------- LFS-64 ----------- */
537#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
538
539#ifndef INIT_STRUCT_STAT64_PADDING
540#  define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
541#endif
542
543static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
544{
545	struct stat64 tmp;
546
547	INIT_STRUCT_STAT64_PADDING(tmp);
548#ifdef CONFIG_MIPS
549	/* mips has weird padding, so we don't get 64 bits there */
550	tmp.st_dev = new_encode_dev(stat->dev);
551	tmp.st_rdev = new_encode_dev(stat->rdev);
552#else
553	tmp.st_dev = huge_encode_dev(stat->dev);
554	tmp.st_rdev = huge_encode_dev(stat->rdev);
555#endif
556	tmp.st_ino = stat->ino;
557	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
558		return -EOVERFLOW;
559#ifdef STAT64_HAS_BROKEN_ST_INO
560	tmp.__st_ino = stat->ino;
561#endif
562	tmp.st_mode = stat->mode;
563	tmp.st_nlink = stat->nlink;
564	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
565	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
566	tmp.st_atime = stat->atime.tv_sec;
567	tmp.st_atime_nsec = stat->atime.tv_nsec;
568	tmp.st_mtime = stat->mtime.tv_sec;
569	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
570	tmp.st_ctime = stat->ctime.tv_sec;
571	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
572	tmp.st_size = stat->size;
573	tmp.st_blocks = stat->blocks;
574	tmp.st_blksize = stat->blksize;
575	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
576}
577
578SYSCALL_DEFINE2(stat64, const char __user *, filename,
579		struct stat64 __user *, statbuf)
580{
581	struct kstat stat;
582	int error = vfs_stat(filename, &stat);
583
584	if (!error)
585		error = cp_new_stat64(&stat, statbuf);
586
587	return error;
588}
589
590SYSCALL_DEFINE2(lstat64, const char __user *, filename,
591		struct stat64 __user *, statbuf)
592{
593	struct kstat stat;
594	int error = vfs_lstat(filename, &stat);
595
596	if (!error)
597		error = cp_new_stat64(&stat, statbuf);
598
599	return error;
600}
601
602SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
603{
604	struct kstat stat;
605	int error = vfs_fstat(fd, &stat);
606
607	if (!error)
608		error = cp_new_stat64(&stat, statbuf);
609
610	return error;
611}
612
613SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
614		struct stat64 __user *, statbuf, int, flag)
615{
616	struct kstat stat;
617	int error;
618
619	error = vfs_fstatat(dfd, filename, &stat, flag);
620	if (error)
621		return error;
622	return cp_new_stat64(&stat, statbuf);
623}
624#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
625
626static noinline_for_stack int
627cp_statx(const struct kstat *stat, struct statx __user *buffer)
628{
629	struct statx tmp;
630
631	memset(&tmp, 0, sizeof(tmp));
632
633	/* STATX_CHANGE_COOKIE is kernel-only for now */
634	tmp.stx_mask = stat->result_mask & ~STATX_CHANGE_COOKIE;
635	tmp.stx_blksize = stat->blksize;
636	/* STATX_ATTR_CHANGE_MONOTONIC is kernel-only for now */
637	tmp.stx_attributes = stat->attributes & ~STATX_ATTR_CHANGE_MONOTONIC;
638	tmp.stx_nlink = stat->nlink;
639	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
640	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
641	tmp.stx_mode = stat->mode;
642	tmp.stx_ino = stat->ino;
643	tmp.stx_size = stat->size;
644	tmp.stx_blocks = stat->blocks;
645	tmp.stx_attributes_mask = stat->attributes_mask;
646	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
647	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
648	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
649	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
650	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
651	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
652	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
653	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
654	tmp.stx_rdev_major = MAJOR(stat->rdev);
655	tmp.stx_rdev_minor = MINOR(stat->rdev);
656	tmp.stx_dev_major = MAJOR(stat->dev);
657	tmp.stx_dev_minor = MINOR(stat->dev);
658	tmp.stx_mnt_id = stat->mnt_id;
659	tmp.stx_dio_mem_align = stat->dio_mem_align;
660	tmp.stx_dio_offset_align = stat->dio_offset_align;
661
662	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
663}
664
665int do_statx(int dfd, struct filename *filename, unsigned int flags,
666	     unsigned int mask, struct statx __user *buffer)
667{
668	struct kstat stat;
669	int error;
670
671	if (mask & STATX__RESERVED)
672		return -EINVAL;
673	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
674		return -EINVAL;
675
676	/* STATX_CHANGE_COOKIE is kernel-only for now. Ignore requests
677	 * from userland.
678	 */
679	mask &= ~STATX_CHANGE_COOKIE;
680
681	error = vfs_statx(dfd, filename, flags, &stat, mask);
682	if (error)
683		return error;
684
685	return cp_statx(&stat, buffer);
686}
687
688/**
689 * sys_statx - System call to get enhanced stats
690 * @dfd: Base directory to pathwalk from *or* fd to stat.
691 * @filename: File to stat or "" with AT_EMPTY_PATH
692 * @flags: AT_* flags to control pathwalk.
693 * @mask: Parts of statx struct actually required.
694 * @buffer: Result buffer.
695 *
696 * Note that fstat() can be emulated by setting dfd to the fd of interest,
697 * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
698 */
699SYSCALL_DEFINE5(statx,
700		int, dfd, const char __user *, filename, unsigned, flags,
701		unsigned int, mask,
702		struct statx __user *, buffer)
703{
704	int ret;
705	struct filename *name;
706
707	name = getname_flags(filename, getname_statx_lookup_flags(flags), NULL);
708	ret = do_statx(dfd, name, flags, mask, buffer);
709	putname(name);
710
711	return ret;
712}
713
714#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_STAT)
715static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
716{
717	struct compat_stat tmp;
718
719	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
720		return -EOVERFLOW;
721	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
722		return -EOVERFLOW;
723
724	memset(&tmp, 0, sizeof(tmp));
725	tmp.st_dev = new_encode_dev(stat->dev);
726	tmp.st_ino = stat->ino;
727	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
728		return -EOVERFLOW;
729	tmp.st_mode = stat->mode;
730	tmp.st_nlink = stat->nlink;
731	if (tmp.st_nlink != stat->nlink)
732		return -EOVERFLOW;
733	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
734	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
735	tmp.st_rdev = new_encode_dev(stat->rdev);
736	if ((u64) stat->size > MAX_NON_LFS)
737		return -EOVERFLOW;
738	tmp.st_size = stat->size;
739	tmp.st_atime = stat->atime.tv_sec;
740	tmp.st_atime_nsec = stat->atime.tv_nsec;
741	tmp.st_mtime = stat->mtime.tv_sec;
742	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
743	tmp.st_ctime = stat->ctime.tv_sec;
744	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
745	tmp.st_blocks = stat->blocks;
746	tmp.st_blksize = stat->blksize;
747	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
748}
749
750COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
751		       struct compat_stat __user *, statbuf)
752{
753	struct kstat stat;
754	int error;
755
756	error = vfs_stat(filename, &stat);
757	if (error)
758		return error;
759	return cp_compat_stat(&stat, statbuf);
760}
761
762COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
763		       struct compat_stat __user *, statbuf)
764{
765	struct kstat stat;
766	int error;
767
768	error = vfs_lstat(filename, &stat);
769	if (error)
770		return error;
771	return cp_compat_stat(&stat, statbuf);
772}
773
774#ifndef __ARCH_WANT_STAT64
775COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
776		       const char __user *, filename,
777		       struct compat_stat __user *, statbuf, int, flag)
778{
779	struct kstat stat;
780	int error;
781
782	error = vfs_fstatat(dfd, filename, &stat, flag);
783	if (error)
784		return error;
785	return cp_compat_stat(&stat, statbuf);
786}
787#endif
788
789COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
790		       struct compat_stat __user *, statbuf)
791{
792	struct kstat stat;
793	int error = vfs_fstat(fd, &stat);
794
795	if (!error)
796		error = cp_compat_stat(&stat, statbuf);
797	return error;
798}
799#endif
800
801/* Caller is here responsible for sufficient locking (ie. inode->i_lock) */
802void __inode_add_bytes(struct inode *inode, loff_t bytes)
803{
804	inode->i_blocks += bytes >> 9;
805	bytes &= 511;
806	inode->i_bytes += bytes;
807	if (inode->i_bytes >= 512) {
808		inode->i_blocks++;
809		inode->i_bytes -= 512;
810	}
811}
812EXPORT_SYMBOL(__inode_add_bytes);
813
814void inode_add_bytes(struct inode *inode, loff_t bytes)
815{
816	spin_lock(&inode->i_lock);
817	__inode_add_bytes(inode, bytes);
818	spin_unlock(&inode->i_lock);
819}
820
821EXPORT_SYMBOL(inode_add_bytes);
822
823void __inode_sub_bytes(struct inode *inode, loff_t bytes)
824{
825	inode->i_blocks -= bytes >> 9;
826	bytes &= 511;
827	if (inode->i_bytes < bytes) {
828		inode->i_blocks--;
829		inode->i_bytes += 512;
830	}
831	inode->i_bytes -= bytes;
832}
833
834EXPORT_SYMBOL(__inode_sub_bytes);
835
836void inode_sub_bytes(struct inode *inode, loff_t bytes)
837{
838	spin_lock(&inode->i_lock);
839	__inode_sub_bytes(inode, bytes);
840	spin_unlock(&inode->i_lock);
841}
842
843EXPORT_SYMBOL(inode_sub_bytes);
844
845loff_t inode_get_bytes(struct inode *inode)
846{
847	loff_t ret;
848
849	spin_lock(&inode->i_lock);
850	ret = __inode_get_bytes(inode);
851	spin_unlock(&inode->i_lock);
852	return ret;
853}
854
855EXPORT_SYMBOL(inode_get_bytes);
856
857void inode_set_bytes(struct inode *inode, loff_t bytes)
858{
859	/* Caller is here responsible for sufficient locking
860	 * (ie. inode->i_lock) */
861	inode->i_blocks = bytes >> 9;
862	inode->i_bytes = bytes & 511;
863}
864
865EXPORT_SYMBOL(inode_set_bytes);