v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/fs/stat.c
  4 *
  5 *  Copyright (C) 1991, 1992  Linus Torvalds
  6 */
  7
  8#include <linux/export.h>
  9#include <linux/mm.h>
 10#include <linux/errno.h>
 11#include <linux/file.h>
 12#include <linux/highuid.h>
 13#include <linux/fs.h>
 14#include <linux/namei.h>
 15#include <linux/security.h>
 16#include <linux/cred.h>
 17#include <linux/syscalls.h>
 18#include <linux/pagemap.h>
 19#include <linux/compat.h>
 20
 21#include <linux/uaccess.h>
 22#include <asm/unistd.h>
 23
 24/**
 25 * generic_fillattr - Fill in the basic attributes from the inode struct
 26 * @inode: Inode to use as the source
 27 * @stat: Where to fill in the attributes
 28 *
 29 * Fill in the basic attributes in the kstat structure from data that's to be
 30 * found on the VFS inode structure.  This is the default if no getattr inode
 31 * operation is supplied.
 32 */
 33void generic_fillattr(struct inode *inode, struct kstat *stat)
 34{
 35	stat->dev = inode->i_sb->s_dev;
 36	stat->ino = inode->i_ino;
 37	stat->mode = inode->i_mode;
 38	stat->nlink = inode->i_nlink;
 39	stat->uid = inode->i_uid;
 40	stat->gid = inode->i_gid;
 41	stat->rdev = inode->i_rdev;
 42	stat->size = i_size_read(inode);
 43	stat->atime = inode->i_atime;
 44	stat->mtime = inode->i_mtime;
 45	stat->ctime = inode->i_ctime;
 46	stat->blksize = i_blocksize(inode);
 47	stat->blocks = inode->i_blocks;
 48
 49	if (IS_NOATIME(inode))
 50		stat->result_mask &= ~STATX_ATIME;
 51	if (IS_AUTOMOUNT(inode))
 52		stat->attributes |= STATX_ATTR_AUTOMOUNT;
 53}
 54EXPORT_SYMBOL(generic_fillattr);
 55
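/*
 * [Editor's illustration -- not part of fs/stat.c]  A minimal sketch of a
 * hypothetical filesystem ->getattr() that uses generic_fillattr() as its
 * default and then overrides one field.  The foofs_getattr() name is made
 * up; the signature matches the i_op->getattr() call in vfs_getattr_nosec()
 * below (v4.17).
 */
static int foofs_getattr(const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);

	generic_fillattr(inode, stat);
	/* Example override: advertise a fixed preferred I/O size. */
	stat->blksize = 4096;
	return 0;
}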
 56/**
 57 * vfs_getattr_nosec - getattr without security checks
 58 * @path: file to get attributes from
 59 * @stat: structure to return attributes in
 60 * @request_mask: STATX_xxx flags indicating what the caller wants
 61 * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
 62 *
 63 * Get attributes without calling security_inode_getattr.
 64 *
 65 * Currently the only caller other than vfs_getattr is internal to the
 66 * filehandle lookup code, which uses only the inode number and returns no
 67 * attributes to any user.  Any other code probably wants vfs_getattr.
 68 */
 69int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
 70		      u32 request_mask, unsigned int query_flags)
 71{
 72	struct inode *inode = d_backing_inode(path->dentry);
 73
 74	memset(stat, 0, sizeof(*stat));
 75	stat->result_mask |= STATX_BASIC_STATS;
 76	request_mask &= STATX_ALL;
 77	query_flags &= KSTAT_QUERY_FLAGS;
 78	if (inode->i_op->getattr)
 79		return inode->i_op->getattr(path, stat, request_mask,
 80					    query_flags);
 81
 82	generic_fillattr(inode, stat);
 83	return 0;
 84}
 85EXPORT_SYMBOL(vfs_getattr_nosec);
 86
 87/**
 88 * vfs_getattr - Get the enhanced basic attributes of a file
 89 * @path: The file of interest
 90 * @stat: Where to return the statistics
 91 * @request_mask: STATX_xxx flags indicating what the caller wants
 92 * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
 93 *
 94 * Ask the filesystem for a file's attributes.  The caller must indicate in
 95 * request_mask and query_flags what it wants.
 96 *
 97 * If the file is remote, the filesystem can be forced to update the attributes
 98 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
 99 * suppress the update by passing AT_STATX_DONT_SYNC.
100 *
101 * Bits must have been set in request_mask to indicate which attributes the
102 * caller wants retrieved.  Attributes that were not requested may be returned
103 * anyway, but their values may be approximate and, if remote, may not have been
104 * synchronised with the server.
105 *
106 * 0 will be returned on success, and a -ve error code if unsuccessful.
107 */
108int vfs_getattr(const struct path *path, struct kstat *stat,
109		u32 request_mask, unsigned int query_flags)
110{
111	int retval;
112
113	retval = security_inode_getattr(path);
114	if (retval)
115		return retval;
116	return vfs_getattr_nosec(path, stat, request_mask, query_flags);
117}
118EXPORT_SYMBOL(vfs_getattr);
119
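/*
 * [Editor's illustration -- not part of fs/stat.c]  A hedged sketch of an
 * in-kernel caller asking only for the size of an already-resolved path,
 * along the lines described in the comment above.  example_path_size() is
 * a made-up name; it returns the size on success or a negative error code.
 */
static loff_t example_path_size(const struct path *path)
{
	struct kstat stat;
	int error;

	error = vfs_getattr(path, &stat, STATX_SIZE, AT_STATX_SYNC_AS_STAT);
	if (error)
		return error;
	return stat.size;
}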
120/**
121 * vfs_statx_fd - Get the enhanced basic attributes by file descriptor
122 * @fd: The file descriptor referring to the file of interest
123 * @stat: The result structure to fill in.
124 * @request_mask: STATX_xxx flags indicating what the caller wants
125 * @query_flags: Query mode (KSTAT_QUERY_FLAGS)
126 *
127 * This function is a wrapper around vfs_getattr().  The main difference is
128 * that it uses a file descriptor to determine the file location.
129 *
130 * 0 will be returned on success, and a -ve error code if unsuccessful.
131 */
132int vfs_statx_fd(unsigned int fd, struct kstat *stat,
133		 u32 request_mask, unsigned int query_flags)
134{
135	struct fd f;
136	int error = -EBADF;
137
138	if (query_flags & ~KSTAT_QUERY_FLAGS)
139		return -EINVAL;
140
141	f = fdget_raw(fd);
142	if (f.file) {
143		error = vfs_getattr(&f.file->f_path, stat,
144				    request_mask, query_flags);
145		fdput(f);
146	}
147	return error;
148}
149EXPORT_SYMBOL(vfs_statx_fd);
150
151/**
152 * vfs_statx - Get basic and extra attributes by filename
153 * @dfd: A file descriptor representing the base dir for a relative filename
154 * @filename: The name of the file of interest
155 * @flags: Flags to control the query
156 * @stat: The result structure to fill in.
157 * @request_mask: STATX_xxx flags indicating what the caller wants
158 *
159 * This function is a wrapper around vfs_getattr().  The main difference is
160 * that it uses a filename and base directory to determine the file location.
161 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
162 * at the given name from being referenced.
163 *
164 * 0 will be returned on success, and a -ve error code if unsuccessful.
165 */
166int vfs_statx(int dfd, const char __user *filename, int flags,
167	      struct kstat *stat, u32 request_mask)
168{
169	struct path path;
170	int error = -EINVAL;
171	unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_AUTOMOUNT;
172
173	if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT |
174		       AT_EMPTY_PATH | KSTAT_QUERY_FLAGS)) != 0)
175		return -EINVAL;
176
177	if (flags & AT_SYMLINK_NOFOLLOW)
178		lookup_flags &= ~LOOKUP_FOLLOW;
179	if (flags & AT_NO_AUTOMOUNT)
180		lookup_flags &= ~LOOKUP_AUTOMOUNT;
181	if (flags & AT_EMPTY_PATH)
182		lookup_flags |= LOOKUP_EMPTY;
183
184retry:
185	error = user_path_at(dfd, filename, lookup_flags, &path);
186	if (error)
187		goto out;
188
189	error = vfs_getattr(&path, stat, request_mask, flags);
190	path_put(&path);
191	if (retry_estale(error, lookup_flags)) {
192		lookup_flags |= LOOKUP_REVAL;
193		goto retry;
194	}
195out:
196	return error;
197}
198EXPORT_SYMBOL(vfs_statx);
199
200
201#ifdef __ARCH_WANT_OLD_STAT
202
203/*
204 * For backward compatibility?  Maybe this should be moved
205 * into arch/i386 instead?
206 */
207static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
208{
209	static int warncount = 5;
210	struct __old_kernel_stat tmp;
211
212	if (warncount > 0) {
213		warncount--;
214		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
215			current->comm);
216	} else if (warncount < 0) {
217		/* it's laughable, but... */
218		warncount = 0;
219	}
220
221	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
222	tmp.st_dev = old_encode_dev(stat->dev);
223	tmp.st_ino = stat->ino;
224	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
225		return -EOVERFLOW;
226	tmp.st_mode = stat->mode;
227	tmp.st_nlink = stat->nlink;
228	if (tmp.st_nlink != stat->nlink)
229		return -EOVERFLOW;
230	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
231	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
232	tmp.st_rdev = old_encode_dev(stat->rdev);
233#if BITS_PER_LONG == 32
234	if (stat->size > MAX_NON_LFS)
235		return -EOVERFLOW;
236#endif
237	tmp.st_size = stat->size;
238	tmp.st_atime = stat->atime.tv_sec;
239	tmp.st_mtime = stat->mtime.tv_sec;
240	tmp.st_ctime = stat->ctime.tv_sec;
241	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
242}
243
244SYSCALL_DEFINE2(stat, const char __user *, filename,
245		struct __old_kernel_stat __user *, statbuf)
246{
247	struct kstat stat;
248	int error;
249
250	error = vfs_stat(filename, &stat);
251	if (error)
252		return error;
253
254	return cp_old_stat(&stat, statbuf);
255}
256
257SYSCALL_DEFINE2(lstat, const char __user *, filename,
258		struct __old_kernel_stat __user *, statbuf)
259{
260	struct kstat stat;
261	int error;
262
263	error = vfs_lstat(filename, &stat);
264	if (error)
265		return error;
266
267	return cp_old_stat(&stat, statbuf);
268}
269
270SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
271{
272	struct kstat stat;
273	int error = vfs_fstat(fd, &stat);
274
275	if (!error)
276		error = cp_old_stat(&stat, statbuf);
277
278	return error;
279}
280
281#endif /* __ARCH_WANT_OLD_STAT */
282
283#if BITS_PER_LONG == 32
284#  define choose_32_64(a,b) a
285#else
286#  define choose_32_64(a,b) b
287#endif
288
289#define valid_dev(x)  choose_32_64(old_valid_dev(x),true)
290#define encode_dev(x) choose_32_64(old_encode_dev,new_encode_dev)(x)
291
292#ifndef INIT_STRUCT_STAT_PADDING
293#  define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
294#endif
295
296static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
297{
298	struct stat tmp;
299
300	if (!valid_dev(stat->dev) || !valid_dev(stat->rdev))
301		return -EOVERFLOW;
302#if BITS_PER_LONG == 32
303	if (stat->size > MAX_NON_LFS)
304		return -EOVERFLOW;
305#endif
306
307	INIT_STRUCT_STAT_PADDING(tmp);
308	tmp.st_dev = encode_dev(stat->dev);
309	tmp.st_ino = stat->ino;
310	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
311		return -EOVERFLOW;
312	tmp.st_mode = stat->mode;
313	tmp.st_nlink = stat->nlink;
314	if (tmp.st_nlink != stat->nlink)
315		return -EOVERFLOW;
316	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
317	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
318	tmp.st_rdev = encode_dev(stat->rdev);
319	tmp.st_size = stat->size;
320	tmp.st_atime = stat->atime.tv_sec;
321	tmp.st_mtime = stat->mtime.tv_sec;
322	tmp.st_ctime = stat->ctime.tv_sec;
323#ifdef STAT_HAVE_NSEC
324	tmp.st_atime_nsec = stat->atime.tv_nsec;
325	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
326	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
327#endif
328	tmp.st_blocks = stat->blocks;
329	tmp.st_blksize = stat->blksize;
330	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
331}
332
333SYSCALL_DEFINE2(newstat, const char __user *, filename,
334		struct stat __user *, statbuf)
335{
336	struct kstat stat;
337	int error = vfs_stat(filename, &stat);
338
339	if (error)
340		return error;
341	return cp_new_stat(&stat, statbuf);
342}
343
344SYSCALL_DEFINE2(newlstat, const char __user *, filename,
345		struct stat __user *, statbuf)
346{
347	struct kstat stat;
348	int error;
349
350	error = vfs_lstat(filename, &stat);
351	if (error)
352		return error;
353
354	return cp_new_stat(&stat, statbuf);
355}
356
357#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
358SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
359		struct stat __user *, statbuf, int, flag)
360{
361	struct kstat stat;
362	int error;
363
364	error = vfs_fstatat(dfd, filename, &stat, flag);
365	if (error)
366		return error;
367	return cp_new_stat(&stat, statbuf);
368}
369#endif
370
371SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
372{
373	struct kstat stat;
374	int error = vfs_fstat(fd, &stat);
375
376	if (!error)
377		error = cp_new_stat(&stat, statbuf);
378
379	return error;
380}
381
382static int do_readlinkat(int dfd, const char __user *pathname,
383			 char __user *buf, int bufsiz)
384{
385	struct path path;
386	int error;
387	int empty = 0;
388	unsigned int lookup_flags = LOOKUP_EMPTY;
389
390	if (bufsiz <= 0)
391		return -EINVAL;
392
393retry:
394	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
395	if (!error) {
396		struct inode *inode = d_backing_inode(path.dentry);
397
398		error = empty ? -ENOENT : -EINVAL;
399		/*
400		 * AFS mountpoints allow readlink(2) but are not symlinks
401		 */
402		if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
403			error = security_inode_readlink(path.dentry);
404			if (!error) {
405				touch_atime(&path);
406				error = vfs_readlink(path.dentry, buf, bufsiz);
407			}
408		}
409		path_put(&path);
410		if (retry_estale(error, lookup_flags)) {
411			lookup_flags |= LOOKUP_REVAL;
412			goto retry;
413		}
414	}
415	return error;
416}
417
418SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
419		char __user *, buf, int, bufsiz)
420{
421	return do_readlinkat(dfd, pathname, buf, bufsiz);
422}
423
424SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
425		int, bufsiz)
426{
427	return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
428}
429
430
431/* ---------- LFS-64 ----------- */
432#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
433
434#ifndef INIT_STRUCT_STAT64_PADDING
435#  define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
436#endif
437
438static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
439{
440	struct stat64 tmp;
441
442	INIT_STRUCT_STAT64_PADDING(tmp);
443#ifdef CONFIG_MIPS
444	/* mips has weird padding, so we don't get 64 bits there */
445	tmp.st_dev = new_encode_dev(stat->dev);
446	tmp.st_rdev = new_encode_dev(stat->rdev);
447#else
448	tmp.st_dev = huge_encode_dev(stat->dev);
449	tmp.st_rdev = huge_encode_dev(stat->rdev);
450#endif
451	tmp.st_ino = stat->ino;
452	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
453		return -EOVERFLOW;
454#ifdef STAT64_HAS_BROKEN_ST_INO
455	tmp.__st_ino = stat->ino;
456#endif
457	tmp.st_mode = stat->mode;
458	tmp.st_nlink = stat->nlink;
459	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
460	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
461	tmp.st_atime = stat->atime.tv_sec;
462	tmp.st_atime_nsec = stat->atime.tv_nsec;
463	tmp.st_mtime = stat->mtime.tv_sec;
464	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
465	tmp.st_ctime = stat->ctime.tv_sec;
466	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
467	tmp.st_size = stat->size;
468	tmp.st_blocks = stat->blocks;
469	tmp.st_blksize = stat->blksize;
470	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
471}
472
473SYSCALL_DEFINE2(stat64, const char __user *, filename,
474		struct stat64 __user *, statbuf)
475{
476	struct kstat stat;
477	int error = vfs_stat(filename, &stat);
478
479	if (!error)
480		error = cp_new_stat64(&stat, statbuf);
481
482	return error;
483}
484
485SYSCALL_DEFINE2(lstat64, const char __user *, filename,
486		struct stat64 __user *, statbuf)
487{
488	struct kstat stat;
489	int error = vfs_lstat(filename, &stat);
490
491	if (!error)
492		error = cp_new_stat64(&stat, statbuf);
493
494	return error;
495}
496
497SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
498{
499	struct kstat stat;
500	int error = vfs_fstat(fd, &stat);
501
502	if (!error)
503		error = cp_new_stat64(&stat, statbuf);
504
505	return error;
506}
507
508SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
509		struct stat64 __user *, statbuf, int, flag)
510{
511	struct kstat stat;
512	int error;
513
514	error = vfs_fstatat(dfd, filename, &stat, flag);
515	if (error)
516		return error;
517	return cp_new_stat64(&stat, statbuf);
518}
519#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
520
521static noinline_for_stack int
522cp_statx(const struct kstat *stat, struct statx __user *buffer)
523{
524	struct statx tmp;
525
526	memset(&tmp, 0, sizeof(tmp));
527
528	tmp.stx_mask = stat->result_mask;
529	tmp.stx_blksize = stat->blksize;
530	tmp.stx_attributes = stat->attributes;
531	tmp.stx_nlink = stat->nlink;
532	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
533	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
534	tmp.stx_mode = stat->mode;
535	tmp.stx_ino = stat->ino;
536	tmp.stx_size = stat->size;
537	tmp.stx_blocks = stat->blocks;
538	tmp.stx_attributes_mask = stat->attributes_mask;
539	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
540	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
541	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
542	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
543	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
544	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
545	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
546	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
547	tmp.stx_rdev_major = MAJOR(stat->rdev);
548	tmp.stx_rdev_minor = MINOR(stat->rdev);
549	tmp.stx_dev_major = MAJOR(stat->dev);
550	tmp.stx_dev_minor = MINOR(stat->dev);
551
552	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
553}
554
555/**
556 * sys_statx - System call to get enhanced stats
557 * @dfd: Base directory to pathwalk from *or* fd to stat.
558 * @filename: File to stat or "" with AT_EMPTY_PATH
559 * @flags: AT_* flags to control pathwalk.
560 * @mask: Parts of statx struct actually required.
561 * @buffer: Result buffer.
562 *
563 * Note that fstat() can be emulated by setting dfd to the fd of interest,
564 * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
565 */
566SYSCALL_DEFINE5(statx,
567		int, dfd, const char __user *, filename, unsigned, flags,
568		unsigned int, mask,
569		struct statx __user *, buffer)
570{
571	struct kstat stat;
572	int error;
573
574	if (mask & STATX__RESERVED)
575		return -EINVAL;
576	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
577		return -EINVAL;
578
579	error = vfs_statx(dfd, filename, flags, &stat, mask);
580	if (error)
581		return error;
582
583	return cp_statx(&stat, buffer);
584}
585
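/*
 * [Editor's illustration -- userspace, not part of fs/stat.c]  The fstat()
 * emulation described in the kernel-doc above: pass the fd of interest as
 * dfd, "" as the filename and AT_EMPTY_PATH in the flags.  Assumes a libc
 * whose headers define SYS_statx; newer libcs also ship a statx(2) wrapper.
 */
#define _GNU_SOURCE
#include <fcntl.h>		/* AT_EMPTY_PATH */
#include <unistd.h>		/* syscall() */
#include <sys/syscall.h>	/* SYS_statx */
#include <linux/stat.h>		/* struct statx, STATX_BASIC_STATS */

static int fstat_via_statx(int fd, struct statx *stx)
{
	return syscall(SYS_statx, fd, "", AT_EMPTY_PATH,
		       STATX_BASIC_STATS, stx);
}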
586#ifdef CONFIG_COMPAT
587static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
588{
589	struct compat_stat tmp;
590
591	if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
592		return -EOVERFLOW;
593
594	memset(&tmp, 0, sizeof(tmp));
595	tmp.st_dev = old_encode_dev(stat->dev);
596	tmp.st_ino = stat->ino;
597	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
598		return -EOVERFLOW;
599	tmp.st_mode = stat->mode;
600	tmp.st_nlink = stat->nlink;
601	if (tmp.st_nlink != stat->nlink)
602		return -EOVERFLOW;
603	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
604	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
605	tmp.st_rdev = old_encode_dev(stat->rdev);
606	if ((u64) stat->size > MAX_NON_LFS)
607		return -EOVERFLOW;
608	tmp.st_size = stat->size;
609	tmp.st_atime = stat->atime.tv_sec;
610	tmp.st_atime_nsec = stat->atime.tv_nsec;
611	tmp.st_mtime = stat->mtime.tv_sec;
612	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
613	tmp.st_ctime = stat->ctime.tv_sec;
614	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
615	tmp.st_blocks = stat->blocks;
616	tmp.st_blksize = stat->blksize;
617	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
618}
619
620COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
621		       struct compat_stat __user *, statbuf)
622{
623	struct kstat stat;
624	int error;
625
626	error = vfs_stat(filename, &stat);
627	if (error)
628		return error;
629	return cp_compat_stat(&stat, statbuf);
630}
631
632COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
633		       struct compat_stat __user *, statbuf)
634{
635	struct kstat stat;
636	int error;
637
638	error = vfs_lstat(filename, &stat);
639	if (error)
640		return error;
641	return cp_compat_stat(&stat, statbuf);
642}
643
644#ifndef __ARCH_WANT_STAT64
645COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
646		       const char __user *, filename,
647		       struct compat_stat __user *, statbuf, int, flag)
648{
649	struct kstat stat;
650	int error;
651
652	error = vfs_fstatat(dfd, filename, &stat, flag);
653	if (error)
654		return error;
655	return cp_compat_stat(&stat, statbuf);
656}
657#endif
658
659COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
660		       struct compat_stat __user *, statbuf)
661{
662	struct kstat stat;
663	int error = vfs_fstat(fd, &stat);
664
665	if (!error)
666		error = cp_compat_stat(&stat, statbuf);
667	return error;
668}
669#endif
670
671/* The caller is responsible for sufficient locking here (i.e. inode->i_lock) */
672void __inode_add_bytes(struct inode *inode, loff_t bytes)
673{
674	inode->i_blocks += bytes >> 9;
675	bytes &= 511;
676	inode->i_bytes += bytes;
677	if (inode->i_bytes >= 512) {
678		inode->i_blocks++;
679		inode->i_bytes -= 512;
680	}
681}
682EXPORT_SYMBOL(__inode_add_bytes);
683
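/*
 * [Editor's note]  Worked example of the carry logic above: i_blocks counts
 * 512-byte units and i_bytes holds the 0..511 remainder.  Starting from
 * i_blocks = 2, i_bytes = 400 (1424 bytes) and adding 1300 bytes:
 * 1300 >> 9 = 2 whole blocks and 1300 & 511 = 276 left over; i_bytes becomes
 * 400 + 276 = 676 >= 512, so one more block is carried, giving i_blocks = 5,
 * i_bytes = 164 -- i.e. 5 * 512 + 164 = 2724 = 1424 + 1300.
 */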
684void inode_add_bytes(struct inode *inode, loff_t bytes)
685{
686	spin_lock(&inode->i_lock);
687	__inode_add_bytes(inode, bytes);
688	spin_unlock(&inode->i_lock);
689}
690
691EXPORT_SYMBOL(inode_add_bytes);
692
693void __inode_sub_bytes(struct inode *inode, loff_t bytes)
694{
695	inode->i_blocks -= bytes >> 9;
696	bytes &= 511;
697	if (inode->i_bytes < bytes) {
698		inode->i_blocks--;
699		inode->i_bytes += 512;
700	}
701	inode->i_bytes -= bytes;
702}
703
704EXPORT_SYMBOL(__inode_sub_bytes);
705
706void inode_sub_bytes(struct inode *inode, loff_t bytes)
707{
708	spin_lock(&inode->i_lock);
709	__inode_sub_bytes(inode, bytes);
710	spin_unlock(&inode->i_lock);
711}
712
713EXPORT_SYMBOL(inode_sub_bytes);
714
715loff_t inode_get_bytes(struct inode *inode)
716{
717	loff_t ret;
718
719	spin_lock(&inode->i_lock);
720	ret = __inode_get_bytes(inode);
721	spin_unlock(&inode->i_lock);
722	return ret;
723}
724
725EXPORT_SYMBOL(inode_get_bytes);
726
727void inode_set_bytes(struct inode *inode, loff_t bytes)
728{
729	/* The caller is responsible for sufficient locking
730	 * here (i.e. inode->i_lock) */
731	inode->i_blocks = bytes >> 9;
732	inode->i_bytes = bytes & 511;
733}
734
735EXPORT_SYMBOL(inode_set_bytes);
v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/fs/stat.c
  4 *
  5 *  Copyright (C) 1991, 1992  Linus Torvalds
  6 */
  7
  8#include <linux/blkdev.h>
  9#include <linux/export.h>
 10#include <linux/mm.h>
 11#include <linux/errno.h>
 12#include <linux/file.h>
 13#include <linux/highuid.h>
 14#include <linux/fs.h>
 15#include <linux/namei.h>
 16#include <linux/security.h>
 17#include <linux/cred.h>
 18#include <linux/syscalls.h>
 19#include <linux/pagemap.h>
 20#include <linux/compat.h>
 21
 22#include <linux/uaccess.h>
 23#include <asm/unistd.h>
 24
 25#include "internal.h"
 26#include "mount.h"
 27
 28/**
 29 * generic_fillattr - Fill in the basic attributes from the inode struct
 30 * @mnt_userns:	user namespace of the mount the inode was found from
 31 * @inode:	Inode to use as the source
 32 * @stat:	Where to fill in the attributes
 33 *
 34 * Fill in the basic attributes in the kstat structure from data that's to be
 35 * found on the VFS inode structure.  This is the default if no getattr inode
 36 * operation is supplied.
 37 *
 38 * If the inode has been found through an idmapped mount the user namespace of
 39 * the vfsmount must be passed through @mnt_userns. This function will then
 40 * take care to map the inode according to @mnt_userns before filling in the
 41 * uid and gid fields.  On non-idmapped mounts, or if permission checking is to
 42 * be performed on the raw inode, simply pass init_user_ns.
 43 */
 44void generic_fillattr(struct user_namespace *mnt_userns, struct inode *inode,
 45		      struct kstat *stat)
 46{
 47	vfsuid_t vfsuid = i_uid_into_vfsuid(mnt_userns, inode);
 48	vfsgid_t vfsgid = i_gid_into_vfsgid(mnt_userns, inode);
 49
 50	stat->dev = inode->i_sb->s_dev;
 51	stat->ino = inode->i_ino;
 52	stat->mode = inode->i_mode;
 53	stat->nlink = inode->i_nlink;
 54	stat->uid = vfsuid_into_kuid(vfsuid);
 55	stat->gid = vfsgid_into_kgid(vfsgid);
 56	stat->rdev = inode->i_rdev;
 57	stat->size = i_size_read(inode);
 58	stat->atime = inode->i_atime;
 59	stat->mtime = inode->i_mtime;
 60	stat->ctime = inode->i_ctime;
 61	stat->blksize = i_blocksize(inode);
 62	stat->blocks = inode->i_blocks;
 63}
 64EXPORT_SYMBOL(generic_fillattr);
 65
 66/**
 67 * generic_fill_statx_attr - Fill in the statx attributes from the inode flags
 68 * @inode:	Inode to use as the source
 69 * @stat:	Where to fill in the attribute flags
 70 *
 71 * Fill in the STATX_ATTR_* flags in the kstat structure for properties of the
 72 * inode that are published on i_flags and enforced by the VFS.
 73 */
 74void generic_fill_statx_attr(struct inode *inode, struct kstat *stat)
 75{
 76	if (inode->i_flags & S_IMMUTABLE)
 77		stat->attributes |= STATX_ATTR_IMMUTABLE;
 78	if (inode->i_flags & S_APPEND)
 79		stat->attributes |= STATX_ATTR_APPEND;
 80	stat->attributes_mask |= KSTAT_ATTR_VFS_FLAGS;
 81}
 82EXPORT_SYMBOL(generic_fill_statx_attr);
 83
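/*
 * [Editor's illustration -- not part of fs/stat.c]  A minimal sketch of a
 * hypothetical ->getattr() on the v6.2 interface: the mount's user namespace
 * is forwarded to generic_fillattr() so idmapped mounts are honoured, and
 * generic_fill_statx_attr() publishes the VFS-level STATX_ATTR_* flags.
 * The foofs_getattr() name is made up; the signature matches the
 * i_op->getattr() call in vfs_getattr_nosec() below.
 */
static int foofs_getattr(struct user_namespace *mnt_userns,
			 const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);

	generic_fillattr(mnt_userns, inode, stat);
	generic_fill_statx_attr(inode, stat);
	return 0;
}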
 84/**
 85 * vfs_getattr_nosec - getattr without security checks
 86 * @path: file to get attributes from
 87 * @stat: structure to return attributes in
 88 * @request_mask: STATX_xxx flags indicating what the caller wants
 89 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
 90 *
 91 * Get attributes without calling security_inode_getattr.
 92 *
 93 * Currently the only caller other than vfs_getattr is internal to the
 94 * filehandle lookup code, which uses only the inode number and returns no
 95 * attributes to any user.  Any other code probably wants vfs_getattr.
 96 */
 97int vfs_getattr_nosec(const struct path *path, struct kstat *stat,
 98		      u32 request_mask, unsigned int query_flags)
 99{
100	struct user_namespace *mnt_userns;
101	struct inode *inode = d_backing_inode(path->dentry);
102
103	memset(stat, 0, sizeof(*stat));
104	stat->result_mask |= STATX_BASIC_STATS;
105	query_flags &= AT_STATX_SYNC_TYPE;
106
107	/* allow the fs to override these if it really wants to */
108	/* SB_NOATIME means filesystem supplies dummy atime value */
109	if (inode->i_sb->s_flags & SB_NOATIME)
110		stat->result_mask &= ~STATX_ATIME;
111
112	/*
113	 * Note: If you add another clause to set an attribute flag, please
114	 * update attributes_mask below.
115	 */
116	if (IS_AUTOMOUNT(inode))
117		stat->attributes |= STATX_ATTR_AUTOMOUNT;
118
119	if (IS_DAX(inode))
120		stat->attributes |= STATX_ATTR_DAX;
121
122	stat->attributes_mask |= (STATX_ATTR_AUTOMOUNT |
123				  STATX_ATTR_DAX);
124
125	mnt_userns = mnt_user_ns(path->mnt);
126	if (inode->i_op->getattr)
127		return inode->i_op->getattr(mnt_userns, path, stat,
128					    request_mask, query_flags);
129
130	generic_fillattr(mnt_userns, inode, stat);
131	return 0;
132}
133EXPORT_SYMBOL(vfs_getattr_nosec);
134
135/**
136 * vfs_getattr - Get the enhanced basic attributes of a file
137 * @path: The file of interest
138 * @stat: Where to return the statistics
139 * @request_mask: STATX_xxx flags indicating what the caller wants
140 * @query_flags: Query mode (AT_STATX_SYNC_TYPE)
141 *
142 * Ask the filesystem for a file's attributes.  The caller must indicate in
144 * request_mask and query_flags what it wants.
144 *
145 * If the file is remote, the filesystem can be forced to update the attributes
146 * from the backing store by passing AT_STATX_FORCE_SYNC in query_flags or can
147 * suppress the update by passing AT_STATX_DONT_SYNC.
148 *
149 * Bits must have been set in request_mask to indicate which attributes the
150 * caller wants retrieved.  Attributes that were not requested may be returned
151 * anyway, but their values may be approximate and, if remote, may not have been
152 * synchronised with the server.
153 *
154 * 0 will be returned on success, and a -ve error code if unsuccessful.
155 */
156int vfs_getattr(const struct path *path, struct kstat *stat,
157		u32 request_mask, unsigned int query_flags)
158{
159	int retval;
160
161	retval = security_inode_getattr(path);
162	if (retval)
163		return retval;
164	return vfs_getattr_nosec(path, stat, request_mask, query_flags);
165}
166EXPORT_SYMBOL(vfs_getattr);
167
168/**
169 * vfs_fstat - Get the basic attributes by file descriptor
170 * @fd: The file descriptor referring to the file of interest
171 * @stat: The result structure to fill in.
172 *
173 * This function is a wrapper around vfs_getattr().  The main difference is
174 * that it uses a file descriptor to determine the file location.
175 *
176 * 0 will be returned on success, and a -ve error code if unsuccessful.
177 */
178int vfs_fstat(int fd, struct kstat *stat)
179{
180	struct fd f;
181	int error;
182
183	f = fdget_raw(fd);
184	if (!f.file)
185		return -EBADF;
186	error = vfs_getattr(&f.file->f_path, stat, STATX_BASIC_STATS, 0);
187	fdput(f);
188	return error;
189}
190
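/*
 * [Editor's illustration -- not part of fs/stat.c]  vfs_fstat() is the
 * in-kernel counterpart of fstat(2); a hedged sketch of a caller that only
 * needs the file mode of an already-open fd.  example_fd_mode() is a
 * made-up name.
 */
static int example_fd_mode(int fd, umode_t *mode)
{
	struct kstat stat;
	int error = vfs_fstat(fd, &stat);

	if (!error)
		*mode = stat.mode;
	return error;
}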
191int getname_statx_lookup_flags(int flags)
192{
193	int lookup_flags = 0;
194
195	if (!(flags & AT_SYMLINK_NOFOLLOW))
196		lookup_flags |= LOOKUP_FOLLOW;
197	if (!(flags & AT_NO_AUTOMOUNT))
198		lookup_flags |= LOOKUP_AUTOMOUNT;
199	if (flags & AT_EMPTY_PATH)
200		lookup_flags |= LOOKUP_EMPTY;
201
202	return lookup_flags;
203}
204
205/**
206 * vfs_statx - Get basic and extra attributes by filename
207 * @dfd: A file descriptor representing the base dir for a relative filename
208 * @filename: The name of the file of interest
209 * @flags: Flags to control the query
210 * @stat: The result structure to fill in.
211 * @request_mask: STATX_xxx flags indicating what the caller wants
212 *
213 * This function is a wrapper around vfs_getattr().  The main difference is
214 * that it uses a filename and base directory to determine the file location.
215 * Additionally, the use of AT_SYMLINK_NOFOLLOW in flags will prevent a symlink
216 * at the given name from being referenced.
217 *
218 * 0 will be returned on success, and a -ve error code if unsuccessful.
219 */
220static int vfs_statx(int dfd, struct filename *filename, int flags,
221	      struct kstat *stat, u32 request_mask)
222{
223	struct path path;
224	unsigned int lookup_flags = getname_statx_lookup_flags(flags);
225	int error;
226
227	if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT | AT_EMPTY_PATH |
228		      AT_STATX_SYNC_TYPE))
229		return -EINVAL;
230
231retry:
232	error = filename_lookup(dfd, filename, lookup_flags, &path, NULL);
233	if (error)
234		goto out;
235
236	error = vfs_getattr(&path, stat, request_mask, flags);
237
238	stat->mnt_id = real_mount(path.mnt)->mnt_id;
239	stat->result_mask |= STATX_MNT_ID;
240
241	if (path.mnt->mnt_root == path.dentry)
242		stat->attributes |= STATX_ATTR_MOUNT_ROOT;
243	stat->attributes_mask |= STATX_ATTR_MOUNT_ROOT;
244
245	/* Handle STATX_DIOALIGN for block devices. */
246	if (request_mask & STATX_DIOALIGN) {
247		struct inode *inode = d_backing_inode(path.dentry);
248
249		if (S_ISBLK(inode->i_mode))
250			bdev_statx_dioalign(inode, stat);
251	}
252
253	path_put(&path);
254	if (retry_estale(error, lookup_flags)) {
255		lookup_flags |= LOOKUP_REVAL;
256		goto retry;
257	}
258out:
259	return error;
260}
261
262int vfs_fstatat(int dfd, const char __user *filename,
263			      struct kstat *stat, int flags)
264{
265	int ret;
266	int statx_flags = flags | AT_NO_AUTOMOUNT;
267	struct filename *name;
268
269	name = getname_flags(filename, getname_statx_lookup_flags(statx_flags), NULL);
270	ret = vfs_statx(dfd, name, statx_flags, stat, STATX_BASIC_STATS);
271	putname(name);
272
273	return ret;
274}
275
276#ifdef __ARCH_WANT_OLD_STAT
277
278/*
279 * For backward compatibility?  Maybe this should be moved
280 * into arch/i386 instead?
281 */
282static int cp_old_stat(struct kstat *stat, struct __old_kernel_stat __user * statbuf)
283{
284	static int warncount = 5;
285	struct __old_kernel_stat tmp;
286
287	if (warncount > 0) {
288		warncount--;
289		printk(KERN_WARNING "VFS: Warning: %s using old stat() call. Recompile your binary.\n",
290			current->comm);
291	} else if (warncount < 0) {
292		/* it's laughable, but... */
293		warncount = 0;
294	}
295
296	memset(&tmp, 0, sizeof(struct __old_kernel_stat));
297	tmp.st_dev = old_encode_dev(stat->dev);
298	tmp.st_ino = stat->ino;
299	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
300		return -EOVERFLOW;
301	tmp.st_mode = stat->mode;
302	tmp.st_nlink = stat->nlink;
303	if (tmp.st_nlink != stat->nlink)
304		return -EOVERFLOW;
305	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
306	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
307	tmp.st_rdev = old_encode_dev(stat->rdev);
308#if BITS_PER_LONG == 32
309	if (stat->size > MAX_NON_LFS)
310		return -EOVERFLOW;
311#endif
312	tmp.st_size = stat->size;
313	tmp.st_atime = stat->atime.tv_sec;
314	tmp.st_mtime = stat->mtime.tv_sec;
315	tmp.st_ctime = stat->ctime.tv_sec;
316	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
317}
318
319SYSCALL_DEFINE2(stat, const char __user *, filename,
320		struct __old_kernel_stat __user *, statbuf)
321{
322	struct kstat stat;
323	int error;
324
325	error = vfs_stat(filename, &stat);
326	if (error)
327		return error;
328
329	return cp_old_stat(&stat, statbuf);
330}
331
332SYSCALL_DEFINE2(lstat, const char __user *, filename,
333		struct __old_kernel_stat __user *, statbuf)
334{
335	struct kstat stat;
336	int error;
337
338	error = vfs_lstat(filename, &stat);
339	if (error)
340		return error;
341
342	return cp_old_stat(&stat, statbuf);
343}
344
345SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, statbuf)
346{
347	struct kstat stat;
348	int error = vfs_fstat(fd, &stat);
349
350	if (!error)
351		error = cp_old_stat(&stat, statbuf);
352
353	return error;
354}
355
356#endif /* __ARCH_WANT_OLD_STAT */
357
358#ifdef __ARCH_WANT_NEW_STAT
359
360#if BITS_PER_LONG == 32
361#  define choose_32_64(a,b) a
362#else
363#  define choose_32_64(a,b) b
364#endif
365
366#ifndef INIT_STRUCT_STAT_PADDING
367#  define INIT_STRUCT_STAT_PADDING(st) memset(&st, 0, sizeof(st))
368#endif
369
370static int cp_new_stat(struct kstat *stat, struct stat __user *statbuf)
371{
372	struct stat tmp;
373
374	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
375		return -EOVERFLOW;
376	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
377		return -EOVERFLOW;
378#if BITS_PER_LONG == 32
379	if (stat->size > MAX_NON_LFS)
380		return -EOVERFLOW;
381#endif
382
383	INIT_STRUCT_STAT_PADDING(tmp);
384	tmp.st_dev = new_encode_dev(stat->dev);
385	tmp.st_ino = stat->ino;
386	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
387		return -EOVERFLOW;
388	tmp.st_mode = stat->mode;
389	tmp.st_nlink = stat->nlink;
390	if (tmp.st_nlink != stat->nlink)
391		return -EOVERFLOW;
392	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
393	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
394	tmp.st_rdev = new_encode_dev(stat->rdev);
395	tmp.st_size = stat->size;
396	tmp.st_atime = stat->atime.tv_sec;
397	tmp.st_mtime = stat->mtime.tv_sec;
398	tmp.st_ctime = stat->ctime.tv_sec;
399#ifdef STAT_HAVE_NSEC
400	tmp.st_atime_nsec = stat->atime.tv_nsec;
401	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
402	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
403#endif
404	tmp.st_blocks = stat->blocks;
405	tmp.st_blksize = stat->blksize;
406	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
407}
408
409SYSCALL_DEFINE2(newstat, const char __user *, filename,
410		struct stat __user *, statbuf)
411{
412	struct kstat stat;
413	int error = vfs_stat(filename, &stat);
414
415	if (error)
416		return error;
417	return cp_new_stat(&stat, statbuf);
418}
419
420SYSCALL_DEFINE2(newlstat, const char __user *, filename,
421		struct stat __user *, statbuf)
422{
423	struct kstat stat;
424	int error;
425
426	error = vfs_lstat(filename, &stat);
427	if (error)
428		return error;
429
430	return cp_new_stat(&stat, statbuf);
431}
432
433#if !defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_SYS_NEWFSTATAT)
434SYSCALL_DEFINE4(newfstatat, int, dfd, const char __user *, filename,
435		struct stat __user *, statbuf, int, flag)
436{
437	struct kstat stat;
438	int error;
439
440	error = vfs_fstatat(dfd, filename, &stat, flag);
441	if (error)
442		return error;
443	return cp_new_stat(&stat, statbuf);
444}
445#endif
446
447SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
448{
449	struct kstat stat;
450	int error = vfs_fstat(fd, &stat);
451
452	if (!error)
453		error = cp_new_stat(&stat, statbuf);
454
455	return error;
456}
457#endif
458
459static int do_readlinkat(int dfd, const char __user *pathname,
460			 char __user *buf, int bufsiz)
461{
462	struct path path;
463	int error;
464	int empty = 0;
465	unsigned int lookup_flags = LOOKUP_EMPTY;
466
467	if (bufsiz <= 0)
468		return -EINVAL;
469
470retry:
471	error = user_path_at_empty(dfd, pathname, lookup_flags, &path, &empty);
472	if (!error) {
473		struct inode *inode = d_backing_inode(path.dentry);
474
475		error = empty ? -ENOENT : -EINVAL;
476		/*
477		 * AFS mountpoints allow readlink(2) but are not symlinks
478		 */
479		if (d_is_symlink(path.dentry) || inode->i_op->readlink) {
480			error = security_inode_readlink(path.dentry);
481			if (!error) {
482				touch_atime(&path);
483				error = vfs_readlink(path.dentry, buf, bufsiz);
484			}
485		}
486		path_put(&path);
487		if (retry_estale(error, lookup_flags)) {
488			lookup_flags |= LOOKUP_REVAL;
489			goto retry;
490		}
491	}
492	return error;
493}
494
495SYSCALL_DEFINE4(readlinkat, int, dfd, const char __user *, pathname,
496		char __user *, buf, int, bufsiz)
497{
498	return do_readlinkat(dfd, pathname, buf, bufsiz);
499}
500
501SYSCALL_DEFINE3(readlink, const char __user *, path, char __user *, buf,
502		int, bufsiz)
503{
504	return do_readlinkat(AT_FDCWD, path, buf, bufsiz);
505}
506
507
508/* ---------- LFS-64 ----------- */
509#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
510
511#ifndef INIT_STRUCT_STAT64_PADDING
512#  define INIT_STRUCT_STAT64_PADDING(st) memset(&st, 0, sizeof(st))
513#endif
514
515static long cp_new_stat64(struct kstat *stat, struct stat64 __user *statbuf)
516{
517	struct stat64 tmp;
518
519	INIT_STRUCT_STAT64_PADDING(tmp);
520#ifdef CONFIG_MIPS
521	/* mips has weird padding, so we don't get 64 bits there */
522	tmp.st_dev = new_encode_dev(stat->dev);
523	tmp.st_rdev = new_encode_dev(stat->rdev);
524#else
525	tmp.st_dev = huge_encode_dev(stat->dev);
526	tmp.st_rdev = huge_encode_dev(stat->rdev);
527#endif
528	tmp.st_ino = stat->ino;
529	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
530		return -EOVERFLOW;
531#ifdef STAT64_HAS_BROKEN_ST_INO
532	tmp.__st_ino = stat->ino;
533#endif
534	tmp.st_mode = stat->mode;
535	tmp.st_nlink = stat->nlink;
536	tmp.st_uid = from_kuid_munged(current_user_ns(), stat->uid);
537	tmp.st_gid = from_kgid_munged(current_user_ns(), stat->gid);
538	tmp.st_atime = stat->atime.tv_sec;
539	tmp.st_atime_nsec = stat->atime.tv_nsec;
540	tmp.st_mtime = stat->mtime.tv_sec;
541	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
542	tmp.st_ctime = stat->ctime.tv_sec;
543	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
544	tmp.st_size = stat->size;
545	tmp.st_blocks = stat->blocks;
546	tmp.st_blksize = stat->blksize;
547	return copy_to_user(statbuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
548}
549
550SYSCALL_DEFINE2(stat64, const char __user *, filename,
551		struct stat64 __user *, statbuf)
552{
553	struct kstat stat;
554	int error = vfs_stat(filename, &stat);
555
556	if (!error)
557		error = cp_new_stat64(&stat, statbuf);
558
559	return error;
560}
561
562SYSCALL_DEFINE2(lstat64, const char __user *, filename,
563		struct stat64 __user *, statbuf)
564{
565	struct kstat stat;
566	int error = vfs_lstat(filename, &stat);
567
568	if (!error)
569		error = cp_new_stat64(&stat, statbuf);
570
571	return error;
572}
573
574SYSCALL_DEFINE2(fstat64, unsigned long, fd, struct stat64 __user *, statbuf)
575{
576	struct kstat stat;
577	int error = vfs_fstat(fd, &stat);
578
579	if (!error)
580		error = cp_new_stat64(&stat, statbuf);
581
582	return error;
583}
584
585SYSCALL_DEFINE4(fstatat64, int, dfd, const char __user *, filename,
586		struct stat64 __user *, statbuf, int, flag)
587{
588	struct kstat stat;
589	int error;
590
591	error = vfs_fstatat(dfd, filename, &stat, flag);
592	if (error)
593		return error;
594	return cp_new_stat64(&stat, statbuf);
595}
596#endif /* __ARCH_WANT_STAT64 || __ARCH_WANT_COMPAT_STAT64 */
597
598static noinline_for_stack int
599cp_statx(const struct kstat *stat, struct statx __user *buffer)
600{
601	struct statx tmp;
602
603	memset(&tmp, 0, sizeof(tmp));
604
605	tmp.stx_mask = stat->result_mask;
606	tmp.stx_blksize = stat->blksize;
607	tmp.stx_attributes = stat->attributes;
608	tmp.stx_nlink = stat->nlink;
609	tmp.stx_uid = from_kuid_munged(current_user_ns(), stat->uid);
610	tmp.stx_gid = from_kgid_munged(current_user_ns(), stat->gid);
611	tmp.stx_mode = stat->mode;
612	tmp.stx_ino = stat->ino;
613	tmp.stx_size = stat->size;
614	tmp.stx_blocks = stat->blocks;
615	tmp.stx_attributes_mask = stat->attributes_mask;
616	tmp.stx_atime.tv_sec = stat->atime.tv_sec;
617	tmp.stx_atime.tv_nsec = stat->atime.tv_nsec;
618	tmp.stx_btime.tv_sec = stat->btime.tv_sec;
619	tmp.stx_btime.tv_nsec = stat->btime.tv_nsec;
620	tmp.stx_ctime.tv_sec = stat->ctime.tv_sec;
621	tmp.stx_ctime.tv_nsec = stat->ctime.tv_nsec;
622	tmp.stx_mtime.tv_sec = stat->mtime.tv_sec;
623	tmp.stx_mtime.tv_nsec = stat->mtime.tv_nsec;
624	tmp.stx_rdev_major = MAJOR(stat->rdev);
625	tmp.stx_rdev_minor = MINOR(stat->rdev);
626	tmp.stx_dev_major = MAJOR(stat->dev);
627	tmp.stx_dev_minor = MINOR(stat->dev);
628	tmp.stx_mnt_id = stat->mnt_id;
629	tmp.stx_dio_mem_align = stat->dio_mem_align;
630	tmp.stx_dio_offset_align = stat->dio_offset_align;
631
632	return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0;
633}
634
635int do_statx(int dfd, struct filename *filename, unsigned int flags,
636	     unsigned int mask, struct statx __user *buffer)
637{
638	struct kstat stat;
639	int error;
640
641	if (mask & STATX__RESERVED)
642		return -EINVAL;
643	if ((flags & AT_STATX_SYNC_TYPE) == AT_STATX_SYNC_TYPE)
644		return -EINVAL;
645
646	error = vfs_statx(dfd, filename, flags, &stat, mask);
647	if (error)
648		return error;
649
650	return cp_statx(&stat, buffer);
651}
652
653/**
654 * sys_statx - System call to get enhanced stats
655 * @dfd: Base directory to pathwalk from *or* fd to stat.
656 * @filename: File to stat or "" with AT_EMPTY_PATH
657 * @flags: AT_* flags to control pathwalk.
658 * @mask: Parts of statx struct actually required.
659 * @buffer: Result buffer.
660 *
661 * Note that fstat() can be emulated by setting dfd to the fd of interest,
662 * supplying "" as the filename and setting AT_EMPTY_PATH in the flags.
663 */
664SYSCALL_DEFINE5(statx,
665		int, dfd, const char __user *, filename, unsigned, flags,
666		unsigned int, mask,
667		struct statx __user *, buffer)
668{
669	int ret;
670	struct filename *name;
671
672	name = getname_flags(filename, getname_statx_lookup_flags(flags), NULL);
673	ret = do_statx(dfd, name, flags, mask, buffer);
674	putname(name);
675
676	return ret;
677}
678
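/*
 * [Editor's illustration -- userspace, not part of fs/stat.c]  Querying the
 * extended fields this version fills in (stx_mnt_id, and the DIO alignment
 * hints for block devices) through the statx(2) wrapper in recent libcs.
 * Assumes glibc 2.28+ and uapi headers new enough to define STATX_MNT_ID
 * and STATX_DIOALIGN; callers should check stx_mask before trusting a field.
 */
#define _GNU_SOURCE
#include <fcntl.h>		/* AT_FDCWD */
#include <sys/stat.h>		/* statx(), struct statx, STATX_* */
#include <stdio.h>

static void print_statx_extras(const char *path)
{
	struct statx stx;

	if (statx(AT_FDCWD, path, 0, STATX_BASIC_STATS | STATX_DIOALIGN, &stx))
		return;
	if (stx.stx_mask & STATX_MNT_ID)
		printf("%s: mount id %llu\n", path,
		       (unsigned long long)stx.stx_mnt_id);
	if (stx.stx_mask & STATX_DIOALIGN)
		printf("%s: DIO offset alignment %u\n", path,
		       stx.stx_dio_offset_align);
}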
679#if defined(CONFIG_COMPAT) && defined(__ARCH_WANT_COMPAT_STAT)
680static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
681{
682	struct compat_stat tmp;
683
684	if (sizeof(tmp.st_dev) < 4 && !old_valid_dev(stat->dev))
685		return -EOVERFLOW;
686	if (sizeof(tmp.st_rdev) < 4 && !old_valid_dev(stat->rdev))
687		return -EOVERFLOW;
688
689	memset(&tmp, 0, sizeof(tmp));
690	tmp.st_dev = new_encode_dev(stat->dev);
691	tmp.st_ino = stat->ino;
692	if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
693		return -EOVERFLOW;
694	tmp.st_mode = stat->mode;
695	tmp.st_nlink = stat->nlink;
696	if (tmp.st_nlink != stat->nlink)
697		return -EOVERFLOW;
698	SET_UID(tmp.st_uid, from_kuid_munged(current_user_ns(), stat->uid));
699	SET_GID(tmp.st_gid, from_kgid_munged(current_user_ns(), stat->gid));
700	tmp.st_rdev = new_encode_dev(stat->rdev);
701	if ((u64) stat->size > MAX_NON_LFS)
702		return -EOVERFLOW;
703	tmp.st_size = stat->size;
704	tmp.st_atime = stat->atime.tv_sec;
705	tmp.st_atime_nsec = stat->atime.tv_nsec;
706	tmp.st_mtime = stat->mtime.tv_sec;
707	tmp.st_mtime_nsec = stat->mtime.tv_nsec;
708	tmp.st_ctime = stat->ctime.tv_sec;
709	tmp.st_ctime_nsec = stat->ctime.tv_nsec;
710	tmp.st_blocks = stat->blocks;
711	tmp.st_blksize = stat->blksize;
712	return copy_to_user(ubuf, &tmp, sizeof(tmp)) ? -EFAULT : 0;
713}
714
715COMPAT_SYSCALL_DEFINE2(newstat, const char __user *, filename,
716		       struct compat_stat __user *, statbuf)
717{
718	struct kstat stat;
719	int error;
720
721	error = vfs_stat(filename, &stat);
722	if (error)
723		return error;
724	return cp_compat_stat(&stat, statbuf);
725}
726
727COMPAT_SYSCALL_DEFINE2(newlstat, const char __user *, filename,
728		       struct compat_stat __user *, statbuf)
729{
730	struct kstat stat;
731	int error;
732
733	error = vfs_lstat(filename, &stat);
734	if (error)
735		return error;
736	return cp_compat_stat(&stat, statbuf);
737}
738
739#ifndef __ARCH_WANT_STAT64
740COMPAT_SYSCALL_DEFINE4(newfstatat, unsigned int, dfd,
741		       const char __user *, filename,
742		       struct compat_stat __user *, statbuf, int, flag)
743{
744	struct kstat stat;
745	int error;
746
747	error = vfs_fstatat(dfd, filename, &stat, flag);
748	if (error)
749		return error;
750	return cp_compat_stat(&stat, statbuf);
751}
752#endif
753
754COMPAT_SYSCALL_DEFINE2(newfstat, unsigned int, fd,
755		       struct compat_stat __user *, statbuf)
756{
757	struct kstat stat;
758	int error = vfs_fstat(fd, &stat);
759
760	if (!error)
761		error = cp_compat_stat(&stat, statbuf);
762	return error;
763}
764#endif
765
766/* The caller is responsible for sufficient locking here (i.e. inode->i_lock) */
767void __inode_add_bytes(struct inode *inode, loff_t bytes)
768{
769	inode->i_blocks += bytes >> 9;
770	bytes &= 511;
771	inode->i_bytes += bytes;
772	if (inode->i_bytes >= 512) {
773		inode->i_blocks++;
774		inode->i_bytes -= 512;
775	}
776}
777EXPORT_SYMBOL(__inode_add_bytes);
778
779void inode_add_bytes(struct inode *inode, loff_t bytes)
780{
781	spin_lock(&inode->i_lock);
782	__inode_add_bytes(inode, bytes);
783	spin_unlock(&inode->i_lock);
784}
785
786EXPORT_SYMBOL(inode_add_bytes);
787
788void __inode_sub_bytes(struct inode *inode, loff_t bytes)
789{
790	inode->i_blocks -= bytes >> 9;
791	bytes &= 511;
792	if (inode->i_bytes < bytes) {
793		inode->i_blocks--;
794		inode->i_bytes += 512;
795	}
796	inode->i_bytes -= bytes;
797}
798
799EXPORT_SYMBOL(__inode_sub_bytes);
800
801void inode_sub_bytes(struct inode *inode, loff_t bytes)
802{
803	spin_lock(&inode->i_lock);
804	__inode_sub_bytes(inode, bytes);
805	spin_unlock(&inode->i_lock);
806}
807
808EXPORT_SYMBOL(inode_sub_bytes);
809
810loff_t inode_get_bytes(struct inode *inode)
811{
812	loff_t ret;
813
814	spin_lock(&inode->i_lock);
815	ret = __inode_get_bytes(inode);
816	spin_unlock(&inode->i_lock);
817	return ret;
818}
819
820EXPORT_SYMBOL(inode_get_bytes);
821
822void inode_set_bytes(struct inode *inode, loff_t bytes)
823{
824	/* The caller is responsible for sufficient locking
825	 * here (i.e. inode->i_lock) */
826	inode->i_blocks = bytes >> 9;
827	inode->i_bytes = bytes & 511;
828}
829
830EXPORT_SYMBOL(inode_set_bytes);