1// SPDX-License-Identifier: GPL-2.0
2/*
3 * File operations used by nfsd. Some of these have been ripped from
4 * other parts of the kernel because they weren't exported, others
5 * are partial duplicates with added or changed functionality.
6 *
7 * Note that several functions dget() the dentry upon which they want
8 * to act, most notably those that create directory entries. Response
 9 * dentries are dput()'d if necessary in the release callback.
10 * So if you notice code paths that apparently fail to dput() the
11 * dentry, don't worry--they have been taken care of.
12 *
13 * Copyright (C) 1995-1999 Olaf Kirch <okir@monad.swb.de>
 14 * Zerocopy NFS support (C) 2002 Hirokazu Takahashi <taka@valinux.co.jp>
15 */
16
17#include <linux/fs.h>
18#include <linux/file.h>
19#include <linux/splice.h>
20#include <linux/falloc.h>
21#include <linux/fcntl.h>
22#include <linux/namei.h>
23#include <linux/delay.h>
24#include <linux/fsnotify.h>
25#include <linux/posix_acl_xattr.h>
26#include <linux/xattr.h>
27#include <linux/jhash.h>
28#include <linux/pagemap.h>
29#include <linux/slab.h>
30#include <linux/uaccess.h>
31#include <linux/exportfs.h>
32#include <linux/writeback.h>
33#include <linux/security.h>
34
35#include "xdr3.h"
36
37#ifdef CONFIG_NFSD_V4
38#include "../internal.h"
39#include "acl.h"
40#include "idmap.h"
41#include "xdr4.h"
42#endif /* CONFIG_NFSD_V4 */
43
44#include "nfsd.h"
45#include "vfs.h"
46#include "filecache.h"
47#include "trace.h"
48
49#define NFSDDBG_FACILITY NFSDDBG_FILEOP
50
51/**
52 * nfserrno - Map Linux errnos to NFS errnos
53 * @errno: POSIX(-ish) error code to be mapped
54 *
55 * Returns the appropriate (net-endian) nfserr_* (or nfs_ok if errno is 0). If
56 * it's an error we don't expect, log it once and return nfserr_io.
57 */
58__be32
59nfserrno (int errno)
60{
61 static struct {
62 __be32 nfserr;
63 int syserr;
64 } nfs_errtbl[] = {
65 { nfs_ok, 0 },
66 { nfserr_perm, -EPERM },
67 { nfserr_noent, -ENOENT },
68 { nfserr_io, -EIO },
69 { nfserr_nxio, -ENXIO },
70 { nfserr_fbig, -E2BIG },
71 { nfserr_stale, -EBADF },
72 { nfserr_acces, -EACCES },
73 { nfserr_exist, -EEXIST },
74 { nfserr_xdev, -EXDEV },
75 { nfserr_mlink, -EMLINK },
76 { nfserr_nodev, -ENODEV },
77 { nfserr_notdir, -ENOTDIR },
78 { nfserr_isdir, -EISDIR },
79 { nfserr_inval, -EINVAL },
80 { nfserr_fbig, -EFBIG },
81 { nfserr_nospc, -ENOSPC },
82 { nfserr_rofs, -EROFS },
83 { nfserr_mlink, -EMLINK },
84 { nfserr_nametoolong, -ENAMETOOLONG },
85 { nfserr_notempty, -ENOTEMPTY },
86 { nfserr_dquot, -EDQUOT },
87 { nfserr_stale, -ESTALE },
88 { nfserr_jukebox, -ETIMEDOUT },
89 { nfserr_jukebox, -ERESTARTSYS },
90 { nfserr_jukebox, -EAGAIN },
91 { nfserr_jukebox, -EWOULDBLOCK },
92 { nfserr_jukebox, -ENOMEM },
93 { nfserr_io, -ETXTBSY },
94 { nfserr_notsupp, -EOPNOTSUPP },
95 { nfserr_toosmall, -ETOOSMALL },
96 { nfserr_serverfault, -ESERVERFAULT },
97 { nfserr_serverfault, -ENFILE },
98 { nfserr_io, -EREMOTEIO },
99 { nfserr_stale, -EOPENSTALE },
100 { nfserr_io, -EUCLEAN },
101 { nfserr_perm, -ENOKEY },
102 { nfserr_no_grace, -ENOGRACE},
103 };
104 int i;
105
106 for (i = 0; i < ARRAY_SIZE(nfs_errtbl); i++) {
107 if (nfs_errtbl[i].syserr == errno)
108 return nfs_errtbl[i].nfserr;
109 }
110 WARN_ONCE(1, "nfsd: non-standard errno: %d\n", errno);
111 return nfserr_io;
112}
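
/*
 * Typical usage in this file: a VFS call returns a negative errno in
 * host_err and the NFS reply status is produced with
 * "err = nfserrno(host_err);" (so, for example, -ENOENT becomes
 * nfserr_noent).  nfserrno(0) maps to nfs_ok, so the same call also
 * works on the success path.
 */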
113
114/*
115 * Called from nfsd_lookup and encode_dirent. Check if we have crossed
116 * a mount point.
117 * Returns -EAGAIN or -ETIMEDOUT leaving *dpp and *expp unchanged,
118 * or nfs_ok having possibly changed *dpp and *expp
119 */
120int
121nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
122 struct svc_export **expp)
123{
124 struct svc_export *exp = *expp, *exp2 = NULL;
125 struct dentry *dentry = *dpp;
126 struct path path = {.mnt = mntget(exp->ex_path.mnt),
127 .dentry = dget(dentry)};
128 unsigned int follow_flags = 0;
129 int err = 0;
130
131 if (exp->ex_flags & NFSEXP_CROSSMOUNT)
132 follow_flags = LOOKUP_AUTOMOUNT;
133
134 err = follow_down(&path, follow_flags);
135 if (err < 0)
136 goto out;
137 if (path.mnt == exp->ex_path.mnt && path.dentry == dentry &&
138 nfsd_mountpoint(dentry, exp) == 2) {
139 /* This is only a mountpoint in some other namespace */
140 path_put(&path);
141 goto out;
142 }
143
144 exp2 = rqst_exp_get_by_name(rqstp, &path);
145 if (IS_ERR(exp2)) {
146 err = PTR_ERR(exp2);
147 /*
148 * We normally allow NFS clients to continue
149 * "underneath" a mountpoint that is not exported.
150 * The exception is V4ROOT, where no traversal is ever
151 * allowed without an explicit export of the new
152 * directory.
153 */
154 if (err == -ENOENT && !(exp->ex_flags & NFSEXP_V4ROOT))
155 err = 0;
156 path_put(&path);
157 goto out;
158 }
159 if (nfsd_v4client(rqstp) ||
160 (exp->ex_flags & NFSEXP_CROSSMOUNT) || EX_NOHIDE(exp2)) {
161 /* successfully crossed mount point */
162 /*
163 * This is subtle: path.dentry is *not* on path.mnt
164 * at this point. The only reason we are safe is that
165 * original mnt is pinned down by exp, so we should
166 * put path *before* putting exp
167 */
168 *dpp = path.dentry;
169 path.dentry = dentry;
170 *expp = exp2;
171 exp2 = exp;
172 }
173 path_put(&path);
174 exp_put(exp2);
175out:
176 return err;
177}
178
179static void follow_to_parent(struct path *path)
180{
181 struct dentry *dp;
182
183 while (path->dentry == path->mnt->mnt_root && follow_up(path))
184 ;
185 dp = dget_parent(path->dentry);
186 dput(path->dentry);
187 path->dentry = dp;
188}
189
190static int nfsd_lookup_parent(struct svc_rqst *rqstp, struct dentry *dparent, struct svc_export **exp, struct dentry **dentryp)
191{
192 struct svc_export *exp2;
193 struct path path = {.mnt = mntget((*exp)->ex_path.mnt),
194 .dentry = dget(dparent)};
195
196 follow_to_parent(&path);
197
198 exp2 = rqst_exp_parent(rqstp, &path);
199 if (PTR_ERR(exp2) == -ENOENT) {
200 *dentryp = dget(dparent);
201 } else if (IS_ERR(exp2)) {
202 path_put(&path);
203 return PTR_ERR(exp2);
204 } else {
205 *dentryp = dget(path.dentry);
206 exp_put(*exp);
207 *exp = exp2;
208 }
209 path_put(&path);
210 return 0;
211}
212
213/*
214 * For nfsd purposes, we treat V4ROOT exports as though there was an
215 * export at *every* directory.
216 * We return:
217 * '1' if this dentry *must* be an export point,
218 * '2' if it might be, if there is really a mount here, and
219 * '0' if there is no chance of an export point here.
220 */
221int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp)
222{
223 if (!d_inode(dentry))
224 return 0;
225 if (exp->ex_flags & NFSEXP_V4ROOT)
226 return 1;
227 if (nfsd4_is_junction(dentry))
228 return 1;
229 if (d_managed(dentry))
230 /*
231 * Might only be a mountpoint in a different namespace,
232 * but we need to check.
233 */
234 return 2;
235 return 0;
236}
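
/*
 * Callers treat the '1' and '2' results differently: nfsd_lookup_dentry()
 * calls nfsd_cross_mnt() for any non-zero result, and nfsd_cross_mnt()
 * then uses follow_down() to determine whether a '2' really is a
 * mountpoint in this namespace (the "only a mountpoint in some other
 * namespace" check above).
 */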
237
238__be32
239nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
240 const char *name, unsigned int len,
241 struct svc_export **exp_ret, struct dentry **dentry_ret)
242{
243 struct svc_export *exp;
244 struct dentry *dparent;
245 struct dentry *dentry;
246 int host_err;
247
 248 dprintk("nfsd: nfsd_lookup(fh %s, %.*s)\n", SVCFH_fmt(fhp), len, name);
249
250 dparent = fhp->fh_dentry;
251 exp = exp_get(fhp->fh_export);
252
253 /* Lookup the name, but don't follow links */
254 if (isdotent(name, len)) {
 255 if (len == 1)
256 dentry = dget(dparent);
257 else if (dparent != exp->ex_path.dentry)
258 dentry = dget_parent(dparent);
259 else if (!EX_NOHIDE(exp) && !nfsd_v4client(rqstp))
260 dentry = dget(dparent); /* .. == . just like at / */
261 else {
262 /* checking mountpoint crossing is very different when stepping up */
263 host_err = nfsd_lookup_parent(rqstp, dparent, &exp, &dentry);
264 if (host_err)
265 goto out_nfserr;
266 }
267 } else {
268 dentry = lookup_one_len_unlocked(name, dparent, len);
269 host_err = PTR_ERR(dentry);
270 if (IS_ERR(dentry))
271 goto out_nfserr;
272 if (nfsd_mountpoint(dentry, exp)) {
273 host_err = nfsd_cross_mnt(rqstp, &dentry, &exp);
274 if (host_err) {
275 dput(dentry);
276 goto out_nfserr;
277 }
278 }
279 }
280 *dentry_ret = dentry;
281 *exp_ret = exp;
282 return 0;
283
284out_nfserr:
285 exp_put(exp);
286 return nfserrno(host_err);
287}
288
289/**
290 * nfsd_lookup - look up a single path component for nfsd
291 *
292 * @rqstp: the request context
293 * @fhp: the file handle of the directory
294 * @name: the component name, or %NULL to look up parent
295 * @len: length of name to examine
296 * @resfh: pointer to pre-initialised filehandle to hold result.
297 *
298 * Look up one component of a pathname.
299 * N.B. After this call _both_ fhp and resfh need an fh_put
300 *
301 * If the lookup would cross a mountpoint, and the mounted filesystem
302 * is exported to the client with NFSEXP_NOHIDE, then the lookup is
303 * accepted as it stands and the mounted directory is
304 * returned. Otherwise the covered directory is returned.
305 * NOTE: this mountpoint crossing is not supported properly by all
306 * clients and is explicitly disallowed for NFSv3
307 *
308 */
309__be32
310nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
311 unsigned int len, struct svc_fh *resfh)
312{
313 struct svc_export *exp;
314 struct dentry *dentry;
315 __be32 err;
316
317 err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
318 if (err)
319 return err;
320 err = nfsd_lookup_dentry(rqstp, fhp, name, len, &exp, &dentry);
321 if (err)
322 return err;
323 err = check_nfsd_access(exp, rqstp);
324 if (err)
325 goto out;
326 /*
327 * Note: we compose the file handle now, but as the
328 * dentry may be negative, it may need to be updated.
329 */
330 err = fh_compose(resfh, exp, dentry, fhp);
331 if (!err && d_really_is_negative(dentry))
332 err = nfserr_noent;
333out:
334 dput(dentry);
335 exp_put(exp);
336 return err;
337}
338
339static void
340commit_reset_write_verifier(struct nfsd_net *nn, struct svc_rqst *rqstp,
341 int err)
342{
343 switch (err) {
344 case -EAGAIN:
345 case -ESTALE:
346 /*
347 * Neither of these are the result of a problem with
348 * durable storage, so avoid a write verifier reset.
349 */
350 break;
351 default:
352 nfsd_reset_write_verifier(nn);
353 trace_nfsd_writeverf_reset(nn, rqstp, err);
354 }
355}
356
357/*
358 * Commit metadata changes to stable storage.
359 */
360static int
361commit_inode_metadata(struct inode *inode)
362{
363 const struct export_operations *export_ops = inode->i_sb->s_export_op;
364
365 if (export_ops->commit_metadata)
366 return export_ops->commit_metadata(inode);
367 return sync_inode_metadata(inode, 1);
368}
369
370static int
371commit_metadata(struct svc_fh *fhp)
372{
373 struct inode *inode = d_inode(fhp->fh_dentry);
374
375 if (!EX_ISSYNC(fhp->fh_export))
376 return 0;
377 return commit_inode_metadata(inode);
378}
379
380/*
381 * Go over the attributes and take care of the small differences between
382 * NFS semantics and what Linux expects.
383 */
384static void
385nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
386{
387 /* Ignore mode updates on symlinks */
388 if (S_ISLNK(inode->i_mode))
389 iap->ia_valid &= ~ATTR_MODE;
390
391 /* sanitize the mode change */
392 if (iap->ia_valid & ATTR_MODE) {
393 iap->ia_mode &= S_IALLUGO;
394 iap->ia_mode |= (inode->i_mode & ~S_IALLUGO);
395 }
396
397 /* Revoke setuid/setgid on chown */
398 if (!S_ISDIR(inode->i_mode) &&
399 ((iap->ia_valid & ATTR_UID) || (iap->ia_valid & ATTR_GID))) {
400 iap->ia_valid |= ATTR_KILL_PRIV;
401 if (iap->ia_valid & ATTR_MODE) {
402 /* we're setting mode too, just clear the s*id bits */
403 iap->ia_mode &= ~S_ISUID;
404 if (iap->ia_mode & S_IXGRP)
405 iap->ia_mode &= ~S_ISGID;
406 } else {
407 /* set ATTR_KILL_* bits and let VFS handle it */
408 iap->ia_valid |= ATTR_KILL_SUID;
409 iap->ia_valid |=
410 setattr_should_drop_sgid(&nop_mnt_idmap, inode);
411 }
412 }
413}
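
/*
 * Example: a chown of a regular file that does not also change the mode
 * is handled by setting ATTR_KILL_SUID (plus whatever
 * setattr_should_drop_sgid() asks for) and letting notify_change()
 * strip the set-id bits.  When the mode is changed in the same SETATTR,
 * the bits are cleared in ia_mode directly; S_ISGID is cleared only if
 * group-execute is set, since setgid without group-execute denotes
 * mandatory locking rather than a set-id executable.
 */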
414
415static __be32
416nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
417 struct iattr *iap)
418{
419 struct inode *inode = d_inode(fhp->fh_dentry);
420
421 if (iap->ia_size < inode->i_size) {
422 __be32 err;
423
424 err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
425 NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
426 if (err)
427 return err;
428 }
429 return nfserrno(get_write_access(inode));
430}
431
432static int __nfsd_setattr(struct dentry *dentry, struct iattr *iap)
433{
434 int host_err;
435
436 if (iap->ia_valid & ATTR_SIZE) {
437 /*
438 * RFC5661, Section 18.30.4:
439 * Changing the size of a file with SETATTR indirectly
440 * changes the time_modify and change attributes.
441 *
442 * (and similar for the older RFCs)
443 */
444 struct iattr size_attr = {
445 .ia_valid = ATTR_SIZE | ATTR_CTIME | ATTR_MTIME,
446 .ia_size = iap->ia_size,
447 };
448
449 if (iap->ia_size < 0)
450 return -EFBIG;
451
452 host_err = notify_change(&nop_mnt_idmap, dentry, &size_attr, NULL);
453 if (host_err)
454 return host_err;
455 iap->ia_valid &= ~ATTR_SIZE;
456
457 /*
458 * Avoid the additional setattr call below if the only other
459 * attribute that the client sends is the mtime, as we update
460 * it as part of the size change above.
461 */
462 if ((iap->ia_valid & ~ATTR_MTIME) == 0)
463 return 0;
464 }
465
466 if (!iap->ia_valid)
467 return 0;
468
469 iap->ia_valid |= ATTR_CTIME;
470 return notify_change(&nop_mnt_idmap, dentry, iap, NULL);
471}
472
473/**
474 * nfsd_setattr - Set various file attributes.
475 * @rqstp: controlling RPC transaction
476 * @fhp: filehandle of target
477 * @attr: attributes to set
478 * @guardtime: do not act if ctime.tv_sec does not match this timestamp
479 *
480 * This call may adjust the contents of @attr (in particular, this
481 * call may change the bits in the na_iattr.ia_valid field).
482 *
483 * Returns nfs_ok on success, otherwise an NFS status code is
484 * returned. Caller must release @fhp by calling fh_put in either
485 * case.
486 */
487__be32
488nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
489 struct nfsd_attrs *attr, const struct timespec64 *guardtime)
490{
491 struct dentry *dentry;
492 struct inode *inode;
493 struct iattr *iap = attr->na_iattr;
494 int accmode = NFSD_MAY_SATTR;
495 umode_t ftype = 0;
496 __be32 err;
497 int host_err = 0;
498 bool get_write_count;
499 bool size_change = (iap->ia_valid & ATTR_SIZE);
500 int retries;
501
502 if (iap->ia_valid & ATTR_SIZE) {
503 accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
504 ftype = S_IFREG;
505 }
506
507 /*
508 * If utimes(2) and friends are called with times not NULL, we should
509 * not set NFSD_MAY_WRITE bit. Otherwise fh_verify->nfsd_permission
510 * will return EACCES, when the caller's effective UID does not match
511 * the owner of the file, and the caller is not privileged. In this
 512 * situation, we should return EPERM (notify_change will return this).
513 */
514 if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME)) {
515 accmode |= NFSD_MAY_OWNER_OVERRIDE;
516 if (!(iap->ia_valid & (ATTR_ATIME_SET | ATTR_MTIME_SET)))
517 accmode |= NFSD_MAY_WRITE;
518 }
519
520 /* Callers that do fh_verify should do the fh_want_write: */
521 get_write_count = !fhp->fh_dentry;
522
523 /* Get inode */
524 err = fh_verify(rqstp, fhp, ftype, accmode);
525 if (err)
526 return err;
527 if (get_write_count) {
528 host_err = fh_want_write(fhp);
529 if (host_err)
530 goto out;
531 }
532
533 dentry = fhp->fh_dentry;
534 inode = d_inode(dentry);
535
536 nfsd_sanitize_attrs(inode, iap);
537
538 /*
539 * The size case is special, it changes the file in addition to the
540 * attributes, and file systems don't expect it to be mixed with
541 * "random" attribute changes. We thus split out the size change
542 * into a separate call to ->setattr, and do the rest as a separate
543 * setattr call.
544 */
545 if (size_change) {
546 err = nfsd_get_write_access(rqstp, fhp, iap);
547 if (err)
548 return err;
549 }
550
551 inode_lock(inode);
552 err = fh_fill_pre_attrs(fhp);
553 if (err)
554 goto out_unlock;
555
556 if (guardtime) {
557 struct timespec64 ctime = inode_get_ctime(inode);
558 if ((u32)guardtime->tv_sec != (u32)ctime.tv_sec ||
559 guardtime->tv_nsec != ctime.tv_nsec) {
560 err = nfserr_notsync;
561 goto out_fill_attrs;
562 }
563 }
564
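	/*
	 * __nfsd_setattr() can fail with -EAGAIN, which typically means an
	 * NFSv4 delegation is outstanding on this inode.  In that case,
	 * wait briefly for the client to return the delegation and retry
	 * once.
	 */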
565 for (retries = 1;;) {
566 struct iattr attrs;
567
568 /*
569 * notify_change() can alter its iattr argument, making
570 * @iap unsuitable for submission multiple times. Make a
571 * copy for every loop iteration.
572 */
573 attrs = *iap;
574 host_err = __nfsd_setattr(dentry, &attrs);
575 if (host_err != -EAGAIN || !retries--)
576 break;
577 if (!nfsd_wait_for_delegreturn(rqstp, inode))
578 break;
579 }
580 if (attr->na_seclabel && attr->na_seclabel->len)
581 attr->na_labelerr = security_inode_setsecctx(dentry,
582 attr->na_seclabel->data, attr->na_seclabel->len);
583 if (IS_ENABLED(CONFIG_FS_POSIX_ACL) && attr->na_pacl)
584 attr->na_aclerr = set_posix_acl(&nop_mnt_idmap,
585 dentry, ACL_TYPE_ACCESS,
586 attr->na_pacl);
587 if (IS_ENABLED(CONFIG_FS_POSIX_ACL) &&
588 !attr->na_aclerr && attr->na_dpacl && S_ISDIR(inode->i_mode))
589 attr->na_aclerr = set_posix_acl(&nop_mnt_idmap,
590 dentry, ACL_TYPE_DEFAULT,
591 attr->na_dpacl);
592out_fill_attrs:
593 /*
594 * RFC 1813 Section 3.3.2 does not mandate that an NFS server
595 * returns wcc_data for SETATTR. Some client implementations
596 * depend on receiving wcc_data, however, to sort out partial
597 * updates (eg., the client requested that size and mode be
598 * modified, but the server changed only the file mode).
599 */
600 fh_fill_post_attrs(fhp);
601out_unlock:
602 inode_unlock(inode);
603 if (size_change)
604 put_write_access(inode);
605out:
606 if (!host_err)
607 host_err = commit_metadata(fhp);
608 return err != 0 ? err : nfserrno(host_err);
609}
610
611#if defined(CONFIG_NFSD_V4)
612/*
613 * NFS junction information is stored in an extended attribute.
614 */
615#define NFSD_JUNCTION_XATTR_NAME XATTR_TRUSTED_PREFIX "junction.nfs"
616
617/**
618 * nfsd4_is_junction - Test if an object could be an NFS junction
619 *
620 * @dentry: object to test
621 *
622 * Returns 1 if "dentry" appears to contain NFS junction information.
623 * Otherwise 0 is returned.
624 */
625int nfsd4_is_junction(struct dentry *dentry)
626{
627 struct inode *inode = d_inode(dentry);
628
629 if (inode == NULL)
630 return 0;
631 if (inode->i_mode & S_IXUGO)
632 return 0;
633 if (!(inode->i_mode & S_ISVTX))
634 return 0;
635 if (vfs_getxattr(&nop_mnt_idmap, dentry, NFSD_JUNCTION_XATTR_NAME,
636 NULL, 0) <= 0)
637 return 0;
638 return 1;
639}
640
641static struct nfsd4_compound_state *nfsd4_get_cstate(struct svc_rqst *rqstp)
642{
643 return &((struct nfsd4_compoundres *)rqstp->rq_resp)->cstate;
644}
645
646__be32 nfsd4_clone_file_range(struct svc_rqst *rqstp,
647 struct nfsd_file *nf_src, u64 src_pos,
648 struct nfsd_file *nf_dst, u64 dst_pos,
649 u64 count, bool sync)
650{
651 struct file *src = nf_src->nf_file;
652 struct file *dst = nf_dst->nf_file;
653 errseq_t since;
654 loff_t cloned;
655 __be32 ret = 0;
656
657 since = READ_ONCE(dst->f_wb_err);
658 cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0);
659 if (cloned < 0) {
660 ret = nfserrno(cloned);
661 goto out_err;
662 }
663 if (count && cloned != count) {
664 ret = nfserrno(-EINVAL);
665 goto out_err;
666 }
667 if (sync) {
668 loff_t dst_end = count ? dst_pos + count - 1 : LLONG_MAX;
669 int status = vfs_fsync_range(dst, dst_pos, dst_end, 0);
670
671 if (!status)
672 status = filemap_check_wb_err(dst->f_mapping, since);
673 if (!status)
674 status = commit_inode_metadata(file_inode(src));
675 if (status < 0) {
676 struct nfsd_net *nn = net_generic(nf_dst->nf_net,
677 nfsd_net_id);
678
679 trace_nfsd_clone_file_range_err(rqstp,
680 &nfsd4_get_cstate(rqstp)->save_fh,
681 src_pos,
682 &nfsd4_get_cstate(rqstp)->current_fh,
683 dst_pos,
684 count, status);
685 commit_reset_write_verifier(nn, rqstp, status);
686 ret = nfserrno(status);
687 }
688 }
689out_err:
690 return ret;
691}
692
693ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
694 u64 dst_pos, u64 count)
695{
696 ssize_t ret;
697
698 /*
699 * Limit copy to 4MB to prevent indefinitely blocking an nfsd
700 * thread and client rpc slot. The choice of 4MB is somewhat
701 * arbitrary. We might instead base this on r/wsize, or make it
702 * tunable, or use a time instead of a byte limit, or implement
703 * asynchronous copy. In theory a client could also recognize a
704 * limit like this and pipeline multiple COPY requests.
705 */
706 count = min_t(u64, count, 1 << 22);
707 ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count, 0);
708
709 if (ret == -EOPNOTSUPP || ret == -EXDEV)
710 ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count,
711 COPY_FILE_SPLICE);
712 return ret;
713}
714
715__be32 nfsd4_vfs_fallocate(struct svc_rqst *rqstp, struct svc_fh *fhp,
716 struct file *file, loff_t offset, loff_t len,
717 int flags)
718{
719 int error;
720
721 if (!S_ISREG(file_inode(file)->i_mode))
722 return nfserr_inval;
723
724 error = vfs_fallocate(file, flags, offset, len);
725 if (!error)
726 error = commit_metadata(fhp);
727
728 return nfserrno(error);
729}
730#endif /* defined(CONFIG_NFSD_V4) */
731
732/*
733 * Check server access rights to a file system object
734 */
735struct accessmap {
736 u32 access;
737 int how;
738};
739static struct accessmap nfs3_regaccess[] = {
740 { NFS3_ACCESS_READ, NFSD_MAY_READ },
741 { NFS3_ACCESS_EXECUTE, NFSD_MAY_EXEC },
742 { NFS3_ACCESS_MODIFY, NFSD_MAY_WRITE|NFSD_MAY_TRUNC },
743 { NFS3_ACCESS_EXTEND, NFSD_MAY_WRITE },
744
745#ifdef CONFIG_NFSD_V4
746 { NFS4_ACCESS_XAREAD, NFSD_MAY_READ },
747 { NFS4_ACCESS_XAWRITE, NFSD_MAY_WRITE },
748 { NFS4_ACCESS_XALIST, NFSD_MAY_READ },
749#endif
750
751 { 0, 0 }
752};
753
754static struct accessmap nfs3_diraccess[] = {
755 { NFS3_ACCESS_READ, NFSD_MAY_READ },
756 { NFS3_ACCESS_LOOKUP, NFSD_MAY_EXEC },
757 { NFS3_ACCESS_MODIFY, NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC},
758 { NFS3_ACCESS_EXTEND, NFSD_MAY_EXEC|NFSD_MAY_WRITE },
759 { NFS3_ACCESS_DELETE, NFSD_MAY_REMOVE },
760
761#ifdef CONFIG_NFSD_V4
762 { NFS4_ACCESS_XAREAD, NFSD_MAY_READ },
763 { NFS4_ACCESS_XAWRITE, NFSD_MAY_WRITE },
764 { NFS4_ACCESS_XALIST, NFSD_MAY_READ },
765#endif
766
767 { 0, 0 }
768};
769
770static struct accessmap nfs3_anyaccess[] = {
 771 /* Some clients - Solaris 2.6 at least - make an access call
 772 * to the server to check access to things like /dev/null
 773 * (which, really, the server doesn't care about). So
 774 * we provide simple access checking for them, looking
 775 * mainly at mode bits, and we make sure to ignore read-only
 776 * filesystem checks.
777 */
778 { NFS3_ACCESS_READ, NFSD_MAY_READ },
779 { NFS3_ACCESS_EXECUTE, NFSD_MAY_EXEC },
780 { NFS3_ACCESS_MODIFY, NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS },
781 { NFS3_ACCESS_EXTEND, NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS },
782
783 { 0, 0 }
784};
785
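/*
 * nfsd_access() walks one of the tables above: every requested bit the
 * table knows about is reported back in *supported, and a bit is set in
 * *access only if the corresponding nfsd_permission() check succeeds.
 * "Not allowed" results (nfserr_rofs, nfserr_acces, nfserr_perm) simply
 * leave the bit clear rather than failing the whole ACCESS request.
 */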
786__be32
787nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *supported)
788{
789 struct accessmap *map;
790 struct svc_export *export;
791 struct dentry *dentry;
792 u32 query, result = 0, sresult = 0;
793 __be32 error;
794
795 error = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP);
796 if (error)
797 goto out;
798
799 export = fhp->fh_export;
800 dentry = fhp->fh_dentry;
801
802 if (d_is_reg(dentry))
803 map = nfs3_regaccess;
804 else if (d_is_dir(dentry))
805 map = nfs3_diraccess;
806 else
807 map = nfs3_anyaccess;
808
809
810 query = *access;
811 for (; map->access; map++) {
812 if (map->access & query) {
813 __be32 err2;
814
815 sresult |= map->access;
816
817 err2 = nfsd_permission(rqstp, export, dentry, map->how);
818 switch (err2) {
819 case nfs_ok:
820 result |= map->access;
821 break;
822
823 /* the following error codes just mean the access was not allowed,
824 * rather than an error occurred */
825 case nfserr_rofs:
826 case nfserr_acces:
827 case nfserr_perm:
828 /* simply don't "or" in the access bit. */
829 break;
830 default:
831 error = err2;
832 goto out;
833 }
834 }
835 }
836 *access = result;
837 if (supported)
838 *supported = sresult;
839
840 out:
841 return error;
842}
843
844int nfsd_open_break_lease(struct inode *inode, int access)
845{
846 unsigned int mode;
847
848 if (access & NFSD_MAY_NOT_BREAK_LEASE)
849 return 0;
850 mode = (access & NFSD_MAY_WRITE) ? O_WRONLY : O_RDONLY;
851 return break_lease(inode, mode | O_NONBLOCK);
852}
853
854/*
855 * Open an existing file or directory.
856 * The may_flags argument indicates the type of open (read/write/lock)
857 * and additional flags.
858 * N.B. After this call fhp needs an fh_put
859 */
860static int
861__nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
862 int may_flags, struct file **filp)
863{
864 struct path path;
865 struct inode *inode;
866 struct file *file;
867 int flags = O_RDONLY|O_LARGEFILE;
868 int host_err = -EPERM;
869
870 path.mnt = fhp->fh_export->ex_path.mnt;
871 path.dentry = fhp->fh_dentry;
872 inode = d_inode(path.dentry);
873
874 if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
875 goto out;
876
877 if (!inode->i_fop)
878 goto out;
879
880 host_err = nfsd_open_break_lease(inode, may_flags);
881 if (host_err) /* NOMEM or WOULDBLOCK */
882 goto out;
883
884 if (may_flags & NFSD_MAY_WRITE) {
885 if (may_flags & NFSD_MAY_READ)
886 flags = O_RDWR|O_LARGEFILE;
887 else
888 flags = O_WRONLY|O_LARGEFILE;
889 }
890
891 file = dentry_open(&path, flags, current_cred());
892 if (IS_ERR(file)) {
893 host_err = PTR_ERR(file);
894 goto out;
895 }
896
897 host_err = security_file_post_open(file, may_flags);
898 if (host_err) {
899 fput(file);
900 goto out;
901 }
902
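	/*
	 * Tell the filesystem which directory-cookie width the protocol can
	 * carry: NFSv3 and later use 64-bit readdir cookies while NFSv2 is
	 * limited to 32 bits, and some filesystems hash directory offsets
	 * differently depending on this hint.
	 */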
903 if (may_flags & NFSD_MAY_64BIT_COOKIE)
904 file->f_mode |= FMODE_64BITHASH;
905 else
906 file->f_mode |= FMODE_32BITHASH;
907
908 *filp = file;
909out:
910 return host_err;
911}
912
913__be32
914nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
915 int may_flags, struct file **filp)
916{
917 __be32 err;
918 int host_err;
919 bool retried = false;
920
921 /*
922 * If we get here, then the client has already done an "open",
923 * and (hopefully) checked permission - so allow OWNER_OVERRIDE
924 * in case a chmod has now revoked permission.
925 *
926 * Arguably we should also allow the owner override for
927 * directories, but we never have and it doesn't seem to have
928 * caused anyone a problem. If we were to change this, note
929 * also that our filldir callbacks would need a variant of
930 * lookup_one_len that doesn't check permissions.
931 */
932 if (type == S_IFREG)
933 may_flags |= NFSD_MAY_OWNER_OVERRIDE;
934retry:
935 err = fh_verify(rqstp, fhp, type, may_flags);
936 if (!err) {
937 host_err = __nfsd_open(rqstp, fhp, type, may_flags, filp);
938 if (host_err == -EOPENSTALE && !retried) {
939 retried = true;
940 fh_put(fhp);
941 goto retry;
942 }
943 err = nfserrno(host_err);
944 }
945 return err;
946}
947
948/**
949 * nfsd_open_verified - Open a regular file for the filecache
950 * @rqstp: RPC request
951 * @fhp: NFS filehandle of the file to open
952 * @may_flags: internal permission flags
953 * @filp: OUT: open "struct file *"
954 *
955 * Returns zero on success, or a negative errno value.
956 */
957int
958nfsd_open_verified(struct svc_rqst *rqstp, struct svc_fh *fhp, int may_flags,
959 struct file **filp)
960{
961 return __nfsd_open(rqstp, fhp, S_IFREG, may_flags, filp);
962}
963
964/*
965 * Grab and keep cached pages associated with a file in the svc_rqst
966 * so that they can be passed to the network sendmsg routines
967 * directly. They will be released after the sending has completed.
968 *
969 * Return values: Number of bytes consumed, or -EIO if there are no
970 * remaining pages in rqstp->rq_pages.
971 */
972static int
973nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
974 struct splice_desc *sd)
975{
976 struct svc_rqst *rqstp = sd->u.data;
977 struct page *page = buf->page; // may be a compound one
978 unsigned offset = buf->offset;
979 struct page *last_page;
980
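	/*
	 * The pipe buffer may reference a compound (multi-page) folio, so
	 * work out which constituent pages the [offset, offset + sd->len)
	 * range touches and install each of them into rqstp->rq_pages via
	 * svc_rqst_replace_page() below.
	 */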
981 last_page = page + (offset + sd->len - 1) / PAGE_SIZE;
982 for (page += offset / PAGE_SIZE; page <= last_page; page++) {
983 /*
984 * Skip page replacement when extending the contents of the
985 * current page. But note that we may get two zero_pages in a
986 * row from shmem.
987 */
988 if (page == *(rqstp->rq_next_page - 1) &&
989 offset_in_page(rqstp->rq_res.page_base +
990 rqstp->rq_res.page_len))
991 continue;
992 if (unlikely(!svc_rqst_replace_page(rqstp, page)))
993 return -EIO;
994 }
995 if (rqstp->rq_res.page_len == 0) // first call
996 rqstp->rq_res.page_base = offset % PAGE_SIZE;
997 rqstp->rq_res.page_len += sd->len;
998 return sd->len;
999}
1000
1001static int nfsd_direct_splice_actor(struct pipe_inode_info *pipe,
1002 struct splice_desc *sd)
1003{
1004 return __splice_from_pipe(pipe, sd, nfsd_splice_actor);
1005}
1006
1007static u32 nfsd_eof_on_read(struct file *file, loff_t offset, ssize_t len,
1008 size_t expected)
1009{
1010 if (expected != 0 && len == 0)
1011 return 1;
1012 if (offset+len >= i_size_read(file_inode(file)))
1013 return 1;
1014 return 0;
1015}
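
/*
 * For example, reading 20 bytes at offset 90 of a 100-byte file returns
 * len == 10, and 90 + 10 >= i_size sets the EOF flag; a short read in
 * the middle of the file (len != 0, offset + len < i_size) does not.
 */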
1016
1017static __be32 nfsd_finish_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
1018 struct file *file, loff_t offset,
1019 unsigned long *count, u32 *eof, ssize_t host_err)
1020{
1021 if (host_err >= 0) {
1022 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1023
1024 nfsd_stats_io_read_add(nn, fhp->fh_export, host_err);
1025 *eof = nfsd_eof_on_read(file, offset, host_err, *count);
1026 *count = host_err;
1027 fsnotify_access(file);
1028 trace_nfsd_read_io_done(rqstp, fhp, offset, *count);
1029 return 0;
1030 } else {
1031 trace_nfsd_read_err(rqstp, fhp, offset, host_err);
1032 return nfserrno(host_err);
1033 }
1034}
1035
1036/**
1037 * nfsd_splice_read - Perform a VFS read using a splice pipe
1038 * @rqstp: RPC transaction context
1039 * @fhp: file handle of file to be read
1040 * @file: opened struct file of file to be read
1041 * @offset: starting byte offset
1042 * @count: IN: requested number of bytes; OUT: number of bytes read
1043 * @eof: OUT: set non-zero if operation reached the end of the file
1044 *
1045 * Returns nfs_ok on success, otherwise an nfserr stat value is
1046 * returned.
1047 */
1048__be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
1049 struct file *file, loff_t offset, unsigned long *count,
1050 u32 *eof)
1051{
1052 struct splice_desc sd = {
1053 .len = 0,
1054 .total_len = *count,
1055 .pos = offset,
1056 .u.data = rqstp,
1057 };
1058 ssize_t host_err;
1059
1060 trace_nfsd_read_splice(rqstp, fhp, offset, *count);
1061 host_err = rw_verify_area(READ, file, &offset, *count);
1062 if (!host_err)
1063 host_err = splice_direct_to_actor(file, &sd,
1064 nfsd_direct_splice_actor);
1065 return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err);
1066}
1067
1068/**
1069 * nfsd_iter_read - Perform a VFS read using an iterator
1070 * @rqstp: RPC transaction context
1071 * @fhp: file handle of file to be read
1072 * @file: opened struct file of file to be read
1073 * @offset: starting byte offset
1074 * @count: IN: requested number of bytes; OUT: number of bytes read
1075 * @base: offset in first page of read buffer
1076 * @eof: OUT: set non-zero if operation reached the end of the file
1077 *
1078 * Some filesystems or situations cannot use nfsd_splice_read. This
1079 * function is the slightly less-performant fallback for those cases.
1080 *
1081 * Returns nfs_ok on success, otherwise an nfserr stat value is
1082 * returned.
1083 */
1084__be32 nfsd_iter_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
1085 struct file *file, loff_t offset, unsigned long *count,
1086 unsigned int base, u32 *eof)
1087{
1088 unsigned long v, total;
1089 struct iov_iter iter;
1090 loff_t ppos = offset;
1091 struct page *page;
1092 ssize_t host_err;
1093
1094 v = 0;
1095 total = *count;
1096 while (total) {
1097 page = *(rqstp->rq_next_page++);
1098 rqstp->rq_vec[v].iov_base = page_address(page) + base;
1099 rqstp->rq_vec[v].iov_len = min_t(size_t, total, PAGE_SIZE - base);
1100 total -= rqstp->rq_vec[v].iov_len;
1101 ++v;
1102 base = 0;
1103 }
1104 WARN_ON_ONCE(v > ARRAY_SIZE(rqstp->rq_vec));
1105
1106 trace_nfsd_read_vector(rqstp, fhp, offset, *count);
1107 iov_iter_kvec(&iter, ITER_DEST, rqstp->rq_vec, v, *count);
1108 host_err = vfs_iter_read(file, &iter, &ppos, 0);
1109 return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err);
1110}
1111
1112/*
1113 * Gathered writes: If another process is currently writing to the file,
1114 * there's a high chance this is another nfsd (triggered by a bulk write
1115 * from a client's biod). Rather than syncing the file with each write
1116 * request, we sleep for 10 msec.
1117 *
1118 * I don't know if this roughly approximates C. Juszak's idea of
1119 * gathered writes, but it's a nice and simple solution (IMHO), and it
1120 * seems to work:-)
1121 *
1122 * Note: we do this only in the NFSv2 case, since v3 and higher have a
1123 * better tool (separate unstable writes and commits) for solving this
1124 * problem.
1125 */
1126static int wait_for_concurrent_writes(struct file *file)
1127{
1128 struct inode *inode = file_inode(file);
1129 static ino_t last_ino;
1130 static dev_t last_dev;
1131 int err = 0;
1132
1133 if (atomic_read(&inode->i_writecount) > 1
1134 || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) {
1135 dprintk("nfsd: write defer %d\n", task_pid_nr(current));
1136 msleep(10);
1137 dprintk("nfsd: write resume %d\n", task_pid_nr(current));
1138 }
1139
1140 if (inode->i_state & I_DIRTY) {
1141 dprintk("nfsd: write sync %d\n", task_pid_nr(current));
1142 err = vfs_fsync(file, 0);
1143 }
1144 last_ino = inode->i_ino;
1145 last_dev = inode->i_sb->s_dev;
1146 return err;
1147}
1148
1149__be32
1150nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
1151 loff_t offset, struct kvec *vec, int vlen,
1152 unsigned long *cnt, int stable,
1153 __be32 *verf)
1154{
1155 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1156 struct file *file = nf->nf_file;
1157 struct super_block *sb = file_inode(file)->i_sb;
1158 struct svc_export *exp;
1159 struct iov_iter iter;
1160 errseq_t since;
1161 __be32 nfserr;
1162 int host_err;
1163 int use_wgather;
1164 loff_t pos = offset;
1165 unsigned long exp_op_flags = 0;
1166 unsigned int pflags = current->flags;
1167 rwf_t flags = 0;
1168 bool restore_flags = false;
1169
1170 trace_nfsd_write_opened(rqstp, fhp, offset, *cnt);
1171
1172 if (sb->s_export_op)
1173 exp_op_flags = sb->s_export_op->flags;
1174
1175 if (test_bit(RQ_LOCAL, &rqstp->rq_flags) &&
1176 !(exp_op_flags & EXPORT_OP_REMOTE_FS)) {
1177 /*
1178 * We want throttling in balance_dirty_pages()
1179 * and shrink_inactive_list() to only consider
1180 * the backingdev we are writing to, so that nfs to
1181 * localhost doesn't cause nfsd to lock up due to all
1182 * the client's dirty pages or its congested queue.
1183 */
1184 current->flags |= PF_LOCAL_THROTTLE;
1185 restore_flags = true;
1186 }
1187
1188 exp = fhp->fh_export;
1189 use_wgather = (rqstp->rq_vers == 2) && EX_WGATHER(exp);
1190
1191 if (!EX_ISSYNC(exp))
1192 stable = NFS_UNSTABLE;
1193
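	/*
	 * A stable write (FILE_SYNC/DATA_SYNC) is normally implemented by
	 * passing RWF_SYNC to vfs_iter_write() so the data reaches stable
	 * storage before we reply; the old NFSv2 "wgather" heuristic
	 * instead defers the sync to wait_for_concurrent_writes() below.
	 */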
1194 if (stable && !use_wgather)
1195 flags |= RWF_SYNC;
1196
1197 iov_iter_kvec(&iter, ITER_SOURCE, vec, vlen, *cnt);
1198 since = READ_ONCE(file->f_wb_err);
1199 if (verf)
1200 nfsd_copy_write_verifier(verf, nn);
1201 host_err = vfs_iter_write(file, &iter, &pos, flags);
1202 if (host_err < 0) {
1203 commit_reset_write_verifier(nn, rqstp, host_err);
1204 goto out_nfserr;
1205 }
1206 *cnt = host_err;
1207 nfsd_stats_io_write_add(nn, exp, *cnt);
1208 fsnotify_modify(file);
1209 host_err = filemap_check_wb_err(file->f_mapping, since);
1210 if (host_err < 0)
1211 goto out_nfserr;
1212
1213 if (stable && use_wgather) {
1214 host_err = wait_for_concurrent_writes(file);
1215 if (host_err < 0)
1216 commit_reset_write_verifier(nn, rqstp, host_err);
1217 }
1218
1219out_nfserr:
1220 if (host_err >= 0) {
1221 trace_nfsd_write_io_done(rqstp, fhp, offset, *cnt);
1222 nfserr = nfs_ok;
1223 } else {
1224 trace_nfsd_write_err(rqstp, fhp, offset, host_err);
1225 nfserr = nfserrno(host_err);
1226 }
1227 if (restore_flags)
1228 current_restore_flags(pflags, PF_LOCAL_THROTTLE);
1229 return nfserr;
1230}
1231
1232/**
1233 * nfsd_read_splice_ok - check if spliced reading is supported
1234 * @rqstp: RPC transaction context
1235 *
1236 * Return values:
1237 * %true: nfsd_splice_read() may be used
1238 * %false: nfsd_splice_read() must not be used
1239 *
1240 * NFS READ normally uses splice to send data in-place. However the
1241 * data in cache can change after the reply's MIC is computed but
1242 * before the RPC reply is sent. To prevent the client from
1243 * rejecting the server-computed MIC in this somewhat rare case, do
1244 * not use splice with the GSS integrity and privacy services.
1245 */
1246bool nfsd_read_splice_ok(struct svc_rqst *rqstp)
1247{
1248 switch (svc_auth_flavor(rqstp)) {
1249 case RPC_AUTH_GSS_KRB5I:
1250 case RPC_AUTH_GSS_KRB5P:
1251 return false;
1252 }
1253 return true;
1254}
1255
1256/**
1257 * nfsd_read - Read data from a file
1258 * @rqstp: RPC transaction context
1259 * @fhp: file handle of file to be read
1260 * @offset: starting byte offset
1261 * @count: IN: requested number of bytes; OUT: number of bytes read
1262 * @eof: OUT: set non-zero if operation reached the end of the file
1263 *
1264 * The caller must verify that there is enough space in @rqstp.rq_res
1265 * to perform this operation.
1266 *
1267 * N.B. After this call fhp needs an fh_put
1268 *
1269 * Returns nfs_ok on success, otherwise an nfserr stat value is
1270 * returned.
1271 */
1272__be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
1273 loff_t offset, unsigned long *count, u32 *eof)
1274{
1275 struct nfsd_file *nf;
1276 struct file *file;
1277 __be32 err;
1278
1279 trace_nfsd_read_start(rqstp, fhp, offset, *count);
1280 err = nfsd_file_acquire_gc(rqstp, fhp, NFSD_MAY_READ, &nf);
1281 if (err)
1282 return err;
1283
1284 file = nf->nf_file;
1285 if (file->f_op->splice_read && nfsd_read_splice_ok(rqstp))
1286 err = nfsd_splice_read(rqstp, fhp, file, offset, count, eof);
1287 else
1288 err = nfsd_iter_read(rqstp, fhp, file, offset, count, 0, eof);
1289
1290 nfsd_file_put(nf);
1291 trace_nfsd_read_done(rqstp, fhp, offset, *count);
1292 return err;
1293}
1294
1295/*
1296 * Write data to a file.
1297 * The stable flag requests synchronous writes.
1298 * N.B. After this call fhp needs an fh_put
1299 */
1300__be32
1301nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
1302 struct kvec *vec, int vlen, unsigned long *cnt, int stable,
1303 __be32 *verf)
1304{
1305 struct nfsd_file *nf;
1306 __be32 err;
1307
1308 trace_nfsd_write_start(rqstp, fhp, offset, *cnt);
1309
1310 err = nfsd_file_acquire_gc(rqstp, fhp, NFSD_MAY_WRITE, &nf);
1311 if (err)
1312 goto out;
1313
1314 err = nfsd_vfs_write(rqstp, fhp, nf, offset, vec,
1315 vlen, cnt, stable, verf);
1316 nfsd_file_put(nf);
1317out:
1318 trace_nfsd_write_done(rqstp, fhp, offset, *cnt);
1319 return err;
1320}
1321
1322/**
1323 * nfsd_commit - Commit pending writes to stable storage
1324 * @rqstp: RPC request being processed
1325 * @fhp: NFS filehandle
1326 * @nf: target file
1327 * @offset: raw offset from beginning of file
1328 * @count: raw count of bytes to sync
1329 * @verf: filled in with the server's current write verifier
1330 *
1331 * Note: we guarantee that data that lies within the range specified
1332 * by the 'offset' and 'count' parameters will be synced. The server
1333 * is permitted to sync data that lies outside this range at the
1334 * same time.
1335 *
1336 * Unfortunately we cannot lock the file to make sure we return full WCC
1337 * data to the client, as locking happens lower down in the filesystem.
1338 *
1339 * Return values:
1340 * An nfsstat value in network byte order.
1341 */
1342__be32
1343nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
1344 u64 offset, u32 count, __be32 *verf)
1345{
1346 __be32 err = nfs_ok;
1347 u64 maxbytes;
1348 loff_t start, end;
1349 struct nfsd_net *nn;
1350
1351 /*
1352 * Convert the client-provided (offset, count) range to a
1353 * (start, end) range. If the client-provided range falls
1354 * outside the maximum file size of the underlying FS,
1355 * clamp the sync range appropriately.
1356 */
1357 start = 0;
1358 end = LLONG_MAX;
1359 maxbytes = (u64)fhp->fh_dentry->d_sb->s_maxbytes;
1360 if (offset < maxbytes) {
1361 start = offset;
1362 if (count && (offset + count - 1 < maxbytes))
1363 end = offset + count - 1;
1364 }
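
	/*
	 * So a COMMIT with offset 0 and count 0 (the common "commit
	 * everything" case) syncs the range [0, LLONG_MAX], while e.g.
	 * offset 4096, count 8192 syncs [4096, 12287].
	 */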
1365
1366 nn = net_generic(nf->nf_net, nfsd_net_id);
1367 if (EX_ISSYNC(fhp->fh_export)) {
1368 errseq_t since = READ_ONCE(nf->nf_file->f_wb_err);
1369 int err2;
1370
1371 err2 = vfs_fsync_range(nf->nf_file, start, end, 0);
1372 switch (err2) {
1373 case 0:
1374 nfsd_copy_write_verifier(verf, nn);
1375 err2 = filemap_check_wb_err(nf->nf_file->f_mapping,
1376 since);
1377 err = nfserrno(err2);
1378 break;
1379 case -EINVAL:
1380 err = nfserr_notsupp;
1381 break;
1382 default:
1383 commit_reset_write_verifier(nn, rqstp, err2);
1384 err = nfserrno(err2);
1385 }
1386 } else
1387 nfsd_copy_write_verifier(verf, nn);
1388
1389 return err;
1390}
1391
1392/**
1393 * nfsd_create_setattr - Set a created file's attributes
1394 * @rqstp: RPC transaction being executed
1395 * @fhp: NFS filehandle of parent directory
1396 * @resfhp: NFS filehandle of new object
1397 * @attrs: requested attributes of new object
1398 *
1399 * Returns nfs_ok on success, or an nfsstat in network byte order.
1400 */
1401__be32
1402nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
1403 struct svc_fh *resfhp, struct nfsd_attrs *attrs)
1404{
1405 struct iattr *iap = attrs->na_iattr;
1406 __be32 status;
1407
1408 /*
1409 * Mode has already been set by file creation.
1410 */
1411 iap->ia_valid &= ~ATTR_MODE;
1412
1413 /*
1414 * Setting uid/gid works only for root. Irix appears to
1415 * send along the gid on create when it tries to implement
1416 * setgid directories via NFS:
1417 */
1418 if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID))
1419 iap->ia_valid &= ~(ATTR_UID|ATTR_GID);
1420
1421 /*
1422 * Callers expect new file metadata to be committed even
1423 * if the attributes have not changed.
1424 */
1425 if (iap->ia_valid)
1426 status = nfsd_setattr(rqstp, resfhp, attrs, NULL);
1427 else
1428 status = nfserrno(commit_metadata(resfhp));
1429
1430 /*
1431 * Transactional filesystems had a chance to commit changes
1432 * for both parent and child simultaneously making the
1433 * following commit_metadata a noop in many cases.
1434 */
1435 if (!status)
1436 status = nfserrno(commit_metadata(fhp));
1437
1438 /*
1439 * Update the new filehandle to pick up the new attributes.
1440 */
1441 if (!status)
1442 status = fh_update(resfhp);
1443
1444 return status;
1445}
1446
1447/* The HP-UX client sometimes creates a file in mode 000 and then sets its
 1448 * size to 0. Setting the size to 0 may fail on some filesystems because
 1449 * the permission check requires WRITE permission, which mode 000 does not
 1450 * grant. We therefore ignore a resize to 0 on a just-created file, since
 1451 * its size is already 0.
 1452 *
 1453 * Call this only after vfs_create() has been called.
 1454 */
1455static void
1456nfsd_check_ignore_resizing(struct iattr *iap)
1457{
1458 if ((iap->ia_valid & ATTR_SIZE) && (iap->ia_size == 0))
1459 iap->ia_valid &= ~ATTR_SIZE;
1460}
1461
1462/* The parent directory should already be locked: */
1463__be32
1464nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
1465 struct nfsd_attrs *attrs,
1466 int type, dev_t rdev, struct svc_fh *resfhp)
1467{
1468 struct dentry *dentry, *dchild;
1469 struct inode *dirp;
1470 struct iattr *iap = attrs->na_iattr;
1471 __be32 err;
1472 int host_err;
1473
1474 dentry = fhp->fh_dentry;
1475 dirp = d_inode(dentry);
1476
1477 dchild = dget(resfhp->fh_dentry);
1478 err = nfsd_permission(rqstp, fhp->fh_export, dentry, NFSD_MAY_CREATE);
1479 if (err)
1480 goto out;
1481
1482 if (!(iap->ia_valid & ATTR_MODE))
1483 iap->ia_mode = 0;
1484 iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type;
1485
1486 if (!IS_POSIXACL(dirp))
1487 iap->ia_mode &= ~current_umask();
1488
1489 err = 0;
1490 switch (type) {
1491 case S_IFREG:
1492 host_err = vfs_create(&nop_mnt_idmap, dirp, dchild,
1493 iap->ia_mode, true);
1494 if (!host_err)
1495 nfsd_check_ignore_resizing(iap);
1496 break;
1497 case S_IFDIR:
1498 host_err = vfs_mkdir(&nop_mnt_idmap, dirp, dchild, iap->ia_mode);
1499 if (!host_err && unlikely(d_unhashed(dchild))) {
1500 struct dentry *d;
1501 d = lookup_one_len(dchild->d_name.name,
1502 dchild->d_parent,
1503 dchild->d_name.len);
1504 if (IS_ERR(d)) {
1505 host_err = PTR_ERR(d);
1506 break;
1507 }
1508 if (unlikely(d_is_negative(d))) {
1509 dput(d);
1510 err = nfserr_serverfault;
1511 goto out;
1512 }
1513 dput(resfhp->fh_dentry);
1514 resfhp->fh_dentry = dget(d);
1515 err = fh_update(resfhp);
1516 dput(dchild);
1517 dchild = d;
1518 if (err)
1519 goto out;
1520 }
1521 break;
1522 case S_IFCHR:
1523 case S_IFBLK:
1524 case S_IFIFO:
1525 case S_IFSOCK:
1526 host_err = vfs_mknod(&nop_mnt_idmap, dirp, dchild,
1527 iap->ia_mode, rdev);
1528 break;
1529 default:
1530 printk(KERN_WARNING "nfsd: bad file type %o in nfsd_create\n",
1531 type);
1532 host_err = -EINVAL;
1533 }
1534 if (host_err < 0)
1535 goto out_nfserr;
1536
1537 err = nfsd_create_setattr(rqstp, fhp, resfhp, attrs);
1538
1539out:
1540 dput(dchild);
1541 return err;
1542
1543out_nfserr:
1544 err = nfserrno(host_err);
1545 goto out;
1546}
1547
1548/*
1549 * Create a filesystem object (regular, directory, special).
1550 * Note that the parent directory is left locked.
1551 *
1552 * N.B. Every call to nfsd_create needs an fh_put for _both_ fhp and resfhp
1553 */
1554__be32
1555nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
1556 char *fname, int flen, struct nfsd_attrs *attrs,
1557 int type, dev_t rdev, struct svc_fh *resfhp)
1558{
1559 struct dentry *dentry, *dchild = NULL;
1560 __be32 err;
1561 int host_err;
1562
1563 if (isdotent(fname, flen))
1564 return nfserr_exist;
1565
1566 err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_NOP);
1567 if (err)
1568 return err;
1569
1570 dentry = fhp->fh_dentry;
1571
1572 host_err = fh_want_write(fhp);
1573 if (host_err)
1574 return nfserrno(host_err);
1575
1576 inode_lock_nested(dentry->d_inode, I_MUTEX_PARENT);
1577 dchild = lookup_one_len(fname, dentry, flen);
1578 host_err = PTR_ERR(dchild);
1579 if (IS_ERR(dchild)) {
1580 err = nfserrno(host_err);
1581 goto out_unlock;
1582 }
1583 err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
1584 /*
1585 * We unconditionally drop our ref to dchild as fh_compose will have
1586 * already grabbed its own ref for it.
1587 */
1588 dput(dchild);
1589 if (err)
1590 goto out_unlock;
1591 err = fh_fill_pre_attrs(fhp);
1592 if (err != nfs_ok)
1593 goto out_unlock;
1594 err = nfsd_create_locked(rqstp, fhp, attrs, type, rdev, resfhp);
1595 fh_fill_post_attrs(fhp);
1596out_unlock:
1597 inode_unlock(dentry->d_inode);
1598 return err;
1599}
1600
1601/*
1602 * Read a symlink. On entry, *lenp must contain the maximum path length that
1603 * fits into the buffer. On return, it contains the true length.
1604 * N.B. After this call fhp needs an fh_put
1605 */
1606__be32
1607nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
1608{
1609 __be32 err;
1610 const char *link;
1611 struct path path;
1612 DEFINE_DELAYED_CALL(done);
1613 int len;
1614
1615 err = fh_verify(rqstp, fhp, S_IFLNK, NFSD_MAY_NOP);
1616 if (unlikely(err))
1617 return err;
1618
1619 path.mnt = fhp->fh_export->ex_path.mnt;
1620 path.dentry = fhp->fh_dentry;
1621
1622 if (unlikely(!d_is_symlink(path.dentry)))
1623 return nfserr_inval;
1624
1625 touch_atime(&path);
1626
1627 link = vfs_get_link(path.dentry, &done);
1628 if (IS_ERR(link))
1629 return nfserrno(PTR_ERR(link));
1630
1631 len = strlen(link);
1632 if (len < *lenp)
1633 *lenp = len;
1634 memcpy(buf, link, *lenp);
1635 do_delayed_call(&done);
1636 return 0;
1637}
1638
1639/**
1640 * nfsd_symlink - Create a symlink and look up its inode
1641 * @rqstp: RPC transaction being executed
1642 * @fhp: NFS filehandle of parent directory
1643 * @fname: filename of the new symlink
1644 * @flen: length of @fname
1645 * @path: content of the new symlink (NUL-terminated)
1646 * @attrs: requested attributes of new object
1647 * @resfhp: NFS filehandle of new object
1648 *
1649 * N.B. After this call _both_ fhp and resfhp need an fh_put
1650 *
1651 * Returns nfs_ok on success, or an nfsstat in network byte order.
1652 */
1653__be32
1654nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
1655 char *fname, int flen,
1656 char *path, struct nfsd_attrs *attrs,
1657 struct svc_fh *resfhp)
1658{
1659 struct dentry *dentry, *dnew;
1660 __be32 err, cerr;
1661 int host_err;
1662
1663 err = nfserr_noent;
1664 if (!flen || path[0] == '\0')
1665 goto out;
1666 err = nfserr_exist;
1667 if (isdotent(fname, flen))
1668 goto out;
1669
1670 err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
1671 if (err)
1672 goto out;
1673
1674 host_err = fh_want_write(fhp);
1675 if (host_err) {
1676 err = nfserrno(host_err);
1677 goto out;
1678 }
1679
1680 dentry = fhp->fh_dentry;
1681 inode_lock_nested(dentry->d_inode, I_MUTEX_PARENT);
1682 dnew = lookup_one_len(fname, dentry, flen);
1683 if (IS_ERR(dnew)) {
1684 err = nfserrno(PTR_ERR(dnew));
1685 inode_unlock(dentry->d_inode);
1686 goto out_drop_write;
1687 }
1688 err = fh_fill_pre_attrs(fhp);
1689 if (err != nfs_ok)
1690 goto out_unlock;
1691 host_err = vfs_symlink(&nop_mnt_idmap, d_inode(dentry), dnew, path);
1692 err = nfserrno(host_err);
1693 cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp);
1694 if (!err)
1695 nfsd_create_setattr(rqstp, fhp, resfhp, attrs);
1696 fh_fill_post_attrs(fhp);
1697out_unlock:
1698 inode_unlock(dentry->d_inode);
1699 if (!err)
1700 err = nfserrno(commit_metadata(fhp));
1701 dput(dnew);
 1702 if (err == 0) err = cerr;
1703out_drop_write:
1704 fh_drop_write(fhp);
1705out:
1706 return err;
1707}
1708
1709/*
1710 * Create a hardlink
1711 * N.B. After this call _both_ ffhp and tfhp need an fh_put
1712 */
1713__be32
1714nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
1715 char *name, int len, struct svc_fh *tfhp)
1716{
1717 struct dentry *ddir, *dnew, *dold;
1718 struct inode *dirp;
1719 __be32 err;
1720 int host_err;
1721
1722 err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_CREATE);
1723 if (err)
1724 goto out;
1725 err = fh_verify(rqstp, tfhp, 0, NFSD_MAY_NOP);
1726 if (err)
1727 goto out;
1728 err = nfserr_isdir;
1729 if (d_is_dir(tfhp->fh_dentry))
1730 goto out;
1731 err = nfserr_perm;
1732 if (!len)
1733 goto out;
1734 err = nfserr_exist;
1735 if (isdotent(name, len))
1736 goto out;
1737
1738 host_err = fh_want_write(tfhp);
1739 if (host_err) {
1740 err = nfserrno(host_err);
1741 goto out;
1742 }
1743
1744 ddir = ffhp->fh_dentry;
1745 dirp = d_inode(ddir);
1746 inode_lock_nested(dirp, I_MUTEX_PARENT);
1747
1748 dnew = lookup_one_len(name, ddir, len);
1749 if (IS_ERR(dnew)) {
1750 err = nfserrno(PTR_ERR(dnew));
1751 goto out_unlock;
1752 }
1753
1754 dold = tfhp->fh_dentry;
1755
1756 err = nfserr_noent;
1757 if (d_really_is_negative(dold))
1758 goto out_dput;
1759 err = fh_fill_pre_attrs(ffhp);
1760 if (err != nfs_ok)
1761 goto out_dput;
1762 host_err = vfs_link(dold, &nop_mnt_idmap, dirp, dnew, NULL);
1763 fh_fill_post_attrs(ffhp);
1764 inode_unlock(dirp);
1765 if (!host_err) {
1766 err = nfserrno(commit_metadata(ffhp));
1767 if (!err)
1768 err = nfserrno(commit_metadata(tfhp));
1769 } else {
1770 if (host_err == -EXDEV && rqstp->rq_vers == 2)
1771 err = nfserr_acces;
1772 else
1773 err = nfserrno(host_err);
1774 }
1775 dput(dnew);
1776out_drop_write:
1777 fh_drop_write(tfhp);
1778out:
1779 return err;
1780
1781out_dput:
1782 dput(dnew);
1783out_unlock:
1784 inode_unlock(dirp);
1785 goto out_drop_write;
1786}
1787
1788static void
1789nfsd_close_cached_files(struct dentry *dentry)
1790{
1791 struct inode *inode = d_inode(dentry);
1792
1793 if (inode && S_ISREG(inode->i_mode))
1794 nfsd_file_close_inode_sync(inode);
1795}
1796
1797static bool
1798nfsd_has_cached_files(struct dentry *dentry)
1799{
1800 bool ret = false;
1801 struct inode *inode = d_inode(dentry);
1802
1803 if (inode && S_ISREG(inode->i_mode))
1804 ret = nfsd_file_is_cached(inode);
1805 return ret;
1806}
1807
1808/*
1809 * Rename a file
1810 * N.B. After this call _both_ ffhp and tfhp need an fh_put
1811 */
1812__be32
1813nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
1814 struct svc_fh *tfhp, char *tname, int tlen)
1815{
1816 struct dentry *fdentry, *tdentry, *odentry, *ndentry, *trap;
1817 struct inode *fdir, *tdir;
1818 __be32 err;
1819 int host_err;
1820 bool close_cached = false;
1821
1822 err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_REMOVE);
1823 if (err)
1824 goto out;
1825 err = fh_verify(rqstp, tfhp, S_IFDIR, NFSD_MAY_CREATE);
1826 if (err)
1827 goto out;
1828
1829 fdentry = ffhp->fh_dentry;
1830 fdir = d_inode(fdentry);
1831
1832 tdentry = tfhp->fh_dentry;
1833 tdir = d_inode(tdentry);
1834
1835 err = nfserr_perm;
1836 if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
1837 goto out;
1838
1839 err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
1840 if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
1841 goto out;
1842 if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
1843 goto out;
1844
1845retry:
1846 host_err = fh_want_write(ffhp);
1847 if (host_err) {
1848 err = nfserrno(host_err);
1849 goto out;
1850 }
1851
1852 trap = lock_rename(tdentry, fdentry);
1853 if (IS_ERR(trap)) {
1854 err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
1855 goto out_want_write;
1856 }
1857 err = fh_fill_pre_attrs(ffhp);
1858 if (err != nfs_ok)
1859 goto out_unlock;
1860 err = fh_fill_pre_attrs(tfhp);
1861 if (err != nfs_ok)
1862 goto out_unlock;
1863
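	/*
	 * "trap" is the dentry returned by lock_rename() when one parent
	 * directory is an ancestor of the other.  If either the source or
	 * the target name resolves to it, the rename would move a
	 * directory into its own subtree (or remove an ancestor of the
	 * source), so it is rejected below with -EINVAL / -ENOTEMPTY.
	 */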
1864 odentry = lookup_one_len(fname, fdentry, flen);
1865 host_err = PTR_ERR(odentry);
1866 if (IS_ERR(odentry))
1867 goto out_nfserr;
1868
1869 host_err = -ENOENT;
1870 if (d_really_is_negative(odentry))
1871 goto out_dput_old;
1872 host_err = -EINVAL;
1873 if (odentry == trap)
1874 goto out_dput_old;
1875
1876 ndentry = lookup_one_len(tname, tdentry, tlen);
1877 host_err = PTR_ERR(ndentry);
1878 if (IS_ERR(ndentry))
1879 goto out_dput_old;
1880 host_err = -ENOTEMPTY;
1881 if (ndentry == trap)
1882 goto out_dput_new;
1883
1884 if ((ndentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK) &&
1885 nfsd_has_cached_files(ndentry)) {
1886 close_cached = true;
1887 goto out_dput_old;
1888 } else {
1889 struct renamedata rd = {
1890 .old_mnt_idmap = &nop_mnt_idmap,
1891 .old_dir = fdir,
1892 .old_dentry = odentry,
1893 .new_mnt_idmap = &nop_mnt_idmap,
1894 .new_dir = tdir,
1895 .new_dentry = ndentry,
1896 };
1897 int retries;
1898
1899 for (retries = 1;;) {
1900 host_err = vfs_rename(&rd);
1901 if (host_err != -EAGAIN || !retries--)
1902 break;
1903 if (!nfsd_wait_for_delegreturn(rqstp, d_inode(odentry)))
1904 break;
1905 }
1906 if (!host_err) {
1907 host_err = commit_metadata(tfhp);
1908 if (!host_err)
1909 host_err = commit_metadata(ffhp);
1910 }
1911 }
1912 out_dput_new:
1913 dput(ndentry);
1914 out_dput_old:
1915 dput(odentry);
1916 out_nfserr:
1917 err = nfserrno(host_err);
1918
1919 if (!close_cached) {
1920 fh_fill_post_attrs(ffhp);
1921 fh_fill_post_attrs(tfhp);
1922 }
1923out_unlock:
1924 unlock_rename(tdentry, fdentry);
1925out_want_write:
1926 fh_drop_write(ffhp);
1927
1928 /*
1929 * If the target dentry has cached open files, then we need to
1930 * try to close them prior to doing the rename. Final fput
1931 * shouldn't be done with locks held however, so we delay it
1932 * until this point and then reattempt the whole shebang.
1933 */
1934 if (close_cached) {
1935 close_cached = false;
1936 nfsd_close_cached_files(ndentry);
1937 dput(ndentry);
1938 goto retry;
1939 }
1940out:
1941 return err;
1942}
1943
1944/*
1945 * Unlink a file or directory
1946 * N.B. After this call fhp needs an fh_put
1947 */
1948__be32
1949nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
1950 char *fname, int flen)
1951{
1952 struct dentry *dentry, *rdentry;
1953 struct inode *dirp;
1954 struct inode *rinode;
1955 __be32 err;
1956 int host_err;
1957
1958 err = nfserr_acces;
1959 if (!flen || isdotent(fname, flen))
1960 goto out;
1961 err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_REMOVE);
1962 if (err)
1963 goto out;
1964
1965 host_err = fh_want_write(fhp);
1966 if (host_err)
1967 goto out_nfserr;
1968
1969 dentry = fhp->fh_dentry;
1970 dirp = d_inode(dentry);
1971 inode_lock_nested(dirp, I_MUTEX_PARENT);
1972
1973 rdentry = lookup_one_len(fname, dentry, flen);
1974 host_err = PTR_ERR(rdentry);
1975 if (IS_ERR(rdentry))
1976 goto out_unlock;
1977
1978 if (d_really_is_negative(rdentry)) {
1979 dput(rdentry);
1980 host_err = -ENOENT;
1981 goto out_unlock;
1982 }
1983 rinode = d_inode(rdentry);
1984 err = fh_fill_pre_attrs(fhp);
1985 if (err != nfs_ok)
1986 goto out_unlock;
1987
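	/*
	 * Pin the victim inode so that the final iput() below happens
	 * after the parent directory lock has been released; for a
	 * regular file that is where the now-unlinked file's data is
	 * finally truncated.
	 */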
1988 ihold(rinode);
1989 if (!type)
1990 type = d_inode(rdentry)->i_mode & S_IFMT;
1991
1992 if (type != S_IFDIR) {
1993 int retries;
1994
1995 if (rdentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK)
1996 nfsd_close_cached_files(rdentry);
1997
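		/*
		 * As with rename, vfs_unlink() returns -EAGAIN while a
		 * delegation on the victim inode is outstanding; wait for
		 * the client to return it and retry once.
		 */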
1998 for (retries = 1;;) {
1999 host_err = vfs_unlink(&nop_mnt_idmap, dirp, rdentry, NULL);
2000 if (host_err != -EAGAIN || !retries--)
2001 break;
2002 if (!nfsd_wait_for_delegreturn(rqstp, rinode))
2003 break;
2004 }
2005 } else {
2006 host_err = vfs_rmdir(&nop_mnt_idmap, dirp, rdentry);
2007 }
2008 fh_fill_post_attrs(fhp);
2009
2010 inode_unlock(dirp);
2011 if (!host_err)
2012 host_err = commit_metadata(fhp);
2013 dput(rdentry);
2014 iput(rinode); /* truncate the inode here */
2015
2016out_drop_write:
2017 fh_drop_write(fhp);
2018out_nfserr:
2019 if (host_err == -EBUSY) {
2020 /* name is mounted-on. There is no perfect
2021 * error status.
2022 */
2023 if (nfsd_v4client(rqstp))
2024 err = nfserr_file_open;
2025 else
2026 err = nfserr_acces;
2027 } else {
2028 err = nfserrno(host_err);
2029 }
2030out:
2031 return err;
2032out_unlock:
2033 inode_unlock(dirp);
2034 goto out_drop_write;
2035}
2036
2037/*
2038 * We do this buffering because we must not call back into the file
2039 * system's ->lookup() method from the filldir callback. That may well
2040 * deadlock a number of file systems.
2041 *
2042 * This is based heavily on the implementation of same in XFS.
2043 */
2044struct buffered_dirent {
2045 u64 ino;
2046 loff_t offset;
2047 int namlen;
2048 unsigned int d_type;
2049 char name[];
2050};
2051
2052struct readdir_data {
2053 struct dir_context ctx;
2054 char *dirent;
2055 size_t used;
2056 int full;
2057};
2058
2059static bool nfsd_buffered_filldir(struct dir_context *ctx, const char *name,
2060 int namlen, loff_t offset, u64 ino,
2061 unsigned int d_type)
2062{
2063 struct readdir_data *buf =
2064 container_of(ctx, struct readdir_data, ctx);
2065 struct buffered_dirent *de = (void *)(buf->dirent + buf->used);
2066 unsigned int reclen;
2067
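	/*
	 * Each buffered entry is a struct buffered_dirent header followed
	 * by the name, rounded up to u64 alignment. Stop filling once the
	 * next entry would overflow the single page used as the buffer.
	 */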
2068 reclen = ALIGN(sizeof(struct buffered_dirent) + namlen, sizeof(u64));
2069 if (buf->used + reclen > PAGE_SIZE) {
2070 buf->full = 1;
2071 return false;
2072 }
2073
2074 de->namlen = namlen;
2075 de->offset = offset;
2076 de->ino = ino;
2077 de->d_type = d_type;
2078 memcpy(de->name, name, namlen);
2079 buf->used += reclen;
2080
2081 return true;
2082}
2083
2084static __be32 nfsd_buffered_readdir(struct file *file, struct svc_fh *fhp,
2085 nfsd_filldir_t func, struct readdir_cd *cdp,
2086 loff_t *offsetp)
2087{
2088 struct buffered_dirent *de;
2089 int host_err;
2090 int size;
2091 loff_t offset;
2092 struct readdir_data buf = {
2093 .ctx.actor = nfsd_buffered_filldir,
2094 .dirent = (void *)__get_free_page(GFP_KERNEL)
2095 };
2096
2097 if (!buf.dirent)
2098 return nfserrno(-ENOMEM);
2099
2100 offset = *offsetp;
2101
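	/*
	 * Repeatedly fill the page-sized buffer with iterate_dir(), then
	 * replay the buffered entries to the XDR encoder through @func,
	 * until the directory is exhausted or the encoder stops accepting
	 * entries.
	 */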
2102 while (1) {
2103 unsigned int reclen;
2104
2105 cdp->err = nfserr_eof; /* will be cleared on successful read */
2106 buf.used = 0;
2107 buf.full = 0;
2108
2109 host_err = iterate_dir(file, &buf.ctx);
2110 if (buf.full)
2111 host_err = 0;
2112
2113 if (host_err < 0)
2114 break;
2115
2116 size = buf.used;
2117
2118 if (!size)
2119 break;
2120
2121 de = (struct buffered_dirent *)buf.dirent;
2122 while (size > 0) {
2123 offset = de->offset;
2124
2125 if (func(cdp, de->name, de->namlen, de->offset,
2126 de->ino, de->d_type))
2127 break;
2128
2129 if (cdp->err != nfs_ok)
2130 break;
2131
2132 trace_nfsd_dirent(fhp, de->ino, de->name, de->namlen);
2133
2134 reclen = ALIGN(sizeof(*de) + de->namlen,
2135 sizeof(u64));
2136 size -= reclen;
2137 de = (struct buffered_dirent *)((char *)de + reclen);
2138 }
2139 if (size > 0) /* We bailed out early */
2140 break;
2141
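		/*
		 * All buffered entries were consumed: record the current
		 * directory position so the returned cookie reflects how
		 * far iterate_dir() actually advanced.
		 */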
2142 offset = vfs_llseek(file, 0, SEEK_CUR);
2143 }
2144
2145 free_page((unsigned long)(buf.dirent));
2146
2147 if (host_err)
2148 return nfserrno(host_err);
2149
2150 *offsetp = offset;
2151 return cdp->err;
2152}
2153
2154/**
2155 * nfsd_readdir - Read entries from a directory
2156 * @rqstp: RPC transaction context
2157 * @fhp: NFS file handle of directory to be read
2158 * @offsetp: OUT: seek offset of final entry that was read
2159 * @cdp: OUT: an eof error value
2160 * @func: entry filler actor
2161 *
2162 * This implementation ignores the NFSv3/4 verifier cookie.
2163 *
2164 * NB: normal system calls hold file->f_pos_lock when calling
2165 * ->iterate_shared and ->llseek, but nfsd_readdir() does not.
2166 * Because the struct file acquired here is not visible to other
2167 * threads, its internal state does not need mutex protection.
2168 *
2169 * Returns nfs_ok on success, otherwise an nfsstat code is
2170 * returned.
2171 */
2172__be32
2173nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp,
2174 struct readdir_cd *cdp, nfsd_filldir_t func)
2175{
2176 __be32 err;
2177 struct file *file;
2178 loff_t offset = *offsetp;
2179 int may_flags = NFSD_MAY_READ;
2180
2181 /* NFSv2 only supports 32 bit cookies */
2182 if (rqstp->rq_vers > 2)
2183 may_flags |= NFSD_MAY_64BIT_COOKIE;
2184
2185 err = nfsd_open(rqstp, fhp, S_IFDIR, may_flags, &file);
2186 if (err)
2187 goto out;
2188
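	/*
	 * Position the directory at the client-supplied cookie before
	 * reading; a negative return from vfs_llseek() is a host errno
	 * and is mapped to an nfsstat below.
	 */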
2189 offset = vfs_llseek(file, offset, SEEK_SET);
2190 if (offset < 0) {
2191 err = nfserrno((int)offset);
2192 goto out_close;
2193 }
2194
2195 err = nfsd_buffered_readdir(file, fhp, func, cdp, offsetp);
2196
2197 if (err == nfserr_eof || err == nfserr_toosmall)
2198 err = nfs_ok; /* can still be found in ->err */
2199out_close:
2200 nfsd_filp_close(file);
2201out:
2202 return err;
2203}
2204
2205/**
2206 * nfsd_filp_close - close a file synchronously
2207 * @fp: the file to close
2208 *
2209 * nfsd_filp_close() is similar in behaviour to filp_close().
2210 * The difference is that if this is the final close on the
2211 * file, that finalisation happens immediately, rather than
2212 * being handed over to a work queue, as is the case for
2213 * filp_close().
2214 * When a user-space process closes a file (even when using
2215 * filp_close()) the finalisation happens before returning to
2216 * userspace, so it is effectively synchronous. When a kernel thread
2217 * uses filp_close(), on the other hand, the handling is completely
2218 * asynchronous. This means that any cost imposed by that finalisation
2219 * is not imposed on the nfsd thread, and nfsd could potentially
2220 * close files more quickly than the work queue finalises the close,
2221 * which would lead to unbounded growth in the queue.
2222 *
2223 * In some contexts it is not safe to synchronously wait for
2224 * close finalisation (see comment for __fput_sync()), but nfsd
2225 * does not match those contexts. In particular it does not, at the
2226 * time that this function is called, hold any locks, and no finalisation
2227 * of any file, socket, or device driver would have any cause to wait
2228 * for nfsd to make progress.
2229 */
2230void nfsd_filp_close(struct file *fp)
2231{
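	/*
	 * Take a temporary reference so the fput() inside filp_close() is
	 * not the final one and cannot defer finalisation to a work queue;
	 * the reference is then dropped synchronously in this thread by
	 * __fput_sync().
	 */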
2232 get_file(fp);
2233 filp_close(fp, NULL);
2234 __fput_sync(fp);
2235}
2236
2237/*
2238 * Get file system stats
2239 * N.B. After this call fhp needs an fh_put
2240 */
2241__be32
2242nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, int access)
2243{
2244 __be32 err;
2245
2246 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP | access);
2247 if (!err) {
2248 struct path path = {
2249 .mnt = fhp->fh_export->ex_path.mnt,
2250 .dentry = fhp->fh_dentry,
2251 };
2252 if (vfs_statfs(&path, stat))
2253 err = nfserr_io;
2254 }
2255 return err;
2256}
2257
2258static int exp_rdonly(struct svc_rqst *rqstp, struct svc_export *exp)
2259{
2260 return nfsexp_flags(rqstp, exp) & NFSEXP_READONLY;
2261}
2262
2263#ifdef CONFIG_NFSD_V4
2264/*
2265 * Helper function to translate error numbers. In the case of xattr operations,
2266 * some error codes need to be translated outside of the standard translations.
2267 *
2268 * ENODATA needs to be translated to nfserr_noxattr.
2269 * E2BIG to nfserr_xattr2big.
2270 *
2271 * Additionally, vfs_listxattr can return -ERANGE. This means that the
2272 * file has too many extended attributes to retrieve inside an
2273 * XATTR_LIST_MAX sized buffer. This is a bug in the xattr implementation:
2274 * filesystems will allow the adding of extended attributes until they hit
2275 * their own internal limit. This limit may be larger than XATTR_LIST_MAX.
2276 * So, at that point, the attributes are present and valid, but can't
2277 * be retrieved using listxattr, since the upper level xattr code enforces
2278 * the XATTR_LIST_MAX limit.
2279 *
2280 * This bug means that we need to deal with listxattr returning -ERANGE. The
2281 * best mapping is to return TOOSMALL.
2282 */
2283static __be32
2284nfsd_xattr_errno(int err)
2285{
2286 switch (err) {
2287 case -ENODATA:
2288 return nfserr_noxattr;
2289 case -E2BIG:
2290 return nfserr_xattr2big;
2291 case -ERANGE:
2292 return nfserr_toosmall;
2293 }
2294 return nfserrno(err);
2295}
2296
2297/*
2298 * Retrieve the specified user extended attribute. To avoid always
2299 * having to allocate the maximum size (since we are not getting
2300 * a maximum size from the RPC), do a probe + alloc. Hold a reader
2301 * lock on i_rwsem to prevent the extended attribute from changing
2302 * size while we're doing this.
2303 */
2304__be32
2305nfsd_getxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name,
2306 void **bufp, int *lenp)
2307{
2308 ssize_t len;
2309 __be32 err;
2310 char *buf;
2311 struct inode *inode;
2312 struct dentry *dentry;
2313
2314 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_READ);
2315 if (err)
2316 return err;
2317
2318 err = nfs_ok;
2319 dentry = fhp->fh_dentry;
2320 inode = d_inode(dentry);
2321
2322 inode_lock_shared(inode);
2323
2324 len = vfs_getxattr(&nop_mnt_idmap, dentry, name, NULL, 0);
2325
2326 /*
2327 * Zero-length attribute, just return.
2328 */
2329 if (len == 0) {
2330 *bufp = NULL;
2331 *lenp = 0;
2332 goto out;
2333 }
2334
2335 if (len < 0) {
2336 err = nfsd_xattr_errno(len);
2337 goto out;
2338 }
2339
2340 if (len > *lenp) {
2341 err = nfserr_toosmall;
2342 goto out;
2343 }
2344
2345 buf = kvmalloc(len, GFP_KERNEL);
2346 if (buf == NULL) {
2347 err = nfserr_jukebox;
2348 goto out;
2349 }
2350
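	/*
	 * Fetch the value into the buffer sized by the probe above; the
	 * shared inode lock prevents the attribute from growing in the
	 * meantime.
	 */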
2351 len = vfs_getxattr(&nop_mnt_idmap, dentry, name, buf, len);
2352 if (len <= 0) {
2353 kvfree(buf);
2354 buf = NULL;
2355 err = nfsd_xattr_errno(len);
2356 }
2357
2358 *lenp = len;
2359 *bufp = buf;
2360
2361out:
2362 inode_unlock_shared(inode);
2363
2364 return err;
2365}
2366
2367/*
2368 * Retrieve the xattr names. Since we can't know how many are
2369 * user extended attributes, we must get all attributes here,
2370 * and have the XDR encoder filter out the "user." ones.
2371 *
2372 * While this could always just allocate an XATTR_LIST_MAX
2373 * buffer, that's a waste, so do a probe + allocate. To
2374 * avoid any changes between the probe and allocate, wrap
2375 * this in inode_lock.
2376 */
2377__be32
2378nfsd_listxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char **bufp,
2379 int *lenp)
2380{
2381 ssize_t len;
2382 __be32 err;
2383 char *buf;
2384 struct inode *inode;
2385 struct dentry *dentry;
2386
2387 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_READ);
2388 if (err)
2389 return err;
2390
2391 dentry = fhp->fh_dentry;
2392 inode = d_inode(dentry);
2393 *lenp = 0;
2394
2395 inode_lock_shared(inode);
2396
2397 len = vfs_listxattr(dentry, NULL, 0);
2398 if (len <= 0) {
2399 err = nfsd_xattr_errno(len);
2400 goto out;
2401 }
2402
2403 if (len > XATTR_LIST_MAX) {
2404 err = nfserr_xattr2big;
2405 goto out;
2406 }
2407
2408 buf = kvmalloc(len, GFP_KERNEL);
2409 if (buf == NULL) {
2410 err = nfserr_jukebox;
2411 goto out;
2412 }
2413
2414 len = vfs_listxattr(dentry, buf, len);
2415 if (len <= 0) {
2416 kvfree(buf);
2417 err = nfsd_xattr_errno(len);
2418 goto out;
2419 }
2420
2421 *lenp = len;
2422 *bufp = buf;
2423
2424 err = nfs_ok;
2425out:
2426 inode_unlock_shared(inode);
2427
2428 return err;
2429}
2430
2431/**
2432 * nfsd_removexattr - Remove an extended attribute
2433 * @rqstp: RPC transaction being executed
2434 * @fhp: NFS filehandle of object with xattr to remove
2435 * @name: name of xattr to remove (NUL-terminated)
2436 *
2437 * Pass in a NULL pointer for delegated_inode, and let the client deal
2438 * with NFS4ERR_DELAY (same as with e.g. setattr and remove).
2439 *
2440 * Returns nfs_ok on success, or an nfsstat in network byte order.
2441 */
2442__be32
2443nfsd_removexattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name)
2444{
2445 __be32 err;
2446 int ret;
2447
2448 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_WRITE);
2449 if (err)
2450 return err;
2451
2452 ret = fh_want_write(fhp);
2453 if (ret)
2454 return nfserrno(ret);
2455
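	/*
	 * Hold the inode lock across the pre-attribute sample, the removal
	 * and the post-attribute sample so the change information returned
	 * to the client is consistent.
	 */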
2456 inode_lock(fhp->fh_dentry->d_inode);
2457 err = fh_fill_pre_attrs(fhp);
2458 if (err != nfs_ok)
2459 goto out_unlock;
2460 ret = __vfs_removexattr_locked(&nop_mnt_idmap, fhp->fh_dentry,
2461 name, NULL);
2462 err = nfsd_xattr_errno(ret);
2463 fh_fill_post_attrs(fhp);
2464out_unlock:
2465 inode_unlock(fhp->fh_dentry->d_inode);
2466 fh_drop_write(fhp);
2467
2468 return err;
2469}
2470
2471__be32
2472nfsd_setxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name,
2473 void *buf, u32 len, u32 flags)
2474{
2475 __be32 err;
2476 int ret;
2477
2478 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_WRITE);
2479 if (err)
2480 return err;
2481
2482 ret = fh_want_write(fhp);
2483 if (ret)
2484 return nfserrno(ret);
2485 inode_lock(fhp->fh_dentry->d_inode);
2486 err = fh_fill_pre_attrs(fhp);
2487 if (err != nfs_ok)
2488 goto out_unlock;
2489 ret = __vfs_setxattr_locked(&nop_mnt_idmap, fhp->fh_dentry,
2490 name, buf, len, flags, NULL);
2491 fh_fill_post_attrs(fhp);
2492 err = nfsd_xattr_errno(ret);
2493out_unlock:
2494 inode_unlock(fhp->fh_dentry->d_inode);
2495 fh_drop_write(fhp);
2496 return err;
2497}
2498#endif
2499
2500/*
2501 * Check for a user's access permissions to this inode.
2502 */
2503__be32
2504nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
2505 struct dentry *dentry, int acc)
2506{
2507 struct inode *inode = d_inode(dentry);
2508 int err;
2509
2510 if ((acc & NFSD_MAY_MASK) == NFSD_MAY_NOP)
2511 return 0;
2512#if 0
2513 dprintk("nfsd: permission 0x%x%s%s%s%s%s%s%s mode 0%o%s%s%s\n",
2514 acc,
2515 (acc & NFSD_MAY_READ)? " read" : "",
2516 (acc & NFSD_MAY_WRITE)? " write" : "",
2517 (acc & NFSD_MAY_EXEC)? " exec" : "",
2518 (acc & NFSD_MAY_SATTR)? " sattr" : "",
2519 (acc & NFSD_MAY_TRUNC)? " trunc" : "",
2520 (acc & NFSD_MAY_LOCK)? " lock" : "",
2521 (acc & NFSD_MAY_OWNER_OVERRIDE)? " owneroverride" : "",
2522 inode->i_mode,
2523 IS_IMMUTABLE(inode)? " immut" : "",
2524 IS_APPEND(inode)? " append" : "",
2525 __mnt_is_readonly(exp->ex_path.mnt)? " ro" : "");
2526 dprintk(" owner %d/%d user %d/%d\n",
2527 inode->i_uid, inode->i_gid, current_fsuid(), current_fsgid());
2528#endif
2529
2530 /* Normally we reject any write/sattr etc access on a read-only file
2531 * system. But if it is IRIX doing a check on write-access for a
2532 * device special file, we ignore rofs.
2533 */
2534 if (!(acc & NFSD_MAY_LOCAL_ACCESS))
2535 if (acc & (NFSD_MAY_WRITE | NFSD_MAY_SATTR | NFSD_MAY_TRUNC)) {
2536 if (exp_rdonly(rqstp, exp) ||
2537 __mnt_is_readonly(exp->ex_path.mnt))
2538 return nfserr_rofs;
2539 if (/* (acc & NFSD_MAY_WRITE) && */ IS_IMMUTABLE(inode))
2540 return nfserr_perm;
2541 }
2542 if ((acc & NFSD_MAY_TRUNC) && IS_APPEND(inode))
2543 return nfserr_perm;
2544
2545 if (acc & NFSD_MAY_LOCK) {
2546 /* If we cannot rely on authentication in NLM requests,
2547 * just allow locks, otherwise require read permission, or
2548 * ownership
2549 */
2550 if (exp->ex_flags & NFSEXP_NOAUTHNLM)
2551 return 0;
2552 else
2553 acc = NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE;
2554 }
2555 /*
2556 * The file owner always gets access permission for accesses that
2557 * would normally be checked at open time. This is to make
2558 * file access work even when the client has done a fchmod(fd, 0).
2559 *
2560 * However, `cp foo bar' should fail nevertheless when bar is
2561 * readonly. A sensible way to do this might be to reject all
2562 * attempts to truncate a read-only file, because a creat() call
2563 * always implies file truncation.
2564 * ... but this isn't really fair. A process may reasonably call
2565 * ftruncate on an open file descriptor on a file with perm 000.
2566 * We must trust the client to do permission checking - using "ACCESS"
2567 * with NFSv3.
2568 */
2569 if ((acc & NFSD_MAY_OWNER_OVERRIDE) &&
2570 uid_eq(inode->i_uid, current_fsuid()))
2571 return 0;
2572
2573 /* This assumes NFSD_MAY_{READ,WRITE,EXEC} == MAY_{READ,WRITE,EXEC} */
2574 err = inode_permission(&nop_mnt_idmap, inode,
2575 acc & (MAY_READ | MAY_WRITE | MAY_EXEC));
2576
2577 /* Allow read access to binaries even when mode 111 */
2578 if (err == -EACCES && S_ISREG(inode->i_mode) &&
2579 (acc == (NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE) ||
2580 acc == (NFSD_MAY_READ | NFSD_MAY_READ_IF_EXEC)))
2581 err = inode_permission(&nop_mnt_idmap, inode, MAY_EXEC);
2582
2583 return err? nfserrno(err) : 0;
2584}
120int
121nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
122 struct svc_export **expp)
123{
124 struct svc_export *exp = *expp, *exp2 = NULL;
125 struct dentry *dentry = *dpp;
126 struct path path = {.mnt = mntget(exp->ex_path.mnt),
127 .dentry = dget(dentry)};
128 unsigned int follow_flags = 0;
129 int err = 0;
130
131 if (exp->ex_flags & NFSEXP_CROSSMOUNT)
132 follow_flags = LOOKUP_AUTOMOUNT;
133
134 err = follow_down(&path, follow_flags);
135 if (err < 0)
136 goto out;
137 if (path.mnt == exp->ex_path.mnt && path.dentry == dentry &&
138 nfsd_mountpoint(dentry, exp) == 2) {
139 /* This is only a mountpoint in some other namespace */
140 path_put(&path);
141 goto out;
142 }
143
144 exp2 = rqst_exp_get_by_name(rqstp, &path);
145 if (IS_ERR(exp2)) {
146 err = PTR_ERR(exp2);
147 /*
148 * We normally allow NFS clients to continue
149 * "underneath" a mountpoint that is not exported.
150 * The exception is V4ROOT, where no traversal is ever
151 * allowed without an explicit export of the new
152 * directory.
153 */
154 if (err == -ENOENT && !(exp->ex_flags & NFSEXP_V4ROOT))
155 err = 0;
156 path_put(&path);
157 goto out;
158 }
159 if (nfsd_v4client(rqstp) ||
160 (exp->ex_flags & NFSEXP_CROSSMOUNT) || EX_NOHIDE(exp2)) {
161 /* successfully crossed mount point */
162 /*
163 * This is subtle: path.dentry is *not* on path.mnt
164 * at this point. The only reason we are safe is that
165 * original mnt is pinned down by exp, so we should
166 * put path *before* putting exp
167 */
168 *dpp = path.dentry;
169 path.dentry = dentry;
170 *expp = exp2;
171 exp2 = exp;
172 }
173 path_put(&path);
174 exp_put(exp2);
175out:
176 return err;
177}
178
179static void follow_to_parent(struct path *path)
180{
181 struct dentry *dp;
182
183 while (path->dentry == path->mnt->mnt_root && follow_up(path))
184 ;
185 dp = dget_parent(path->dentry);
186 dput(path->dentry);
187 path->dentry = dp;
188}
189
190static int nfsd_lookup_parent(struct svc_rqst *rqstp, struct dentry *dparent, struct svc_export **exp, struct dentry **dentryp)
191{
192 struct svc_export *exp2;
193 struct path path = {.mnt = mntget((*exp)->ex_path.mnt),
194 .dentry = dget(dparent)};
195
196 follow_to_parent(&path);
197
198 exp2 = rqst_exp_parent(rqstp, &path);
199 if (PTR_ERR(exp2) == -ENOENT) {
200 *dentryp = dget(dparent);
201 } else if (IS_ERR(exp2)) {
202 path_put(&path);
203 return PTR_ERR(exp2);
204 } else {
205 *dentryp = dget(path.dentry);
206 exp_put(*exp);
207 *exp = exp2;
208 }
209 path_put(&path);
210 return 0;
211}
212
213/*
214 * For nfsd purposes, we treat V4ROOT exports as though there was an
215 * export at *every* directory.
216 * We return:
217 * '1' if this dentry *must* be an export point,
218 * '2' if it might be, if there is really a mount here, and
219 * '0' if there is no chance of an export point here.
220 */
221int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp)
222{
223 if (!d_inode(dentry))
224 return 0;
225 if (exp->ex_flags & NFSEXP_V4ROOT)
226 return 1;
227 if (nfsd4_is_junction(dentry))
228 return 1;
229 if (d_managed(dentry))
230 /*
231 * Might only be a mountpoint in a different namespace,
232 * but we need to check.
233 */
234 return 2;
235 return 0;
236}
237
238__be32
239nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
240 const char *name, unsigned int len,
241 struct svc_export **exp_ret, struct dentry **dentry_ret)
242{
243 struct svc_export *exp;
244 struct dentry *dparent;
245 struct dentry *dentry;
246 int host_err;
247
248 dprintk("nfsd: nfsd_lookup(fh %s, %.*s)\n", SVCFH_fmt(fhp), len,name);
249
250 dparent = fhp->fh_dentry;
251 exp = exp_get(fhp->fh_export);
252
253 /* Lookup the name, but don't follow links */
254 if (isdotent(name, len)) {
255 if (len==1)
256 dentry = dget(dparent);
257 else if (dparent != exp->ex_path.dentry)
258 dentry = dget_parent(dparent);
259 else if (!EX_NOHIDE(exp) && !nfsd_v4client(rqstp))
260 dentry = dget(dparent); /* .. == . just like at / */
261 else {
262 /* checking mountpoint crossing is very different when stepping up */
263 host_err = nfsd_lookup_parent(rqstp, dparent, &exp, &dentry);
264 if (host_err)
265 goto out_nfserr;
266 }
267 } else {
268 dentry = lookup_one_len_unlocked(name, dparent, len);
269 host_err = PTR_ERR(dentry);
270 if (IS_ERR(dentry))
271 goto out_nfserr;
272 if (nfsd_mountpoint(dentry, exp)) {
273 host_err = nfsd_cross_mnt(rqstp, &dentry, &exp);
274 if (host_err) {
275 dput(dentry);
276 goto out_nfserr;
277 }
278 }
279 }
280 *dentry_ret = dentry;
281 *exp_ret = exp;
282 return 0;
283
284out_nfserr:
285 exp_put(exp);
286 return nfserrno(host_err);
287}
288
289/**
290 * nfsd_lookup - look up a single path component for nfsd
291 *
292 * @rqstp: the request context
293 * @fhp: the file handle of the directory
294 * @name: the component name, or %NULL to look up parent
295 * @len: length of name to examine
296 * @resfh: pointer to pre-initialised filehandle to hold result.
297 *
298 * Look up one component of a pathname.
299 * N.B. After this call _both_ fhp and resfh need an fh_put
300 *
301 * If the lookup would cross a mountpoint, and the mounted filesystem
302 * is exported to the client with NFSEXP_NOHIDE, then the lookup is
303 * accepted as it stands and the mounted directory is
304 * returned. Otherwise the covered directory is returned.
305 * NOTE: this mountpoint crossing is not supported properly by all
306 * clients and is explicitly disallowed for NFSv3
307 *
308 */
309__be32
310nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
311 unsigned int len, struct svc_fh *resfh)
312{
313 struct svc_export *exp;
314 struct dentry *dentry;
315 __be32 err;
316
317 err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
318 if (err)
319 return err;
320 err = nfsd_lookup_dentry(rqstp, fhp, name, len, &exp, &dentry);
321 if (err)
322 return err;
323 err = check_nfsd_access(exp, rqstp, false);
324 if (err)
325 goto out;
326 /*
327 * Note: we compose the file handle now, but as the
328 * dentry may be negative, it may need to be updated.
329 */
330 err = fh_compose(resfh, exp, dentry, fhp);
331 if (!err && d_really_is_negative(dentry))
332 err = nfserr_noent;
333out:
334 dput(dentry);
335 exp_put(exp);
336 return err;
337}
338
339static void
340commit_reset_write_verifier(struct nfsd_net *nn, struct svc_rqst *rqstp,
341 int err)
342{
343 switch (err) {
344 case -EAGAIN:
345 case -ESTALE:
346 /*
347 * Neither of these are the result of a problem with
348 * durable storage, so avoid a write verifier reset.
349 */
350 break;
351 default:
352 nfsd_reset_write_verifier(nn);
353 trace_nfsd_writeverf_reset(nn, rqstp, err);
354 }
355}
356
357/*
358 * Commit metadata changes to stable storage.
359 */
360static int
361commit_inode_metadata(struct inode *inode)
362{
363 const struct export_operations *export_ops = inode->i_sb->s_export_op;
364
365 if (export_ops->commit_metadata)
366 return export_ops->commit_metadata(inode);
367 return sync_inode_metadata(inode, 1);
368}
369
370static int
371commit_metadata(struct svc_fh *fhp)
372{
373 struct inode *inode = d_inode(fhp->fh_dentry);
374
375 if (!EX_ISSYNC(fhp->fh_export))
376 return 0;
377 return commit_inode_metadata(inode);
378}
379
380/*
381 * Go over the attributes and take care of the small differences between
382 * NFS semantics and what Linux expects.
383 */
384static void
385nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
386{
387 /* Ignore mode updates on symlinks */
388 if (S_ISLNK(inode->i_mode))
389 iap->ia_valid &= ~ATTR_MODE;
390
391 /* sanitize the mode change */
392 if (iap->ia_valid & ATTR_MODE) {
393 iap->ia_mode &= S_IALLUGO;
394 iap->ia_mode |= (inode->i_mode & ~S_IALLUGO);
395 }
396
397 /* Revoke setuid/setgid on chown */
398 if (!S_ISDIR(inode->i_mode) &&
399 ((iap->ia_valid & ATTR_UID) || (iap->ia_valid & ATTR_GID))) {
400 iap->ia_valid |= ATTR_KILL_PRIV;
401 if (iap->ia_valid & ATTR_MODE) {
402 /* we're setting mode too, just clear the s*id bits */
403 iap->ia_mode &= ~S_ISUID;
404 if (iap->ia_mode & S_IXGRP)
405 iap->ia_mode &= ~S_ISGID;
406 } else {
407 /* set ATTR_KILL_* bits and let VFS handle it */
408 iap->ia_valid |= ATTR_KILL_SUID;
409 iap->ia_valid |=
410 setattr_should_drop_sgid(&nop_mnt_idmap, inode);
411 }
412 }
413}
414
415static __be32
416nfsd_get_write_access(struct svc_rqst *rqstp, struct svc_fh *fhp,
417 struct iattr *iap)
418{
419 struct inode *inode = d_inode(fhp->fh_dentry);
420
421 if (iap->ia_size < inode->i_size) {
422 __be32 err;
423
424 err = nfsd_permission(&rqstp->rq_cred,
425 fhp->fh_export, fhp->fh_dentry,
426 NFSD_MAY_TRUNC | NFSD_MAY_OWNER_OVERRIDE);
427 if (err)
428 return err;
429 }
430 return nfserrno(get_write_access(inode));
431}
432
433static int __nfsd_setattr(struct dentry *dentry, struct iattr *iap)
434{
435 int host_err;
436
437 if (iap->ia_valid & ATTR_SIZE) {
438 /*
439 * RFC5661, Section 18.30.4:
440 * Changing the size of a file with SETATTR indirectly
441 * changes the time_modify and change attributes.
442 *
443 * (and similar for the older RFCs)
444 */
445 struct iattr size_attr = {
446 .ia_valid = ATTR_SIZE | ATTR_CTIME | ATTR_MTIME,
447 .ia_size = iap->ia_size,
448 };
449
450 if (iap->ia_size < 0)
451 return -EFBIG;
452
453 host_err = notify_change(&nop_mnt_idmap, dentry, &size_attr, NULL);
454 if (host_err)
455 return host_err;
456 iap->ia_valid &= ~ATTR_SIZE;
457
458 /*
459 * Avoid the additional setattr call below if the only other
460 * attribute that the client sends is the mtime, as we update
461 * it as part of the size change above.
462 */
463 if ((iap->ia_valid & ~ATTR_MTIME) == 0)
464 return 0;
465 }
466
467 if (!iap->ia_valid)
468 return 0;
469
470 iap->ia_valid |= ATTR_CTIME;
471 return notify_change(&nop_mnt_idmap, dentry, iap, NULL);
472}
473
474/**
475 * nfsd_setattr - Set various file attributes.
476 * @rqstp: controlling RPC transaction
477 * @fhp: filehandle of target
478 * @attr: attributes to set
479 * @guardtime: do not act if ctime.tv_sec does not match this timestamp
480 *
481 * This call may adjust the contents of @attr (in particular, this
482 * call may change the bits in the na_iattr.ia_valid field).
483 *
484 * Returns nfs_ok on success, otherwise an NFS status code is
485 * returned. Caller must release @fhp by calling fh_put in either
486 * case.
487 */
488__be32
489nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
490 struct nfsd_attrs *attr, const struct timespec64 *guardtime)
491{
492 struct dentry *dentry;
493 struct inode *inode;
494 struct iattr *iap = attr->na_iattr;
495 int accmode = NFSD_MAY_SATTR;
496 umode_t ftype = 0;
497 __be32 err;
498 int host_err = 0;
499 bool get_write_count;
500 bool size_change = (iap->ia_valid & ATTR_SIZE);
501 int retries;
502
503 if (iap->ia_valid & ATTR_SIZE) {
504 accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
505 ftype = S_IFREG;
506 }
507
508 /*
509 * If utimes(2) and friends are called with times not NULL, we should
510 * not set NFSD_MAY_WRITE bit. Otherwise fh_verify->nfsd_permission
511 * will return EACCES, when the caller's effective UID does not match
512 * the owner of the file, and the caller is not privileged. In this
513 * situation, we should return EPERM(notify_change will return this).
514 */
515 if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME)) {
516 accmode |= NFSD_MAY_OWNER_OVERRIDE;
517 if (!(iap->ia_valid & (ATTR_ATIME_SET | ATTR_MTIME_SET)))
518 accmode |= NFSD_MAY_WRITE;
519 }
520
521 /* Callers that do fh_verify should do the fh_want_write: */
522 get_write_count = !fhp->fh_dentry;
523
524 /* Get inode */
525 err = fh_verify(rqstp, fhp, ftype, accmode);
526 if (err)
527 return err;
528 if (get_write_count) {
529 host_err = fh_want_write(fhp);
530 if (host_err)
531 goto out;
532 }
533
534 dentry = fhp->fh_dentry;
535 inode = d_inode(dentry);
536
537 nfsd_sanitize_attrs(inode, iap);
538
539 /*
540 * The size case is special, it changes the file in addition to the
541 * attributes, and file systems don't expect it to be mixed with
542 * "random" attribute changes. We thus split out the size change
543 * into a separate call to ->setattr, and do the rest as a separate
544 * setattr call.
545 */
546 if (size_change) {
547 err = nfsd_get_write_access(rqstp, fhp, iap);
548 if (err)
549 return err;
550 }
551
552 inode_lock(inode);
553 err = fh_fill_pre_attrs(fhp);
554 if (err)
555 goto out_unlock;
556
557 if (guardtime) {
558 struct timespec64 ctime = inode_get_ctime(inode);
559 if ((u32)guardtime->tv_sec != (u32)ctime.tv_sec ||
560 guardtime->tv_nsec != ctime.tv_nsec) {
561 err = nfserr_notsync;
562 goto out_fill_attrs;
563 }
564 }
565
566 for (retries = 1;;) {
567 struct iattr attrs;
568
569 /*
570 * notify_change() can alter its iattr argument, making
571 * @iap unsuitable for submission multiple times. Make a
572 * copy for every loop iteration.
573 */
574 attrs = *iap;
575 host_err = __nfsd_setattr(dentry, &attrs);
576 if (host_err != -EAGAIN || !retries--)
577 break;
578 if (!nfsd_wait_for_delegreturn(rqstp, inode))
579 break;
580 }
581 if (attr->na_seclabel && attr->na_seclabel->len)
582 attr->na_labelerr = security_inode_setsecctx(dentry,
583 attr->na_seclabel->data, attr->na_seclabel->len);
584 if (IS_ENABLED(CONFIG_FS_POSIX_ACL) && attr->na_pacl)
585 attr->na_aclerr = set_posix_acl(&nop_mnt_idmap,
586 dentry, ACL_TYPE_ACCESS,
587 attr->na_pacl);
588 if (IS_ENABLED(CONFIG_FS_POSIX_ACL) &&
589 !attr->na_aclerr && attr->na_dpacl && S_ISDIR(inode->i_mode))
590 attr->na_aclerr = set_posix_acl(&nop_mnt_idmap,
591 dentry, ACL_TYPE_DEFAULT,
592 attr->na_dpacl);
593out_fill_attrs:
594 /*
595 * RFC 1813 Section 3.3.2 does not mandate that an NFS server
596 * returns wcc_data for SETATTR. Some client implementations
597 * depend on receiving wcc_data, however, to sort out partial
598 * updates (eg., the client requested that size and mode be
599 * modified, but the server changed only the file mode).
600 */
601 fh_fill_post_attrs(fhp);
602out_unlock:
603 inode_unlock(inode);
604 if (size_change)
605 put_write_access(inode);
606out:
607 if (!host_err)
608 host_err = commit_metadata(fhp);
609 return err != 0 ? err : nfserrno(host_err);
610}
611
612#if defined(CONFIG_NFSD_V4)
613/*
614 * NFS junction information is stored in an extended attribute.
615 */
616#define NFSD_JUNCTION_XATTR_NAME XATTR_TRUSTED_PREFIX "junction.nfs"
617
618/**
619 * nfsd4_is_junction - Test if an object could be an NFS junction
620 *
621 * @dentry: object to test
622 *
623 * Returns 1 if "dentry" appears to contain NFS junction information.
624 * Otherwise 0 is returned.
625 */
626int nfsd4_is_junction(struct dentry *dentry)
627{
628 struct inode *inode = d_inode(dentry);
629
630 if (inode == NULL)
631 return 0;
632 if (inode->i_mode & S_IXUGO)
633 return 0;
634 if (!(inode->i_mode & S_ISVTX))
635 return 0;
636 if (vfs_getxattr(&nop_mnt_idmap, dentry, NFSD_JUNCTION_XATTR_NAME,
637 NULL, 0) <= 0)
638 return 0;
639 return 1;
640}
641
642static struct nfsd4_compound_state *nfsd4_get_cstate(struct svc_rqst *rqstp)
643{
644 return &((struct nfsd4_compoundres *)rqstp->rq_resp)->cstate;
645}
646
647__be32 nfsd4_clone_file_range(struct svc_rqst *rqstp,
648 struct nfsd_file *nf_src, u64 src_pos,
649 struct nfsd_file *nf_dst, u64 dst_pos,
650 u64 count, bool sync)
651{
652 struct file *src = nf_src->nf_file;
653 struct file *dst = nf_dst->nf_file;
654 errseq_t since;
655 loff_t cloned;
656 __be32 ret = 0;
657
658 since = READ_ONCE(dst->f_wb_err);
659 cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0);
660 if (cloned < 0) {
661 ret = nfserrno(cloned);
662 goto out_err;
663 }
664 if (count && cloned != count) {
665 ret = nfserrno(-EINVAL);
666 goto out_err;
667 }
668 if (sync) {
669 loff_t dst_end = count ? dst_pos + count - 1 : LLONG_MAX;
670 int status = vfs_fsync_range(dst, dst_pos, dst_end, 0);
671
672 if (!status)
673 status = filemap_check_wb_err(dst->f_mapping, since);
674 if (!status)
675 status = commit_inode_metadata(file_inode(src));
676 if (status < 0) {
677 struct nfsd_net *nn = net_generic(nf_dst->nf_net,
678 nfsd_net_id);
679
680 trace_nfsd_clone_file_range_err(rqstp,
681 &nfsd4_get_cstate(rqstp)->save_fh,
682 src_pos,
683 &nfsd4_get_cstate(rqstp)->current_fh,
684 dst_pos,
685 count, status);
686 commit_reset_write_verifier(nn, rqstp, status);
687 ret = nfserrno(status);
688 }
689 }
690out_err:
691 return ret;
692}
693
694ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst,
695 u64 dst_pos, u64 count)
696{
697 ssize_t ret;
698
699 /*
700 * Limit copy to 4MB to prevent indefinitely blocking an nfsd
701 * thread and client rpc slot. The choice of 4MB is somewhat
702 * arbitrary. We might instead base this on r/wsize, or make it
703 * tunable, or use a time instead of a byte limit, or implement
704 * asynchronous copy. In theory a client could also recognize a
705 * limit like this and pipeline multiple COPY requests.
706 */
707 count = min_t(u64, count, 1 << 22);
708 ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count, 0);
709
710 if (ret == -EOPNOTSUPP || ret == -EXDEV)
711 ret = vfs_copy_file_range(src, src_pos, dst, dst_pos, count,
712 COPY_FILE_SPLICE);
713 return ret;
714}
715
716__be32 nfsd4_vfs_fallocate(struct svc_rqst *rqstp, struct svc_fh *fhp,
717 struct file *file, loff_t offset, loff_t len,
718 int flags)
719{
720 int error;
721
722 if (!S_ISREG(file_inode(file)->i_mode))
723 return nfserr_inval;
724
725 error = vfs_fallocate(file, flags, offset, len);
726 if (!error)
727 error = commit_metadata(fhp);
728
729 return nfserrno(error);
730}
731#endif /* defined(CONFIG_NFSD_V4) */
732
733/*
734 * Check server access rights to a file system object
735 */
736struct accessmap {
737 u32 access;
738 int how;
739};
740static struct accessmap nfs3_regaccess[] = {
741 { NFS3_ACCESS_READ, NFSD_MAY_READ },
742 { NFS3_ACCESS_EXECUTE, NFSD_MAY_EXEC },
743 { NFS3_ACCESS_MODIFY, NFSD_MAY_WRITE|NFSD_MAY_TRUNC },
744 { NFS3_ACCESS_EXTEND, NFSD_MAY_WRITE },
745
746#ifdef CONFIG_NFSD_V4
747 { NFS4_ACCESS_XAREAD, NFSD_MAY_READ },
748 { NFS4_ACCESS_XAWRITE, NFSD_MAY_WRITE },
749 { NFS4_ACCESS_XALIST, NFSD_MAY_READ },
750#endif
751
752 { 0, 0 }
753};
754
755static struct accessmap nfs3_diraccess[] = {
756 { NFS3_ACCESS_READ, NFSD_MAY_READ },
757 { NFS3_ACCESS_LOOKUP, NFSD_MAY_EXEC },
758 { NFS3_ACCESS_MODIFY, NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC},
759 { NFS3_ACCESS_EXTEND, NFSD_MAY_EXEC|NFSD_MAY_WRITE },
760 { NFS3_ACCESS_DELETE, NFSD_MAY_REMOVE },
761
762#ifdef CONFIG_NFSD_V4
763 { NFS4_ACCESS_XAREAD, NFSD_MAY_READ },
764 { NFS4_ACCESS_XAWRITE, NFSD_MAY_WRITE },
765 { NFS4_ACCESS_XALIST, NFSD_MAY_READ },
766#endif
767
768 { 0, 0 }
769};
770
771static struct accessmap nfs3_anyaccess[] = {
772 /* Some clients - Solaris 2.6 at least, make an access call
773 * to the server to check for access for things like /dev/null
774 * (which really, the server doesn't care about). So
775 * We provide simple access checking for them, looking
776 * mainly at mode bits, and we make sure to ignore read-only
777 * filesystem checks
778 */
779 { NFS3_ACCESS_READ, NFSD_MAY_READ },
780 { NFS3_ACCESS_EXECUTE, NFSD_MAY_EXEC },
781 { NFS3_ACCESS_MODIFY, NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS },
782 { NFS3_ACCESS_EXTEND, NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS },
783
784 { 0, 0 }
785};
786
787__be32
788nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *supported)
789{
790 struct accessmap *map;
791 struct svc_export *export;
792 struct dentry *dentry;
793 u32 query, result = 0, sresult = 0;
794 __be32 error;
795
796 error = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP);
797 if (error)
798 goto out;
799
800 export = fhp->fh_export;
801 dentry = fhp->fh_dentry;
802
803 if (d_is_reg(dentry))
804 map = nfs3_regaccess;
805 else if (d_is_dir(dentry))
806 map = nfs3_diraccess;
807 else
808 map = nfs3_anyaccess;
809
810
811 query = *access;
812 for (; map->access; map++) {
813 if (map->access & query) {
814 __be32 err2;
815
816 sresult |= map->access;
817
818 err2 = nfsd_permission(&rqstp->rq_cred, export,
819 dentry, map->how);
820 switch (err2) {
821 case nfs_ok:
822 result |= map->access;
823 break;
824
825 /* the following error codes just mean the access was not allowed,
826 * rather than an error occurred */
827 case nfserr_rofs:
828 case nfserr_acces:
829 case nfserr_perm:
830 /* simply don't "or" in the access bit. */
831 break;
832 default:
833 error = err2;
834 goto out;
835 }
836 }
837 }
838 *access = result;
839 if (supported)
840 *supported = sresult;
841
842 out:
843 return error;
844}
845
846int nfsd_open_break_lease(struct inode *inode, int access)
847{
848 unsigned int mode;
849
850 if (access & NFSD_MAY_NOT_BREAK_LEASE)
851 return 0;
852 mode = (access & NFSD_MAY_WRITE) ? O_WRONLY : O_RDONLY;
853 return break_lease(inode, mode | O_NONBLOCK);
854}
855
856/*
857 * Open an existing file or directory.
858 * The may_flags argument indicates the type of open (read/write/lock)
859 * and additional flags.
860 * N.B. After this call fhp needs an fh_put
861 */
862static int
863__nfsd_open(struct svc_fh *fhp, umode_t type, int may_flags, struct file **filp)
864{
865 struct path path;
866 struct inode *inode;
867 struct file *file;
868 int flags = O_RDONLY|O_LARGEFILE;
869 int host_err = -EPERM;
870
871 path.mnt = fhp->fh_export->ex_path.mnt;
872 path.dentry = fhp->fh_dentry;
873 inode = d_inode(path.dentry);
874
875 if (IS_APPEND(inode) && (may_flags & NFSD_MAY_WRITE))
876 goto out;
877
878 if (!inode->i_fop)
879 goto out;
880
881 host_err = nfsd_open_break_lease(inode, may_flags);
882 if (host_err) /* NOMEM or WOULDBLOCK */
883 goto out;
884
885 if (may_flags & NFSD_MAY_WRITE) {
886 if (may_flags & NFSD_MAY_READ)
887 flags = O_RDWR|O_LARGEFILE;
888 else
889 flags = O_WRONLY|O_LARGEFILE;
890 }
891
892 file = dentry_open(&path, flags, current_cred());
893 if (IS_ERR(file)) {
894 host_err = PTR_ERR(file);
895 goto out;
896 }
897
898 host_err = security_file_post_open(file, may_flags);
899 if (host_err) {
900 fput(file);
901 goto out;
902 }
903
904 *filp = file;
905out:
906 return host_err;
907}
908
909__be32
910nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
911 int may_flags, struct file **filp)
912{
913 __be32 err;
914 int host_err;
915 bool retried = false;
916
917 /*
918 * If we get here, then the client has already done an "open",
919 * and (hopefully) checked permission - so allow OWNER_OVERRIDE
920 * in case a chmod has now revoked permission.
921 *
922 * Arguably we should also allow the owner override for
923 * directories, but we never have and it doesn't seem to have
924 * caused anyone a problem. If we were to change this, note
925 * also that our filldir callbacks would need a variant of
926 * lookup_one_len that doesn't check permissions.
927 */
928 if (type == S_IFREG)
929 may_flags |= NFSD_MAY_OWNER_OVERRIDE;
930retry:
931 err = fh_verify(rqstp, fhp, type, may_flags);
932 if (!err) {
933 host_err = __nfsd_open(fhp, type, may_flags, filp);
934 if (host_err == -EOPENSTALE && !retried) {
935 retried = true;
936 fh_put(fhp);
937 goto retry;
938 }
939 err = nfserrno(host_err);
940 }
941 return err;
942}
943
944/**
945 * nfsd_open_verified - Open a regular file for the filecache
946 * @fhp: NFS filehandle of the file to open
947 * @may_flags: internal permission flags
948 * @filp: OUT: open "struct file *"
949 *
950 * Returns zero on success, or a negative errno value.
951 */
952int
953nfsd_open_verified(struct svc_fh *fhp, int may_flags, struct file **filp)
954{
955 return __nfsd_open(fhp, S_IFREG, may_flags, filp);
956}
957
958/*
959 * Grab and keep cached pages associated with a file in the svc_rqst
960 * so that they can be passed to the network sendmsg routines
961 * directly. They will be released after the sending has completed.
962 *
963 * Return values: Number of bytes consumed, or -EIO if there are no
964 * remaining pages in rqstp->rq_pages.
965 */
966static int
967nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
968 struct splice_desc *sd)
969{
970 struct svc_rqst *rqstp = sd->u.data;
971 struct page *page = buf->page; // may be a compound one
972 unsigned offset = buf->offset;
973 struct page *last_page;
974
975 last_page = page + (offset + sd->len - 1) / PAGE_SIZE;
976 for (page += offset / PAGE_SIZE; page <= last_page; page++) {
977 /*
978 * Skip page replacement when extending the contents of the
979 * current page. But note that we may get two zero_pages in a
980 * row from shmem.
981 */
982 if (page == *(rqstp->rq_next_page - 1) &&
983 offset_in_page(rqstp->rq_res.page_base +
984 rqstp->rq_res.page_len))
985 continue;
986 if (unlikely(!svc_rqst_replace_page(rqstp, page)))
987 return -EIO;
988 }
989 if (rqstp->rq_res.page_len == 0) // first call
990 rqstp->rq_res.page_base = offset % PAGE_SIZE;
991 rqstp->rq_res.page_len += sd->len;
992 return sd->len;
993}
994
995static int nfsd_direct_splice_actor(struct pipe_inode_info *pipe,
996 struct splice_desc *sd)
997{
998 return __splice_from_pipe(pipe, sd, nfsd_splice_actor);
999}
1000
1001static u32 nfsd_eof_on_read(struct file *file, loff_t offset, ssize_t len,
1002 size_t expected)
1003{
1004 if (expected != 0 && len == 0)
1005 return 1;
1006 if (offset+len >= i_size_read(file_inode(file)))
1007 return 1;
1008 return 0;
1009}
1010
1011static __be32 nfsd_finish_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
1012 struct file *file, loff_t offset,
1013 unsigned long *count, u32 *eof, ssize_t host_err)
1014{
1015 if (host_err >= 0) {
1016 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1017
1018 nfsd_stats_io_read_add(nn, fhp->fh_export, host_err);
1019 *eof = nfsd_eof_on_read(file, offset, host_err, *count);
1020 *count = host_err;
1021 fsnotify_access(file);
1022 trace_nfsd_read_io_done(rqstp, fhp, offset, *count);
1023 return 0;
1024 } else {
1025 trace_nfsd_read_err(rqstp, fhp, offset, host_err);
1026 return nfserrno(host_err);
1027 }
1028}
1029
1030/**
1031 * nfsd_splice_read - Perform a VFS read using a splice pipe
1032 * @rqstp: RPC transaction context
1033 * @fhp: file handle of file to be read
1034 * @file: opened struct file of file to be read
1035 * @offset: starting byte offset
1036 * @count: IN: requested number of bytes; OUT: number of bytes read
1037 * @eof: OUT: set non-zero if operation reached the end of the file
1038 *
1039 * Returns nfs_ok on success, otherwise an nfserr stat value is
1040 * returned.
1041 */
1042__be32 nfsd_splice_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
1043 struct file *file, loff_t offset, unsigned long *count,
1044 u32 *eof)
1045{
1046 struct splice_desc sd = {
1047 .len = 0,
1048 .total_len = *count,
1049 .pos = offset,
1050 .u.data = rqstp,
1051 };
1052 ssize_t host_err;
1053
1054 trace_nfsd_read_splice(rqstp, fhp, offset, *count);
1055 host_err = rw_verify_area(READ, file, &offset, *count);
1056 if (!host_err)
1057 host_err = splice_direct_to_actor(file, &sd,
1058 nfsd_direct_splice_actor);
1059 return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err);
1060}
1061
1062/**
1063 * nfsd_iter_read - Perform a VFS read using an iterator
1064 * @rqstp: RPC transaction context
1065 * @fhp: file handle of file to be read
1066 * @file: opened struct file of file to be read
1067 * @offset: starting byte offset
1068 * @count: IN: requested number of bytes; OUT: number of bytes read
1069 * @base: offset in first page of read buffer
1070 * @eof: OUT: set non-zero if operation reached the end of the file
1071 *
1072 * Some filesystems or situations cannot use nfsd_splice_read. This
1073 * function is the slightly less-performant fallback for those cases.
1074 *
1075 * Returns nfs_ok on success, otherwise an nfserr stat value is
1076 * returned.
1077 */
1078__be32 nfsd_iter_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
1079 struct file *file, loff_t offset, unsigned long *count,
1080 unsigned int base, u32 *eof)
1081{
1082 unsigned long v, total;
1083 struct iov_iter iter;
1084 loff_t ppos = offset;
1085 struct page *page;
1086 ssize_t host_err;
1087
1088 v = 0;
1089 total = *count;
1090 while (total) {
1091 page = *(rqstp->rq_next_page++);
1092 rqstp->rq_vec[v].iov_base = page_address(page) + base;
1093 rqstp->rq_vec[v].iov_len = min_t(size_t, total, PAGE_SIZE - base);
1094 total -= rqstp->rq_vec[v].iov_len;
1095 ++v;
1096 base = 0;
1097 }
1098 WARN_ON_ONCE(v > ARRAY_SIZE(rqstp->rq_vec));
1099
1100 trace_nfsd_read_vector(rqstp, fhp, offset, *count);
1101 iov_iter_kvec(&iter, ITER_DEST, rqstp->rq_vec, v, *count);
1102 host_err = vfs_iter_read(file, &iter, &ppos, 0);
1103 return nfsd_finish_read(rqstp, fhp, file, offset, count, eof, host_err);
1104}
1105
1106/*
1107 * Gathered writes: If another process is currently writing to the file,
1108 * there's a high chance this is another nfsd (triggered by a bulk write
1109 * from a client's biod). Rather than syncing the file with each write
1110 * request, we sleep for 10 msec.
1111 *
1112 * I don't know if this roughly approximates C. Juszak's idea of
1113 * gathered writes, but it's a nice and simple solution (IMHO), and it
1114 * seems to work:-)
1115 *
1116 * Note: we do this only in the NFSv2 case, since v3 and higher have a
1117 * better tool (separate unstable writes and commits) for solving this
1118 * problem.
1119 */
1120static int wait_for_concurrent_writes(struct file *file)
1121{
1122 struct inode *inode = file_inode(file);
1123 static ino_t last_ino;
1124 static dev_t last_dev;
1125 int err = 0;
1126
1127 if (atomic_read(&inode->i_writecount) > 1
1128 || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) {
1129 dprintk("nfsd: write defer %d\n", task_pid_nr(current));
1130 msleep(10);
1131 dprintk("nfsd: write resume %d\n", task_pid_nr(current));
1132 }
1133
1134 if (inode->i_state & I_DIRTY) {
1135 dprintk("nfsd: write sync %d\n", task_pid_nr(current));
1136 err = vfs_fsync(file, 0);
1137 }
1138 last_ino = inode->i_ino;
1139 last_dev = inode->i_sb->s_dev;
1140 return err;
1141}
1142
1143__be32
1144nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
1145 loff_t offset, struct kvec *vec, int vlen,
1146 unsigned long *cnt, int stable,
1147 __be32 *verf)
1148{
1149 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1150 struct file *file = nf->nf_file;
1151 struct super_block *sb = file_inode(file)->i_sb;
1152 struct svc_export *exp;
1153 struct iov_iter iter;
1154 errseq_t since;
1155 __be32 nfserr;
1156 int host_err;
1157 loff_t pos = offset;
1158 unsigned long exp_op_flags = 0;
1159 unsigned int pflags = current->flags;
1160 rwf_t flags = 0;
1161 bool restore_flags = false;
1162
1163 trace_nfsd_write_opened(rqstp, fhp, offset, *cnt);
1164
1165 if (sb->s_export_op)
1166 exp_op_flags = sb->s_export_op->flags;
1167
1168 if (test_bit(RQ_LOCAL, &rqstp->rq_flags) &&
1169 !(exp_op_flags & EXPORT_OP_REMOTE_FS)) {
1170 /*
1171 * We want throttling in balance_dirty_pages()
1172 * and shrink_inactive_list() to only consider
1173 * the backingdev we are writing to, so that nfs to
1174 * localhost doesn't cause nfsd to lock up due to all
1175 * the client's dirty pages or its congested queue.
1176 */
1177 current->flags |= PF_LOCAL_THROTTLE;
1178 restore_flags = true;
1179 }
1180
1181 exp = fhp->fh_export;
1182
1183 if (!EX_ISSYNC(exp))
1184 stable = NFS_UNSTABLE;
1185
1186 if (stable && !fhp->fh_use_wgather)
1187 flags |= RWF_SYNC;
1188
1189 iov_iter_kvec(&iter, ITER_SOURCE, vec, vlen, *cnt);
1190 since = READ_ONCE(file->f_wb_err);
1191 if (verf)
1192 nfsd_copy_write_verifier(verf, nn);
1193 host_err = vfs_iter_write(file, &iter, &pos, flags);
1194 if (host_err < 0) {
1195 commit_reset_write_verifier(nn, rqstp, host_err);
1196 goto out_nfserr;
1197 }
1198 *cnt = host_err;
1199 nfsd_stats_io_write_add(nn, exp, *cnt);
1200 fsnotify_modify(file);
1201 host_err = filemap_check_wb_err(file->f_mapping, since);
1202 if (host_err < 0)
1203 goto out_nfserr;
1204
1205 if (stable && fhp->fh_use_wgather) {
1206 host_err = wait_for_concurrent_writes(file);
1207 if (host_err < 0)
1208 commit_reset_write_verifier(nn, rqstp, host_err);
1209 }
1210
1211out_nfserr:
1212 if (host_err >= 0) {
1213 trace_nfsd_write_io_done(rqstp, fhp, offset, *cnt);
1214 nfserr = nfs_ok;
1215 } else {
1216 trace_nfsd_write_err(rqstp, fhp, offset, host_err);
1217 nfserr = nfserrno(host_err);
1218 }
1219 if (restore_flags)
1220 current_restore_flags(pflags, PF_LOCAL_THROTTLE);
1221 return nfserr;
1222}
1223
1224/**
1225 * nfsd_read_splice_ok - check if spliced reading is supported
1226 * @rqstp: RPC transaction context
1227 *
1228 * Return values:
1229 * %true: nfsd_splice_read() may be used
1230 * %false: nfsd_splice_read() must not be used
1231 *
1232 * NFS READ normally uses splice to send data in-place. However the
1233 * data in cache can change after the reply's MIC is computed but
1234 * before the RPC reply is sent. To prevent the client from
1235 * rejecting the server-computed MIC in this somewhat rare case, do
1236 * not use splice with the GSS integrity and privacy services.
1237 */
1238bool nfsd_read_splice_ok(struct svc_rqst *rqstp)
1239{
1240 switch (svc_auth_flavor(rqstp)) {
1241 case RPC_AUTH_GSS_KRB5I:
1242 case RPC_AUTH_GSS_KRB5P:
1243 return false;
1244 }
1245 return true;
1246}
1247
1248/**
1249 * nfsd_read - Read data from a file
1250 * @rqstp: RPC transaction context
1251 * @fhp: file handle of file to be read
1252 * @offset: starting byte offset
1253 * @count: IN: requested number of bytes; OUT: number of bytes read
1254 * @eof: OUT: set non-zero if operation reached the end of the file
1255 *
1256 * The caller must verify that there is enough space in @rqstp.rq_res
1257 * to perform this operation.
1258 *
1259 * N.B. After this call fhp needs an fh_put
1260 *
1261 * Returns nfs_ok on success, otherwise an nfserr stat value is
1262 * returned.
1263 */
1264__be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
1265 loff_t offset, unsigned long *count, u32 *eof)
1266{
1267 struct nfsd_file *nf;
1268 struct file *file;
1269 __be32 err;
1270
1271 trace_nfsd_read_start(rqstp, fhp, offset, *count);
1272 err = nfsd_file_acquire_gc(rqstp, fhp, NFSD_MAY_READ, &nf);
1273 if (err)
1274 return err;
1275
1276 file = nf->nf_file;
1277 if (file->f_op->splice_read && nfsd_read_splice_ok(rqstp))
1278 err = nfsd_splice_read(rqstp, fhp, file, offset, count, eof);
1279 else
1280 err = nfsd_iter_read(rqstp, fhp, file, offset, count, 0, eof);
1281
1282 nfsd_file_put(nf);
1283 trace_nfsd_read_done(rqstp, fhp, offset, *count);
1284 return err;
1285}
1286
1287/*
1288 * Write data to a file.
1289 * The stable flag requests synchronous writes.
1290 * N.B. After this call fhp needs an fh_put
1291 */
1292__be32
1293nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
1294 struct kvec *vec, int vlen, unsigned long *cnt, int stable,
1295 __be32 *verf)
1296{
1297 struct nfsd_file *nf;
1298 __be32 err;
1299
1300 trace_nfsd_write_start(rqstp, fhp, offset, *cnt);
1301
1302 err = nfsd_file_acquire_gc(rqstp, fhp, NFSD_MAY_WRITE, &nf);
1303 if (err)
1304 goto out;
1305
1306 err = nfsd_vfs_write(rqstp, fhp, nf, offset, vec,
1307 vlen, cnt, stable, verf);
1308 nfsd_file_put(nf);
1309out:
1310 trace_nfsd_write_done(rqstp, fhp, offset, *cnt);
1311 return err;
1312}
1313
1314/**
1315 * nfsd_commit - Commit pending writes to stable storage
1316 * @rqstp: RPC request being processed
1317 * @fhp: NFS filehandle
1318 * @nf: target file
1319 * @offset: raw offset from beginning of file
1320 * @count: raw count of bytes to sync
1321 * @verf: filled in with the server's current write verifier
1322 *
1323 * Note: we guarantee that data that lies within the range specified
1324 * by the 'offset' and 'count' parameters will be synced. The server
1325 * is permitted to sync data that lies outside this range at the
1326 * same time.
1327 *
1328 * Unfortunately we cannot lock the file to make sure we return full WCC
1329 * data to the client, as locking happens lower down in the filesystem.
1330 *
1331 * Return values:
1332 * An nfsstat value in network byte order.
1333 */
1334__be32
1335nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
1336 u64 offset, u32 count, __be32 *verf)
1337{
1338 __be32 err = nfs_ok;
1339 u64 maxbytes;
1340 loff_t start, end;
1341 struct nfsd_net *nn;
1342
1343 /*
1344 * Convert the client-provided (offset, count) range to a
1345 * (start, end) range. If the client-provided range falls
1346 * outside the maximum file size of the underlying FS,
1347 * clamp the sync range appropriately.
1348 */
1349 start = 0;
1350 end = LLONG_MAX;
1351 maxbytes = (u64)fhp->fh_dentry->d_sb->s_maxbytes;
1352 if (offset < maxbytes) {
1353 start = offset;
1354 if (count && (offset + count - 1 < maxbytes))
1355 end = offset + count - 1;
1356 }
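
	/*
	 * Worked example of the clamping above, assuming a hypothetical
	 * filesystem with s_maxbytes == 1000:
	 *
	 *	offset=100,  count=200 -> start=100, end=299
	 *	offset=900,  count=200 -> start=900, end=LLONG_MAX
	 *				  (end would pass s_maxbytes)
	 *	offset=1200, count=50  -> start=0,   end=LLONG_MAX
	 *				  (offset beyond s_maxbytes; sync all)
	 *	offset=100,  count=0   -> start=100, end=LLONG_MAX (sync to EOF)
	 */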
1357
1358 nn = net_generic(nf->nf_net, nfsd_net_id);
1359 if (EX_ISSYNC(fhp->fh_export)) {
1360 errseq_t since = READ_ONCE(nf->nf_file->f_wb_err);
1361 int err2;
1362
1363 err2 = vfs_fsync_range(nf->nf_file, start, end, 0);
1364 switch (err2) {
1365 case 0:
1366 nfsd_copy_write_verifier(verf, nn);
1367 err2 = filemap_check_wb_err(nf->nf_file->f_mapping,
1368 since);
1369 err = nfserrno(err2);
1370 break;
1371 case -EINVAL:
1372 err = nfserr_notsupp;
1373 break;
1374 default:
1375 commit_reset_write_verifier(nn, rqstp, err2);
1376 err = nfserrno(err2);
1377 }
1378 } else
1379 nfsd_copy_write_verifier(verf, nn);
1380
1381 return err;
1382}
1383
1384/**
1385 * nfsd_create_setattr - Set a created file's attributes
1386 * @rqstp: RPC transaction being executed
1387 * @fhp: NFS filehandle of parent directory
1388 * @resfhp: NFS filehandle of new object
1389 * @attrs: requested attributes of new object
1390 *
1391 * Returns nfs_ok on success, or an nfsstat in network byte order.
1392 */
1393__be32
1394nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
1395 struct svc_fh *resfhp, struct nfsd_attrs *attrs)
1396{
1397 struct iattr *iap = attrs->na_iattr;
1398 __be32 status;
1399
1400 /*
1401 * Mode has already been set by file creation.
1402 */
1403 iap->ia_valid &= ~ATTR_MODE;
1404
1405 /*
1406 * Setting uid/gid works only for root. Irix appears to
1407 * send along the gid on create when it tries to implement
1408 * setgid directories via NFS:
1409 */
1410 if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID))
1411 iap->ia_valid &= ~(ATTR_UID|ATTR_GID);
1412
1413 /*
1414 * Callers expect new file metadata to be committed even
1415 * if the attributes have not changed.
1416 */
1417 if (nfsd_attrs_valid(attrs))
1418 status = nfsd_setattr(rqstp, resfhp, attrs, NULL);
1419 else
1420 status = nfserrno(commit_metadata(resfhp));
1421
1422 /*
1423 * Transactional filesystems had a chance to commit changes
1424 * for both parent and child simultaneously making the
1425 * following commit_metadata a noop in many cases.
1426 */
1427 if (!status)
1428 status = nfserrno(commit_metadata(fhp));
1429
1430 /*
1431 * Update the new filehandle to pick up the new attributes.
1432 */
1433 if (!status)
1434 status = fh_update(resfhp);
1435
1436 return status;
1437}
1438
/* An HP-UX client sometimes creates a file in mode 000 and sets its size to 0.
 * Setting the size to 0 can fail on some filesystems because the permission
 * check requires WRITE permission, which mode 000 does not grant. Since a
 * freshly created file already has size 0, ignore a request to resize it
 * to 0.
 *
 * Call this only after vfs_create() has been called.
 */
1447static void
1448nfsd_check_ignore_resizing(struct iattr *iap)
1449{
1450 if ((iap->ia_valid & ATTR_SIZE) && (iap->ia_size == 0))
1451 iap->ia_valid &= ~ATTR_SIZE;
1452}
1453
1454/* The parent directory should already be locked: */
1455__be32
1456nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
1457 struct nfsd_attrs *attrs,
1458 int type, dev_t rdev, struct svc_fh *resfhp)
1459{
1460 struct dentry *dentry, *dchild;
1461 struct inode *dirp;
1462 struct iattr *iap = attrs->na_iattr;
1463 __be32 err;
1464 int host_err;
1465
1466 dentry = fhp->fh_dentry;
1467 dirp = d_inode(dentry);
1468
1469 dchild = dget(resfhp->fh_dentry);
1470 err = nfsd_permission(&rqstp->rq_cred, fhp->fh_export, dentry,
1471 NFSD_MAY_CREATE);
1472 if (err)
1473 goto out;
1474
1475 if (!(iap->ia_valid & ATTR_MODE))
1476 iap->ia_mode = 0;
1477 iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type;
1478
1479 if (!IS_POSIXACL(dirp))
1480 iap->ia_mode &= ~current_umask();
1481
1482 err = 0;
1483 switch (type) {
1484 case S_IFREG:
1485 host_err = vfs_create(&nop_mnt_idmap, dirp, dchild,
1486 iap->ia_mode, true);
1487 if (!host_err)
1488 nfsd_check_ignore_resizing(iap);
1489 break;
1490 case S_IFDIR:
1491 host_err = vfs_mkdir(&nop_mnt_idmap, dirp, dchild, iap->ia_mode);
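		/*
		 * Some filesystems return success from ->mkdir without
		 * instantiating the dentry that was passed in; in that case,
		 * look the new directory up again so that resfhp refers to
		 * a positive, hashed dentry.
		 */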
1492 if (!host_err && unlikely(d_unhashed(dchild))) {
1493 struct dentry *d;
1494 d = lookup_one_len(dchild->d_name.name,
1495 dchild->d_parent,
1496 dchild->d_name.len);
1497 if (IS_ERR(d)) {
1498 host_err = PTR_ERR(d);
1499 break;
1500 }
1501 if (unlikely(d_is_negative(d))) {
1502 dput(d);
1503 err = nfserr_serverfault;
1504 goto out;
1505 }
1506 dput(resfhp->fh_dentry);
1507 resfhp->fh_dentry = dget(d);
1508 err = fh_update(resfhp);
1509 dput(dchild);
1510 dchild = d;
1511 if (err)
1512 goto out;
1513 }
1514 break;
1515 case S_IFCHR:
1516 case S_IFBLK:
1517 case S_IFIFO:
1518 case S_IFSOCK:
1519 host_err = vfs_mknod(&nop_mnt_idmap, dirp, dchild,
1520 iap->ia_mode, rdev);
1521 break;
1522 default:
1523 printk(KERN_WARNING "nfsd: bad file type %o in nfsd_create\n",
1524 type);
1525 host_err = -EINVAL;
1526 }
1527 if (host_err < 0)
1528 goto out_nfserr;
1529
1530 err = nfsd_create_setattr(rqstp, fhp, resfhp, attrs);
1531
1532out:
1533 dput(dchild);
1534 return err;
1535
1536out_nfserr:
1537 err = nfserrno(host_err);
1538 goto out;
1539}
1540
1541/*
1542 * Create a filesystem object (regular, directory, special).
 * The parent directory is locked here and unlocked again before returning.
1544 *
1545 * N.B. Every call to nfsd_create needs an fh_put for _both_ fhp and resfhp
1546 */
1547__be32
1548nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
1549 char *fname, int flen, struct nfsd_attrs *attrs,
1550 int type, dev_t rdev, struct svc_fh *resfhp)
1551{
1552 struct dentry *dentry, *dchild = NULL;
1553 __be32 err;
1554 int host_err;
1555
1556 if (isdotent(fname, flen))
1557 return nfserr_exist;
1558
1559 err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_NOP);
1560 if (err)
1561 return err;
1562
1563 dentry = fhp->fh_dentry;
1564
1565 host_err = fh_want_write(fhp);
1566 if (host_err)
1567 return nfserrno(host_err);
1568
1569 inode_lock_nested(dentry->d_inode, I_MUTEX_PARENT);
1570 dchild = lookup_one_len(fname, dentry, flen);
1571 host_err = PTR_ERR(dchild);
1572 if (IS_ERR(dchild)) {
1573 err = nfserrno(host_err);
1574 goto out_unlock;
1575 }
1576 err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
1577 /*
1578 * We unconditionally drop our ref to dchild as fh_compose will have
1579 * already grabbed its own ref for it.
1580 */
1581 dput(dchild);
1582 if (err)
1583 goto out_unlock;
1584 err = fh_fill_pre_attrs(fhp);
1585 if (err != nfs_ok)
1586 goto out_unlock;
1587 err = nfsd_create_locked(rqstp, fhp, attrs, type, rdev, resfhp);
1588 fh_fill_post_attrs(fhp);
1589out_unlock:
1590 inode_unlock(dentry->d_inode);
1591 return err;
1592}
1593
/*
 * Read a symlink. On entry, *lenp must contain the maximum path length that
 * fits into the buffer. On return, it contains the number of bytes copied
 * into the buffer, i.e. the link length capped at that maximum.
 * N.B. After this call fhp needs an fh_put
 */
1599__be32
1600nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
1601{
1602 __be32 err;
1603 const char *link;
1604 struct path path;
1605 DEFINE_DELAYED_CALL(done);
1606 int len;
1607
1608 err = fh_verify(rqstp, fhp, S_IFLNK, NFSD_MAY_NOP);
1609 if (unlikely(err))
1610 return err;
1611
1612 path.mnt = fhp->fh_export->ex_path.mnt;
1613 path.dentry = fhp->fh_dentry;
1614
1615 if (unlikely(!d_is_symlink(path.dentry)))
1616 return nfserr_inval;
1617
1618 touch_atime(&path);
1619
1620 link = vfs_get_link(path.dentry, &done);
1621 if (IS_ERR(link))
1622 return nfserrno(PTR_ERR(link));
1623
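	/* Copy at most *lenp bytes; a longer target is silently truncated. */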
1624 len = strlen(link);
1625 if (len < *lenp)
1626 *lenp = len;
1627 memcpy(buf, link, *lenp);
1628 do_delayed_call(&done);
1629 return 0;
1630}
1631
1632/**
1633 * nfsd_symlink - Create a symlink and look up its inode
1634 * @rqstp: RPC transaction being executed
1635 * @fhp: NFS filehandle of parent directory
1636 * @fname: filename of the new symlink
1637 * @flen: length of @fname
1638 * @path: content of the new symlink (NUL-terminated)
1639 * @attrs: requested attributes of new object
1640 * @resfhp: NFS filehandle of new object
1641 *
1642 * N.B. After this call _both_ fhp and resfhp need an fh_put
1643 *
1644 * Returns nfs_ok on success, or an nfsstat in network byte order.
1645 */
1646__be32
1647nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
1648 char *fname, int flen,
1649 char *path, struct nfsd_attrs *attrs,
1650 struct svc_fh *resfhp)
1651{
1652 struct dentry *dentry, *dnew;
1653 __be32 err, cerr;
1654 int host_err;
1655
1656 err = nfserr_noent;
1657 if (!flen || path[0] == '\0')
1658 goto out;
1659 err = nfserr_exist;
1660 if (isdotent(fname, flen))
1661 goto out;
1662
1663 err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
1664 if (err)
1665 goto out;
1666
1667 host_err = fh_want_write(fhp);
1668 if (host_err) {
1669 err = nfserrno(host_err);
1670 goto out;
1671 }
1672
1673 dentry = fhp->fh_dentry;
1674 inode_lock_nested(dentry->d_inode, I_MUTEX_PARENT);
1675 dnew = lookup_one_len(fname, dentry, flen);
1676 if (IS_ERR(dnew)) {
1677 err = nfserrno(PTR_ERR(dnew));
1678 inode_unlock(dentry->d_inode);
1679 goto out_drop_write;
1680 }
1681 err = fh_fill_pre_attrs(fhp);
1682 if (err != nfs_ok)
1683 goto out_unlock;
1684 host_err = vfs_symlink(&nop_mnt_idmap, d_inode(dentry), dnew, path);
1685 err = nfserrno(host_err);
1686 cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp);
1687 if (!err)
1688 nfsd_create_setattr(rqstp, fhp, resfhp, attrs);
1689 fh_fill_post_attrs(fhp);
1690out_unlock:
1691 inode_unlock(dentry->d_inode);
1692 if (!err)
1693 err = nfserrno(commit_metadata(fhp));
1694 dput(dnew);
	if (err == 0)
		err = cerr;
1696out_drop_write:
1697 fh_drop_write(fhp);
1698out:
1699 return err;
1700}
1701
1702/*
1703 * Create a hardlink
1704 * N.B. After this call _both_ ffhp and tfhp need an fh_put
1705 */
1706__be32
1707nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
1708 char *name, int len, struct svc_fh *tfhp)
1709{
1710 struct dentry *ddir, *dnew, *dold;
1711 struct inode *dirp;
1712 __be32 err;
1713 int host_err;
1714
1715 err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_CREATE);
1716 if (err)
1717 goto out;
1718 err = fh_verify(rqstp, tfhp, 0, NFSD_MAY_NOP);
1719 if (err)
1720 goto out;
1721 err = nfserr_isdir;
1722 if (d_is_dir(tfhp->fh_dentry))
1723 goto out;
1724 err = nfserr_perm;
1725 if (!len)
1726 goto out;
1727 err = nfserr_exist;
1728 if (isdotent(name, len))
1729 goto out;
1730
1731 host_err = fh_want_write(tfhp);
1732 if (host_err) {
1733 err = nfserrno(host_err);
1734 goto out;
1735 }
1736
1737 ddir = ffhp->fh_dentry;
1738 dirp = d_inode(ddir);
1739 inode_lock_nested(dirp, I_MUTEX_PARENT);
1740
1741 dnew = lookup_one_len(name, ddir, len);
1742 if (IS_ERR(dnew)) {
1743 err = nfserrno(PTR_ERR(dnew));
1744 goto out_unlock;
1745 }
1746
1747 dold = tfhp->fh_dentry;
1748
1749 err = nfserr_noent;
1750 if (d_really_is_negative(dold))
1751 goto out_dput;
1752 err = fh_fill_pre_attrs(ffhp);
1753 if (err != nfs_ok)
1754 goto out_dput;
1755 host_err = vfs_link(dold, &nop_mnt_idmap, dirp, dnew, NULL);
1756 fh_fill_post_attrs(ffhp);
1757 inode_unlock(dirp);
1758 if (!host_err) {
1759 err = nfserrno(commit_metadata(ffhp));
1760 if (!err)
1761 err = nfserrno(commit_metadata(tfhp));
1762 } else {
1763 err = nfserrno(host_err);
1764 }
1765 dput(dnew);
1766out_drop_write:
1767 fh_drop_write(tfhp);
1768out:
1769 return err;
1770
1771out_dput:
1772 dput(dnew);
1773out_unlock:
1774 inode_unlock(dirp);
1775 goto out_drop_write;
1776}
1777
1778static void
1779nfsd_close_cached_files(struct dentry *dentry)
1780{
1781 struct inode *inode = d_inode(dentry);
1782
1783 if (inode && S_ISREG(inode->i_mode))
1784 nfsd_file_close_inode_sync(inode);
1785}
1786
1787static bool
1788nfsd_has_cached_files(struct dentry *dentry)
1789{
1790 bool ret = false;
1791 struct inode *inode = d_inode(dentry);
1792
1793 if (inode && S_ISREG(inode->i_mode))
1794 ret = nfsd_file_is_cached(inode);
1795 return ret;
1796}
1797
1798/*
1799 * Rename a file
1800 * N.B. After this call _both_ ffhp and tfhp need an fh_put
1801 */
1802__be32
1803nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
1804 struct svc_fh *tfhp, char *tname, int tlen)
1805{
1806 struct dentry *fdentry, *tdentry, *odentry, *ndentry, *trap;
1807 struct inode *fdir, *tdir;
1808 __be32 err;
1809 int host_err;
1810 bool close_cached = false;
1811
1812 err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_REMOVE);
1813 if (err)
1814 goto out;
1815 err = fh_verify(rqstp, tfhp, S_IFDIR, NFSD_MAY_CREATE);
1816 if (err)
1817 goto out;
1818
1819 fdentry = ffhp->fh_dentry;
1820 fdir = d_inode(fdentry);
1821
1822 tdentry = tfhp->fh_dentry;
1823 tdir = d_inode(tdentry);
1824
1825 err = nfserr_perm;
1826 if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
1827 goto out;
1828
1829 err = nfserr_xdev;
1830 if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
1831 goto out;
1832 if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry)
1833 goto out;
1834
1835retry:
1836 host_err = fh_want_write(ffhp);
1837 if (host_err) {
1838 err = nfserrno(host_err);
1839 goto out;
1840 }
1841
1842 trap = lock_rename(tdentry, fdentry);
1843 if (IS_ERR(trap)) {
1844 err = nfserr_xdev;
1845 goto out_want_write;
1846 }
1847 err = fh_fill_pre_attrs(ffhp);
1848 if (err != nfs_ok)
1849 goto out_unlock;
1850 err = fh_fill_pre_attrs(tfhp);
1851 if (err != nfs_ok)
1852 goto out_unlock;
1853
1854 odentry = lookup_one_len(fname, fdentry, flen);
1855 host_err = PTR_ERR(odentry);
1856 if (IS_ERR(odentry))
1857 goto out_nfserr;
1858
1859 host_err = -ENOENT;
1860 if (d_really_is_negative(odentry))
1861 goto out_dput_old;
1862 host_err = -EINVAL;
1863 if (odentry == trap)
1864 goto out_dput_old;
1865
1866 ndentry = lookup_one_len(tname, tdentry, tlen);
1867 host_err = PTR_ERR(ndentry);
1868 if (IS_ERR(ndentry))
1869 goto out_dput_old;
1870 host_err = -ENOTEMPTY;
1871 if (ndentry == trap)
1872 goto out_dput_new;
1873
1874 if ((ndentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK) &&
1875 nfsd_has_cached_files(ndentry)) {
1876 close_cached = true;
1877 goto out_dput_old;
1878 } else {
1879 struct renamedata rd = {
1880 .old_mnt_idmap = &nop_mnt_idmap,
1881 .old_dir = fdir,
1882 .old_dentry = odentry,
1883 .new_mnt_idmap = &nop_mnt_idmap,
1884 .new_dir = tdir,
1885 .new_dentry = ndentry,
1886 };
1887 int retries;
1888
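		/*
		 * -EAGAIN from vfs_rename() means an NFSv4 delegation is
		 * still outstanding on the source file; wait for the client
		 * to return it, then retry once.
		 */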
1889 for (retries = 1;;) {
1890 host_err = vfs_rename(&rd);
1891 if (host_err != -EAGAIN || !retries--)
1892 break;
1893 if (!nfsd_wait_for_delegreturn(rqstp, d_inode(odentry)))
1894 break;
1895 }
1896 if (!host_err) {
1897 host_err = commit_metadata(tfhp);
1898 if (!host_err)
1899 host_err = commit_metadata(ffhp);
1900 }
1901 }
1902 out_dput_new:
1903 dput(ndentry);
1904 out_dput_old:
1905 dput(odentry);
1906 out_nfserr:
1907 err = nfserrno(host_err);
1908
1909 if (!close_cached) {
1910 fh_fill_post_attrs(ffhp);
1911 fh_fill_post_attrs(tfhp);
1912 }
1913out_unlock:
1914 unlock_rename(tdentry, fdentry);
1915out_want_write:
1916 fh_drop_write(ffhp);
1917
1918 /*
1919 * If the target dentry has cached open files, then we need to
1920 * try to close them prior to doing the rename. Final fput
1921 * shouldn't be done with locks held however, so we delay it
1922 * until this point and then reattempt the whole shebang.
1923 */
1924 if (close_cached) {
1925 close_cached = false;
1926 nfsd_close_cached_files(ndentry);
1927 dput(ndentry);
1928 goto retry;
1929 }
1930out:
1931 return err;
1932}
1933
1934/*
1935 * Unlink a file or directory
1936 * N.B. After this call fhp needs an fh_put
1937 */
1938__be32
1939nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
1940 char *fname, int flen)
1941{
1942 struct dentry *dentry, *rdentry;
1943 struct inode *dirp;
1944 struct inode *rinode;
1945 __be32 err;
1946 int host_err;
1947
1948 err = nfserr_acces;
1949 if (!flen || isdotent(fname, flen))
1950 goto out;
1951 err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_REMOVE);
1952 if (err)
1953 goto out;
1954
1955 host_err = fh_want_write(fhp);
1956 if (host_err)
1957 goto out_nfserr;
1958
1959 dentry = fhp->fh_dentry;
1960 dirp = d_inode(dentry);
1961 inode_lock_nested(dirp, I_MUTEX_PARENT);
1962
1963 rdentry = lookup_one_len(fname, dentry, flen);
1964 host_err = PTR_ERR(rdentry);
1965 if (IS_ERR(rdentry))
1966 goto out_unlock;
1967
1968 if (d_really_is_negative(rdentry)) {
1969 dput(rdentry);
1970 host_err = -ENOENT;
1971 goto out_unlock;
1972 }
1973 rinode = d_inode(rdentry);
1974 err = fh_fill_pre_attrs(fhp);
1975 if (err != nfs_ok)
1976 goto out_unlock;
1977
1978 ihold(rinode);
1979 if (!type)
1980 type = d_inode(rdentry)->i_mode & S_IFMT;
1981
1982 if (type != S_IFDIR) {
1983 int retries;
1984
1985 if (rdentry->d_sb->s_export_op->flags & EXPORT_OP_CLOSE_BEFORE_UNLINK)
1986 nfsd_close_cached_files(rdentry);
1987
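		/*
		 * -EAGAIN from vfs_unlink() means an NFSv4 delegation is
		 * still outstanding on this file; wait for the client to
		 * return it, then retry once.
		 */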
1988 for (retries = 1;;) {
1989 host_err = vfs_unlink(&nop_mnt_idmap, dirp, rdentry, NULL);
1990 if (host_err != -EAGAIN || !retries--)
1991 break;
1992 if (!nfsd_wait_for_delegreturn(rqstp, rinode))
1993 break;
1994 }
1995 } else {
1996 host_err = vfs_rmdir(&nop_mnt_idmap, dirp, rdentry);
1997 }
1998 fh_fill_post_attrs(fhp);
1999
2000 inode_unlock(dirp);
2001 if (!host_err)
2002 host_err = commit_metadata(fhp);
2003 dput(rdentry);
2004 iput(rinode); /* truncate the inode here */
2005
2006out_drop_write:
2007 fh_drop_write(fhp);
2008out_nfserr:
2009 if (host_err == -EBUSY) {
2010 /* name is mounted-on. There is no perfect
2011 * error status.
2012 */
2013 err = nfserr_file_open;
2014 } else {
2015 err = nfserrno(host_err);
2016 }
2017out:
2018 return err;
2019out_unlock:
2020 inode_unlock(dirp);
2021 goto out_drop_write;
2022}
2023
2024/*
2025 * We do this buffering because we must not call back into the file
2026 * system's ->lookup() method from the filldir callback. That may well
2027 * deadlock a number of file systems.
2028 *
2029 * This is based heavily on the implementation of same in XFS.
2030 */
2031struct buffered_dirent {
2032 u64 ino;
2033 loff_t offset;
2034 int namlen;
2035 unsigned int d_type;
2036 char name[];
2037};
2038
2039struct readdir_data {
2040 struct dir_context ctx;
2041 char *dirent;
2042 size_t used;
2043 int full;
2044};
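
/*
 * Entries are packed into the single page at buf->dirent as variable-length
 * records.  As an illustration (assuming a 64-bit build where
 * sizeof(struct buffered_dirent) is 24 bytes), an entry named "foo.c"
 * (namlen == 5) consumes ALIGN(24 + 5, 8) == 32 bytes of the page.
 */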
2045
2046static bool nfsd_buffered_filldir(struct dir_context *ctx, const char *name,
2047 int namlen, loff_t offset, u64 ino,
2048 unsigned int d_type)
2049{
2050 struct readdir_data *buf =
2051 container_of(ctx, struct readdir_data, ctx);
2052 struct buffered_dirent *de = (void *)(buf->dirent + buf->used);
2053 unsigned int reclen;
2054
2055 reclen = ALIGN(sizeof(struct buffered_dirent) + namlen, sizeof(u64));
2056 if (buf->used + reclen > PAGE_SIZE) {
2057 buf->full = 1;
2058 return false;
2059 }
2060
2061 de->namlen = namlen;
2062 de->offset = offset;
2063 de->ino = ino;
2064 de->d_type = d_type;
2065 memcpy(de->name, name, namlen);
2066 buf->used += reclen;
2067
2068 return true;
2069}
2070
2071static __be32 nfsd_buffered_readdir(struct file *file, struct svc_fh *fhp,
2072 nfsd_filldir_t func, struct readdir_cd *cdp,
2073 loff_t *offsetp)
2074{
2075 struct buffered_dirent *de;
2076 int host_err;
2077 int size;
2078 loff_t offset;
2079 struct readdir_data buf = {
2080 .ctx.actor = nfsd_buffered_filldir,
2081 .dirent = (void *)__get_free_page(GFP_KERNEL)
2082 };
2083
2084 if (!buf.dirent)
2085 return nfserrno(-ENOMEM);
2086
2087 offset = *offsetp;
2088
2089 while (1) {
2090 unsigned int reclen;
2091
2092 cdp->err = nfserr_eof; /* will be cleared on successful read */
2093 buf.used = 0;
2094 buf.full = 0;
2095
2096 host_err = iterate_dir(file, &buf.ctx);
2097 if (buf.full)
2098 host_err = 0;
2099
2100 if (host_err < 0)
2101 break;
2102
2103 size = buf.used;
2104
2105 if (!size)
2106 break;
2107
2108 de = (struct buffered_dirent *)buf.dirent;
2109 while (size > 0) {
2110 offset = de->offset;
2111
2112 if (func(cdp, de->name, de->namlen, de->offset,
2113 de->ino, de->d_type))
2114 break;
2115
2116 if (cdp->err != nfs_ok)
2117 break;
2118
2119 trace_nfsd_dirent(fhp, de->ino, de->name, de->namlen);
2120
2121 reclen = ALIGN(sizeof(*de) + de->namlen,
2122 sizeof(u64));
2123 size -= reclen;
2124 de = (struct buffered_dirent *)((char *)de + reclen);
2125 }
2126 if (size > 0) /* We bailed out early */
2127 break;
2128
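		/*
		 * All buffered entries were consumed; note the current
		 * directory position before filling the buffer again.
		 */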
2129 offset = vfs_llseek(file, 0, SEEK_CUR);
2130 }
2131
2132 free_page((unsigned long)(buf.dirent));
2133
2134 if (host_err)
2135 return nfserrno(host_err);
2136
2137 *offsetp = offset;
2138 return cdp->err;
2139}
2140
2141/**
2142 * nfsd_readdir - Read entries from a directory
2143 * @rqstp: RPC transaction context
2144 * @fhp: NFS file handle of directory to be read
2145 * @offsetp: OUT: seek offset of final entry that was read
2146 * @cdp: OUT: an eof error value
2147 * @func: entry filler actor
2148 *
2149 * This implementation ignores the NFSv3/4 verifier cookie.
2150 *
2151 * NB: normal system calls hold file->f_pos_lock when calling
2152 * ->iterate_shared and ->llseek, but nfsd_readdir() does not.
2153 * Because the struct file acquired here is not visible to other
 * threads, its internal state does not need mutex protection.
2155 *
2156 * Returns nfs_ok on success, otherwise an nfsstat code is
2157 * returned.
2158 */
2159__be32
2160nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp,
2161 struct readdir_cd *cdp, nfsd_filldir_t func)
2162{
2163 __be32 err;
2164 struct file *file;
2165 loff_t offset = *offsetp;
2166 int may_flags = NFSD_MAY_READ;
2167
2168 err = nfsd_open(rqstp, fhp, S_IFDIR, may_flags, &file);
2169 if (err)
2170 goto out;
2171
2172 if (fhp->fh_64bit_cookies)
2173 file->f_mode |= FMODE_64BITHASH;
2174 else
2175 file->f_mode |= FMODE_32BITHASH;
2176
2177 offset = vfs_llseek(file, offset, SEEK_SET);
2178 if (offset < 0) {
2179 err = nfserrno((int)offset);
2180 goto out_close;
2181 }
2182
2183 err = nfsd_buffered_readdir(file, fhp, func, cdp, offsetp);
2184
2185 if (err == nfserr_eof || err == nfserr_toosmall)
2186 err = nfs_ok; /* can still be found in ->err */
2187out_close:
2188 nfsd_filp_close(file);
2189out:
2190 return err;
2191}
2192
2193/**
 * nfsd_filp_close - close a file synchronously
 * @fp: the file to close
 *
 * nfsd_filp_close() is similar in behaviour to filp_close().
 * The difference is that if this is the final close on the
 * file, then the finalisation happens immediately, rather than
 * being handed over to a work queue, as is the case for
 * filp_close().
 * When a user-space process closes a file (even via
 * filp_close()), the finalisation happens before returning to
 * userspace, so it is effectively synchronous. When a kernel thread
 * uses filp_close(), on the other hand, the handling is completely
 * asynchronous. This means that any cost imposed by that finalisation
 * is not imposed on the nfsd thread, and nfsd could potentially
 * close files more quickly than the work queue finalises the close,
 * which would lead to unbounded growth in the queue.
 *
 * In some contexts it is not safe to wait synchronously for
 * close finalisation (see the comment for __fput_sync()), but nfsd
 * does not match those contexts. In particular, it holds no locks
 * at the time this function is called, and no finalisation of any
 * file, socket, or device driver would have any cause to wait
 * for nfsd to make progress.
2217 */
2218void nfsd_filp_close(struct file *fp)
2219{
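	/*
	 * Take an extra reference so that the fput() inside filp_close()
	 * is not the final one; __fput_sync() then drops that extra
	 * reference and, as it is now the last one, performs the
	 * finalisation synchronously in this thread.
	 */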
2220 get_file(fp);
2221 filp_close(fp, NULL);
2222 __fput_sync(fp);
2223}
2224
2225/*
2226 * Get file system stats
2227 * N.B. After this call fhp needs an fh_put
2228 */
2229__be32
2230nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, int access)
2231{
2232 __be32 err;
2233
2234 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP | access);
2235 if (!err) {
2236 struct path path = {
2237 .mnt = fhp->fh_export->ex_path.mnt,
2238 .dentry = fhp->fh_dentry,
2239 };
2240 if (vfs_statfs(&path, stat))
2241 err = nfserr_io;
2242 }
2243 return err;
2244}
2245
2246static int exp_rdonly(struct svc_cred *cred, struct svc_export *exp)
2247{
2248 return nfsexp_flags(cred, exp) & NFSEXP_READONLY;
2249}
2250
2251#ifdef CONFIG_NFSD_V4
2252/*
2253 * Helper function to translate error numbers. In the case of xattr operations,
2254 * some error codes need to be translated outside of the standard translations.
2255 *
2256 * ENODATA needs to be translated to nfserr_noxattr.
2257 * E2BIG to nfserr_xattr2big.
2258 *
2259 * Additionally, vfs_listxattr can return -ERANGE. This means that the
2260 * file has too many extended attributes to retrieve inside an
2261 * XATTR_LIST_MAX sized buffer. This is a bug in the xattr implementation:
2262 * filesystems will allow the adding of extended attributes until they hit
2263 * their own internal limit. This limit may be larger than XATTR_LIST_MAX.
2264 * So, at that point, the attributes are present and valid, but can't
2265 * be retrieved using listxattr, since the upper level xattr code enforces
2266 * the XATTR_LIST_MAX limit.
2267 *
2268 * This bug means that we need to deal with listxattr returning -ERANGE. The
2269 * best mapping is to return TOOSMALL.
2270 */
2271static __be32
2272nfsd_xattr_errno(int err)
2273{
2274 switch (err) {
2275 case -ENODATA:
2276 return nfserr_noxattr;
2277 case -E2BIG:
2278 return nfserr_xattr2big;
2279 case -ERANGE:
2280 return nfserr_toosmall;
2281 }
2282 return nfserrno(err);
2283}
2284
2285/*
2286 * Retrieve the specified user extended attribute. To avoid always
2287 * having to allocate the maximum size (since we are not getting
2288 * a maximum size from the RPC), do a probe + alloc. Hold a reader
2289 * lock on i_rwsem to prevent the extended attribute from changing
2290 * size while we're doing this.
2291 */
2292__be32
2293nfsd_getxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name,
2294 void **bufp, int *lenp)
2295{
2296 ssize_t len;
2297 __be32 err;
2298 char *buf;
2299 struct inode *inode;
2300 struct dentry *dentry;
2301
2302 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_READ);
2303 if (err)
2304 return err;
2305
2306 err = nfs_ok;
2307 dentry = fhp->fh_dentry;
2308 inode = d_inode(dentry);
2309
2310 inode_lock_shared(inode);
2311
2312 len = vfs_getxattr(&nop_mnt_idmap, dentry, name, NULL, 0);
2313
2314 /*
2315 * Zero-length attribute, just return.
2316 */
2317 if (len == 0) {
2318 *bufp = NULL;
2319 *lenp = 0;
2320 goto out;
2321 }
2322
2323 if (len < 0) {
2324 err = nfsd_xattr_errno(len);
2325 goto out;
2326 }
2327
2328 if (len > *lenp) {
2329 err = nfserr_toosmall;
2330 goto out;
2331 }
2332
2333 buf = kvmalloc(len, GFP_KERNEL);
2334 if (buf == NULL) {
2335 err = nfserr_jukebox;
2336 goto out;
2337 }
2338
2339 len = vfs_getxattr(&nop_mnt_idmap, dentry, name, buf, len);
2340 if (len <= 0) {
2341 kvfree(buf);
2342 buf = NULL;
2343 err = nfsd_xattr_errno(len);
2344 }
2345
2346 *lenp = len;
2347 *bufp = buf;
2348
2349out:
2350 inode_unlock_shared(inode);
2351
2352 return err;
2353}
2354
2355/*
2356 * Retrieve the xattr names. Since we can't know how many are
2357 * user extended attributes, we must get all attributes here,
2358 * and have the XDR encode filter out the "user." ones.
2359 *
2360 * While this could always just allocate an XATTR_LIST_MAX
2361 * buffer, that's a waste, so do a probe + allocate. To
2362 * avoid any changes between the probe and allocate, wrap
2363 * this in inode_lock.
2364 */
2365__be32
2366nfsd_listxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char **bufp,
2367 int *lenp)
2368{
2369 ssize_t len;
2370 __be32 err;
2371 char *buf;
2372 struct inode *inode;
2373 struct dentry *dentry;
2374
2375 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_READ);
2376 if (err)
2377 return err;
2378
2379 dentry = fhp->fh_dentry;
2380 inode = d_inode(dentry);
2381 *lenp = 0;
2382
2383 inode_lock_shared(inode);
2384
2385 len = vfs_listxattr(dentry, NULL, 0);
2386 if (len <= 0) {
2387 err = nfsd_xattr_errno(len);
2388 goto out;
2389 }
2390
2391 if (len > XATTR_LIST_MAX) {
2392 err = nfserr_xattr2big;
2393 goto out;
2394 }
2395
2396 buf = kvmalloc(len, GFP_KERNEL);
2397 if (buf == NULL) {
2398 err = nfserr_jukebox;
2399 goto out;
2400 }
2401
2402 len = vfs_listxattr(dentry, buf, len);
2403 if (len <= 0) {
2404 kvfree(buf);
2405 err = nfsd_xattr_errno(len);
2406 goto out;
2407 }
2408
2409 *lenp = len;
2410 *bufp = buf;
2411
2412 err = nfs_ok;
2413out:
2414 inode_unlock_shared(inode);
2415
2416 return err;
2417}
2418
2419/**
2420 * nfsd_removexattr - Remove an extended attribute
2421 * @rqstp: RPC transaction being executed
2422 * @fhp: NFS filehandle of object with xattr to remove
 * @name: name of xattr to remove (NUL-terminated)
2424 *
2425 * Pass in a NULL pointer for delegated_inode, and let the client deal
2426 * with NFS4ERR_DELAY (same as with e.g. setattr and remove).
2427 *
2428 * Returns nfs_ok on success, or an nfsstat in network byte order.
2429 */
2430__be32
2431nfsd_removexattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name)
2432{
2433 __be32 err;
2434 int ret;
2435
2436 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_WRITE);
2437 if (err)
2438 return err;
2439
2440 ret = fh_want_write(fhp);
2441 if (ret)
2442 return nfserrno(ret);
2443
2444 inode_lock(fhp->fh_dentry->d_inode);
2445 err = fh_fill_pre_attrs(fhp);
2446 if (err != nfs_ok)
2447 goto out_unlock;
2448 ret = __vfs_removexattr_locked(&nop_mnt_idmap, fhp->fh_dentry,
2449 name, NULL);
2450 err = nfsd_xattr_errno(ret);
2451 fh_fill_post_attrs(fhp);
2452out_unlock:
2453 inode_unlock(fhp->fh_dentry->d_inode);
2454 fh_drop_write(fhp);
2455
2456 return err;
2457}
2458
2459__be32
2460nfsd_setxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name,
2461 void *buf, u32 len, u32 flags)
2462{
2463 __be32 err;
2464 int ret;
2465
2466 err = fh_verify(rqstp, fhp, 0, NFSD_MAY_WRITE);
2467 if (err)
2468 return err;
2469
2470 ret = fh_want_write(fhp);
2471 if (ret)
2472 return nfserrno(ret);
2473 inode_lock(fhp->fh_dentry->d_inode);
2474 err = fh_fill_pre_attrs(fhp);
2475 if (err != nfs_ok)
2476 goto out_unlock;
2477 ret = __vfs_setxattr_locked(&nop_mnt_idmap, fhp->fh_dentry,
2478 name, buf, len, flags, NULL);
2479 fh_fill_post_attrs(fhp);
2480 err = nfsd_xattr_errno(ret);
2481out_unlock:
2482 inode_unlock(fhp->fh_dentry->d_inode);
2483 fh_drop_write(fhp);
2484 return err;
2485}
2486#endif
2487
2488/*
2489 * Check for a user's access permissions to this inode.
2490 */
2491__be32
2492nfsd_permission(struct svc_cred *cred, struct svc_export *exp,
2493 struct dentry *dentry, int acc)
2494{
2495 struct inode *inode = d_inode(dentry);
2496 int err;
2497
2498 if ((acc & NFSD_MAY_MASK) == NFSD_MAY_NOP)
2499 return 0;
2500#if 0
2501 dprintk("nfsd: permission 0x%x%s%s%s%s%s%s%s mode 0%o%s%s%s\n",
2502 acc,
2503 (acc & NFSD_MAY_READ)? " read" : "",
2504 (acc & NFSD_MAY_WRITE)? " write" : "",
2505 (acc & NFSD_MAY_EXEC)? " exec" : "",
2506 (acc & NFSD_MAY_SATTR)? " sattr" : "",
2507 (acc & NFSD_MAY_TRUNC)? " trunc" : "",
2508 (acc & NFSD_MAY_NLM)? " nlm" : "",
2509 (acc & NFSD_MAY_OWNER_OVERRIDE)? " owneroverride" : "",
2510 inode->i_mode,
2511 IS_IMMUTABLE(inode)? " immut" : "",
2512 IS_APPEND(inode)? " append" : "",
2513 __mnt_is_readonly(exp->ex_path.mnt)? " ro" : "");
2514 dprintk(" owner %d/%d user %d/%d\n",
2515 inode->i_uid, inode->i_gid, current_fsuid(), current_fsgid());
2516#endif
2517
	/* Normally we reject any write/sattr etc access on a read-only file
	 * system. But if it is IRIX doing a check on write access for a
	 * device special file, we ignore rofs.
	 */
2522 if (!(acc & NFSD_MAY_LOCAL_ACCESS))
2523 if (acc & (NFSD_MAY_WRITE | NFSD_MAY_SATTR | NFSD_MAY_TRUNC)) {
2524 if (exp_rdonly(cred, exp) ||
2525 __mnt_is_readonly(exp->ex_path.mnt))
2526 return nfserr_rofs;
2527 if (/* (acc & NFSD_MAY_WRITE) && */ IS_IMMUTABLE(inode))
2528 return nfserr_perm;
2529 }
2530 if ((acc & NFSD_MAY_TRUNC) && IS_APPEND(inode))
2531 return nfserr_perm;
2532
2533 /*
2534 * The file owner always gets access permission for accesses that
2535 * would normally be checked at open time. This is to make
2536 * file access work even when the client has done a fchmod(fd, 0).
2537 *
2538 * However, `cp foo bar' should fail nevertheless when bar is
2539 * readonly. A sensible way to do this might be to reject all
2540 * attempts to truncate a read-only file, because a creat() call
2541 * always implies file truncation.
2542 * ... but this isn't really fair. A process may reasonably call
2543 * ftruncate on an open file descriptor on a file with perm 000.
2544 * We must trust the client to do permission checking - using "ACCESS"
2545 * with NFSv3.
2546 */
2547 if ((acc & NFSD_MAY_OWNER_OVERRIDE) &&
2548 uid_eq(inode->i_uid, current_fsuid()))
2549 return 0;
2550
2551 /* This assumes NFSD_MAY_{READ,WRITE,EXEC} == MAY_{READ,WRITE,EXEC} */
2552 err = inode_permission(&nop_mnt_idmap, inode,
2553 acc & (MAY_READ | MAY_WRITE | MAY_EXEC));
2554
2555 /* Allow read access to binaries even when mode 111 */
2556 if (err == -EACCES && S_ISREG(inode->i_mode) &&
2557 (acc == (NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE) ||
2558 acc == (NFSD_MAY_READ | NFSD_MAY_READ_IF_EXEC)))
2559 err = inode_permission(&nop_mnt_idmap, inode, MAY_EXEC);
2560
2561 return err? nfserrno(err) : 0;
2562}