1/*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#include <linux/mm.h>
39#include <linux/delay.h>
40#include <linux/errno.h>
41#include <linux/string.h>
42#include <linux/ratelimit.h>
43#include <linux/printk.h>
44#include <linux/slab.h>
45#include <linux/sunrpc/clnt.h>
46#include <linux/nfs.h>
47#include <linux/nfs4.h>
48#include <linux/nfs_fs.h>
49#include <linux/nfs_page.h>
50#include <linux/nfs_mount.h>
51#include <linux/namei.h>
52#include <linux/mount.h>
53#include <linux/module.h>
54#include <linux/xattr.h>
55#include <linux/utsname.h>
56#include <linux/freezer.h>
57#include <linux/iversion.h>
58
59#include "nfs4_fs.h"
60#include "delegation.h"
61#include "internal.h"
62#include "iostat.h"
63#include "callback.h"
64#include "pnfs.h"
65#include "netns.h"
66#include "nfs4idmap.h"
67#include "nfs4session.h"
68#include "fscache.h"
69
70#include "nfs4trace.h"
71
72#define NFSDBG_FACILITY NFSDBG_PROC
73
74#define NFS4_POLL_RETRY_MIN (HZ/10)
75#define NFS4_POLL_RETRY_MAX (15*HZ)
76
77/* file attributes which can be mapped to nfs attributes */
78#define NFS4_VALID_ATTRS (ATTR_MODE \
79 | ATTR_UID \
80 | ATTR_GID \
81 | ATTR_SIZE \
82 | ATTR_ATIME \
83 | ATTR_MTIME \
84 | ATTR_CTIME \
85 | ATTR_ATIME_SET \
86 | ATTR_MTIME_SET)
87
88struct nfs4_opendata;
89static int _nfs4_proc_open(struct nfs4_opendata *data);
90static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
91static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
92static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
93static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label);
94static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
95static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
96 struct nfs_fattr *fattr, struct iattr *sattr,
97 struct nfs_open_context *ctx, struct nfs4_label *ilabel,
98 struct nfs4_label *olabel);
99#ifdef CONFIG_NFS_V4_1
100static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
101 struct rpc_cred *cred,
102 struct nfs4_slot *slot,
103 bool is_privileged);
104static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
105 struct rpc_cred *);
106static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
107 struct rpc_cred *, bool);
108#endif
109
110#ifdef CONFIG_NFS_V4_SECURITY_LABEL
111static inline struct nfs4_label *
112nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
113 struct iattr *sattr, struct nfs4_label *label)
114{
115 int err;
116
117 if (label == NULL)
118 return NULL;
119
120 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
121 return NULL;
122
123 err = security_dentry_init_security(dentry, sattr->ia_mode,
124 &dentry->d_name, (void **)&label->label, &label->len);
125 if (err == 0)
126 return label;
127
128 return NULL;
129}
130static inline void
131nfs4_label_release_security(struct nfs4_label *label)
132{
133 if (label)
134 security_release_secctx(label->label, label->len);
135}
136static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
137{
138 if (label)
139 return server->attr_bitmask;
140
141 return server->attr_bitmask_nl;
142}
143#else
144static inline struct nfs4_label *
145nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
146 struct iattr *sattr, struct nfs4_label *l)
147{ return NULL; }
148static inline void
149nfs4_label_release_security(struct nfs4_label *label)
150{ return; }
151static inline u32 *
152nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
153{ return server->attr_bitmask; }
154#endif
155
156/* Prevent leaks of NFSv4 errors into userland */
157static int nfs4_map_errors(int err)
158{
159 if (err >= -1000)
160 return err;
161 switch (err) {
162 case -NFS4ERR_RESOURCE:
163 case -NFS4ERR_LAYOUTTRYLATER:
164 case -NFS4ERR_RECALLCONFLICT:
165 return -EREMOTEIO;
166 case -NFS4ERR_WRONGSEC:
167 case -NFS4ERR_WRONG_CRED:
168 return -EPERM;
169 case -NFS4ERR_BADOWNER:
170 case -NFS4ERR_BADNAME:
171 return -EINVAL;
172 case -NFS4ERR_SHARE_DENIED:
173 return -EACCES;
174 case -NFS4ERR_MINOR_VERS_MISMATCH:
175 return -EPROTONOSUPPORT;
176 case -NFS4ERR_FILE_OPEN:
177 return -EBUSY;
178 default:
179 dprintk("%s could not handle NFSv4 error %d\n",
180 __func__, -err);
181 break;
182 }
183 return -EIO;
184}
185
186/*
187 * This is our standard bitmap for GETATTR requests.
188 */
189const u32 nfs4_fattr_bitmap[3] = {
190 FATTR4_WORD0_TYPE
191 | FATTR4_WORD0_CHANGE
192 | FATTR4_WORD0_SIZE
193 | FATTR4_WORD0_FSID
194 | FATTR4_WORD0_FILEID,
195 FATTR4_WORD1_MODE
196 | FATTR4_WORD1_NUMLINKS
197 | FATTR4_WORD1_OWNER
198 | FATTR4_WORD1_OWNER_GROUP
199 | FATTR4_WORD1_RAWDEV
200 | FATTR4_WORD1_SPACE_USED
201 | FATTR4_WORD1_TIME_ACCESS
202 | FATTR4_WORD1_TIME_METADATA
203 | FATTR4_WORD1_TIME_MODIFY
204 | FATTR4_WORD1_MOUNTED_ON_FILEID,
205#ifdef CONFIG_NFS_V4_SECURITY_LABEL
206 FATTR4_WORD2_SECURITY_LABEL
207#endif
208};
209
210static const u32 nfs4_pnfs_open_bitmap[3] = {
211 FATTR4_WORD0_TYPE
212 | FATTR4_WORD0_CHANGE
213 | FATTR4_WORD0_SIZE
214 | FATTR4_WORD0_FSID
215 | FATTR4_WORD0_FILEID,
216 FATTR4_WORD1_MODE
217 | FATTR4_WORD1_NUMLINKS
218 | FATTR4_WORD1_OWNER
219 | FATTR4_WORD1_OWNER_GROUP
220 | FATTR4_WORD1_RAWDEV
221 | FATTR4_WORD1_SPACE_USED
222 | FATTR4_WORD1_TIME_ACCESS
223 | FATTR4_WORD1_TIME_METADATA
224 | FATTR4_WORD1_TIME_MODIFY,
225 FATTR4_WORD2_MDSTHRESHOLD
226#ifdef CONFIG_NFS_V4_SECURITY_LABEL
227 | FATTR4_WORD2_SECURITY_LABEL
228#endif
229};
230
231static const u32 nfs4_open_noattr_bitmap[3] = {
232 FATTR4_WORD0_TYPE
233 | FATTR4_WORD0_FILEID,
234};
235
236const u32 nfs4_statfs_bitmap[3] = {
237 FATTR4_WORD0_FILES_AVAIL
238 | FATTR4_WORD0_FILES_FREE
239 | FATTR4_WORD0_FILES_TOTAL,
240 FATTR4_WORD1_SPACE_AVAIL
241 | FATTR4_WORD1_SPACE_FREE
242 | FATTR4_WORD1_SPACE_TOTAL
243};
244
245const u32 nfs4_pathconf_bitmap[3] = {
246 FATTR4_WORD0_MAXLINK
247 | FATTR4_WORD0_MAXNAME,
248 0
249};
250
251const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
252 | FATTR4_WORD0_MAXREAD
253 | FATTR4_WORD0_MAXWRITE
254 | FATTR4_WORD0_LEASE_TIME,
255 FATTR4_WORD1_TIME_DELTA
256 | FATTR4_WORD1_FS_LAYOUT_TYPES,
257 FATTR4_WORD2_LAYOUT_BLKSIZE
258 | FATTR4_WORD2_CLONE_BLKSIZE
259};
260
261const u32 nfs4_fs_locations_bitmap[3] = {
262 FATTR4_WORD0_CHANGE
263 | FATTR4_WORD0_SIZE
264 | FATTR4_WORD0_FSID
265 | FATTR4_WORD0_FILEID
266 | FATTR4_WORD0_FS_LOCATIONS,
267 FATTR4_WORD1_OWNER
268 | FATTR4_WORD1_OWNER_GROUP
269 | FATTR4_WORD1_RAWDEV
270 | FATTR4_WORD1_SPACE_USED
271 | FATTR4_WORD1_TIME_ACCESS
272 | FATTR4_WORD1_TIME_METADATA
273 | FATTR4_WORD1_TIME_MODIFY
274 | FATTR4_WORD1_MOUNTED_ON_FILEID,
275};
276
277static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
278 struct nfs4_readdir_arg *readdir)
279{
280 unsigned int attrs = FATTR4_WORD0_FILEID | FATTR4_WORD0_TYPE;
281 __be32 *start, *p;
282
283 if (cookie > 2) {
284 readdir->cookie = cookie;
285 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
286 return;
287 }
288
289 readdir->cookie = 0;
290 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
291 if (cookie == 2)
292 return;
293
294 /*
295 * NFSv4 servers do not return entries for '.' and '..'
296 * Therefore, we fake these entries here. We let '.'
297 * have cookie 0 and '..' have cookie 1. Note that
298 * when talking to the server, we always send cookie 0
299 * instead of 1 or 2.
300 */
301 start = p = kmap_atomic(*readdir->pages);
302
303 if (cookie == 0) {
304 *p++ = xdr_one; /* next */
305 *p++ = xdr_zero; /* cookie, first word */
306 *p++ = xdr_one; /* cookie, second word */
307 *p++ = xdr_one; /* entry len */
308 memcpy(p, ".\0\0\0", 4); /* entry */
309 p++;
310 *p++ = xdr_one; /* bitmap length */
311 *p++ = htonl(attrs); /* bitmap */
312 *p++ = htonl(12); /* attribute buffer length */
313 *p++ = htonl(NF4DIR);
314 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
315 }
316
317 *p++ = xdr_one; /* next */
318 *p++ = xdr_zero; /* cookie, first word */
319 *p++ = xdr_two; /* cookie, second word */
320 *p++ = xdr_two; /* entry len */
321 memcpy(p, "..\0\0", 4); /* entry */
322 p++;
323 *p++ = xdr_one; /* bitmap length */
324 *p++ = htonl(attrs); /* bitmap */
325 *p++ = htonl(12); /* attribute buffer length */
326 *p++ = htonl(NF4DIR);
327 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
328
329 readdir->pgbase = (char *)p - (char *)start;
330 readdir->count -= readdir->pgbase;
331 kunmap_atomic(start);
332}
333
334static void nfs4_test_and_free_stateid(struct nfs_server *server,
335 nfs4_stateid *stateid,
336 struct rpc_cred *cred)
337{
338 const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;
339
340 ops->test_and_free_expired(server, stateid, cred);
341}
342
343static void __nfs4_free_revoked_stateid(struct nfs_server *server,
344 nfs4_stateid *stateid,
345 struct rpc_cred *cred)
346{
347 stateid->type = NFS4_REVOKED_STATEID_TYPE;
348 nfs4_test_and_free_stateid(server, stateid, cred);
349}
350
351static void nfs4_free_revoked_stateid(struct nfs_server *server,
352 const nfs4_stateid *stateid,
353 struct rpc_cred *cred)
354{
355 nfs4_stateid tmp;
356
357 nfs4_stateid_copy(&tmp, stateid);
358 __nfs4_free_revoked_stateid(server, &tmp, cred);
359}
360
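/*
 * Return the current retry delay and double the stored timeout for the
 * next attempt, clamped to [NFS4_POLL_RETRY_MIN, NFS4_POLL_RETRY_MAX].
 * Successive calls therefore back off exponentially, e.g. HZ/10, HZ/5,
 * 2*HZ/5, ... up to 15*HZ. A NULL timeout simply yields the maximum.
 */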
361static long nfs4_update_delay(long *timeout)
362{
363 long ret;
364 if (!timeout)
365 return NFS4_POLL_RETRY_MAX;
366 if (*timeout <= 0)
367 *timeout = NFS4_POLL_RETRY_MIN;
368 if (*timeout > NFS4_POLL_RETRY_MAX)
369 *timeout = NFS4_POLL_RETRY_MAX;
370 ret = *timeout;
371 *timeout <<= 1;
372 return ret;
373}
374
375static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
376{
377 int res = 0;
378
379 might_sleep();
380
381 freezable_schedule_timeout_killable_unsafe(
382 nfs4_update_delay(timeout));
383 if (fatal_signal_pending(current))
384 res = -ERESTARTSYS;
385 return res;
386}
387
388/* Common NFSv4 error handler shared by the sync and async paths: map the
389 * error and flag whether the caller must delay, wait for recovery, or retry.
390 */
391static int nfs4_do_handle_exception(struct nfs_server *server,
392 int errorcode, struct nfs4_exception *exception)
393{
394 struct nfs_client *clp = server->nfs_client;
395 struct nfs4_state *state = exception->state;
396 const nfs4_stateid *stateid = exception->stateid;
397 struct inode *inode = exception->inode;
398 int ret = errorcode;
399
400 exception->delay = 0;
401 exception->recovering = 0;
402 exception->retry = 0;
403
404 if (stateid == NULL && state != NULL)
405 stateid = &state->stateid;
406
407 switch (errorcode) {
408 case 0:
409 return 0;
410 case -NFS4ERR_DELEG_REVOKED:
411 case -NFS4ERR_ADMIN_REVOKED:
412 case -NFS4ERR_EXPIRED:
413 case -NFS4ERR_BAD_STATEID:
414 if (inode != NULL && stateid != NULL) {
415 nfs_inode_find_state_and_recover(inode,
416 stateid);
417 goto wait_on_recovery;
418 }
419 case -NFS4ERR_OPENMODE:
420 if (inode) {
421 int err;
422
423 err = nfs_async_inode_return_delegation(inode,
424 stateid);
425 if (err == 0)
426 goto wait_on_recovery;
427 if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
428 exception->retry = 1;
429 break;
430 }
431 }
432 if (state == NULL)
433 break;
434 ret = nfs4_schedule_stateid_recovery(server, state);
435 if (ret < 0)
436 break;
437 goto wait_on_recovery;
438 case -NFS4ERR_STALE_STATEID:
439 case -NFS4ERR_STALE_CLIENTID:
440 nfs4_schedule_lease_recovery(clp);
441 goto wait_on_recovery;
442 case -NFS4ERR_MOVED:
443 ret = nfs4_schedule_migration_recovery(server);
444 if (ret < 0)
445 break;
446 goto wait_on_recovery;
447 case -NFS4ERR_LEASE_MOVED:
448 nfs4_schedule_lease_moved_recovery(clp);
449 goto wait_on_recovery;
450#if defined(CONFIG_NFS_V4_1)
451 case -NFS4ERR_BADSESSION:
452 case -NFS4ERR_BADSLOT:
453 case -NFS4ERR_BAD_HIGH_SLOT:
454 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
455 case -NFS4ERR_DEADSESSION:
456 case -NFS4ERR_SEQ_FALSE_RETRY:
457 case -NFS4ERR_SEQ_MISORDERED:
458 dprintk("%s ERROR: %d Reset session\n", __func__,
459 errorcode);
460 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
461 goto wait_on_recovery;
462#endif /* defined(CONFIG_NFS_V4_1) */
463 case -NFS4ERR_FILE_OPEN:
464 if (exception->timeout > HZ) {
465 /* We have retried a decent amount, time to
466 * fail
467 */
468 ret = -EBUSY;
469 break;
470 }
471 case -NFS4ERR_DELAY:
472 nfs_inc_server_stats(server, NFSIOS_DELAY);
473 case -NFS4ERR_GRACE:
474 case -NFS4ERR_LAYOUTTRYLATER:
475 case -NFS4ERR_RECALLCONFLICT:
476 exception->delay = 1;
477 return 0;
478
479 case -NFS4ERR_RETRY_UNCACHED_REP:
480 case -NFS4ERR_OLD_STATEID:
481 exception->retry = 1;
482 break;
483 case -NFS4ERR_BADOWNER:
484 /* The following works around a Linux server bug! */
485 case -NFS4ERR_BADNAME:
486 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
487 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
488 exception->retry = 1;
489 printk(KERN_WARNING "NFS: v4 server %s "
490 "does not accept raw "
491 "uid/gids. "
492 "Reenabling the idmapper.\n",
493 server->nfs_client->cl_hostname);
494 }
495 }
496 /* We failed to handle the error */
497 return nfs4_map_errors(ret);
498wait_on_recovery:
499 exception->recovering = 1;
500 return 0;
501}
502
503/* This is the error handling routine for processes that are allowed
504 * to sleep.
505 */
506int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
507{
508 struct nfs_client *clp = server->nfs_client;
509 int ret;
510
511 ret = nfs4_do_handle_exception(server, errorcode, exception);
512 if (exception->delay) {
513 ret = nfs4_delay(server->client, &exception->timeout);
514 goto out_retry;
515 }
516 if (exception->recovering) {
517 ret = nfs4_wait_clnt_recover(clp);
518 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
519 return -EIO;
520 goto out_retry;
521 }
522 return ret;
523out_retry:
524 if (ret == 0)
525 exception->retry = 1;
526 return ret;
527}
528
529static int
530nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
531 int errorcode, struct nfs4_exception *exception)
532{
533 struct nfs_client *clp = server->nfs_client;
534 int ret;
535
536 ret = nfs4_do_handle_exception(server, errorcode, exception);
537 if (exception->delay) {
538 rpc_delay(task, nfs4_update_delay(&exception->timeout));
539 goto out_retry;
540 }
541 if (exception->recovering) {
542 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
543 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
544 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
545 goto out_retry;
546 }
547 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
548 ret = -EIO;
549 return ret;
550out_retry:
551 if (ret == 0)
552 exception->retry = 1;
553 return ret;
554}
555
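/*
 * Asynchronous error handler: map the NFSv4 error in task->tk_status,
 * arranging any rpc_delay() or wait for state recovery on the rpc task.
 * Returns -EAGAIN if the caller should restart the RPC call, 0 otherwise.
 */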
556static int
557nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
558 struct nfs4_state *state, long *timeout)
559{
560 struct nfs4_exception exception = {
561 .state = state,
562 };
563
564 if (task->tk_status >= 0)
565 return 0;
566 if (timeout)
567 exception.timeout = *timeout;
568 task->tk_status = nfs4_async_handle_exception(task, server,
569 task->tk_status,
570 &exception);
571 if (exception.delay && timeout)
572 *timeout = exception.timeout;
573 if (exception.retry)
574 return -EAGAIN;
575 return 0;
576}
577
578/*
579 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
580 * or 'false' otherwise.
581 */
582static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
583{
584 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
585 return (flavor == RPC_AUTH_GSS_KRB5I) || (flavor == RPC_AUTH_GSS_KRB5P);
586}
587
588static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
589{
590 spin_lock(&clp->cl_lock);
591 if (time_before(clp->cl_last_renewal, timestamp))
592 clp->cl_last_renewal = timestamp;
593 spin_unlock(&clp->cl_lock);
594}
595
596static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
597{
598 struct nfs_client *clp = server->nfs_client;
599
600 if (!nfs4_has_session(clp))
601 do_renew_lease(clp, timestamp);
602}
603
604struct nfs4_call_sync_data {
605 const struct nfs_server *seq_server;
606 struct nfs4_sequence_args *seq_args;
607 struct nfs4_sequence_res *seq_res;
608};
609
610void nfs4_init_sequence(struct nfs4_sequence_args *args,
611 struct nfs4_sequence_res *res, int cache_reply)
612{
613 args->sa_slot = NULL;
614 args->sa_cache_this = cache_reply;
615 args->sa_privileged = 0;
616
617 res->sr_slot = NULL;
618}
619
620static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
621{
622 args->sa_privileged = 1;
623}
624
625static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
626{
627 struct nfs4_slot *slot = res->sr_slot;
628 struct nfs4_slot_table *tbl;
629
630 tbl = slot->table;
631 spin_lock(&tbl->slot_tbl_lock);
632 if (!nfs41_wake_and_assign_slot(tbl, slot))
633 nfs4_free_slot(tbl, slot);
634 spin_unlock(&tbl->slot_tbl_lock);
635
636 res->sr_slot = NULL;
637}
638
639static int nfs40_sequence_done(struct rpc_task *task,
640 struct nfs4_sequence_res *res)
641{
642 if (res->sr_slot != NULL)
643 nfs40_sequence_free_slot(res);
644 return 1;
645}
646
647#if defined(CONFIG_NFS_V4_1)
648
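/*
 * Return a session slot to the table, bumping its sequence number if the
 * previous request completed. If the last transmitted highest used slot
 * id exceeded the server's target, notify the server so the lower value
 * can be advertised and the slot table shrunk.
 */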
649static void nfs41_release_slot(struct nfs4_slot *slot)
650{
651 struct nfs4_session *session;
652 struct nfs4_slot_table *tbl;
653 bool send_new_highest_used_slotid = false;
654
655 if (!slot)
656 return;
657 tbl = slot->table;
658 session = tbl->session;
659
660 /* Bump the slot sequence number */
661 if (slot->seq_done)
662 slot->seq_nr++;
663 slot->seq_done = 0;
664
665 spin_lock(&tbl->slot_tbl_lock);
666 /* Be nice to the server: try to ensure that the last transmitted
667 * value for highest_used_slotid <= target_highest_slotid
668 */
669 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
670 send_new_highest_used_slotid = true;
671
672 if (nfs41_wake_and_assign_slot(tbl, slot)) {
673 send_new_highest_used_slotid = false;
674 goto out_unlock;
675 }
676 nfs4_free_slot(tbl, slot);
677
678 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
679 send_new_highest_used_slotid = false;
680out_unlock:
681 spin_unlock(&tbl->slot_tbl_lock);
682 if (send_new_highest_used_slotid)
683 nfs41_notify_server(session->clp);
684 if (waitqueue_active(&tbl->slot_waitq))
685 wake_up_all(&tbl->slot_waitq);
686}
687
688static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
689{
690 nfs41_release_slot(res->sr_slot);
691 res->sr_slot = NULL;
692}
693
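/*
 * Process the result of the SEQUENCE operation in an NFSv4.1 reply.
 * On success, renew the clientid lease and update the target slot id;
 * otherwise handle NFS4ERR_DELAY, NFS4ERR_BADSLOT, NFS4ERR_SEQ_MISORDERED
 * and NFS4ERR_SEQ_FALSE_RETRY, retrying the call or scheduling session
 * recovery as appropriate. Returns 0 when the call has been restarted,
 * 1 otherwise.
 */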
694static int nfs41_sequence_process(struct rpc_task *task,
695 struct nfs4_sequence_res *res)
696{
697 struct nfs4_session *session;
698 struct nfs4_slot *slot = res->sr_slot;
699 struct nfs_client *clp;
700 bool interrupted = false;
701 int ret = 1;
702
703 if (slot == NULL)
704 goto out_noaction;
705 /* don't increment the sequence number if the task wasn't sent */
706 if (!RPC_WAS_SENT(task))
707 goto out;
708
709 session = slot->table->session;
710
711 if (slot->interrupted) {
712 if (res->sr_status != -NFS4ERR_DELAY)
713 slot->interrupted = 0;
714 interrupted = true;
715 }
716
717 trace_nfs4_sequence_done(session, res);
718 /* Check the SEQUENCE operation status */
719 switch (res->sr_status) {
720 case 0:
721 /* Update the slot's sequence and clientid lease timer */
722 slot->seq_done = 1;
723 clp = session->clp;
724 do_renew_lease(clp, res->sr_timestamp);
725 /* Check sequence flags */
726 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags,
727 !!slot->privileged);
728 nfs41_update_target_slotid(slot->table, slot, res);
729 break;
730 case 1:
731 /*
732 * sr_status remains 1 if an RPC level error occurred.
733 * The server may or may not have processed the sequence
734 * operation.
735 * Mark the slot as having hosted an interrupted RPC call.
736 */
737 slot->interrupted = 1;
738 goto out;
739 case -NFS4ERR_DELAY:
740 /* The server detected a resend of the RPC call and
741 * returned NFS4ERR_DELAY as per Section 2.10.6.2
742 * of RFC5661.
743 */
744 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
745 __func__,
746 slot->slot_nr,
747 slot->seq_nr);
748 goto out_retry;
749 case -NFS4ERR_BADSLOT:
750 /*
751 * The slot id we used was probably retired. Try again
752 * using a different slot id.
753 */
754 if (slot->seq_nr < slot->table->target_highest_slotid)
755 goto session_recover;
756 goto retry_nowait;
757 case -NFS4ERR_SEQ_MISORDERED:
758 /*
759 * Was the last operation on this sequence interrupted?
760 * If so, retry after bumping the sequence number.
761 */
762 if (interrupted)
763 goto retry_new_seq;
764 /*
765 * Could this slot have been previously retired?
766 * If so, then the server may be expecting seq_nr = 1!
767 */
768 if (slot->seq_nr != 1) {
769 slot->seq_nr = 1;
770 goto retry_nowait;
771 }
772 goto session_recover;
773 case -NFS4ERR_SEQ_FALSE_RETRY:
774 if (interrupted)
775 goto retry_new_seq;
776 goto session_recover;
777 default:
778 /* Just update the slot sequence no. */
779 slot->seq_done = 1;
780 }
781out:
782 /* The session may be reset by one of the error handlers. */
783 dprintk("%s: Error %d, freeing the slot\n", __func__, res->sr_status);
784out_noaction:
785 return ret;
786session_recover:
787 nfs4_schedule_session_recovery(session, res->sr_status);
788 goto retry_nowait;
789retry_new_seq:
790 ++slot->seq_nr;
791retry_nowait:
792 if (rpc_restart_call_prepare(task)) {
793 nfs41_sequence_free_slot(res);
794 task->tk_status = 0;
795 ret = 0;
796 }
797 goto out;
798out_retry:
799 if (!rpc_restart_call(task))
800 goto out;
801 rpc_delay(task, NFS4_POLL_RETRY_MAX);
802 return 0;
803}
804
805int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
806{
807 if (!nfs41_sequence_process(task, res))
808 return 0;
809 if (res->sr_slot != NULL)
810 nfs41_sequence_free_slot(res);
811 return 1;
812
813}
814EXPORT_SYMBOL_GPL(nfs41_sequence_done);
815
816static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
817{
818 if (res->sr_slot == NULL)
819 return 1;
820 if (res->sr_slot->table->session != NULL)
821 return nfs41_sequence_process(task, res);
822 return nfs40_sequence_done(task, res);
823}
824
825static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
826{
827 if (res->sr_slot != NULL) {
828 if (res->sr_slot->table->session != NULL)
829 nfs41_sequence_free_slot(res);
830 else
831 nfs40_sequence_free_slot(res);
832 }
833}
834
835int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
836{
837 if (res->sr_slot == NULL)
838 return 1;
839 if (!res->sr_slot->table->session)
840 return nfs40_sequence_done(task, res);
841 return nfs41_sequence_done(task, res);
842}
843EXPORT_SYMBOL_GPL(nfs4_sequence_done);
844
845static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
846{
847 struct nfs4_call_sync_data *data = calldata;
848
849 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
850
851 nfs4_setup_sequence(data->seq_server->nfs_client,
852 data->seq_args, data->seq_res, task);
853}
854
855static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
856{
857 struct nfs4_call_sync_data *data = calldata;
858
859 nfs41_sequence_done(task, data->seq_res);
860}
861
862static const struct rpc_call_ops nfs41_call_sync_ops = {
863 .rpc_call_prepare = nfs41_call_sync_prepare,
864 .rpc_call_done = nfs41_call_sync_done,
865};
866
867static void
868nfs4_sequence_process_interrupted(struct nfs_client *client,
869 struct nfs4_slot *slot, struct rpc_cred *cred)
870{
871 struct rpc_task *task;
872
873 task = _nfs41_proc_sequence(client, cred, slot, true);
874 if (!IS_ERR(task))
875 rpc_put_task_async(task);
876}
877
878#else /* !CONFIG_NFS_V4_1 */
879
880static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
881{
882 return nfs40_sequence_done(task, res);
883}
884
885static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
886{
887 if (res->sr_slot != NULL)
888 nfs40_sequence_free_slot(res);
889}
890
891int nfs4_sequence_done(struct rpc_task *task,
892 struct nfs4_sequence_res *res)
893{
894 return nfs40_sequence_done(task, res);
895}
896EXPORT_SYMBOL_GPL(nfs4_sequence_done);
897
898static void
899nfs4_sequence_process_interrupted(struct nfs_client *client,
900 struct nfs4_slot *slot, struct rpc_cred *cred)
901{
902 WARN_ON_ONCE(1);
903 slot->interrupted = 0;
904}
905
906#endif /* !CONFIG_NFS_V4_1 */
907
908static
909void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
910 struct nfs4_sequence_res *res,
911 struct nfs4_slot *slot)
912{
913 if (!slot)
914 return;
915 slot->privileged = args->sa_privileged ? 1 : 0;
916 args->sa_slot = slot;
917
918 res->sr_slot = slot;
919 res->sr_timestamp = jiffies;
920 res->sr_status_flags = 0;
921 res->sr_status = 1;
922
923}
924
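/*
 * Allocate a slot (from the session's fore channel table on NFSv4.1+,
 * or the client's cl_slot_tbl otherwise), attach it to the sequence
 * arguments and start the RPC call. If the table is draining (and the
 * request is not privileged) or no slot is free, the task is put to
 * sleep and -EAGAIN is returned.
 */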
925int nfs4_setup_sequence(struct nfs_client *client,
926 struct nfs4_sequence_args *args,
927 struct nfs4_sequence_res *res,
928 struct rpc_task *task)
929{
930 struct nfs4_session *session = nfs4_get_session(client);
931 struct nfs4_slot_table *tbl = client->cl_slot_tbl;
932 struct nfs4_slot *slot;
933
934 /* slot already allocated? */
935 if (res->sr_slot != NULL)
936 goto out_start;
937
938 if (session) {
939 tbl = &session->fc_slot_table;
940 task->tk_timeout = 0;
941 }
942
943 for (;;) {
944 spin_lock(&tbl->slot_tbl_lock);
945 /* The state manager will wait until the slot table is empty */
946 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
947 goto out_sleep;
948
949 slot = nfs4_alloc_slot(tbl);
950 if (IS_ERR(slot)) {
951 /* Try again in 1/4 second */
952 if (slot == ERR_PTR(-ENOMEM))
953 task->tk_timeout = HZ >> 2;
954 goto out_sleep;
955 }
956 spin_unlock(&tbl->slot_tbl_lock);
957
958 if (likely(!slot->interrupted))
959 break;
960 nfs4_sequence_process_interrupted(client,
961 slot, task->tk_msg.rpc_cred);
962 }
963
964 nfs4_sequence_attach_slot(args, res, slot);
965
966 trace_nfs4_setup_sequence(session, args);
967out_start:
968 rpc_call_start(task);
969 return 0;
970
971out_sleep:
972 if (args->sa_privileged)
973 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
974 NULL, RPC_PRIORITY_PRIVILEGED);
975 else
976 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
977 spin_unlock(&tbl->slot_tbl_lock);
978 return -EAGAIN;
979}
980EXPORT_SYMBOL_GPL(nfs4_setup_sequence);
981
982static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
983{
984 struct nfs4_call_sync_data *data = calldata;
985 nfs4_setup_sequence(data->seq_server->nfs_client,
986 data->seq_args, data->seq_res, task);
987}
988
989static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
990{
991 struct nfs4_call_sync_data *data = calldata;
992 nfs4_sequence_done(task, data->seq_res);
993}
994
995static const struct rpc_call_ops nfs40_call_sync_ops = {
996 .rpc_call_prepare = nfs40_call_sync_prepare,
997 .rpc_call_done = nfs40_call_sync_done,
998};
999
1000static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
1001 struct nfs_server *server,
1002 struct rpc_message *msg,
1003 struct nfs4_sequence_args *args,
1004 struct nfs4_sequence_res *res)
1005{
1006 int ret;
1007 struct rpc_task *task;
1008 struct nfs_client *clp = server->nfs_client;
1009 struct nfs4_call_sync_data data = {
1010 .seq_server = server,
1011 .seq_args = args,
1012 .seq_res = res,
1013 };
1014 struct rpc_task_setup task_setup = {
1015 .rpc_client = clnt,
1016 .rpc_message = msg,
1017 .callback_ops = clp->cl_mvops->call_sync_ops,
1018 .callback_data = &data
1019 };
1020
1021 task = rpc_run_task(&task_setup);
1022 if (IS_ERR(task))
1023 ret = PTR_ERR(task);
1024 else {
1025 ret = task->tk_status;
1026 rpc_put_task(task);
1027 }
1028 return ret;
1029}
1030
1031int nfs4_call_sync(struct rpc_clnt *clnt,
1032 struct nfs_server *server,
1033 struct rpc_message *msg,
1034 struct nfs4_sequence_args *args,
1035 struct nfs4_sequence_res *res,
1036 int cache_reply)
1037{
1038 nfs4_init_sequence(args, res, cache_reply);
1039 return nfs4_call_sync_sequence(clnt, server, msg, args, res);
1040}
1041
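/*
 * Update the directory's cached change attribute after a namespace-
 * modifying operation. The ctime/mtime and readdir data caches are
 * invalidated; if the change was not applied atomically on the server,
 * force a lookup revalidation and drop the access/ACL caches too.
 */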
1042static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
1043 unsigned long timestamp)
1044{
1045 struct nfs_inode *nfsi = NFS_I(dir);
1046
1047 spin_lock(&dir->i_lock);
1048 nfsi->cache_validity |= NFS_INO_INVALID_CTIME
1049 | NFS_INO_INVALID_MTIME
1050 | NFS_INO_INVALID_DATA;
1051 if (cinfo->atomic && cinfo->before == inode_peek_iversion_raw(dir)) {
1052 nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE;
1053 nfsi->attrtimeo_timestamp = jiffies;
1054 } else {
1055 nfs_force_lookup_revalidate(dir);
1056 if (cinfo->before != inode_peek_iversion_raw(dir))
1057 nfsi->cache_validity |= NFS_INO_INVALID_ACCESS |
1058 NFS_INO_INVALID_ACL;
1059 }
1060 inode_set_iversion_raw(dir, cinfo->after);
1061 nfsi->read_cache_jiffies = timestamp;
1062 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
1063 nfs_fscache_invalidate(dir);
1064 spin_unlock(&dir->i_lock);
1065}
1066
1067struct nfs4_opendata {
1068 struct kref kref;
1069 struct nfs_openargs o_arg;
1070 struct nfs_openres o_res;
1071 struct nfs_open_confirmargs c_arg;
1072 struct nfs_open_confirmres c_res;
1073 struct nfs4_string owner_name;
1074 struct nfs4_string group_name;
1075 struct nfs4_label *a_label;
1076 struct nfs_fattr f_attr;
1077 struct nfs4_label *f_label;
1078 struct dentry *dir;
1079 struct dentry *dentry;
1080 struct nfs4_state_owner *owner;
1081 struct nfs4_state *state;
1082 struct iattr attrs;
1083 unsigned long timestamp;
1084 bool rpc_done;
1085 bool file_created;
1086 bool is_recover;
1087 bool cancelled;
1088 int rpc_status;
1089};
1090
1091struct nfs4_open_createattrs {
1092 struct nfs4_label *label;
1093 struct iattr *sattr;
1094 const __u32 verf[2];
1095};
1096
1097static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
1098 int err, struct nfs4_exception *exception)
1099{
1100 if (err != -EINVAL)
1101 return false;
1102 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1103 return false;
1104 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
1105 exception->retry = 1;
1106 return true;
1107}
1108
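/*
 * Map the VFS open mode and flags onto NFSv4 share access bits. On
 * servers that support the v4.1 atomic open extensions, also ask for
 * no delegation when the file is being opened with O_DIRECT.
 */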
1109static u32
1110nfs4_map_atomic_open_share(struct nfs_server *server,
1111 fmode_t fmode, int openflags)
1112{
1113 u32 res = 0;
1114
1115 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
1116 case FMODE_READ:
1117 res = NFS4_SHARE_ACCESS_READ;
1118 break;
1119 case FMODE_WRITE:
1120 res = NFS4_SHARE_ACCESS_WRITE;
1121 break;
1122 case FMODE_READ|FMODE_WRITE:
1123 res = NFS4_SHARE_ACCESS_BOTH;
1124 }
1125 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1126 goto out;
1127 /* Want no delegation if we're using O_DIRECT */
1128 if (openflags & O_DIRECT)
1129 res |= NFS4_SHARE_WANT_NO_DELEG;
1130out:
1131 return res;
1132}
1133
1134static enum open_claim_type4
1135nfs4_map_atomic_open_claim(struct nfs_server *server,
1136 enum open_claim_type4 claim)
1137{
1138 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
1139 return claim;
1140 switch (claim) {
1141 default:
1142 return claim;
1143 case NFS4_OPEN_CLAIM_FH:
1144 return NFS4_OPEN_CLAIM_NULL;
1145 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1146 return NFS4_OPEN_CLAIM_DELEGATE_CUR;
1147 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1148 return NFS4_OPEN_CLAIM_DELEGATE_PREV;
1149 }
1150}
1151
1152static void nfs4_init_opendata_res(struct nfs4_opendata *p)
1153{
1154 p->o_res.f_attr = &p->f_attr;
1155 p->o_res.f_label = p->f_label;
1156 p->o_res.seqid = p->o_arg.seqid;
1157 p->c_res.seqid = p->c_arg.seqid;
1158 p->o_res.server = p->o_arg.server;
1159 p->o_res.access_request = p->o_arg.access;
1160 nfs_fattr_init(&p->f_attr);
1161 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
1162}
1163
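/*
 * Allocate and initialise the nfs4_opendata that carries the arguments
 * and results of an OPEN compound, taking references on the dentry, its
 * parent directory and the state owner. Returns NULL on failure.
 */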
1164static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1165 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
1166 const struct nfs4_open_createattrs *c,
1167 enum open_claim_type4 claim,
1168 gfp_t gfp_mask)
1169{
1170 struct dentry *parent = dget_parent(dentry);
1171 struct inode *dir = d_inode(parent);
1172 struct nfs_server *server = NFS_SERVER(dir);
1173 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1174 struct nfs4_label *label = (c != NULL) ? c->label : NULL;
1175 struct nfs4_opendata *p;
1176
1177 p = kzalloc(sizeof(*p), gfp_mask);
1178 if (p == NULL)
1179 goto err;
1180
1181 p->f_label = nfs4_label_alloc(server, gfp_mask);
1182 if (IS_ERR(p->f_label))
1183 goto err_free_p;
1184
1185 p->a_label = nfs4_label_alloc(server, gfp_mask);
1186 if (IS_ERR(p->a_label))
1187 goto err_free_f;
1188
1189 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1190 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1191 if (IS_ERR(p->o_arg.seqid))
1192 goto err_free_label;
1193 nfs_sb_active(dentry->d_sb);
1194 p->dentry = dget(dentry);
1195 p->dir = parent;
1196 p->owner = sp;
1197 atomic_inc(&sp->so_count);
1198 p->o_arg.open_flags = flags;
1199 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1200 p->o_arg.umask = current_umask();
1201 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1202 p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1203 fmode, flags);
1204 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
1205 * will return permission denied for all bits until close */
1206 if (!(flags & O_EXCL)) {
1207 /* ask server to check for all possible rights as results
1208 * are cached */
1209 switch (p->o_arg.claim) {
1210 default:
1211 break;
1212 case NFS4_OPEN_CLAIM_NULL:
1213 case NFS4_OPEN_CLAIM_FH:
1214 p->o_arg.access = NFS4_ACCESS_READ |
1215 NFS4_ACCESS_MODIFY |
1216 NFS4_ACCESS_EXTEND |
1217 NFS4_ACCESS_EXECUTE;
1218 }
1219 }
1220 p->o_arg.clientid = server->nfs_client->cl_clientid;
1221 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1222 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1223 p->o_arg.name = &dentry->d_name;
1224 p->o_arg.server = server;
1225 p->o_arg.bitmask = nfs4_bitmask(server, label);
1226 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1227 p->o_arg.label = nfs4_label_copy(p->a_label, label);
1228 switch (p->o_arg.claim) {
1229 case NFS4_OPEN_CLAIM_NULL:
1230 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1231 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1232 p->o_arg.fh = NFS_FH(dir);
1233 break;
1234 case NFS4_OPEN_CLAIM_PREVIOUS:
1235 case NFS4_OPEN_CLAIM_FH:
1236 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1237 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1238 p->o_arg.fh = NFS_FH(d_inode(dentry));
1239 }
1240 if (c != NULL && c->sattr != NULL && c->sattr->ia_valid != 0) {
1241 p->o_arg.u.attrs = &p->attrs;
1242 memcpy(&p->attrs, c->sattr, sizeof(p->attrs));
1243
1244 memcpy(p->o_arg.u.verifier.data, c->verf,
1245 sizeof(p->o_arg.u.verifier.data));
1246 }
1247 p->c_arg.fh = &p->o_res.fh;
1248 p->c_arg.stateid = &p->o_res.stateid;
1249 p->c_arg.seqid = p->o_arg.seqid;
1250 nfs4_init_opendata_res(p);
1251 kref_init(&p->kref);
1252 return p;
1253
1254err_free_label:
1255 nfs4_label_free(p->a_label);
1256err_free_f:
1257 nfs4_label_free(p->f_label);
1258err_free_p:
1259 kfree(p);
1260err:
1261 dput(parent);
1262 return NULL;
1263}
1264
1265static void nfs4_opendata_free(struct kref *kref)
1266{
1267 struct nfs4_opendata *p = container_of(kref,
1268 struct nfs4_opendata, kref);
1269 struct super_block *sb = p->dentry->d_sb;
1270
1271 nfs_free_seqid(p->o_arg.seqid);
1272 nfs4_sequence_free_slot(&p->o_res.seq_res);
1273 if (p->state != NULL)
1274 nfs4_put_open_state(p->state);
1275 nfs4_put_state_owner(p->owner);
1276
1277 nfs4_label_free(p->a_label);
1278 nfs4_label_free(p->f_label);
1279
1280 dput(p->dir);
1281 dput(p->dentry);
1282 nfs_sb_deactive(sb);
1283 nfs_fattr_free_names(&p->f_attr);
1284 kfree(p->f_attr.mdsthreshold);
1285 kfree(p);
1286}
1287
1288static void nfs4_opendata_put(struct nfs4_opendata *p)
1289{
1290 if (p != NULL)
1291 kref_put(&p->kref, nfs4_opendata_free);
1292}
1293
1294static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
1295 fmode_t fmode)
1296{
1297 switch(fmode & (FMODE_READ|FMODE_WRITE)) {
1298 case FMODE_READ|FMODE_WRITE:
1299 return state->n_rdwr != 0;
1300 case FMODE_WRITE:
1301 return state->n_wronly != 0;
1302 case FMODE_READ:
1303 return state->n_rdonly != 0;
1304 }
1305 WARN_ON_ONCE(1);
1306 return false;
1307}
1308
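/*
 * Return non-zero if an existing open stateid already covers the
 * requested mode, so that no OPEN RPC is needed. O_EXCL and O_TRUNC
 * always force a trip to the server.
 */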
1309static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
1310{
1311 int ret = 0;
1312
1313 if (open_mode & (O_EXCL|O_TRUNC))
1314 goto out;
1315 switch (mode & (FMODE_READ|FMODE_WRITE)) {
1316 case FMODE_READ:
1317 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1318 && state->n_rdonly != 0;
1319 break;
1320 case FMODE_WRITE:
1321 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1322 && state->n_wronly != 0;
1323 break;
1324 case FMODE_READ|FMODE_WRITE:
1325 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1326 && state->n_rdwr != 0;
1327 }
1328out:
1329 return ret;
1330}
1331
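/*
 * Return 1 if the cached delegation covers the requested open mode, is
 * not being returned, and the claim type permits a local open; the
 * delegation is marked as referenced so it is not reaped prematurely.
 */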
1332static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
1333 enum open_claim_type4 claim)
1334{
1335 if (delegation == NULL)
1336 return 0;
1337 if ((delegation->type & fmode) != fmode)
1338 return 0;
1339 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
1340 return 0;
1341 switch (claim) {
1342 case NFS4_OPEN_CLAIM_NULL:
1343 case NFS4_OPEN_CLAIM_FH:
1344 break;
1345 case NFS4_OPEN_CLAIM_PREVIOUS:
1346 if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
1347 break;
1348 default:
1349 return 0;
1350 }
1351 nfs_mark_delegation_referenced(delegation);
1352 return 1;
1353}
1354
1355static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1356{
1357 switch (fmode) {
1358 case FMODE_WRITE:
1359 state->n_wronly++;
1360 break;
1361 case FMODE_READ:
1362 state->n_rdonly++;
1363 break;
1364 case FMODE_READ|FMODE_WRITE:
1365 state->n_rdwr++;
1366 }
1367 nfs4_state_set_mode_locked(state, state->state | fmode);
1368}
1369
1370#ifdef CONFIG_NFS_V4_1
1371static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
1372{
1373 if (state->n_rdonly && !test_bit(NFS_O_RDONLY_STATE, &state->flags))
1374 return true;
1375 if (state->n_wronly && !test_bit(NFS_O_WRONLY_STATE, &state->flags))
1376 return true;
1377 if (state->n_rdwr && !test_bit(NFS_O_RDWR_STATE, &state->flags))
1378 return true;
1379 return false;
1380}
1381#endif /* CONFIG_NFS_V4_1 */
1382
1383static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
1384{
1385 if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
1386 wake_up_all(&state->waitq);
1387}
1388
1389static void nfs_state_log_out_of_order_open_stateid(struct nfs4_state *state,
1390 const nfs4_stateid *stateid)
1391{
1392 u32 state_seqid = be32_to_cpu(state->open_stateid.seqid);
1393 u32 stateid_seqid = be32_to_cpu(stateid->seqid);
1394
1395 if (stateid_seqid == state_seqid + 1U ||
1396 (stateid_seqid == 1U && state_seqid == 0xffffffffU))
1397 nfs_state_log_update_open_stateid(state);
1398 else
1399 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
1400}
1401
1402static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1403{
1404 struct nfs_client *clp = state->owner->so_server->nfs_client;
1405 bool need_recover = false;
1406
1407 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1408 need_recover = true;
1409 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1410 need_recover = true;
1411 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1412 need_recover = true;
1413 if (need_recover)
1414 nfs4_state_mark_reclaim_nograce(clp, state);
1415}
1416
1417/*
1418 * Check whether the caller may update the open stateid
1419 * to the value passed in by stateid.
1420 *
1421 * Note: This function relies heavily on the server implementing
1422 * RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2
1423 * correctly.
1424 * i.e. The stateid seqids have to be initialised to 1, and
1425 * are then incremented on every state transition.
1426 */
1427static bool nfs_need_update_open_stateid(struct nfs4_state *state,
1428 const nfs4_stateid *stateid)
1429{
1430 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0 ||
1431 !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1432 if (stateid->seqid == cpu_to_be32(1))
1433 nfs_state_log_update_open_stateid(state);
1434 else
1435 set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
1436 return true;
1437 }
1438
1439 if (nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
1440 nfs_state_log_out_of_order_open_stateid(state, stateid);
1441 return true;
1442 }
1443 return false;
1444}
1445
1446static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1447{
1448 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1449 return;
1450 if (state->n_wronly)
1451 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1452 if (state->n_rdonly)
1453 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1454 if (state->n_rdwr)
1455 set_bit(NFS_O_RDWR_STATE, &state->flags);
1456 set_bit(NFS_OPEN_STATE, &state->flags);
1457}
1458
1459static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1460 nfs4_stateid *stateid, fmode_t fmode)
1461{
1462 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1463 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1464 case FMODE_WRITE:
1465 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1466 break;
1467 case FMODE_READ:
1468 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1469 break;
1470 case 0:
1471 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1472 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1473 clear_bit(NFS_OPEN_STATE, &state->flags);
1474 }
1475 if (stateid == NULL)
1476 return;
1477 /* Handle OPEN+OPEN_DOWNGRADE races */
1478 if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
1479 !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
1480 nfs_resync_open_stateid_locked(state);
1481 goto out;
1482 }
1483 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1484 nfs4_stateid_copy(&state->stateid, stateid);
1485 nfs4_stateid_copy(&state->open_stateid, stateid);
1486 trace_nfs4_open_stateid_update(state->inode, stateid, 0);
1487out:
1488 nfs_state_log_update_open_stateid(state);
1489}
1490
1491static void nfs_clear_open_stateid(struct nfs4_state *state,
1492 nfs4_stateid *arg_stateid,
1493 nfs4_stateid *stateid, fmode_t fmode)
1494{
1495 write_seqlock(&state->seqlock);
1496 /* Ignore the update if the CLOSE argument doesn't match the current stateid */
1497 if (nfs4_state_match_open_stateid_other(state, arg_stateid))
1498 nfs_clear_open_stateid_locked(state, stateid, fmode);
1499 write_sequnlock(&state->seqlock);
1500 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1501 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1502}
1503
1504static void nfs_set_open_stateid_locked(struct nfs4_state *state,
1505 const nfs4_stateid *stateid, nfs4_stateid *freeme)
1506{
1507 DEFINE_WAIT(wait);
1508 int status = 0;
1509 for (;;) {
1510
1511 if (!nfs_need_update_open_stateid(state, stateid))
1512 return;
1513 if (!test_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
1514 break;
1515 if (status)
1516 break;
1517 /* Rely on seqids for serialisation with NFSv4.0 */
1518 if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
1519 break;
1520
1521 prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
1522 /*
1523 * Ensure we process the state changes in the same order
1524 * in which the server processed them by delaying the
1525 * update of the stateid until we are in sequence.
1526 */
1527 write_sequnlock(&state->seqlock);
1528 spin_unlock(&state->owner->so_lock);
1529 rcu_read_unlock();
1530 trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
1531 if (!signal_pending(current)) {
1532 if (schedule_timeout(5*HZ) == 0)
1533 status = -EAGAIN;
1534 else
1535 status = 0;
1536 } else
1537 status = -EINTR;
1538 finish_wait(&state->waitq, &wait);
1539 rcu_read_lock();
1540 spin_lock(&state->owner->so_lock);
1541 write_seqlock(&state->seqlock);
1542 }
1543
1544 if (test_bit(NFS_OPEN_STATE, &state->flags) &&
1545 !nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1546 nfs4_stateid_copy(freeme, &state->open_stateid);
1547 nfs_test_and_clear_all_open_stateid(state);
1548 }
1549
1550 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1551 nfs4_stateid_copy(&state->stateid, stateid);
1552 nfs4_stateid_copy(&state->open_stateid, stateid);
1553 trace_nfs4_open_stateid_update(state->inode, stateid, status);
1554 nfs_state_log_update_open_stateid(state);
1555}
1556
1557static void nfs_state_set_open_stateid(struct nfs4_state *state,
1558 const nfs4_stateid *open_stateid,
1559 fmode_t fmode,
1560 nfs4_stateid *freeme)
1561{
1562 /*
1563 * Protect the call to nfs4_state_set_mode_locked and
1564 * serialise the stateid update
1565 */
1566 write_seqlock(&state->seqlock);
1567 nfs_set_open_stateid_locked(state, open_stateid, freeme);
1568 switch (fmode) {
1569 case FMODE_READ:
1570 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1571 break;
1572 case FMODE_WRITE:
1573 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1574 break;
1575 case FMODE_READ|FMODE_WRITE:
1576 set_bit(NFS_O_RDWR_STATE, &state->flags);
1577 }
1578 set_bit(NFS_OPEN_STATE, &state->flags);
1579 write_sequnlock(&state->seqlock);
1580}
1581
1582static void nfs_state_set_delegation(struct nfs4_state *state,
1583 const nfs4_stateid *deleg_stateid,
1584 fmode_t fmode)
1585{
1586 /*
1587 * Protect the call to nfs4_state_set_mode_locked and
1588 * serialise the stateid update
1589 */
1590 write_seqlock(&state->seqlock);
1591 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1592 set_bit(NFS_DELEGATED_STATE, &state->flags);
1593 write_sequnlock(&state->seqlock);
1594}
1595
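/*
 * Record a newly received open stateid and/or delegation for 'state' and
 * bump the open mode counters on success. Schedules the state manager if
 * a reclaim was flagged, and tests/frees any open stateid that was
 * superseded. Returns 1 if either stateid was accepted, 0 otherwise.
 */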
1596static int update_open_stateid(struct nfs4_state *state,
1597 const nfs4_stateid *open_stateid,
1598 const nfs4_stateid *delegation,
1599 fmode_t fmode)
1600{
1601 struct nfs_server *server = NFS_SERVER(state->inode);
1602 struct nfs_client *clp = server->nfs_client;
1603 struct nfs_inode *nfsi = NFS_I(state->inode);
1604 struct nfs_delegation *deleg_cur;
1605 nfs4_stateid freeme = { };
1606 int ret = 0;
1607
1608 fmode &= (FMODE_READ|FMODE_WRITE);
1609
1610 rcu_read_lock();
1611 spin_lock(&state->owner->so_lock);
1612 if (open_stateid != NULL) {
1613 nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme);
1614 ret = 1;
1615 }
1616
1617 deleg_cur = rcu_dereference(nfsi->delegation);
1618 if (deleg_cur == NULL)
1619 goto no_delegation;
1620
1621 spin_lock(&deleg_cur->lock);
1622 if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1623 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1624 (deleg_cur->type & fmode) != fmode)
1625 goto no_delegation_unlock;
1626
1627 if (delegation == NULL)
1628 delegation = &deleg_cur->stateid;
1629 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1630 goto no_delegation_unlock;
1631
1632 nfs_mark_delegation_referenced(deleg_cur);
1633 nfs_state_set_delegation(state, &deleg_cur->stateid, fmode);
1634 ret = 1;
1635no_delegation_unlock:
1636 spin_unlock(&deleg_cur->lock);
1637no_delegation:
1638 if (ret)
1639 update_open_stateflags(state, fmode);
1640 spin_unlock(&state->owner->so_lock);
1641 rcu_read_unlock();
1642
1643 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1644 nfs4_schedule_state_manager(clp);
1645 if (freeme.type != 0)
1646 nfs4_test_and_free_stateid(server, &freeme,
1647 state->owner->so_cred);
1648
1649 return ret;
1650}
1651
1652static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1653 const nfs4_stateid *stateid)
1654{
1655 struct nfs4_state *state = lsp->ls_state;
1656 bool ret = false;
1657
1658 spin_lock(&state->state_lock);
1659 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1660 goto out_noupdate;
1661 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1662 goto out_noupdate;
1663 nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1664 ret = true;
1665out_noupdate:
1666 spin_unlock(&state->state_lock);
1667 return ret;
1668}
1669
1670static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1671{
1672 struct nfs_delegation *delegation;
1673
1674 fmode &= FMODE_READ|FMODE_WRITE;
1675 rcu_read_lock();
1676 delegation = rcu_dereference(NFS_I(inode)->delegation);
1677 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1678 rcu_read_unlock();
1679 return;
1680 }
1681 rcu_read_unlock();
1682 nfs4_inode_return_delegation(inode);
1683}
1684
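/*
 * Try to satisfy an OPEN from the cached open state or from an existing
 * delegation without contacting the server. Returns the referenced
 * nfs4_state on success, ERR_PTR(-EAGAIN) if a real OPEN RPC is required,
 * or another ERR_PTR on permission failure.
 */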
1685static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1686{
1687 struct nfs4_state *state = opendata->state;
1688 struct nfs_inode *nfsi = NFS_I(state->inode);
1689 struct nfs_delegation *delegation;
1690 int open_mode = opendata->o_arg.open_flags;
1691 fmode_t fmode = opendata->o_arg.fmode;
1692 enum open_claim_type4 claim = opendata->o_arg.claim;
1693 nfs4_stateid stateid;
1694 int ret = -EAGAIN;
1695
1696 for (;;) {
1697 spin_lock(&state->owner->so_lock);
1698 if (can_open_cached(state, fmode, open_mode)) {
1699 update_open_stateflags(state, fmode);
1700 spin_unlock(&state->owner->so_lock);
1701 goto out_return_state;
1702 }
1703 spin_unlock(&state->owner->so_lock);
1704 rcu_read_lock();
1705 delegation = rcu_dereference(nfsi->delegation);
1706 if (!can_open_delegated(delegation, fmode, claim)) {
1707 rcu_read_unlock();
1708 break;
1709 }
1710 /* Save the delegation */
1711 nfs4_stateid_copy(&stateid, &delegation->stateid);
1712 rcu_read_unlock();
1713 nfs_release_seqid(opendata->o_arg.seqid);
1714 if (!opendata->is_recover) {
1715 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1716 if (ret != 0)
1717 goto out;
1718 }
1719 ret = -EAGAIN;
1720
1721 /* Try to update the stateid using the delegation */
1722 if (update_open_stateid(state, NULL, &stateid, fmode))
1723 goto out_return_state;
1724 }
1725out:
1726 return ERR_PTR(ret);
1727out_return_state:
1728 atomic_inc(&state->count);
1729 return state;
1730}
1731
1732static void
1733nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1734{
1735 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1736 struct nfs_delegation *delegation;
1737 int delegation_flags = 0;
1738
1739 rcu_read_lock();
1740 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1741 if (delegation)
1742 delegation_flags = delegation->flags;
1743 rcu_read_unlock();
1744 switch (data->o_arg.claim) {
1745 default:
1746 break;
1747 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1748 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1749 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1750 "returning a delegation for "
1751 "OPEN(CLAIM_DELEGATE_CUR)\n",
1752 clp->cl_hostname);
1753 return;
1754 }
1755 if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1756 nfs_inode_set_delegation(state->inode,
1757 data->owner->so_cred,
1758 data->o_res.delegation_type,
1759 &data->o_res.delegation,
1760 data->o_res.pagemod_limit);
1761 else
1762 nfs_inode_reclaim_delegation(state->inode,
1763 data->owner->so_cred,
1764 data->o_res.delegation_type,
1765 &data->o_res.delegation,
1766 data->o_res.pagemod_limit);
1767}
1768
1769/*
1770 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1771 * and update the nfs4_state.
1772 */
1773static struct nfs4_state *
1774_nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1775{
1776 struct inode *inode = data->state->inode;
1777 struct nfs4_state *state = data->state;
1778 int ret;
1779
1780 if (!data->rpc_done) {
1781 if (data->rpc_status)
1782 return ERR_PTR(data->rpc_status);
1783 /* cached opens have already been processed */
1784 goto update;
1785 }
1786
1787 ret = nfs_refresh_inode(inode, &data->f_attr);
1788 if (ret)
1789 return ERR_PTR(ret);
1790
1791 if (data->o_res.delegation_type != 0)
1792 nfs4_opendata_check_deleg(data, state);
1793update:
1794 update_open_stateid(state, &data->o_res.stateid, NULL,
1795 data->o_arg.fmode);
1796 atomic_inc(&state->count);
1797
1798 return state;
1799}
1800
1801static struct inode *
1802nfs4_opendata_get_inode(struct nfs4_opendata *data)
1803{
1804 struct inode *inode;
1805
1806 switch (data->o_arg.claim) {
1807 case NFS4_OPEN_CLAIM_NULL:
1808 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1809 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1810 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1811 return ERR_PTR(-EAGAIN);
1812 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh,
1813 &data->f_attr, data->f_label);
1814 break;
1815 default:
1816 inode = d_inode(data->dentry);
1817 ihold(inode);
1818 nfs_refresh_inode(inode, &data->f_attr);
1819 }
1820 return inode;
1821}
1822
1823static struct nfs4_state *
1824nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data)
1825{
1826 struct nfs4_state *state;
1827 struct inode *inode;
1828
1829 inode = nfs4_opendata_get_inode(data);
1830 if (IS_ERR(inode))
1831 return ERR_CAST(inode);
1832 if (data->state != NULL && data->state->inode == inode) {
1833 state = data->state;
1834 atomic_inc(&state->count);
1835 } else
1836 state = nfs4_get_open_state(inode, data->owner);
1837 iput(inode);
1838 if (state == NULL)
1839 state = ERR_PTR(-ENOMEM);
1840 return state;
1841}
1842
1843static struct nfs4_state *
1844_nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1845{
1846 struct nfs4_state *state;
1847
1848 if (!data->rpc_done) {
1849 state = nfs4_try_open_cached(data);
1850 trace_nfs4_cached_open(data->state);
1851 goto out;
1852 }
1853
1854 state = nfs4_opendata_find_nfs4_state(data);
1855 if (IS_ERR(state))
1856 goto out;
1857
1858 if (data->o_res.delegation_type != 0)
1859 nfs4_opendata_check_deleg(data, state);
1860 update_open_stateid(state, &data->o_res.stateid, NULL,
1861 data->o_arg.fmode);
1862out:
1863 nfs_release_seqid(data->o_arg.seqid);
1864 return state;
1865}
1866
1867static struct nfs4_state *
1868nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1869{
1870 struct nfs4_state *ret;
1871
1872 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
1873 ret = _nfs4_opendata_reclaim_to_nfs4_state(data);
1874 else
1875 ret = _nfs4_opendata_to_nfs4_state(data);
1876 nfs4_sequence_free_slot(&data->o_res.seq_res);
1877 return ret;
1878}
1879
1880static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1881{
1882 struct nfs_inode *nfsi = NFS_I(state->inode);
1883 struct nfs_open_context *ctx;
1884
1885 spin_lock(&state->inode->i_lock);
1886 list_for_each_entry(ctx, &nfsi->open_files, list) {
1887 if (ctx->state != state)
1888 continue;
1889 get_nfs_open_context(ctx);
1890 spin_unlock(&state->inode->i_lock);
1891 return ctx;
1892 }
1893 spin_unlock(&state->inode->i_lock);
1894 return ERR_PTR(-ENOENT);
1895}
1896
1897static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
1898 struct nfs4_state *state, enum open_claim_type4 claim)
1899{
1900 struct nfs4_opendata *opendata;
1901
1902 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
1903 NULL, claim, GFP_NOFS);
1904 if (opendata == NULL)
1905 return ERR_PTR(-ENOMEM);
1906 opendata->state = state;
1907 atomic_inc(&state->count);
1908 return opendata;
1909}
1910
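/*
 * Re-send an OPEN for the given share mode as part of state recovery,
 * but only if the state was actually open in that mode. The recovered
 * state must match the original; any mismatch is reported as -ESTALE.
 */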
1911static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
1912 fmode_t fmode)
1913{
1914 struct nfs4_state *newstate;
1915 int ret;
1916
1917 if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
1918 return 0;
1919 opendata->o_arg.open_flags = 0;
1920 opendata->o_arg.fmode = fmode;
1921 opendata->o_arg.share_access = nfs4_map_atomic_open_share(
1922 NFS_SB(opendata->dentry->d_sb),
1923 fmode, 0);
1924 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1925 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1926 nfs4_init_opendata_res(opendata);
1927 ret = _nfs4_recover_proc_open(opendata);
1928 if (ret != 0)
1929 return ret;
1930 newstate = nfs4_opendata_to_nfs4_state(opendata);
1931 if (IS_ERR(newstate))
1932 return PTR_ERR(newstate);
1933 if (newstate != opendata->state)
1934 ret = -ESTALE;
1935 nfs4_close_state(newstate, fmode);
1936 return ret;
1937}
1938
1939static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1940{
1941 int ret;
1942
1943 /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
1944 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1945 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1946 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1947 /* memory barrier prior to reading state->n_* */
1948 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1949 clear_bit(NFS_OPEN_STATE, &state->flags);
1950 smp_rmb();
1951 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
1952 if (ret != 0)
1953 return ret;
1954 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
1955 if (ret != 0)
1956 return ret;
1957 ret = nfs4_open_recover_helper(opendata, FMODE_READ);
1958 if (ret != 0)
1959 return ret;
1960 /*
1961 * We may have performed cached opens for all three recoveries.
1962 * Check if we need to update the current stateid.
1963 */
1964 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1965 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1966 write_seqlock(&state->seqlock);
1967 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1968 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1969 write_sequnlock(&state->seqlock);
1970 }
1971 return 0;
1972}
1973
1974/*
1975 * OPEN_RECLAIM:
1976 * reclaim state on the server after a reboot.
1977 */
1978static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1979{
1980 struct nfs_delegation *delegation;
1981 struct nfs4_opendata *opendata;
1982 fmode_t delegation_type = 0;
1983 int status;
1984
1985 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1986 NFS4_OPEN_CLAIM_PREVIOUS);
1987 if (IS_ERR(opendata))
1988 return PTR_ERR(opendata);
1989 rcu_read_lock();
1990 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1991 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1992 delegation_type = delegation->type;
1993 rcu_read_unlock();
1994 opendata->o_arg.u.delegation_type = delegation_type;
1995 status = nfs4_open_recover(opendata, state);
1996 nfs4_opendata_put(opendata);
1997 return status;
1998}
1999
2000static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
2001{
2002 struct nfs_server *server = NFS_SERVER(state->inode);
2003 struct nfs4_exception exception = { };
2004 int err;
2005 do {
2006 err = _nfs4_do_open_reclaim(ctx, state);
2007 trace_nfs4_open_reclaim(ctx, 0, err);
2008 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2009 continue;
2010 if (err != -NFS4ERR_DELAY)
2011 break;
2012 nfs4_handle_exception(server, err, &exception);
2013 } while (exception.retry);
2014 return err;
2015}
2016
2017static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
2018{
2019 struct nfs_open_context *ctx;
2020 int ret;
2021
2022 ctx = nfs4_state_find_open_context(state);
2023 if (IS_ERR(ctx))
2024 return -EAGAIN;
2025 ret = nfs4_do_open_reclaim(ctx, state);
2026 put_nfs_open_context(ctx);
2027 return ret;
2028}
2029
2030static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
2031{
2032 switch (err) {
2033 default:
2034 printk(KERN_ERR "NFS: %s: unhandled error "
2035 "%d.\n", __func__, err);
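		/* Fallthrough */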
2036 case 0:
2037 case -ENOENT:
2038 case -EAGAIN:
2039 case -ESTALE:
2040 break;
2041 case -NFS4ERR_BADSESSION:
2042 case -NFS4ERR_BADSLOT:
2043 case -NFS4ERR_BAD_HIGH_SLOT:
2044 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
2045 case -NFS4ERR_DEADSESSION:
2046 set_bit(NFS_DELEGATED_STATE, &state->flags);
2047 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
2048 return -EAGAIN;
2049 case -NFS4ERR_STALE_CLIENTID:
2050 case -NFS4ERR_STALE_STATEID:
2051 set_bit(NFS_DELEGATED_STATE, &state->flags);
2052 /* Don't recall a delegation if it was lost */
2053 nfs4_schedule_lease_recovery(server->nfs_client);
2054 return -EAGAIN;
2055 case -NFS4ERR_MOVED:
2056 nfs4_schedule_migration_recovery(server);
2057 return -EAGAIN;
2058 case -NFS4ERR_LEASE_MOVED:
2059 nfs4_schedule_lease_moved_recovery(server->nfs_client);
2060 return -EAGAIN;
2061 case -NFS4ERR_DELEG_REVOKED:
2062 case -NFS4ERR_ADMIN_REVOKED:
2063 case -NFS4ERR_EXPIRED:
2064 case -NFS4ERR_BAD_STATEID:
2065 case -NFS4ERR_OPENMODE:
2066 nfs_inode_find_state_and_recover(state->inode,
2067 stateid);
2068 nfs4_schedule_stateid_recovery(server, state);
2069 return -EAGAIN;
2070 case -NFS4ERR_DELAY:
2071 case -NFS4ERR_GRACE:
2072 set_bit(NFS_DELEGATED_STATE, &state->flags);
2073 ssleep(1);
2074 return -EAGAIN;
2075 case -ENOMEM:
2076 case -NFS4ERR_DENIED:
2077 if (fl) {
2078 struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
2079 if (lsp)
2080 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2081 }
2082 return 0;
2083 }
2084 return err;
2085}
2086
2087int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
2088 struct nfs4_state *state, const nfs4_stateid *stateid,
2089 fmode_t type)
2090{
2091 struct nfs_server *server = NFS_SERVER(state->inode);
2092 struct nfs4_opendata *opendata;
2093 int err = 0;
2094
2095 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2096 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
2097 if (IS_ERR(opendata))
2098 return PTR_ERR(opendata);
2099 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
2100 write_seqlock(&state->seqlock);
2101 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2102 write_sequnlock(&state->seqlock);
2103 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2104 switch (type & (FMODE_READ|FMODE_WRITE)) {
2105 case FMODE_READ|FMODE_WRITE:
2106 case FMODE_WRITE:
2107 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
2108 if (err)
2109 break;
2110 err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
2111 if (err)
2112 break;
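		/* Fallthrough */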
2113 case FMODE_READ:
2114 err = nfs4_open_recover_helper(opendata, FMODE_READ);
2115 }
2116 nfs4_opendata_put(opendata);
2117 return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
2118}
2119
2120static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
2121{
2122 struct nfs4_opendata *data = calldata;
2123
2124 nfs4_setup_sequence(data->o_arg.server->nfs_client,
2125 &data->c_arg.seq_args, &data->c_res.seq_res, task);
2126}
2127
2128static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
2129{
2130 struct nfs4_opendata *data = calldata;
2131
2132 nfs40_sequence_done(task, &data->c_res.seq_res);
2133
2134 data->rpc_status = task->tk_status;
2135 if (data->rpc_status == 0) {
2136 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
2137 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2138 renew_lease(data->o_res.server, data->timestamp);
2139 data->rpc_done = true;
2140 }
2141}
2142
2143static void nfs4_open_confirm_release(void *calldata)
2144{
2145 struct nfs4_opendata *data = calldata;
2146 struct nfs4_state *state = NULL;
2147
2148 /* If this request hasn't been cancelled, do nothing */
2149 if (!data->cancelled)
2150 goto out_free;
2151 /* In case of error, no cleanup! */
2152 if (!data->rpc_done)
2153 goto out_free;
2154 state = nfs4_opendata_to_nfs4_state(data);
2155 if (!IS_ERR(state))
2156 nfs4_close_state(state, data->o_arg.fmode);
2157out_free:
2158 nfs4_opendata_put(data);
2159}
2160
2161static const struct rpc_call_ops nfs4_open_confirm_ops = {
2162 .rpc_call_prepare = nfs4_open_confirm_prepare,
2163 .rpc_call_done = nfs4_open_confirm_done,
2164 .rpc_release = nfs4_open_confirm_release,
2165};
2166
2167/*
2168 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
2169 */
2170static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
2171{
2172 struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
2173 struct rpc_task *task;
2174 struct rpc_message msg = {
2175 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
2176 .rpc_argp = &data->c_arg,
2177 .rpc_resp = &data->c_res,
2178 .rpc_cred = data->owner->so_cred,
2179 };
2180 struct rpc_task_setup task_setup_data = {
2181 .rpc_client = server->client,
2182 .rpc_message = &msg,
2183 .callback_ops = &nfs4_open_confirm_ops,
2184 .callback_data = data,
2185 .workqueue = nfsiod_workqueue,
2186 .flags = RPC_TASK_ASYNC,
2187 };
2188 int status;
2189
2190 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
2191 kref_get(&data->kref);
2192 data->rpc_done = false;
2193 data->rpc_status = 0;
2194 data->timestamp = jiffies;
2195 if (data->is_recover)
2196 nfs4_set_sequence_privileged(&data->c_arg.seq_args);
2197 task = rpc_run_task(&task_setup_data);
2198 if (IS_ERR(task))
2199 return PTR_ERR(task);
2200 status = rpc_wait_for_completion_task(task);
2201 if (status != 0) {
2202 data->cancelled = true;
2203 smp_wmb();
2204 } else
2205 status = data->rpc_status;
2206 rpc_put_task(task);
2207 return status;
2208}
2209
2210static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
2211{
2212 struct nfs4_opendata *data = calldata;
2213 struct nfs4_state_owner *sp = data->owner;
2214 struct nfs_client *clp = sp->so_server->nfs_client;
2215 enum open_claim_type4 claim = data->o_arg.claim;
2216
2217 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
2218 goto out_wait;
2219 /*
2220 * Check if we still need to send an OPEN call, or if we can use
2221 * a delegation instead.
2222 */
2223 if (data->state != NULL) {
2224 struct nfs_delegation *delegation;
2225
2226 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
2227 goto out_no_action;
2228 rcu_read_lock();
2229 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
2230 if (can_open_delegated(delegation, data->o_arg.fmode, claim))
2231 goto unlock_no_action;
2232 rcu_read_unlock();
2233 }
2234 /* Update client id. */
2235 data->o_arg.clientid = clp->cl_clientid;
2236 switch (claim) {
2237 default:
2238 break;
2239 case NFS4_OPEN_CLAIM_PREVIOUS:
2240 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
2241 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
2242 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
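		/* Fallthrough */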
2243 case NFS4_OPEN_CLAIM_FH:
2244 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
2245 }
2246 data->timestamp = jiffies;
2247 if (nfs4_setup_sequence(data->o_arg.server->nfs_client,
2248 &data->o_arg.seq_args,
2249 &data->o_res.seq_res,
2250 task) != 0)
2251 nfs_release_seqid(data->o_arg.seqid);
2252
2253 /* Set the create mode (note dependency on the session type) */
2254 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
2255 if (data->o_arg.open_flags & O_EXCL) {
2256 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
2257 if (nfs4_has_persistent_session(clp))
2258 data->o_arg.createmode = NFS4_CREATE_GUARDED;
2259 else if (clp->cl_mvops->minor_version > 0)
2260 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
2261 }
2262 return;
2263unlock_no_action:
2264 trace_nfs4_cached_open(data->state);
2265 rcu_read_unlock();
2266out_no_action:
2267 task->tk_action = NULL;
2268out_wait:
2269 nfs4_sequence_done(task, &data->o_res.seq_res);
2270}
2271
2272static void nfs4_open_done(struct rpc_task *task, void *calldata)
2273{
2274 struct nfs4_opendata *data = calldata;
2275
2276 data->rpc_status = task->tk_status;
2277
2278 if (!nfs4_sequence_process(task, &data->o_res.seq_res))
2279 return;
2280
2281 if (task->tk_status == 0) {
2282 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
2283 switch (data->o_res.f_attr->mode & S_IFMT) {
2284 case S_IFREG:
2285 break;
2286 case S_IFLNK:
2287 data->rpc_status = -ELOOP;
2288 break;
2289 case S_IFDIR:
2290 data->rpc_status = -EISDIR;
2291 break;
2292 default:
2293 data->rpc_status = -ENOTDIR;
2294 }
2295 }
2296 renew_lease(data->o_res.server, data->timestamp);
2297 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
2298 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2299 }
2300 data->rpc_done = true;
2301}
2302
2303static void nfs4_open_release(void *calldata)
2304{
2305 struct nfs4_opendata *data = calldata;
2306 struct nfs4_state *state = NULL;
2307
2308 /* If this request hasn't been cancelled, do nothing */
2309 if (!data->cancelled)
2310 goto out_free;
2311 /* In case of error, no cleanup! */
2312 if (data->rpc_status != 0 || !data->rpc_done)
2313 goto out_free;
2314 /* In case we need an open_confirm, no cleanup! */
2315 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
2316 goto out_free;
2317 state = nfs4_opendata_to_nfs4_state(data);
2318 if (!IS_ERR(state))
2319 nfs4_close_state(state, data->o_arg.fmode);
2320out_free:
2321 nfs4_opendata_put(data);
2322}
2323
2324static const struct rpc_call_ops nfs4_open_ops = {
2325 .rpc_call_prepare = nfs4_open_prepare,
2326 .rpc_call_done = nfs4_open_done,
2327 .rpc_release = nfs4_open_release,
2328};
2329
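/*
 * Set up and run the OPEN RPC asynchronously, then wait for it to complete.
 * When called from state recovery, the sequence arguments are marked
 * privileged so the call is not blocked by the recovery it is part of.
 */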
2330static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
2331{
2332 struct inode *dir = d_inode(data->dir);
2333 struct nfs_server *server = NFS_SERVER(dir);
2334 struct nfs_openargs *o_arg = &data->o_arg;
2335 struct nfs_openres *o_res = &data->o_res;
2336 struct rpc_task *task;
2337 struct rpc_message msg = {
2338 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
2339 .rpc_argp = o_arg,
2340 .rpc_resp = o_res,
2341 .rpc_cred = data->owner->so_cred,
2342 };
2343 struct rpc_task_setup task_setup_data = {
2344 .rpc_client = server->client,
2345 .rpc_message = &msg,
2346 .callback_ops = &nfs4_open_ops,
2347 .callback_data = data,
2348 .workqueue = nfsiod_workqueue,
2349 .flags = RPC_TASK_ASYNC,
2350 };
2351 int status;
2352
2353 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
2354 kref_get(&data->kref);
2355 data->rpc_done = false;
2356 data->rpc_status = 0;
2357 data->cancelled = false;
2358 data->is_recover = false;
2359 if (isrecover) {
2360 nfs4_set_sequence_privileged(&o_arg->seq_args);
2361 data->is_recover = true;
2362 }
2363 task = rpc_run_task(&task_setup_data);
2364 if (IS_ERR(task))
2365 return PTR_ERR(task);
2366 status = rpc_wait_for_completion_task(task);
2367 if (status != 0) {
2368 data->cancelled = true;
2369 smp_wmb();
2370 } else
2371 status = data->rpc_status;
2372 rpc_put_task(task);
2373
2374 return status;
2375}
2376
2377static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2378{
2379 struct inode *dir = d_inode(data->dir);
2380 struct nfs_openres *o_res = &data->o_res;
2381 int status;
2382
2383 status = nfs4_run_open_task(data, 1);
2384 if (status != 0 || !data->rpc_done)
2385 return status;
2386
2387 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2388
2389 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM)
2390 status = _nfs4_proc_open_confirm(data);
2391
2392 return status;
2393}
2394
2395/*
2396 * Additional permission checks in order to distinguish between an
2397 * open for read, and an open for execute. This works around the
2398 * fact that NFSv4 OPEN treats read and execute permissions as being
2399 * the same.
2400 * Note that in the non-execute case, we want to turn off permission
2401 * checking if we just created a new file (POSIX open() semantics).
2402 */
2403static int nfs4_opendata_access(struct rpc_cred *cred,
2404 struct nfs4_opendata *opendata,
2405 struct nfs4_state *state, fmode_t fmode,
2406 int openflags)
2407{
2408 struct nfs_access_entry cache;
2409 u32 mask, flags;
2410
2411 /* access call failed or for some reason the server doesn't
2412 * support any access modes -- defer access call until later */
2413 if (opendata->o_res.access_supported == 0)
2414 return 0;
2415
2416 mask = 0;
2417 /*
2418 * Use openflags to check for exec, because fmode won't
2419	 * always have FMODE_EXEC set when the file is opened for exec.
2420 */
2421 if (openflags & __FMODE_EXEC) {
2422 /* ONLY check for exec rights */
2423 if (S_ISDIR(state->inode->i_mode))
2424 mask = NFS4_ACCESS_LOOKUP;
2425 else
2426 mask = NFS4_ACCESS_EXECUTE;
2427 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2428 mask = NFS4_ACCESS_READ;
2429
2430 cache.cred = cred;
2431 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2432 nfs_access_add_cache(state->inode, &cache);
2433
2434 flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP;
2435 if ((mask & ~cache.mask & flags) == 0)
2436 return 0;
2437
2438 return -EACCES;
2439}
2440
2441/*
2442 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2443 */
2444static int _nfs4_proc_open(struct nfs4_opendata *data)
2445{
2446 struct inode *dir = d_inode(data->dir);
2447 struct nfs_server *server = NFS_SERVER(dir);
2448 struct nfs_openargs *o_arg = &data->o_arg;
2449 struct nfs_openres *o_res = &data->o_res;
2450 int status;
2451
2452 status = nfs4_run_open_task(data, 0);
2453 if (!data->rpc_done)
2454 return status;
2455 if (status != 0) {
2456 if (status == -NFS4ERR_BADNAME &&
2457 !(o_arg->open_flags & O_CREAT))
2458 return -ENOENT;
2459 return status;
2460 }
2461
2462 nfs_fattr_map_and_free_names(server, &data->f_attr);
2463
2464 if (o_arg->open_flags & O_CREAT) {
2465 if (o_arg->open_flags & O_EXCL)
2466 data->file_created = true;
2467 else if (o_res->cinfo.before != o_res->cinfo.after)
2468 data->file_created = true;
2469 if (data->file_created ||
2470 inode_peek_iversion_raw(dir) != o_res->cinfo.after)
2471 update_changeattr(dir, &o_res->cinfo,
2472 o_res->f_attr->time_start);
2473 }
2474 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2475 server->caps &= ~NFS_CAP_POSIX_LOCK;
2476	if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2477 status = _nfs4_proc_open_confirm(data);
2478 if (status != 0)
2479 return status;
2480 }
2481 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR)) {
2482 nfs4_sequence_free_slot(&o_res->seq_res);
2483 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label);
2484 }
2485 return 0;
2486}
2487
2488/*
2489 * OPEN_EXPIRED:
2490 * reclaim state on the server after a network partition.
2491 * Assumes caller holds the appropriate lock
2492 */
2493static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2494{
2495 struct nfs4_opendata *opendata;
2496 int ret;
2497
2498 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2499 NFS4_OPEN_CLAIM_FH);
2500 if (IS_ERR(opendata))
2501 return PTR_ERR(opendata);
2502 ret = nfs4_open_recover(opendata, state);
2503 if (ret == -ESTALE)
2504 d_drop(ctx->dentry);
2505 nfs4_opendata_put(opendata);
2506 return ret;
2507}
2508
2509static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2510{
2511 struct nfs_server *server = NFS_SERVER(state->inode);
2512 struct nfs4_exception exception = { };
2513 int err;
2514
2515 do {
2516 err = _nfs4_open_expired(ctx, state);
2517 trace_nfs4_open_expired(ctx, 0, err);
2518 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2519 continue;
2520 switch (err) {
2521 default:
2522 goto out;
2523 case -NFS4ERR_GRACE:
2524 case -NFS4ERR_DELAY:
2525 nfs4_handle_exception(server, err, &exception);
2526 err = 0;
2527 }
2528 } while (exception.retry);
2529out:
2530 return err;
2531}
2532
2533static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2534{
2535 struct nfs_open_context *ctx;
2536 int ret;
2537
2538 ctx = nfs4_state_find_open_context(state);
2539 if (IS_ERR(ctx))
2540 return -EAGAIN;
2541 ret = nfs4_do_open_expired(ctx, state);
2542 put_nfs_open_context(ctx);
2543 return ret;
2544}
2545
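/*
 * Discard the bad delegation and fall back to using the open stateid for
 * this nfs4_state.
 */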
2546static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state,
2547 const nfs4_stateid *stateid)
2548{
2549 nfs_remove_bad_delegation(state->inode, stateid);
2550 write_seqlock(&state->seqlock);
2551 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2552 write_sequnlock(&state->seqlock);
2553 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2554}
2555
2556static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2557{
2558 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2559 nfs_finish_clear_delegation_stateid(state, NULL);
2560}
2561
2562static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2563{
2564 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2565 nfs40_clear_delegation_stateid(state);
2566 return nfs4_open_expired(sp, state);
2567}
2568
2569static int nfs40_test_and_free_expired_stateid(struct nfs_server *server,
2570 nfs4_stateid *stateid,
2571 struct rpc_cred *cred)
2572{
2573 return -NFS4ERR_BAD_STATEID;
2574}
2575
2576#if defined(CONFIG_NFS_V4_1)
2577static int nfs41_test_and_free_expired_stateid(struct nfs_server *server,
2578 nfs4_stateid *stateid,
2579 struct rpc_cred *cred)
2580{
2581 int status;
2582
2583 switch (stateid->type) {
2584 default:
2585 break;
2586 case NFS4_INVALID_STATEID_TYPE:
2587 case NFS4_SPECIAL_STATEID_TYPE:
2588 return -NFS4ERR_BAD_STATEID;
2589 case NFS4_REVOKED_STATEID_TYPE:
2590 goto out_free;
2591 }
2592
2593 status = nfs41_test_stateid(server, stateid, cred);
2594 switch (status) {
2595 case -NFS4ERR_EXPIRED:
2596 case -NFS4ERR_ADMIN_REVOKED:
2597 case -NFS4ERR_DELEG_REVOKED:
2598 break;
2599 default:
2600 return status;
2601 }
2602out_free:
2603 /* Ack the revoked state to the server */
2604 nfs41_free_stateid(server, stateid, cred, true);
2605 return -NFS4ERR_EXPIRED;
2606}
2607
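/*
 * Test the cached delegation stateid against the server, and clear the
 * delegation if it turns out to have been revoked or to have expired.
 */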
2608static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2609{
2610 struct nfs_server *server = NFS_SERVER(state->inode);
2611 nfs4_stateid stateid;
2612 struct nfs_delegation *delegation;
2613 struct rpc_cred *cred;
2614 int status;
2615
2616 /* Get the delegation credential for use by test/free_stateid */
2617 rcu_read_lock();
2618 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2619 if (delegation == NULL) {
2620 rcu_read_unlock();
2621 return;
2622 }
2623
2624 nfs4_stateid_copy(&stateid, &delegation->stateid);
2625 if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) ||
2626 !test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
2627 &delegation->flags)) {
2628 rcu_read_unlock();
2629 nfs_finish_clear_delegation_stateid(state, &stateid);
2630 return;
2631 }
2632
2633 cred = get_rpccred(delegation->cred);
2634 rcu_read_unlock();
2635 status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
2636 trace_nfs4_test_delegation_stateid(state, NULL, status);
2637 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
2638 nfs_finish_clear_delegation_stateid(state, &stateid);
2639
2640 put_rpccred(cred);
2641}
2642
2643/**
2644 * nfs41_check_expired_locks - possibly free a lock stateid
2645 *
2646 * @state: NFSv4 state for an inode
2647 *
2648 * Returns NFS_OK if recovery for this stateid is now finished.
2649 * Otherwise a negative NFS4ERR value is returned.
2650 */
2651static int nfs41_check_expired_locks(struct nfs4_state *state)
2652{
2653 int status, ret = NFS_OK;
2654 struct nfs4_lock_state *lsp, *prev = NULL;
2655 struct nfs_server *server = NFS_SERVER(state->inode);
2656
2657 if (!test_bit(LK_STATE_IN_USE, &state->flags))
2658 goto out;
2659
2660 spin_lock(&state->state_lock);
2661 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
2662 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
2663 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
2664
2665 refcount_inc(&lsp->ls_count);
2666 spin_unlock(&state->state_lock);
2667
2668 nfs4_put_lock_state(prev);
2669 prev = lsp;
2670
2671 status = nfs41_test_and_free_expired_stateid(server,
2672 &lsp->ls_stateid,
2673 cred);
2674 trace_nfs4_test_lock_stateid(state, lsp, status);
2675 if (status == -NFS4ERR_EXPIRED ||
2676 status == -NFS4ERR_BAD_STATEID) {
2677 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
2678 lsp->ls_stateid.type = NFS4_INVALID_STATEID_TYPE;
2679 if (!recover_lost_locks)
2680 set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
2681 } else if (status != NFS_OK) {
2682 ret = status;
2683 nfs4_put_lock_state(prev);
2684 goto out;
2685 }
2686 spin_lock(&state->state_lock);
2687 }
2688 }
2689 spin_unlock(&state->state_lock);
2690 nfs4_put_lock_state(prev);
2691out:
2692 return ret;
2693}
2694
2695/**
2696 * nfs41_check_open_stateid - possibly free an open stateid
2697 *
2698 * @state: NFSv4 state for an inode
2699 *
2700 * Returns NFS_OK if recovery for this stateid is now finished.
2701 * Otherwise a negative NFS4ERR value is returned.
2702 */
2703static int nfs41_check_open_stateid(struct nfs4_state *state)
2704{
2705 struct nfs_server *server = NFS_SERVER(state->inode);
2706 nfs4_stateid *stateid = &state->open_stateid;
2707 struct rpc_cred *cred = state->owner->so_cred;
2708 int status;
2709
2710 if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) {
2711 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) {
2712 if (nfs4_have_delegation(state->inode, state->state))
2713 return NFS_OK;
2714 return -NFS4ERR_OPENMODE;
2715 }
2716 return -NFS4ERR_BAD_STATEID;
2717 }
2718 status = nfs41_test_and_free_expired_stateid(server, stateid, cred);
2719 trace_nfs4_test_open_stateid(state, NULL, status);
2720 if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) {
2721 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2722 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2723 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2724 clear_bit(NFS_OPEN_STATE, &state->flags);
2725 stateid->type = NFS4_INVALID_STATEID_TYPE;
2726 return status;
2727 }
2728 if (nfs_open_stateid_recover_openmode(state))
2729 return -NFS4ERR_OPENMODE;
2730 return NFS_OK;
2731}
2732
2733static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2734{
2735 int status;
2736
2737 nfs41_check_delegation_stateid(state);
2738 status = nfs41_check_expired_locks(state);
2739 if (status != NFS_OK)
2740 return status;
2741 status = nfs41_check_open_stateid(state);
2742 if (status != NFS_OK)
2743 status = nfs4_open_expired(sp, state);
2744 return status;
2745}
2746#endif
2747
2748/*
2749 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4_*
2750 * fields corresponding to attributes that were used to store the verifier.
2751 * Make sure we clobber those fields in the later setattr call
2752 */
2753static unsigned nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
2754 struct iattr *sattr, struct nfs4_label **label)
2755{
2756 const __u32 *bitmask = opendata->o_arg.server->exclcreat_bitmask;
2757 __u32 attrset[3];
2758 unsigned ret;
2759 unsigned i;
2760
2761 for (i = 0; i < ARRAY_SIZE(attrset); i++) {
2762 attrset[i] = opendata->o_res.attrset[i];
2763 if (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE4_1)
2764 attrset[i] &= ~bitmask[i];
2765 }
2766
2767 ret = (opendata->o_arg.createmode == NFS4_CREATE_EXCLUSIVE) ?
2768 sattr->ia_valid : 0;
2769
2770 if ((attrset[1] & (FATTR4_WORD1_TIME_ACCESS|FATTR4_WORD1_TIME_ACCESS_SET))) {
2771 if (sattr->ia_valid & ATTR_ATIME_SET)
2772 ret |= ATTR_ATIME_SET;
2773 else
2774 ret |= ATTR_ATIME;
2775 }
2776
2777 if ((attrset[1] & (FATTR4_WORD1_TIME_MODIFY|FATTR4_WORD1_TIME_MODIFY_SET))) {
2778 if (sattr->ia_valid & ATTR_MTIME_SET)
2779 ret |= ATTR_MTIME_SET;
2780 else
2781 ret |= ATTR_MTIME;
2782 }
2783
2784 if (!(attrset[2] & FATTR4_WORD2_SECURITY_LABEL))
2785 *label = NULL;
2786 return ret;
2787}
2788
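/*
 * Send the OPEN, bind the resulting nfs4_state to the open context, splice
 * the dentry to the inode that was actually opened, and run the additional
 * access check that distinguishes read from execute opens.
 */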
2789static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2790 fmode_t fmode,
2791 int flags,
2792 struct nfs_open_context *ctx)
2793{
2794 struct nfs4_state_owner *sp = opendata->owner;
2795 struct nfs_server *server = sp->so_server;
2796 struct dentry *dentry;
2797 struct nfs4_state *state;
2798 unsigned int seq;
2799 int ret;
2800
2801 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
2802
2803 ret = _nfs4_proc_open(opendata);
2804 if (ret != 0)
2805 goto out;
2806
2807 state = nfs4_opendata_to_nfs4_state(opendata);
2808 ret = PTR_ERR(state);
2809 if (IS_ERR(state))
2810 goto out;
2811 ctx->state = state;
2812 if (server->caps & NFS_CAP_POSIX_LOCK)
2813 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
2814 if (opendata->o_res.rflags & NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK)
2815 set_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags);
2816
2817 dentry = opendata->dentry;
2818 if (d_really_is_negative(dentry)) {
2819 struct dentry *alias;
2820 d_drop(dentry);
2821 alias = d_exact_alias(dentry, state->inode);
2822 if (!alias)
2823 alias = d_splice_alias(igrab(state->inode), dentry);
2824 /* d_splice_alias() can't fail here - it's a non-directory */
2825 if (alias) {
2826 dput(ctx->dentry);
2827 ctx->dentry = dentry = alias;
2828 }
2829 nfs_set_verifier(dentry,
2830 nfs_save_change_attribute(d_inode(opendata->dir)));
2831 }
2832
2833 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags);
2834 if (ret != 0)
2835 goto out;
2836
2837 if (d_inode(dentry) == state->inode) {
2838 nfs_inode_attach_open_context(ctx);
2839 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
2840 nfs4_schedule_stateid_recovery(server, state);
2841 }
2842out:
2843 return ret;
2844}
2845
2846/*
2847 * Returns a referenced nfs4_state
2848 */
2849static int _nfs4_do_open(struct inode *dir,
2850 struct nfs_open_context *ctx,
2851 int flags,
2852 const struct nfs4_open_createattrs *c,
2853 int *opened)
2854{
2855 struct nfs4_state_owner *sp;
2856 struct nfs4_state *state = NULL;
2857 struct nfs_server *server = NFS_SERVER(dir);
2858 struct nfs4_opendata *opendata;
2859 struct dentry *dentry = ctx->dentry;
2860 struct rpc_cred *cred = ctx->cred;
2861 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
2862 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
2863 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
2864 struct iattr *sattr = c->sattr;
2865 struct nfs4_label *label = c->label;
2866 struct nfs4_label *olabel = NULL;
2867 int status;
2868
2869 /* Protect against reboot recovery conflicts */
2870 status = -ENOMEM;
2871 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
2872 if (sp == NULL) {
2873 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
2874 goto out_err;
2875 }
2876 status = nfs4_client_recover_expired_lease(server->nfs_client);
2877 if (status != 0)
2878 goto err_put_state_owner;
2879 if (d_really_is_positive(dentry))
2880 nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
2881 status = -ENOMEM;
2882 if (d_really_is_positive(dentry))
2883 claim = NFS4_OPEN_CLAIM_FH;
2884 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags,
2885 c, claim, GFP_KERNEL);
2886 if (opendata == NULL)
2887 goto err_put_state_owner;
2888
2889 if (label) {
2890 olabel = nfs4_label_alloc(server, GFP_KERNEL);
2891 if (IS_ERR(olabel)) {
2892 status = PTR_ERR(olabel);
2893 goto err_opendata_put;
2894 }
2895 }
2896
2897 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
2898 if (!opendata->f_attr.mdsthreshold) {
2899 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
2900 if (!opendata->f_attr.mdsthreshold)
2901 goto err_free_label;
2902 }
2903 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
2904 }
2905 if (d_really_is_positive(dentry))
2906 opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
2907
2908 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx);
2909 if (status != 0)
2910 goto err_free_label;
2911 state = ctx->state;
2912
2913 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
2914 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
2915 unsigned attrs = nfs4_exclusive_attrset(opendata, sattr, &label);
2916 /*
2917		 * send create attributes that were not set by open
2918 * with an extra setattr.
2919 */
2920 if (attrs || label) {
2921 unsigned ia_old = sattr->ia_valid;
2922
2923 sattr->ia_valid = attrs;
2924 nfs_fattr_init(opendata->o_res.f_attr);
2925 status = nfs4_do_setattr(state->inode, cred,
2926 opendata->o_res.f_attr, sattr,
2927 ctx, label, olabel);
2928 if (status == 0) {
2929 nfs_setattr_update_inode(state->inode, sattr,
2930 opendata->o_res.f_attr);
2931 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
2932 }
2933 sattr->ia_valid = ia_old;
2934 }
2935 }
2936 if (opened && opendata->file_created)
2937 *opened |= FILE_CREATED;
2938
2939 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
2940 *ctx_th = opendata->f_attr.mdsthreshold;
2941 opendata->f_attr.mdsthreshold = NULL;
2942 }
2943
2944 nfs4_label_free(olabel);
2945
2946 nfs4_opendata_put(opendata);
2947 nfs4_put_state_owner(sp);
2948 return 0;
2949err_free_label:
2950 nfs4_label_free(olabel);
2951err_opendata_put:
2952 nfs4_opendata_put(opendata);
2953err_put_state_owner:
2954 nfs4_put_state_owner(sp);
2955out_err:
2956 return status;
2957}
2958
2959
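/*
 * Wrapper around _nfs4_do_open() that drives the retry loop for BAD_SEQID,
 * BAD_STATEID, delegation races and other recoverable errors.
 */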
2960static struct nfs4_state *nfs4_do_open(struct inode *dir,
2961 struct nfs_open_context *ctx,
2962 int flags,
2963 struct iattr *sattr,
2964 struct nfs4_label *label,
2965 int *opened)
2966{
2967 struct nfs_server *server = NFS_SERVER(dir);
2968 struct nfs4_exception exception = { };
2969 struct nfs4_state *res;
2970 struct nfs4_open_createattrs c = {
2971 .label = label,
2972 .sattr = sattr,
2973 .verf = {
2974 [0] = (__u32)jiffies,
2975 [1] = (__u32)current->pid,
2976 },
2977 };
2978 int status;
2979
2980 do {
2981 status = _nfs4_do_open(dir, ctx, flags, &c, opened);
2982 res = ctx->state;
2983 trace_nfs4_open_file(ctx, flags, status);
2984 if (status == 0)
2985 break;
2986 /* NOTE: BAD_SEQID means the server and client disagree about the
2987 * book-keeping w.r.t. state-changing operations
2988 * (OPEN/CLOSE/LOCK/LOCKU...)
2989 * It is actually a sign of a bug on the client or on the server.
2990 *
2991 * If we receive a BAD_SEQID error in the particular case of
2992 * doing an OPEN, we assume that nfs_increment_open_seqid() will
2993 * have unhashed the old state_owner for us, and that we can
2994 * therefore safely retry using a new one. We should still warn
2995 * the user though...
2996 */
2997 if (status == -NFS4ERR_BAD_SEQID) {
2998			pr_warn_ratelimited("NFS: v4 server %s "
2999					"returned a bad sequence-id error!\n",
3000 NFS_SERVER(dir)->nfs_client->cl_hostname);
3001 exception.retry = 1;
3002 continue;
3003 }
3004 /*
3005 * BAD_STATEID on OPEN means that the server cancelled our
3006 * state before it received the OPEN_CONFIRM.
3007 * Recover by retrying the request as per the discussion
3008 * on Page 181 of RFC3530.
3009 */
3010 if (status == -NFS4ERR_BAD_STATEID) {
3011 exception.retry = 1;
3012 continue;
3013 }
3014 if (status == -EAGAIN) {
3015 /* We must have found a delegation */
3016 exception.retry = 1;
3017 continue;
3018 }
3019 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
3020 continue;
3021 res = ERR_PTR(nfs4_handle_exception(server,
3022 status, &exception));
3023 } while (exception.retry);
3024 return res;
3025}
3026
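/*
 * Issue the SETATTR RPC, choosing the stateid to send: a delegation stateid
 * if one is held, an open/lock stateid when truncating through an open
 * context, and the zero stateid otherwise.
 */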
3027static int _nfs4_do_setattr(struct inode *inode,
3028 struct nfs_setattrargs *arg,
3029 struct nfs_setattrres *res,
3030 struct rpc_cred *cred,
3031 struct nfs_open_context *ctx)
3032{
3033 struct nfs_server *server = NFS_SERVER(inode);
3034 struct rpc_message msg = {
3035 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
3036 .rpc_argp = arg,
3037 .rpc_resp = res,
3038 .rpc_cred = cred,
3039 };
3040 struct rpc_cred *delegation_cred = NULL;
3041 unsigned long timestamp = jiffies;
3042 fmode_t fmode;
3043 bool truncate;
3044 int status;
3045
3046 nfs_fattr_init(res->fattr);
3047
3048 /* Servers should only apply open mode checks for file size changes */
3049 truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false;
3050 fmode = truncate ? FMODE_WRITE : FMODE_READ;
3051
3052 if (nfs4_copy_delegation_stateid(inode, fmode, &arg->stateid, &delegation_cred)) {
3053 /* Use that stateid */
3054 } else if (truncate && ctx != NULL) {
3055 struct nfs_lock_context *l_ctx;
3056 if (!nfs4_valid_open_stateid(ctx->state))
3057 return -EBADF;
3058 l_ctx = nfs_get_lock_context(ctx);
3059 if (IS_ERR(l_ctx))
3060 return PTR_ERR(l_ctx);
3061 status = nfs4_select_rw_stateid(ctx->state, FMODE_WRITE, l_ctx,
3062 &arg->stateid, &delegation_cred);
3063 nfs_put_lock_context(l_ctx);
3064 if (status == -EIO)
3065 return -EBADF;
3066 } else
3067 nfs4_stateid_copy(&arg->stateid, &zero_stateid);
3068 if (delegation_cred)
3069 msg.rpc_cred = delegation_cred;
3070
3071 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1);
3072
3073 put_rpccred(delegation_cred);
3074 if (status == 0 && ctx != NULL)
3075 renew_lease(server, timestamp);
3076 trace_nfs4_setattr(inode, &arg->stateid, status);
3077 return status;
3078}
3079
3080static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
3081 struct nfs_fattr *fattr, struct iattr *sattr,
3082 struct nfs_open_context *ctx, struct nfs4_label *ilabel,
3083 struct nfs4_label *olabel)
3084{
3085 struct nfs_server *server = NFS_SERVER(inode);
3086 struct nfs4_state *state = ctx ? ctx->state : NULL;
3087 struct nfs_setattrargs arg = {
3088 .fh = NFS_FH(inode),
3089 .iap = sattr,
3090 .server = server,
3091 .bitmask = server->attr_bitmask,
3092 .label = ilabel,
3093 };
3094 struct nfs_setattrres res = {
3095 .fattr = fattr,
3096 .label = olabel,
3097 .server = server,
3098 };
3099 struct nfs4_exception exception = {
3100 .state = state,
3101 .inode = inode,
3102 .stateid = &arg.stateid,
3103 };
3104 int err;
3105
3106 arg.bitmask = nfs4_bitmask(server, ilabel);
3107 if (ilabel)
3108 arg.bitmask = nfs4_bitmask(server, olabel);
3109
3110 do {
3111 err = _nfs4_do_setattr(inode, &arg, &res, cred, ctx);
3112 switch (err) {
3113 case -NFS4ERR_OPENMODE:
3114 if (!(sattr->ia_valid & ATTR_SIZE)) {
3115 pr_warn_once("NFSv4: server %s is incorrectly "
3116 "applying open mode checks to "
3117 "a SETATTR that is not "
3118 "changing file size.\n",
3119 server->nfs_client->cl_hostname);
3120 }
3121 if (state && !(state->state & FMODE_WRITE)) {
3122 err = -EBADF;
3123 if (sattr->ia_valid & ATTR_OPEN)
3124 err = -EACCES;
3125 goto out;
3126 }
3127 }
3128 err = nfs4_handle_exception(server, err, &exception);
3129 } while (exception.retry);
3130out:
3131 return err;
3132}
3133
3134static bool
3135nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
3136{
3137 if (inode == NULL || !nfs_have_layout(inode))
3138 return false;
3139
3140 return pnfs_wait_on_layoutreturn(inode, task);
3141}
3142
3143struct nfs4_closedata {
3144 struct inode *inode;
3145 struct nfs4_state *state;
3146 struct nfs_closeargs arg;
3147 struct nfs_closeres res;
3148 struct {
3149 struct nfs4_layoutreturn_args arg;
3150 struct nfs4_layoutreturn_res res;
3151 struct nfs4_xdr_opaque_data ld_private;
3152 u32 roc_barrier;
3153 bool roc;
3154 } lr;
3155 struct nfs_fattr fattr;
3156 unsigned long timestamp;
3157};
3158
3159static void nfs4_free_closedata(void *data)
3160{
3161 struct nfs4_closedata *calldata = data;
3162 struct nfs4_state_owner *sp = calldata->state->owner;
3163 struct super_block *sb = calldata->state->inode->i_sb;
3164
3165 if (calldata->lr.roc)
3166 pnfs_roc_release(&calldata->lr.arg, &calldata->lr.res,
3167 calldata->res.lr_ret);
3168 nfs4_put_open_state(calldata->state);
3169 nfs_free_seqid(calldata->arg.seqid);
3170 nfs4_put_state_owner(sp);
3171 nfs_sb_deactive(sb);
3172 kfree(calldata);
3173}
3174
3175static void nfs4_close_done(struct rpc_task *task, void *data)
3176{
3177 struct nfs4_closedata *calldata = data;
3178 struct nfs4_state *state = calldata->state;
3179 struct nfs_server *server = NFS_SERVER(calldata->inode);
3180 nfs4_stateid *res_stateid = NULL;
3181 struct nfs4_exception exception = {
3182 .state = state,
3183 .inode = calldata->inode,
3184 .stateid = &calldata->arg.stateid,
3185 };
3186
3187 dprintk("%s: begin!\n", __func__);
3188 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
3189 return;
3190 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
3191
3192 /* Handle Layoutreturn errors */
3193 if (calldata->arg.lr_args && task->tk_status != 0) {
3194 switch (calldata->res.lr_ret) {
3195 default:
3196 calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
3197 break;
3198 case 0:
3199 calldata->arg.lr_args = NULL;
3200 calldata->res.lr_res = NULL;
3201 break;
3202 case -NFS4ERR_OLD_STATEID:
3203 if (nfs4_refresh_layout_stateid(&calldata->arg.lr_args->stateid,
3204 calldata->inode))
3205 goto lr_restart;
3206 /* Fallthrough */
3207 case -NFS4ERR_ADMIN_REVOKED:
3208 case -NFS4ERR_DELEG_REVOKED:
3209 case -NFS4ERR_EXPIRED:
3210 case -NFS4ERR_BAD_STATEID:
3211 case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
3212 case -NFS4ERR_WRONG_CRED:
3213 calldata->arg.lr_args = NULL;
3214 calldata->res.lr_res = NULL;
3215 goto lr_restart;
3216 }
3217 }
3218
3219 /* hmm. we are done with the inode, and in the process of freeing
3220 * the state_owner. we keep this around to process errors
3221 */
3222 switch (task->tk_status) {
3223 case 0:
3224 res_stateid = &calldata->res.stateid;
3225 renew_lease(server, calldata->timestamp);
3226 break;
3227 case -NFS4ERR_ACCESS:
3228 if (calldata->arg.bitmask != NULL) {
3229 calldata->arg.bitmask = NULL;
3230 calldata->res.fattr = NULL;
3231 goto out_restart;
3232
3233 }
3234 break;
3235 case -NFS4ERR_OLD_STATEID:
3236 /* Did we race with OPEN? */
3237 if (nfs4_refresh_open_stateid(&calldata->arg.stateid,
3238 state))
3239 goto out_restart;
3240 goto out_release;
3241 case -NFS4ERR_ADMIN_REVOKED:
3242 case -NFS4ERR_STALE_STATEID:
3243 case -NFS4ERR_EXPIRED:
3244 nfs4_free_revoked_stateid(server,
3245 &calldata->arg.stateid,
3246 task->tk_msg.rpc_cred);
3247 /* Fallthrough */
3248 case -NFS4ERR_BAD_STATEID:
3249 break;
3250 default:
3251 task->tk_status = nfs4_async_handle_exception(task,
3252 server, task->tk_status, &exception);
3253 if (exception.retry)
3254 goto out_restart;
3255 }
3256 nfs_clear_open_stateid(state, &calldata->arg.stateid,
3257 res_stateid, calldata->arg.fmode);
3258out_release:
3259 task->tk_status = 0;
3260 nfs_release_seqid(calldata->arg.seqid);
3261 nfs_refresh_inode(calldata->inode, &calldata->fattr);
3262 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
3263 return;
3264lr_restart:
3265 calldata->res.lr_ret = 0;
3266out_restart:
3267 task->tk_status = 0;
3268 rpc_restart_call_prepare(task);
3269 goto out_release;
3270}
3271
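/*
 * Work out whether to send a CLOSE or an OPEN_DOWNGRADE based on which open
 * modes are still in use, and skip the RPC entirely if the open state no
 * longer needs to change on the server.
 */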
3272static void nfs4_close_prepare(struct rpc_task *task, void *data)
3273{
3274 struct nfs4_closedata *calldata = data;
3275 struct nfs4_state *state = calldata->state;
3276 struct inode *inode = calldata->inode;
3277 bool is_rdonly, is_wronly, is_rdwr;
3278 int call_close = 0;
3279
3280 dprintk("%s: begin!\n", __func__);
3281 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
3282 goto out_wait;
3283
3284 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
3285 spin_lock(&state->owner->so_lock);
3286 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
3287 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
3288 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
3289 /* Calculate the change in open mode */
3290 calldata->arg.fmode = 0;
3291 if (state->n_rdwr == 0) {
3292 if (state->n_rdonly == 0)
3293 call_close |= is_rdonly;
3294 else if (is_rdonly)
3295 calldata->arg.fmode |= FMODE_READ;
3296 if (state->n_wronly == 0)
3297 call_close |= is_wronly;
3298 else if (is_wronly)
3299 calldata->arg.fmode |= FMODE_WRITE;
3300 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
3301 call_close |= is_rdwr;
3302 } else if (is_rdwr)
3303 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
3304
3305 if (!nfs4_valid_open_stateid(state) ||
3306 !nfs4_refresh_open_stateid(&calldata->arg.stateid, state))
3307 call_close = 0;
3308 spin_unlock(&state->owner->so_lock);
3309
3310 if (!call_close) {
3311 /* Note: exit _without_ calling nfs4_close_done */
3312 goto out_no_action;
3313 }
3314
3315 if (!calldata->lr.roc && nfs4_wait_on_layoutreturn(inode, task)) {
3316 nfs_release_seqid(calldata->arg.seqid);
3317 goto out_wait;
3318 }
3319
3320 if (calldata->arg.fmode == 0)
3321 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
3322
3323 if (calldata->arg.fmode == 0 || calldata->arg.fmode == FMODE_READ) {
3324 /* Close-to-open cache consistency revalidation */
3325 if (!nfs4_have_delegation(inode, FMODE_READ))
3326 calldata->arg.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
3327 else
3328 calldata->arg.bitmask = NULL;
3329 }
3330
3331 calldata->arg.share_access =
3332 nfs4_map_atomic_open_share(NFS_SERVER(inode),
3333 calldata->arg.fmode, 0);
3334
3335 if (calldata->res.fattr == NULL)
3336 calldata->arg.bitmask = NULL;
3337 else if (calldata->arg.bitmask == NULL)
3338 calldata->res.fattr = NULL;
3339 calldata->timestamp = jiffies;
3340 if (nfs4_setup_sequence(NFS_SERVER(inode)->nfs_client,
3341 &calldata->arg.seq_args,
3342 &calldata->res.seq_res,
3343 task) != 0)
3344 nfs_release_seqid(calldata->arg.seqid);
3345 dprintk("%s: done!\n", __func__);
3346 return;
3347out_no_action:
3348 task->tk_action = NULL;
3349out_wait:
3350 nfs4_sequence_done(task, &calldata->res.seq_res);
3351}
3352
3353static const struct rpc_call_ops nfs4_close_ops = {
3354 .rpc_call_prepare = nfs4_close_prepare,
3355 .rpc_call_done = nfs4_close_done,
3356 .rpc_release = nfs4_free_closedata,
3357};
3358
3359/*
3360 * It is possible for data to be read/written from a mem-mapped file
3361 * after the sys_close call (which hits the vfs layer as a flush).
3362 * This means that we can't safely call nfsv4 close on a file until
3363 * the inode is cleared. This in turn means that we are not good
3364 * NFSv4 citizens - we do not indicate to the server to update the file's
3365 * share state even when we are done with one of the three share
3366 * stateids in the inode.
3367 *
3368 * NOTE: Caller must be holding the sp->so_owner semaphore!
3369 */
3370int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
3371{
3372 struct nfs_server *server = NFS_SERVER(state->inode);
3373 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
3374 struct nfs4_closedata *calldata;
3375 struct nfs4_state_owner *sp = state->owner;
3376 struct rpc_task *task;
3377 struct rpc_message msg = {
3378 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
3379 .rpc_cred = state->owner->so_cred,
3380 };
3381 struct rpc_task_setup task_setup_data = {
3382 .rpc_client = server->client,
3383 .rpc_message = &msg,
3384 .callback_ops = &nfs4_close_ops,
3385 .workqueue = nfsiod_workqueue,
3386 .flags = RPC_TASK_ASYNC,
3387 };
3388 int status = -ENOMEM;
3389
3390 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
3391 &task_setup_data.rpc_client, &msg);
3392
3393 calldata = kzalloc(sizeof(*calldata), gfp_mask);
3394 if (calldata == NULL)
3395 goto out;
3396 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
3397 calldata->inode = state->inode;
3398 calldata->state = state;
3399 calldata->arg.fh = NFS_FH(state->inode);
3400 if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state))
3401 goto out_free_calldata;
3402 /* Serialization for the sequence id */
3403 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
3404 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
3405 if (IS_ERR(calldata->arg.seqid))
3406 goto out_free_calldata;
3407 nfs_fattr_init(&calldata->fattr);
3408 calldata->arg.fmode = 0;
3409 calldata->lr.arg.ld_private = &calldata->lr.ld_private;
3410 calldata->res.fattr = &calldata->fattr;
3411 calldata->res.seqid = calldata->arg.seqid;
3412 calldata->res.server = server;
3413 calldata->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
3414 calldata->lr.roc = pnfs_roc(state->inode,
3415 &calldata->lr.arg, &calldata->lr.res, msg.rpc_cred);
3416 if (calldata->lr.roc) {
3417 calldata->arg.lr_args = &calldata->lr.arg;
3418 calldata->res.lr_res = &calldata->lr.res;
3419 }
3420 nfs_sb_active(calldata->inode->i_sb);
3421
3422 msg.rpc_argp = &calldata->arg;
3423 msg.rpc_resp = &calldata->res;
3424 task_setup_data.callback_data = calldata;
3425 task = rpc_run_task(&task_setup_data);
3426 if (IS_ERR(task))
3427 return PTR_ERR(task);
3428 status = 0;
3429 if (wait)
3430 status = rpc_wait_for_completion_task(task);
3431 rpc_put_task(task);
3432 return status;
3433out_free_calldata:
3434 kfree(calldata);
3435out:
3436 nfs4_put_open_state(state);
3437 nfs4_put_state_owner(sp);
3438 return status;
3439}
3440
3441static struct inode *
3442nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
3443 int open_flags, struct iattr *attr, int *opened)
3444{
3445 struct nfs4_state *state;
3446 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
3447
3448 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
3449
3450 /* Protect against concurrent sillydeletes */
3451 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
3452
3453 nfs4_label_release_security(label);
3454
3455 if (IS_ERR(state))
3456 return ERR_CAST(state);
3457 return state->inode;
3458}
3459
3460static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
3461{
3462 if (ctx->state == NULL)
3463 return;
3464 if (is_sync)
3465 nfs4_close_sync(ctx->state, ctx->mode);
3466 else
3467 nfs4_close_state(ctx->state, ctx->mode);
3468}
3469
3470#define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
3471#define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
3472#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_MODE_UMASK - 1UL)
3473
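/*
 * Probe which attributes and features the server supports, and update the
 * nfs_server capability flags and attribute bitmasks to match.
 */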
3474static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3475{
3476 u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion;
3477 struct nfs4_server_caps_arg args = {
3478 .fhandle = fhandle,
3479 .bitmask = bitmask,
3480 };
3481 struct nfs4_server_caps_res res = {};
3482 struct rpc_message msg = {
3483 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
3484 .rpc_argp = &args,
3485 .rpc_resp = &res,
3486 };
3487 int status;
3488 int i;
3489
3490 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
3491 FATTR4_WORD0_FH_EXPIRE_TYPE |
3492 FATTR4_WORD0_LINK_SUPPORT |
3493 FATTR4_WORD0_SYMLINK_SUPPORT |
3494 FATTR4_WORD0_ACLSUPPORT;
3495 if (minorversion)
3496 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
3497
3498 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3499 if (status == 0) {
3500 /* Sanity check the server answers */
3501 switch (minorversion) {
3502 case 0:
3503 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
3504 res.attr_bitmask[2] = 0;
3505 break;
3506 case 1:
3507 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
3508 break;
3509 case 2:
3510 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
3511 }
3512 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
3513 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
3514 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
3515 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
3516 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
3517 NFS_CAP_CTIME|NFS_CAP_MTIME|
3518 NFS_CAP_SECURITY_LABEL);
3519 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
3520 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3521 server->caps |= NFS_CAP_ACLS;
3522 if (res.has_links != 0)
3523 server->caps |= NFS_CAP_HARDLINKS;
3524 if (res.has_symlinks != 0)
3525 server->caps |= NFS_CAP_SYMLINKS;
3526 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
3527 server->caps |= NFS_CAP_FILEID;
3528 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
3529 server->caps |= NFS_CAP_MODE;
3530 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
3531 server->caps |= NFS_CAP_NLINK;
3532 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
3533 server->caps |= NFS_CAP_OWNER;
3534 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
3535 server->caps |= NFS_CAP_OWNER_GROUP;
3536 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
3537 server->caps |= NFS_CAP_ATIME;
3538 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
3539 server->caps |= NFS_CAP_CTIME;
3540 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
3541 server->caps |= NFS_CAP_MTIME;
3542#ifdef CONFIG_NFS_V4_SECURITY_LABEL
3543 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
3544 server->caps |= NFS_CAP_SECURITY_LABEL;
3545#endif
3546 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
3547 sizeof(server->attr_bitmask));
3548 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
3549
3550 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
3551 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
3552 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
3553 server->cache_consistency_bitmask[2] = 0;
3554
3555 /* Avoid a regression due to buggy server */
3556 for (i = 0; i < ARRAY_SIZE(res.exclcreat_bitmask); i++)
3557 res.exclcreat_bitmask[i] &= res.attr_bitmask[i];
3558 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
3559 sizeof(server->exclcreat_bitmask));
3560
3561 server->acl_bitmask = res.acl_bitmask;
3562 server->fh_expire_type = res.fh_expire_type;
3563 }
3564
3565 return status;
3566}
3567
3568int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3569{
3570 struct nfs4_exception exception = { };
3571 int err;
3572 do {
3573 err = nfs4_handle_exception(server,
3574 _nfs4_server_capabilities(server, fhandle),
3575 &exception);
3576 } while (exception.retry);
3577 return err;
3578}
3579
3580static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3581 struct nfs_fsinfo *info)
3582{
3583 u32 bitmask[3];
3584 struct nfs4_lookup_root_arg args = {
3585 .bitmask = bitmask,
3586 };
3587 struct nfs4_lookup_res res = {
3588 .server = server,
3589 .fattr = info->fattr,
3590 .fh = fhandle,
3591 };
3592 struct rpc_message msg = {
3593 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
3594 .rpc_argp = &args,
3595 .rpc_resp = &res,
3596 };
3597
3598 bitmask[0] = nfs4_fattr_bitmap[0];
3599 bitmask[1] = nfs4_fattr_bitmap[1];
3600 /*
3601	 * The security label is fetched by the upcoming getfattr, so leave it out here
3602 */
3603 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
3604
3605 nfs_fattr_init(info->fattr);
3606 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3607}
3608
3609static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3610 struct nfs_fsinfo *info)
3611{
3612 struct nfs4_exception exception = { };
3613 int err;
3614 do {
3615 err = _nfs4_lookup_root(server, fhandle, info);
3616 trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
3617 switch (err) {
3618 case 0:
3619 case -NFS4ERR_WRONGSEC:
3620 goto out;
3621 default:
3622 err = nfs4_handle_exception(server, err, &exception);
3623 }
3624 } while (exception.retry);
3625out:
3626 return err;
3627}
3628
3629static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3630 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
3631{
3632 struct rpc_auth_create_args auth_args = {
3633 .pseudoflavor = flavor,
3634 };
3635 struct rpc_auth *auth;
3636
3637 auth = rpcauth_create(&auth_args, server->client);
3638 if (IS_ERR(auth))
3639 return -EACCES;
3640 return nfs4_lookup_root(server, fhandle, info);
3641}
3642
3643/*
3644 * Retry pseudoroot lookup with various security flavors. We do this when:
3645 *
3646 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
3647 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
3648 *
3649 * Returns zero on success, or a negative NFS4ERR value, or a
3650 * negative errno value.
3651 */
3652static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3653 struct nfs_fsinfo *info)
3654{
3655 /* Per 3530bis 15.33.5 */
3656 static const rpc_authflavor_t flav_array[] = {
3657 RPC_AUTH_GSS_KRB5P,
3658 RPC_AUTH_GSS_KRB5I,
3659 RPC_AUTH_GSS_KRB5,
3660 RPC_AUTH_UNIX, /* courtesy */
3661 RPC_AUTH_NULL,
3662 };
3663 int status = -EPERM;
3664 size_t i;
3665
3666 if (server->auth_info.flavor_len > 0) {
3667 /* try each flavor specified by user */
3668 for (i = 0; i < server->auth_info.flavor_len; i++) {
3669 status = nfs4_lookup_root_sec(server, fhandle, info,
3670 server->auth_info.flavors[i]);
3671 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3672 continue;
3673 break;
3674 }
3675 } else {
3676 /* no flavors specified by user, try default list */
3677 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
3678 status = nfs4_lookup_root_sec(server, fhandle, info,
3679 flav_array[i]);
3680 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3681 continue;
3682 break;
3683 }
3684 }
3685
3686 /*
3687	 * -EACCES could mean that the user doesn't have correct permissions
3688 * to access the mount. It could also mean that we tried to mount
3689 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
3690 * existing mount programs don't handle -EACCES very well so it should
3691 * be mapped to -EPERM instead.
3692 */
3693 if (status == -EACCES)
3694 status = -EPERM;
3695 return status;
3696}
3697
3698/**
3699 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
3700 * @server: initialized nfs_server handle
3701 * @fhandle: we fill in the pseudo-fs root file handle
3702 * @info: we fill in an FSINFO struct
3703 * @auth_probe: probe the auth flavours
3704 *
3705 * Returns zero on success, or a negative errno.
3706 */
3707int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
3708 struct nfs_fsinfo *info,
3709 bool auth_probe)
3710{
3711 int status = 0;
3712
3713 if (!auth_probe)
3714 status = nfs4_lookup_root(server, fhandle, info);
3715
3716	if (auth_probe || status == -NFS4ERR_WRONGSEC)
3717 status = server->nfs_client->cl_mvops->find_root_sec(server,
3718 fhandle, info);
3719
3720 if (status == 0)
3721 status = nfs4_server_capabilities(server, fhandle);
3722 if (status == 0)
3723 status = nfs4_do_fsinfo(server, fhandle, info);
3724
3725 return nfs4_map_errors(status);
3726}
3727
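/*
 * Probe server capabilities and fetch the attributes of the mounted-on
 * file handle at mount time. The fsid returned here is recorded in the
 * nfs_server so that a later fsid mismatch (i.e. a referral) can be
 * detected during inode revalidation.
 */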
3728static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
3729 struct nfs_fsinfo *info)
3730{
3731 int error;
3732 struct nfs_fattr *fattr = info->fattr;
3733 struct nfs4_label *label = NULL;
3734
3735 error = nfs4_server_capabilities(server, mntfh);
3736 if (error < 0) {
3737 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
3738 return error;
3739 }
3740
3741 label = nfs4_label_alloc(server, GFP_KERNEL);
3742 if (IS_ERR(label))
3743 return PTR_ERR(label);
3744
3745 error = nfs4_proc_getattr(server, mntfh, fattr, label);
3746 if (error < 0) {
3747 dprintk("nfs4_get_root: getattr error = %d\n", -error);
3748 goto err_free_label;
3749 }
3750
3751 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
3752 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
3753 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
3754
3755err_free_label:
3756 nfs4_label_free(label);
3757
3758 return error;
3759}
3760
3761/*
3762 * Get locations and (maybe) other attributes of a referral.
3763 * Note that we'll actually follow the referral later when
3764 * we detect fsid mismatch in inode revalidation
3765 */
3766static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
3767 const struct qstr *name, struct nfs_fattr *fattr,
3768 struct nfs_fh *fhandle)
3769{
3770 int status = -ENOMEM;
3771 struct page *page = NULL;
3772 struct nfs4_fs_locations *locations = NULL;
3773
3774 page = alloc_page(GFP_KERNEL);
3775 if (page == NULL)
3776 goto out;
3777 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
3778 if (locations == NULL)
3779 goto out;
3780
3781 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
3782 if (status != 0)
3783 goto out;
3784
3785 /*
3786 * If the fsid didn't change, this is a migration event, not a
3787 * referral. Cause us to drop into the exception handler, which
3788 * will kick off migration recovery.
3789 */
3790 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
3791 dprintk("%s: server did not return a different fsid for"
3792 " a referral at %s\n", __func__, name->name);
3793 status = -NFS4ERR_MOVED;
3794 goto out;
3795 }
3796 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
3797 nfs_fixup_referral_attributes(&locations->fattr);
3798
3799 /* replace the lookup nfs_fattr with the locations nfs_fattr */
3800 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
3801 memset(fhandle, 0, sizeof(struct nfs_fh));
3802out:
3803 if (page)
3804 __free_page(page);
3805 kfree(locations);
3806 return status;
3807}
3808
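/*
 * Issue a GETATTR for @fhandle. nfs4_bitmask() selects an attribute mask
 * that omits the security label when no label buffer was supplied.
 */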
3809static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3810 struct nfs_fattr *fattr, struct nfs4_label *label)
3811{
3812 struct nfs4_getattr_arg args = {
3813 .fh = fhandle,
3814 .bitmask = server->attr_bitmask,
3815 };
3816 struct nfs4_getattr_res res = {
3817 .fattr = fattr,
3818 .label = label,
3819 .server = server,
3820 };
3821 struct rpc_message msg = {
3822 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
3823 .rpc_argp = &args,
3824 .rpc_resp = &res,
3825 };
3826
3827 args.bitmask = nfs4_bitmask(server, label);
3828
3829 nfs_fattr_init(fattr);
3830 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3831}
3832
3833static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3834 struct nfs_fattr *fattr, struct nfs4_label *label)
3835{
3836 struct nfs4_exception exception = { };
3837 int err;
3838 do {
3839 err = _nfs4_proc_getattr(server, fhandle, fattr, label);
3840 trace_nfs4_getattr(server, fhandle, fattr, err);
3841 err = nfs4_handle_exception(server, err,
3842 &exception);
3843 } while (exception.retry);
3844 return err;
3845}
3846
3847/*
3848 * The file is not closed if it is opened due to a request to change
3849 * the size of the file. The open call will not be needed once the
3850 * VFS layer lookup-intents are implemented.
3851 *
3852 * Close is called when the inode is destroyed.
3853 * If we haven't opened the file for O_WRONLY, we need to do so
3854 * in the size_change case in order to obtain a stateid.
3855 *
3856 * Got race?
3857 * Because OPEN is always done by name in nfsv4, it is
3858 * possible that we opened a different file by the same
3859 * name. We can recognize this race condition, but we
3860 * can't do anything about it besides returning an error.
3861 *
3862 * This will be fixed with VFS changes (lookup-intent).
3863 */
3864static int
3865nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
3866 struct iattr *sattr)
3867{
3868 struct inode *inode = d_inode(dentry);
3869 struct rpc_cred *cred = NULL;
3870 struct nfs_open_context *ctx = NULL;
3871 struct nfs4_label *label = NULL;
3872 int status;
3873
3874 if (pnfs_ld_layoutret_on_setattr(inode) &&
3875 sattr->ia_valid & ATTR_SIZE &&
3876 sattr->ia_size < i_size_read(inode))
3877 pnfs_commit_and_return_layout(inode);
3878
3879 nfs_fattr_init(fattr);
3880
3881 /* Deal with open(O_TRUNC) */
3882 if (sattr->ia_valid & ATTR_OPEN)
3883 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
3884
3885 /* Optimization: if the end result is no change, don't RPC */
3886 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
3887 return 0;
3888
3889 /* Search for an existing open(O_WRITE) file */
3890 if (sattr->ia_valid & ATTR_FILE) {
3891
3892 ctx = nfs_file_open_context(sattr->ia_file);
3893 if (ctx)
3894 cred = ctx->cred;
3895 }
3896
3897 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
3898 if (IS_ERR(label))
3899 return PTR_ERR(label);
3900
3901 /* Return any delegations if we're going to change ACLs */
3902 if ((sattr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
3903 nfs4_inode_make_writeable(inode);
3904
3905 status = nfs4_do_setattr(inode, cred, fattr, sattr, ctx, NULL, label);
3906 if (status == 0) {
3907 nfs_setattr_update_inode(inode, sattr, fattr);
3908 nfs_setsecurity(inode, fattr, label);
3909 }
3910 nfs4_label_free(label);
3911 return status;
3912}
3913
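/*
 * Issue a LOOKUP for @name in directory @dir, returning the child's file
 * handle, attributes and (optionally) its security label.
 */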
3914static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
3915 const struct qstr *name, struct nfs_fh *fhandle,
3916 struct nfs_fattr *fattr, struct nfs4_label *label)
3917{
3918 struct nfs_server *server = NFS_SERVER(dir);
3919 int status;
3920 struct nfs4_lookup_arg args = {
3921 .bitmask = server->attr_bitmask,
3922 .dir_fh = NFS_FH(dir),
3923 .name = name,
3924 };
3925 struct nfs4_lookup_res res = {
3926 .server = server,
3927 .fattr = fattr,
3928 .label = label,
3929 .fh = fhandle,
3930 };
3931 struct rpc_message msg = {
3932 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
3933 .rpc_argp = &args,
3934 .rpc_resp = &res,
3935 };
3936
3937 args.bitmask = nfs4_bitmask(server, label);
3938
3939 nfs_fattr_init(fattr);
3940
3941 dprintk("NFS call lookup %s\n", name->name);
3942 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
3943 dprintk("NFS reply lookup: %d\n", status);
3944 return status;
3945}
3946
3947static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
3948{
3949 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
3950 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
3951 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
3952 fattr->nlink = 2;
3953}
3954
3955static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
3956 const struct qstr *name, struct nfs_fh *fhandle,
3957 struct nfs_fattr *fattr, struct nfs4_label *label)
3958{
3959 struct nfs4_exception exception = { };
3960 struct rpc_clnt *client = *clnt;
3961 int err;
3962 do {
3963 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label);
3964 trace_nfs4_lookup(dir, name, err);
3965 switch (err) {
3966 case -NFS4ERR_BADNAME:
3967 err = -ENOENT;
3968 goto out;
3969 case -NFS4ERR_MOVED:
3970 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
3971 if (err == -NFS4ERR_MOVED)
3972 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3973 goto out;
3974 case -NFS4ERR_WRONGSEC:
3975 err = -EPERM;
3976 if (client != *clnt)
3977 goto out;
3978 client = nfs4_negotiate_security(client, dir, name);
3979 if (IS_ERR(client))
3980 return PTR_ERR(client);
3981
3982 exception.retry = 1;
3983 break;
3984 default:
3985 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3986 }
3987 } while (exception.retry);
3988
3989out:
3990 if (err == 0)
3991 *clnt = client;
3992 else if (client != *clnt)
3993 rpc_shutdown_client(client);
3994
3995 return err;
3996}
3997
3998static int nfs4_proc_lookup(struct inode *dir, const struct qstr *name,
3999 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
4000 struct nfs4_label *label)
4001{
4002 int status;
4003 struct rpc_clnt *client = NFS_CLIENT(dir);
4004
4005 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label);
4006 if (client != NFS_CLIENT(dir)) {
4007 rpc_shutdown_client(client);
4008 nfs_fixup_secinfo_attributes(fattr);
4009 }
4010 return status;
4011}
4012
4013struct rpc_clnt *
4014nfs4_proc_lookup_mountpoint(struct inode *dir, const struct qstr *name,
4015 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
4016{
4017 struct rpc_clnt *client = NFS_CLIENT(dir);
4018 int status;
4019
4020 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
4021 if (status < 0)
4022 return ERR_PTR(status);
4023 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
4024}
4025
4026static int _nfs4_proc_lookupp(struct inode *inode,
4027 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
4028 struct nfs4_label *label)
4029{
4030 struct rpc_clnt *clnt = NFS_CLIENT(inode);
4031 struct nfs_server *server = NFS_SERVER(inode);
4032 int status;
4033 struct nfs4_lookupp_arg args = {
4034 .bitmask = server->attr_bitmask,
4035 .fh = NFS_FH(inode),
4036 };
4037 struct nfs4_lookupp_res res = {
4038 .server = server,
4039 .fattr = fattr,
4040 .label = label,
4041 .fh = fhandle,
4042 };
4043 struct rpc_message msg = {
4044 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUPP],
4045 .rpc_argp = &args,
4046 .rpc_resp = &res,
4047 };
4048
4049 args.bitmask = nfs4_bitmask(server, label);
4050
4051 nfs_fattr_init(fattr);
4052
4053 dprintk("NFS call lookupp ino=0x%lx\n", inode->i_ino);
4054 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
4055 &res.seq_res, 0);
4056 dprintk("NFS reply lookupp: %d\n", status);
4057 return status;
4058}
4059
4060static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle,
4061 struct nfs_fattr *fattr, struct nfs4_label *label)
4062{
4063 struct nfs4_exception exception = { };
4064 int err;
4065 do {
4066 err = _nfs4_proc_lookupp(inode, fhandle, fattr, label);
4067 trace_nfs4_lookupp(inode, err);
4068 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4069 &exception);
4070 } while (exception.retry);
4071 return err;
4072}
4073
4074static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
4075{
4076 struct nfs_server *server = NFS_SERVER(inode);
4077 struct nfs4_accessargs args = {
4078 .fh = NFS_FH(inode),
4079 .access = entry->mask,
4080 };
4081 struct nfs4_accessres res = {
4082 .server = server,
4083 };
4084 struct rpc_message msg = {
4085 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
4086 .rpc_argp = &args,
4087 .rpc_resp = &res,
4088 .rpc_cred = entry->cred,
4089 };
4090 int status = 0;
4091
4092 if (!nfs_have_delegated_attributes(inode)) {
4093 res.fattr = nfs_alloc_fattr();
4094 if (res.fattr == NULL)
4095 return -ENOMEM;
4096 args.bitmask = server->cache_consistency_bitmask;
4097 }
4098
4099 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4100 if (!status) {
4101 nfs_access_set_mask(entry, res.access);
4102 if (res.fattr)
4103 nfs_refresh_inode(inode, res.fattr);
4104 }
4105 nfs_free_fattr(res.fattr);
4106 return status;
4107}
4108
4109static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
4110{
4111 struct nfs4_exception exception = { };
4112 int err;
4113 do {
4114 err = _nfs4_proc_access(inode, entry);
4115 trace_nfs4_access(inode, err);
4116 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4117 &exception);
4118 } while (exception.retry);
4119 return err;
4120}
4121
4122/*
4123 * TODO: For the time being, we don't try to get any attributes
4124 * along with any of the zero-copy operations READ, READDIR,
4125 * READLINK, WRITE.
4126 *
4127 * In the case of the first three, we want to put the GETATTR
4128 * after the read-type operation -- this is because it is hard
4129 * to predict the length of a GETATTR response in v4, and thus
4130 * align the READ data correctly. This means that the GETATTR
4131 * may end up partially falling into the page cache, and we should
4132 * shift it into the 'tail' of the xdr_buf before processing.
4133 * To do this efficiently, we need to know the total length
4134 * of data received, which doesn't seem to be available outside
4135 * of the RPC layer.
4136 *
4137 * In the case of WRITE, we also want to put the GETATTR after
4138 * the operation -- in this case because we want to make sure
4139 * we get the post-operation mtime and size.
4140 *
4141 * Both of these changes to the XDR layer would in fact be quite
4142 * minor, but I decided to leave them for a subsequent patch.
4143 */
4144static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
4145 unsigned int pgbase, unsigned int pglen)
4146{
4147 struct nfs4_readlink args = {
4148 .fh = NFS_FH(inode),
4149 .pgbase = pgbase,
4150 .pglen = pglen,
4151 .pages = &page,
4152 };
4153 struct nfs4_readlink_res res;
4154 struct rpc_message msg = {
4155 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
4156 .rpc_argp = &args,
4157 .rpc_resp = &res,
4158 };
4159
4160 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
4161}
4162
4163static int nfs4_proc_readlink(struct inode *inode, struct page *page,
4164 unsigned int pgbase, unsigned int pglen)
4165{
4166 struct nfs4_exception exception = { };
4167 int err;
4168 do {
4169 err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
4170 trace_nfs4_readlink(inode, err);
4171 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4172 &exception);
4173 } while (exception.retry);
4174 return err;
4175}
4176
4177/*
4178 * This is just for mknod. open(O_CREAT) will always do ->open_context().
4179 */
4180static int
4181nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
4182 int flags)
4183{
4184 struct nfs_server *server = NFS_SERVER(dir);
4185 struct nfs4_label l, *ilabel = NULL;
4186 struct nfs_open_context *ctx;
4187 struct nfs4_state *state;
4188 int status = 0;
4189
4190 ctx = alloc_nfs_open_context(dentry, FMODE_READ, NULL);
4191 if (IS_ERR(ctx))
4192 return PTR_ERR(ctx);
4193
4194 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
4195
4196 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
4197 sattr->ia_mode &= ~current_umask();
4198 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
4199 if (IS_ERR(state)) {
4200 status = PTR_ERR(state);
4201 goto out;
4202 }
4203out:
4204 nfs4_label_release_security(ilabel);
4205 put_nfs_open_context(ctx);
4206 return status;
4207}
4208
4209static int _nfs4_proc_remove(struct inode *dir, const struct qstr *name)
4210{
4211 struct nfs_server *server = NFS_SERVER(dir);
4212 struct nfs_removeargs args = {
4213 .fh = NFS_FH(dir),
4214 .name = *name,
4215 };
4216 struct nfs_removeres res = {
4217 .server = server,
4218 };
4219 struct rpc_message msg = {
4220 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
4221 .rpc_argp = &args,
4222 .rpc_resp = &res,
4223 };
4224 unsigned long timestamp = jiffies;
4225 int status;
4226
4227 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
4228 if (status == 0)
4229 update_changeattr(dir, &res.cinfo, timestamp);
4230 return status;
4231}
4232
4233static int nfs4_proc_remove(struct inode *dir, struct dentry *dentry)
4234{
4235 struct nfs4_exception exception = { };
4236 struct inode *inode = d_inode(dentry);
4237 int err;
4238
4239 if (inode) {
4240 if (inode->i_nlink == 1)
4241 nfs4_inode_return_delegation(inode);
4242 else
4243 nfs4_inode_make_writeable(inode);
4244 }
4245 do {
4246 err = _nfs4_proc_remove(dir, &dentry->d_name);
4247 trace_nfs4_remove(dir, &dentry->d_name, err);
4248 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4249 &exception);
4250 } while (exception.retry);
4251 return err;
4252}
4253
4254static int nfs4_proc_rmdir(struct inode *dir, const struct qstr *name)
4255{
4256 struct nfs4_exception exception = { };
4257 int err;
4258
4259 do {
4260 err = _nfs4_proc_remove(dir, name);
4261 trace_nfs4_remove(dir, name, err);
4262 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4263 &exception);
4264 } while (exception.retry);
4265 return err;
4266}
4267
4268static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct dentry *dentry)
4269{
4270 struct nfs_removeargs *args = msg->rpc_argp;
4271 struct nfs_removeres *res = msg->rpc_resp;
4272 struct inode *inode = d_inode(dentry);
4273
4274 res->server = NFS_SB(dentry->d_sb);
4275 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
4276 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1);
4277
4278 nfs_fattr_init(res->dir_attr);
4279
4280 if (inode)
4281 nfs4_inode_return_delegation(inode);
4282}
4283
4284static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
4285{
4286 nfs4_setup_sequence(NFS_SB(data->dentry->d_sb)->nfs_client,
4287 &data->args.seq_args,
4288 &data->res.seq_res,
4289 task);
4290}
4291
4292static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
4293{
4294 struct nfs_unlinkdata *data = task->tk_calldata;
4295 struct nfs_removeres *res = &data->res;
4296
4297 if (!nfs4_sequence_done(task, &res->seq_res))
4298 return 0;
4299 if (nfs4_async_handle_error(task, res->server, NULL,
4300 &data->timeout) == -EAGAIN)
4301 return 0;
4302 if (task->tk_status == 0)
4303 update_changeattr(dir, &res->cinfo, res->dir_attr->time_start);
4304 return 1;
4305}
4306
4307static void nfs4_proc_rename_setup(struct rpc_message *msg,
4308 struct dentry *old_dentry,
4309 struct dentry *new_dentry)
4310{
4311 struct nfs_renameargs *arg = msg->rpc_argp;
4312 struct nfs_renameres *res = msg->rpc_resp;
4313 struct inode *old_inode = d_inode(old_dentry);
4314 struct inode *new_inode = d_inode(new_dentry);
4315
4316 if (old_inode)
4317 nfs4_inode_make_writeable(old_inode);
4318 if (new_inode)
4319 nfs4_inode_return_delegation(new_inode);
4320 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
4321 res->server = NFS_SB(old_dentry->d_sb);
4322 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1);
4323}
4324
4325static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
4326{
4327 nfs4_setup_sequence(NFS_SERVER(data->old_dir)->nfs_client,
4328 &data->args.seq_args,
4329 &data->res.seq_res,
4330 task);
4331}
4332
4333static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
4334 struct inode *new_dir)
4335{
4336 struct nfs_renamedata *data = task->tk_calldata;
4337 struct nfs_renameres *res = &data->res;
4338
4339 if (!nfs4_sequence_done(task, &res->seq_res))
4340 return 0;
4341 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
4342 return 0;
4343
4344 if (task->tk_status == 0) {
4345 update_changeattr(old_dir, &res->old_cinfo, res->old_fattr->time_start);
4346 if (new_dir != old_dir)
4347 update_changeattr(new_dir, &res->new_cinfo, res->new_fattr->time_start);
4348 }
4349 return 1;
4350}
4351
4352static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
4353{
4354 struct nfs_server *server = NFS_SERVER(inode);
4355 struct nfs4_link_arg arg = {
4356 .fh = NFS_FH(inode),
4357 .dir_fh = NFS_FH(dir),
4358 .name = name,
4359 .bitmask = server->attr_bitmask,
4360 };
4361 struct nfs4_link_res res = {
4362 .server = server,
4363 .label = NULL,
4364 };
4365 struct rpc_message msg = {
4366 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
4367 .rpc_argp = &arg,
4368 .rpc_resp = &res,
4369 };
4370 int status = -ENOMEM;
4371
4372 res.fattr = nfs_alloc_fattr();
4373 if (res.fattr == NULL)
4374 goto out;
4375
4376 res.label = nfs4_label_alloc(server, GFP_KERNEL);
4377 if (IS_ERR(res.label)) {
4378 status = PTR_ERR(res.label);
4379 goto out;
4380 }
4381 arg.bitmask = nfs4_bitmask(server, res.label);
4382
4383 nfs4_inode_make_writeable(inode);
4384
4385 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4386 if (!status) {
4387 update_changeattr(dir, &res.cinfo, res.fattr->time_start);
4388 status = nfs_post_op_update_inode(inode, res.fattr);
4389 if (!status)
4390 nfs_setsecurity(inode, res.fattr, res.label);
4391 }
4392
4393
4394 nfs4_label_free(res.label);
4395
4396out:
4397 nfs_free_fattr(res.fattr);
4398 return status;
4399}
4400
4401static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
4402{
4403 struct nfs4_exception exception = { };
4404 int err;
4405 do {
4406 err = nfs4_handle_exception(NFS_SERVER(inode),
4407 _nfs4_proc_link(inode, dir, name),
4408 &exception);
4409 } while (exception.retry);
4410 return err;
4411}
4412
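/*
 * Argument/result bundle shared by the CREATE-based operations (symlink,
 * mkdir and mknod). Allocated by nfs4_alloc_createdata() and released by
 * nfs4_free_createdata().
 */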
4413struct nfs4_createdata {
4414 struct rpc_message msg;
4415 struct nfs4_create_arg arg;
4416 struct nfs4_create_res res;
4417 struct nfs_fh fh;
4418 struct nfs_fattr fattr;
4419 struct nfs4_label *label;
4420};
4421
4422static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
4423 const struct qstr *name, struct iattr *sattr, u32 ftype)
4424{
4425 struct nfs4_createdata *data;
4426
4427 data = kzalloc(sizeof(*data), GFP_KERNEL);
4428 if (data != NULL) {
4429 struct nfs_server *server = NFS_SERVER(dir);
4430
4431 data->label = nfs4_label_alloc(server, GFP_KERNEL);
4432 if (IS_ERR(data->label))
4433 goto out_free;
4434
4435 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
4436 data->msg.rpc_argp = &data->arg;
4437 data->msg.rpc_resp = &data->res;
4438 data->arg.dir_fh = NFS_FH(dir);
4439 data->arg.server = server;
4440 data->arg.name = name;
4441 data->arg.attrs = sattr;
4442 data->arg.ftype = ftype;
4443 data->arg.bitmask = nfs4_bitmask(server, data->label);
4444 data->arg.umask = current_umask();
4445 data->res.server = server;
4446 data->res.fh = &data->fh;
4447 data->res.fattr = &data->fattr;
4448 data->res.label = data->label;
4449 nfs_fattr_init(data->res.fattr);
4450 }
4451 return data;
4452out_free:
4453 kfree(data);
4454 return NULL;
4455}
4456
4457static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
4458{
4459 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
4460 &data->arg.seq_args, &data->res.seq_res, 1);
4461 if (status == 0) {
4462 update_changeattr(dir, &data->res.dir_cinfo,
4463 data->res.fattr->time_start);
4464 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
4465 }
4466 return status;
4467}
4468
4469static void nfs4_free_createdata(struct nfs4_createdata *data)
4470{
4471 nfs4_label_free(data->label);
4472 kfree(data);
4473}
4474
4475static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
4476 struct page *page, unsigned int len, struct iattr *sattr,
4477 struct nfs4_label *label)
4478{
4479 struct nfs4_createdata *data;
4480 int status = -ENAMETOOLONG;
4481
4482 if (len > NFS4_MAXPATHLEN)
4483 goto out;
4484
4485 status = -ENOMEM;
4486 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
4487 if (data == NULL)
4488 goto out;
4489
4490 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
4491 data->arg.u.symlink.pages = &page;
4492 data->arg.u.symlink.len = len;
4493 data->arg.label = label;
4494
4495 status = nfs4_do_create(dir, dentry, data);
4496
4497 nfs4_free_createdata(data);
4498out:
4499 return status;
4500}
4501
4502static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
4503 struct page *page, unsigned int len, struct iattr *sattr)
4504{
4505 struct nfs4_exception exception = { };
4506 struct nfs4_label l, *label = NULL;
4507 int err;
4508
4509 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4510
4511 do {
4512 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
4513 trace_nfs4_symlink(dir, &dentry->d_name, err);
4514 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4515 &exception);
4516 } while (exception.retry);
4517
4518 nfs4_label_release_security(label);
4519 return err;
4520}
4521
4522static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
4523 struct iattr *sattr, struct nfs4_label *label)
4524{
4525 struct nfs4_createdata *data;
4526 int status = -ENOMEM;
4527
4528 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
4529 if (data == NULL)
4530 goto out;
4531
4532 data->arg.label = label;
4533 status = nfs4_do_create(dir, dentry, data);
4534
4535 nfs4_free_createdata(data);
4536out:
4537 return status;
4538}
4539
4540static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
4541 struct iattr *sattr)
4542{
4543 struct nfs_server *server = NFS_SERVER(dir);
4544 struct nfs4_exception exception = { };
4545 struct nfs4_label l, *label = NULL;
4546 int err;
4547
4548 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4549
4550 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
4551 sattr->ia_mode &= ~current_umask();
4552 do {
4553 err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
4554 trace_nfs4_mkdir(dir, &dentry->d_name, err);
4555 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4556 &exception);
4557 } while (exception.retry);
4558 nfs4_label_release_security(label);
4559
4560 return err;
4561}
4562
4563static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
4564 u64 cookie, struct page **pages, unsigned int count, bool plus)
4565{
4566 struct inode *dir = d_inode(dentry);
4567 struct nfs4_readdir_arg args = {
4568 .fh = NFS_FH(dir),
4569 .pages = pages,
4570 .pgbase = 0,
4571 .count = count,
4572 .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask,
4573 .plus = plus,
4574 };
4575 struct nfs4_readdir_res res;
4576 struct rpc_message msg = {
4577 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
4578 .rpc_argp = &args,
4579 .rpc_resp = &res,
4580 .rpc_cred = cred,
4581 };
4582 int status;
4583
4584 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
4585 dentry,
4586 (unsigned long long)cookie);
4587 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
4588 res.pgbase = args.pgbase;
4589 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
4590 if (status >= 0) {
4591 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
4592 status += args.pgbase;
4593 }
4594
4595 nfs_invalidate_atime(dir);
4596
4597 dprintk("%s: returns %d\n", __func__, status);
4598 return status;
4599}
4600
4601static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
4602 u64 cookie, struct page **pages, unsigned int count, bool plus)
4603{
4604 struct nfs4_exception exception = { };
4605 int err;
4606 do {
4607 err = _nfs4_proc_readdir(dentry, cred, cookie,
4608 pages, count, plus);
4609 trace_nfs4_readdir(d_inode(dentry), err);
4610 err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err,
4611 &exception);
4612 } while (exception.retry);
4613 return err;
4614}
4615
4616static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
4617 struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
4618{
4619 struct nfs4_createdata *data;
4620 int mode = sattr->ia_mode;
4621 int status = -ENOMEM;
4622
4623 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
4624 if (data == NULL)
4625 goto out;
4626
4627 if (S_ISFIFO(mode))
4628 data->arg.ftype = NF4FIFO;
4629 else if (S_ISBLK(mode)) {
4630 data->arg.ftype = NF4BLK;
4631 data->arg.u.device.specdata1 = MAJOR(rdev);
4632 data->arg.u.device.specdata2 = MINOR(rdev);
4633 }
4634 else if (S_ISCHR(mode)) {
4635 data->arg.ftype = NF4CHR;
4636 data->arg.u.device.specdata1 = MAJOR(rdev);
4637 data->arg.u.device.specdata2 = MINOR(rdev);
4638 } else if (!S_ISSOCK(mode)) {
4639 status = -EINVAL;
4640 goto out_free;
4641 }
4642
4643 data->arg.label = label;
4644 status = nfs4_do_create(dir, dentry, data);
4645out_free:
4646 nfs4_free_createdata(data);
4647out:
4648 return status;
4649}
4650
4651static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
4652 struct iattr *sattr, dev_t rdev)
4653{
4654 struct nfs_server *server = NFS_SERVER(dir);
4655 struct nfs4_exception exception = { };
4656 struct nfs4_label l, *label = NULL;
4657 int err;
4658
4659 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4660
4661 if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK))
4662 sattr->ia_mode &= ~current_umask();
4663 do {
4664 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
4665 trace_nfs4_mknod(dir, &dentry->d_name, err);
4666 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4667 &exception);
4668 } while (exception.retry);
4669
4670 nfs4_label_release_security(label);
4671
4672 return err;
4673}
4674
4675static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
4676 struct nfs_fsstat *fsstat)
4677{
4678 struct nfs4_statfs_arg args = {
4679 .fh = fhandle,
4680 .bitmask = server->attr_bitmask,
4681 };
4682 struct nfs4_statfs_res res = {
4683 .fsstat = fsstat,
4684 };
4685 struct rpc_message msg = {
4686 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
4687 .rpc_argp = &args,
4688 .rpc_resp = &res,
4689 };
4690
4691 nfs_fattr_init(fsstat->fattr);
4692 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4693}
4694
4695static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
4696{
4697 struct nfs4_exception exception = { };
4698 int err;
4699 do {
4700 err = nfs4_handle_exception(server,
4701 _nfs4_proc_statfs(server, fhandle, fsstat),
4702 &exception);
4703 } while (exception.retry);
4704 return err;
4705}
4706
4707static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
4708 struct nfs_fsinfo *fsinfo)
4709{
4710 struct nfs4_fsinfo_arg args = {
4711 .fh = fhandle,
4712 .bitmask = server->attr_bitmask,
4713 };
4714 struct nfs4_fsinfo_res res = {
4715 .fsinfo = fsinfo,
4716 };
4717 struct rpc_message msg = {
4718 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
4719 .rpc_argp = &args,
4720 .rpc_resp = &res,
4721 };
4722
4723 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4724}
4725
4726static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4727{
4728 struct nfs4_exception exception = { };
4729 unsigned long now = jiffies;
4730 int err;
4731
4732 do {
4733 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
4734 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
4735 if (err == 0) {
4736 nfs4_set_lease_period(server->nfs_client,
4737 fsinfo->lease_time * HZ,
4738 now);
4739 break;
4740 }
4741 err = nfs4_handle_exception(server, err, &exception);
4742 } while (exception.retry);
4743 return err;
4744}
4745
4746static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4747{
4748 int error;
4749
4750 nfs_fattr_init(fsinfo->fattr);
4751 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
4752 if (error == 0) {
4753 /* block layout checks this! */
4754 server->pnfs_blksize = fsinfo->blksize;
4755 set_pnfs_layoutdriver(server, fhandle, fsinfo);
4756 }
4757
4758 return error;
4759}
4760
4761static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4762 struct nfs_pathconf *pathconf)
4763{
4764 struct nfs4_pathconf_arg args = {
4765 .fh = fhandle,
4766 .bitmask = server->attr_bitmask,
4767 };
4768 struct nfs4_pathconf_res res = {
4769 .pathconf = pathconf,
4770 };
4771 struct rpc_message msg = {
4772 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
4773 .rpc_argp = &args,
4774 .rpc_resp = &res,
4775 };
4776
4777 /* None of the pathconf attributes are mandatory to implement */
4778 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
4779 memset(pathconf, 0, sizeof(*pathconf));
4780 return 0;
4781 }
4782
4783 nfs_fattr_init(pathconf->fattr);
4784 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4785}
4786
4787static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4788 struct nfs_pathconf *pathconf)
4789{
4790 struct nfs4_exception exception = { };
4791 int err;
4792
4793 do {
4794 err = nfs4_handle_exception(server,
4795 _nfs4_proc_pathconf(server, fhandle, pathconf),
4796 &exception);
4797 } while (exception.retry);
4798 return err;
4799}
4800
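/**
 * nfs4_set_rw_stateid - select the stateid to use for a READ or WRITE
 * @stateid: where the selected stateid is copied
 * @ctx: open context for the file being accessed
 * @l_ctx: lock context held by the caller, if any
 * @fmode: FMODE_READ or FMODE_WRITE
 *
 * Returns zero on success, or a negative error code. Callers treat -EIO
 * as meaning that the underlying open or lock state has been lost (see
 * nfs4_stateid_is_current() below).
 */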
4801int nfs4_set_rw_stateid(nfs4_stateid *stateid,
4802 const struct nfs_open_context *ctx,
4803 const struct nfs_lock_context *l_ctx,
4804 fmode_t fmode)
4805{
4806 return nfs4_select_rw_stateid(ctx->state, fmode, l_ctx, stateid, NULL);
4807}
4808EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
4809
4810static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
4811 const struct nfs_open_context *ctx,
4812 const struct nfs_lock_context *l_ctx,
4813 fmode_t fmode)
4814{
4815 nfs4_stateid current_stateid;
4816
4817 /* If the current stateid represents a lost lock, then exit */
4818	if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
4819 return true;
4820	return nfs4_stateid_match(stateid, &current_stateid);
4821}
4822
4823static bool nfs4_error_stateid_expired(int err)
4824{
4825 switch (err) {
4826 case -NFS4ERR_DELEG_REVOKED:
4827 case -NFS4ERR_ADMIN_REVOKED:
4828 case -NFS4ERR_BAD_STATEID:
4829 case -NFS4ERR_STALE_STATEID:
4830 case -NFS4ERR_OLD_STATEID:
4831 case -NFS4ERR_OPENMODE:
4832 case -NFS4ERR_EXPIRED:
4833 return true;
4834 }
4835 return false;
4836}
4837
4838static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
4839{
4840 struct nfs_server *server = NFS_SERVER(hdr->inode);
4841
4842 trace_nfs4_read(hdr, task->tk_status);
4843 if (task->tk_status < 0) {
4844 struct nfs4_exception exception = {
4845 .inode = hdr->inode,
4846 .state = hdr->args.context->state,
4847 .stateid = &hdr->args.stateid,
4848 };
4849 task->tk_status = nfs4_async_handle_exception(task,
4850 server, task->tk_status, &exception);
4851 if (exception.retry) {
4852 rpc_restart_call_prepare(task);
4853 return -EAGAIN;
4854 }
4855 }
4856
4857 if (task->tk_status > 0)
4858 renew_lease(server, hdr->timestamp);
4859 return 0;
4860}
4861
4862static bool nfs4_read_stateid_changed(struct rpc_task *task,
4863 struct nfs_pgio_args *args)
4864{
4865
4866 if (!nfs4_error_stateid_expired(task->tk_status) ||
4867 nfs4_stateid_is_current(&args->stateid,
4868 args->context,
4869 args->lock_context,
4870 FMODE_READ))
4871 return false;
4872 rpc_restart_call_prepare(task);
4873 return true;
4874}
4875
4876static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4877{
4878
4879 dprintk("--> %s\n", __func__);
4880
4881 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4882 return -EAGAIN;
4883 if (nfs4_read_stateid_changed(task, &hdr->args))
4884 return -EAGAIN;
4885 if (task->tk_status > 0)
4886 nfs_invalidate_atime(hdr->inode);
4887 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4888 nfs4_read_done_cb(task, hdr);
4889}
4890
4891static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
4892 struct rpc_message *msg)
4893{
4894 hdr->timestamp = jiffies;
4895 if (!hdr->pgio_done_cb)
4896 hdr->pgio_done_cb = nfs4_read_done_cb;
4897 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
4898 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0);
4899}
4900
4901static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
4902 struct nfs_pgio_header *hdr)
4903{
4904 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode)->nfs_client,
4905 &hdr->args.seq_args,
4906 &hdr->res.seq_res,
4907 task))
4908 return 0;
4909 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
4910 hdr->args.lock_context,
4911 hdr->rw_mode) == -EIO)
4912 return -EIO;
4913 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
4914 return -EIO;
4915 return 0;
4916}
4917
4918static int nfs4_write_done_cb(struct rpc_task *task,
4919 struct nfs_pgio_header *hdr)
4920{
4921 struct inode *inode = hdr->inode;
4922
4923 trace_nfs4_write(hdr, task->tk_status);
4924 if (task->tk_status < 0) {
4925 struct nfs4_exception exception = {
4926 .inode = hdr->inode,
4927 .state = hdr->args.context->state,
4928 .stateid = &hdr->args.stateid,
4929 };
4930 task->tk_status = nfs4_async_handle_exception(task,
4931 NFS_SERVER(inode), task->tk_status,
4932 &exception);
4933 if (exception.retry) {
4934 rpc_restart_call_prepare(task);
4935 return -EAGAIN;
4936 }
4937 }
4938 if (task->tk_status >= 0) {
4939 renew_lease(NFS_SERVER(inode), hdr->timestamp);
4940 nfs_writeback_update_inode(hdr);
4941 }
4942 return 0;
4943}
4944
4945static bool nfs4_write_stateid_changed(struct rpc_task *task,
4946 struct nfs_pgio_args *args)
4947{
4948
4949 if (!nfs4_error_stateid_expired(task->tk_status) ||
4950 nfs4_stateid_is_current(&args->stateid,
4951 args->context,
4952 args->lock_context,
4953 FMODE_WRITE))
4954 return false;
4955 rpc_restart_call_prepare(task);
4956 return true;
4957}
4958
4959static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4960{
4961 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4962 return -EAGAIN;
4963 if (nfs4_write_stateid_changed(task, &hdr->args))
4964 return -EAGAIN;
4965 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4966 nfs4_write_done_cb(task, hdr);
4967}
4968
4969static
4970bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
4971{
4972 /* Don't request attributes for pNFS or O_DIRECT writes */
4973 if (hdr->ds_clp != NULL || hdr->dreq != NULL)
4974 return false;
4975 /* Otherwise, request attributes if and only if we don't hold
4976 * a delegation
4977 */
4978 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
4979}
4980
4981static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
4982 struct rpc_message *msg)
4983{
4984 struct nfs_server *server = NFS_SERVER(hdr->inode);
4985
4986 if (!nfs4_write_need_cache_consistency_data(hdr)) {
4987 hdr->args.bitmask = NULL;
4988 hdr->res.fattr = NULL;
4989 } else
4990 hdr->args.bitmask = server->cache_consistency_bitmask;
4991
4992 if (!hdr->pgio_done_cb)
4993 hdr->pgio_done_cb = nfs4_write_done_cb;
4994 hdr->res.server = server;
4995 hdr->timestamp = jiffies;
4996
4997 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
4998 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1);
4999}
5000
5001static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
5002{
5003 nfs4_setup_sequence(NFS_SERVER(data->inode)->nfs_client,
5004 &data->args.seq_args,
5005 &data->res.seq_res,
5006 task);
5007}
5008
5009static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
5010{
5011 struct inode *inode = data->inode;
5012
5013 trace_nfs4_commit(data, task->tk_status);
5014 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
5015 NULL, NULL) == -EAGAIN) {
5016 rpc_restart_call_prepare(task);
5017 return -EAGAIN;
5018 }
5019 return 0;
5020}
5021
5022static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
5023{
5024 if (!nfs4_sequence_done(task, &data->res.seq_res))
5025 return -EAGAIN;
5026 return data->commit_done_cb(task, data);
5027}
5028
5029static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
5030{
5031 struct nfs_server *server = NFS_SERVER(data->inode);
5032
5033 if (data->commit_done_cb == NULL)
5034 data->commit_done_cb = nfs4_commit_done_cb;
5035 data->res.server = server;
5036 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
5037 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
5038}
5039
5040struct nfs4_renewdata {
5041 struct nfs_client *client;
5042 unsigned long timestamp;
5043};
5044
5045/*
5046 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
5047 * standalone procedure for queueing an asynchronous RENEW.
5048 */
5049static void nfs4_renew_release(void *calldata)
5050{
5051 struct nfs4_renewdata *data = calldata;
5052 struct nfs_client *clp = data->client;
5053
5054 if (refcount_read(&clp->cl_count) > 1)
5055 nfs4_schedule_state_renewal(clp);
5056 nfs_put_client(clp);
5057 kfree(data);
5058}
5059
5060static void nfs4_renew_done(struct rpc_task *task, void *calldata)
5061{
5062 struct nfs4_renewdata *data = calldata;
5063 struct nfs_client *clp = data->client;
5064 unsigned long timestamp = data->timestamp;
5065
5066 trace_nfs4_renew_async(clp, task->tk_status);
5067 switch (task->tk_status) {
5068 case 0:
5069 break;
5070 case -NFS4ERR_LEASE_MOVED:
5071 nfs4_schedule_lease_moved_recovery(clp);
5072 break;
5073 default:
5074 /* Unless we're shutting down, schedule state recovery! */
5075 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
5076 return;
5077		if (task->tk_status != -NFS4ERR_CB_PATH_DOWN) {
5078 nfs4_schedule_lease_recovery(clp);
5079 return;
5080 }
5081 nfs4_schedule_path_down_recovery(clp);
5082 }
5083 do_renew_lease(clp, timestamp);
5084}
5085
5086static const struct rpc_call_ops nfs4_renew_ops = {
5087 .rpc_call_done = nfs4_renew_done,
5088 .rpc_release = nfs4_renew_release,
5089};
5090
5091static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
5092{
5093 struct rpc_message msg = {
5094 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5095 .rpc_argp = clp,
5096 .rpc_cred = cred,
5097 };
5098 struct nfs4_renewdata *data;
5099
5100 if (renew_flags == 0)
5101 return 0;
5102 if (!refcount_inc_not_zero(&clp->cl_count))
5103 return -EIO;
5104 data = kmalloc(sizeof(*data), GFP_NOFS);
5105 if (data == NULL) {
5106 nfs_put_client(clp);
5107 return -ENOMEM;
5108 }
5109 data->client = clp;
5110 data->timestamp = jiffies;
5111 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
5112 &nfs4_renew_ops, data);
5113}
5114
5115static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
5116{
5117 struct rpc_message msg = {
5118 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
5119 .rpc_argp = clp,
5120 .rpc_cred = cred,
5121 };
5122 unsigned long now = jiffies;
5123 int status;
5124
5125 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5126 if (status < 0)
5127 return status;
5128 do_renew_lease(clp, now);
5129 return 0;
5130}
5131
5132static inline int nfs4_server_supports_acls(struct nfs_server *server)
5133{
5134 return server->caps & NFS_CAP_ACLS;
5135}
5136
5137/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
5138 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
5139 * the stack.
5140 */
5141#define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
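/* e.g. with XATTR_SIZE_MAX == 65536 and 4 KiB pages, this is 16 pages */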
5142
5143static int buf_to_pages_noslab(const void *buf, size_t buflen,
5144 struct page **pages)
5145{
5146 struct page *newpage, **spages;
5147 int rc = 0;
5148 size_t len;
5149 spages = pages;
5150
5151 do {
5152 len = min_t(size_t, PAGE_SIZE, buflen);
5153 newpage = alloc_page(GFP_KERNEL);
5154
5155 if (newpage == NULL)
5156 goto unwind;
5157 memcpy(page_address(newpage), buf, len);
5158 buf += len;
5159 buflen -= len;
5160 *pages++ = newpage;
5161 rc++;
5162 } while (buflen != 0);
5163
5164 return rc;
5165
5166unwind:
5167	for (; rc > 0; rc--)
5168 __free_page(spages[rc-1]);
5169 return -ENOMEM;
5170}
5171
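/*
 * Cached copy of an inode's NFSv4 ACL. When 'cached' is set, 'data' holds
 * 'len' bytes of ACL data; otherwise only the length is cached because the
 * ACL was too large to keep (see nfs4_write_cached_acl()).
 */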
5172struct nfs4_cached_acl {
5173 int cached;
5174 size_t len;
5175 char data[0];
5176};
5177
5178static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
5179{
5180 struct nfs_inode *nfsi = NFS_I(inode);
5181
5182 spin_lock(&inode->i_lock);
5183 kfree(nfsi->nfs4_acl);
5184 nfsi->nfs4_acl = acl;
5185 spin_unlock(&inode->i_lock);
5186}
5187
5188static void nfs4_zap_acl_attr(struct inode *inode)
5189{
5190 nfs4_set_cached_acl(inode, NULL);
5191}
5192
5193static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
5194{
5195 struct nfs_inode *nfsi = NFS_I(inode);
5196 struct nfs4_cached_acl *acl;
5197 int ret = -ENOENT;
5198
5199 spin_lock(&inode->i_lock);
5200 acl = nfsi->nfs4_acl;
5201 if (acl == NULL)
5202 goto out;
5203 if (buf == NULL) /* user is just asking for length */
5204 goto out_len;
5205 if (acl->cached == 0)
5206 goto out;
5207 ret = -ERANGE; /* see getxattr(2) man page */
5208 if (acl->len > buflen)
5209 goto out;
5210 memcpy(buf, acl->data, acl->len);
5211out_len:
5212 ret = acl->len;
5213out:
5214 spin_unlock(&inode->i_lock);
5215 return ret;
5216}
5217
5218static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
5219{
5220 struct nfs4_cached_acl *acl;
5221 size_t buflen = sizeof(*acl) + acl_len;
5222
5223 if (buflen <= PAGE_SIZE) {
5224 acl = kmalloc(buflen, GFP_KERNEL);
5225 if (acl == NULL)
5226 goto out;
5227 acl->cached = 1;
5228 _copy_from_pages(acl->data, pages, pgbase, acl_len);
5229 } else {
5230 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
5231 if (acl == NULL)
5232 goto out;
5233 acl->cached = 0;
5234 }
5235 acl->len = acl_len;
5236out:
5237 nfs4_set_cached_acl(inode, acl);
5238}
5239
5240/*
5241 * The getxattr API returns the required buffer length when called with a
5242 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
5243 * the required buf. On a NULL buf, we send a page of data to the server
5244 * guessing that the ACL request can be serviced by a page. If so, we cache
5245 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
5246 * the cache. If not, we throw away the page, and cache the required
5247 * length. The next getxattr call will then produce another round trip to
5248 * the server, this time with the input buf of the required size.
5249 */
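/*
 * For illustration, the userspace side of this exchange (for instance the
 * NFSv4 ACL tools reading the "system.nfs4_acl" xattr) typically looks like:
 *
 *	len = getxattr(path, "system.nfs4_acl", NULL, 0);	probe the length
 *	buf = malloc(len);
 *	getxattr(path, "system.nfs4_acl", buf, len);		fetch the ACL
 */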
5250static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
5251{
5252 struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, };
5253 struct nfs_getaclargs args = {
5254 .fh = NFS_FH(inode),
5255 .acl_pages = pages,
5256 .acl_len = buflen,
5257 };
5258 struct nfs_getaclres res = {
5259 .acl_len = buflen,
5260 };
5261 struct rpc_message msg = {
5262 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
5263 .rpc_argp = &args,
5264 .rpc_resp = &res,
5265 };
5266 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE) + 1;
5267 int ret = -ENOMEM, i;
5268
5269 if (npages > ARRAY_SIZE(pages))
5270 return -ERANGE;
5271
5272 for (i = 0; i < npages; i++) {
5273 pages[i] = alloc_page(GFP_KERNEL);
5274 if (!pages[i])
5275 goto out_free;
5276 }
5277
5278 /* for decoding across pages */
5279 res.acl_scratch = alloc_page(GFP_KERNEL);
5280 if (!res.acl_scratch)
5281 goto out_free;
5282
5283 args.acl_len = npages * PAGE_SIZE;
5284
5285 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
5286 __func__, buf, buflen, npages, args.acl_len);
5287 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
5288 &msg, &args.seq_args, &res.seq_res, 0);
5289 if (ret)
5290 goto out_free;
5291
5292 /* Handle the case where the passed-in buffer is too short */
5293 if (res.acl_flags & NFS4_ACL_TRUNC) {
5294 /* Did the user only issue a request for the acl length? */
5295 if (buf == NULL)
5296 goto out_ok;
5297 ret = -ERANGE;
5298 goto out_free;
5299 }
5300 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
5301 if (buf) {
5302 if (res.acl_len > buflen) {
5303 ret = -ERANGE;
5304 goto out_free;
5305 }
5306 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
5307 }
5308out_ok:
5309 ret = res.acl_len;
5310out_free:
5311 for (i = 0; i < npages; i++)
5312 if (pages[i])
5313 __free_page(pages[i]);
5314 if (res.acl_scratch)
5315 __free_page(res.acl_scratch);
5316 return ret;
5317}
5318
5319static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
5320{
5321 struct nfs4_exception exception = { };
5322 ssize_t ret;
5323 do {
5324 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
5325 trace_nfs4_get_acl(inode, ret);
5326 if (ret >= 0)
5327 break;
5328 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
5329 } while (exception.retry);
5330 return ret;
5331}
5332
5333static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
5334{
5335 struct nfs_server *server = NFS_SERVER(inode);
5336 int ret;
5337
5338 if (!nfs4_server_supports_acls(server))
5339 return -EOPNOTSUPP;
5340 ret = nfs_revalidate_inode(server, inode);
5341 if (ret < 0)
5342 return ret;
5343 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
5344 nfs_zap_acl_cache(inode);
5345 ret = nfs4_read_cached_acl(inode, buf, buflen);
5346 if (ret != -ENOENT)
5347 /* -ENOENT is returned if there is no ACL or if there is an ACL
5348 * but no cached acl data, just the acl length */
5349 return ret;
5350 return nfs4_get_acl_uncached(inode, buf, buflen);
5351}
5352
5353static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
5354{
5355 struct nfs_server *server = NFS_SERVER(inode);
5356 struct page *pages[NFS4ACL_MAXPAGES];
5357 struct nfs_setaclargs arg = {
5358 .fh = NFS_FH(inode),
5359 .acl_pages = pages,
5360 .acl_len = buflen,
5361 };
5362 struct nfs_setaclres res;
5363 struct rpc_message msg = {
5364 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
5365 .rpc_argp = &arg,
5366 .rpc_resp = &res,
5367 };
5368 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
5369 int ret, i;
5370
5371 if (!nfs4_server_supports_acls(server))
5372 return -EOPNOTSUPP;
5373 if (npages > ARRAY_SIZE(pages))
5374 return -ERANGE;
5375 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages);
5376 if (i < 0)
5377 return i;
5378 nfs4_inode_make_writeable(inode);
5379 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5380
5381 /*
5382 * Free each page after tx, so the only ref left is
5383 * held by the network stack
5384 */
5385 for (; i > 0; i--)
5386 put_page(pages[i-1]);
5387
5388 /*
5389	 * An ACL update can result in an inode attribute update,
5390	 * so mark the attribute cache invalid.
5391 */
5392 spin_lock(&inode->i_lock);
5393 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_CHANGE
5394 | NFS_INO_INVALID_CTIME;
5395 spin_unlock(&inode->i_lock);
5396 nfs_access_zap_cache(inode);
5397 nfs_zap_acl_cache(inode);
5398 return ret;
5399}
5400
5401static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
5402{
5403 struct nfs4_exception exception = { };
5404 int err;
5405 do {
5406 err = __nfs4_proc_set_acl(inode, buf, buflen);
5407 trace_nfs4_set_acl(inode, err);
5408 err = nfs4_handle_exception(NFS_SERVER(inode), err,
5409 &exception);
5410 } while (exception.retry);
5411 return err;
5412}
5413
5414#ifdef CONFIG_NFS_V4_SECURITY_LABEL
5415static int _nfs4_get_security_label(struct inode *inode, void *buf,
5416 size_t buflen)
5417{
5418 struct nfs_server *server = NFS_SERVER(inode);
5419 struct nfs_fattr fattr;
5420 struct nfs4_label label = {0, 0, buflen, buf};
5421
5422 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
5423 struct nfs4_getattr_arg arg = {
5424 .fh = NFS_FH(inode),
5425 .bitmask = bitmask,
5426 };
5427 struct nfs4_getattr_res res = {
5428 .fattr = &fattr,
5429 .label = &label,
5430 .server = server,
5431 };
5432 struct rpc_message msg = {
5433 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
5434 .rpc_argp = &arg,
5435 .rpc_resp = &res,
5436 };
5437 int ret;
5438
5439 nfs_fattr_init(&fattr);
5440
5441 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
5442 if (ret)
5443 return ret;
5444 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
5445 return -ENOENT;
5446 if (buflen < label.len)
5447 return -ERANGE;
5448 return 0;
5449}
5450
5451static int nfs4_get_security_label(struct inode *inode, void *buf,
5452 size_t buflen)
5453{
5454 struct nfs4_exception exception = { };
5455 int err;
5456
5457 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
5458 return -EOPNOTSUPP;
5459
5460 do {
5461 err = _nfs4_get_security_label(inode, buf, buflen);
5462 trace_nfs4_get_security_label(inode, err);
5463 err = nfs4_handle_exception(NFS_SERVER(inode), err,
5464 &exception);
5465 } while (exception.retry);
5466 return err;
5467}
5468
5469static int _nfs4_do_set_security_label(struct inode *inode,
5470 struct nfs4_label *ilabel,
5471 struct nfs_fattr *fattr,
5472 struct nfs4_label *olabel)
5473{
5474
5475 struct iattr sattr = {0};
5476 struct nfs_server *server = NFS_SERVER(inode);
5477 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
5478 struct nfs_setattrargs arg = {
5479 .fh = NFS_FH(inode),
5480 .iap = &sattr,
5481 .server = server,
5482 .bitmask = bitmask,
5483 .label = ilabel,
5484 };
5485 struct nfs_setattrres res = {
5486 .fattr = fattr,
5487 .label = olabel,
5488 .server = server,
5489 };
5490 struct rpc_message msg = {
5491 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
5492 .rpc_argp = &arg,
5493 .rpc_resp = &res,
5494 };
5495 int status;
5496
5497 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
5498
5499 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5500 if (status)
5501 dprintk("%s failed: %d\n", __func__, status);
5502
5503 return status;
5504}
5505
5506static int nfs4_do_set_security_label(struct inode *inode,
5507 struct nfs4_label *ilabel,
5508 struct nfs_fattr *fattr,
5509 struct nfs4_label *olabel)
5510{
5511 struct nfs4_exception exception = { };
5512 int err;
5513
5514 do {
5515 err = _nfs4_do_set_security_label(inode, ilabel,
5516 fattr, olabel);
5517 trace_nfs4_set_security_label(inode, err);
5518 err = nfs4_handle_exception(NFS_SERVER(inode), err,
5519 &exception);
5520 } while (exception.retry);
5521 return err;
5522}
5523
5524static int
5525nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
5526{
5527 struct nfs4_label ilabel, *olabel = NULL;
5528 struct nfs_fattr fattr;
5529 struct rpc_cred *cred;
5530 int status;
5531
5532 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
5533 return -EOPNOTSUPP;
5534
5535 nfs_fattr_init(&fattr);
5536
5537 ilabel.pi = 0;
5538 ilabel.lfs = 0;
5539 ilabel.label = (char *)buf;
5540 ilabel.len = buflen;
5541
5542 cred = rpc_lookup_cred();
5543 if (IS_ERR(cred))
5544 return PTR_ERR(cred);
5545
5546 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
5547 if (IS_ERR(olabel)) {
5548		status = PTR_ERR(olabel);
5549 goto out;
5550 }
5551
5552 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel);
5553 if (status == 0)
5554 nfs_setsecurity(inode, &fattr, olabel);
5555
5556 nfs4_label_free(olabel);
5557out:
5558 put_rpccred(cred);
5559 return status;
5560}
5561#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
5562
5563
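/*
 * Fill in the boot verifier sent to the server in SETCLIENTID/EXCHANGE_ID.
 * It is normally derived from the recorded boot time; while client state is
 * being purged, an impossible all-ones value is used instead so that it can
 * never collide with a real boot time.
 */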
5564static void nfs4_init_boot_verifier(const struct nfs_client *clp,
5565 nfs4_verifier *bootverf)
5566{
5567 __be32 verf[2];
5568
5569 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
5570 /* An impossible timestamp guarantees this value
5571 * will never match a generated boot time. */
5572 verf[0] = cpu_to_be32(U32_MAX);
5573 verf[1] = cpu_to_be32(U32_MAX);
5574 } else {
5575 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
5576 u64 ns = ktime_to_ns(nn->boot_time);
5577
5578 verf[0] = cpu_to_be32(ns >> 32);
5579 verf[1] = cpu_to_be32(ns);
5580 }
5581 memcpy(bootverf->data, verf, sizeof(bootverf->data));
5582}
5583
5584static int
5585nfs4_init_nonuniform_client_string(struct nfs_client *clp)
5586{
5587 size_t len;
5588 char *str;
5589
5590 if (clp->cl_owner_id != NULL)
5591 return 0;
5592
5593 rcu_read_lock();
5594 len = 14 + strlen(clp->cl_ipaddr) + 1 +
5595 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
5596 1 +
5597 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) +
5598 1;
5599 rcu_read_unlock();
5600
5601 if (len > NFS4_OPAQUE_LIMIT + 1)
5602 return -EINVAL;
5603
5604 /*
5605 * Since this string is allocated at mount time, and held until the
5606 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5607 * about a memory-reclaim deadlock.
5608 */
5609 str = kmalloc(len, GFP_KERNEL);
5610 if (!str)
5611 return -ENOMEM;
5612
5613 rcu_read_lock();
5614 scnprintf(str, len, "Linux NFSv4.0 %s/%s %s",
5615 clp->cl_ipaddr,
5616 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
5617 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO));
5618 rcu_read_unlock();
5619
5620 clp->cl_owner_id = str;
5621 return 0;
5622}
5623
5624static int
5625nfs4_init_uniquifier_client_string(struct nfs_client *clp)
5626{
5627 size_t len;
5628 char *str;
5629
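	/* "Linux NFSv" + 10-digit major + '.' + 10-digit minor + ' ' + uniquifier + '/' + nodename + '\0' */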
5630 len = 10 + 10 + 1 + 10 + 1 +
5631 strlen(nfs4_client_id_uniquifier) + 1 +
5632 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5633
5634 if (len > NFS4_OPAQUE_LIMIT + 1)
5635 return -EINVAL;
5636
5637 /*
5638 * Since this string is allocated at mount time, and held until the
5639 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5640 * about a memory-reclaim deadlock.
5641 */
5642 str = kmalloc(len, GFP_KERNEL);
5643 if (!str)
5644 return -ENOMEM;
5645
5646 scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
5647 clp->rpc_ops->version, clp->cl_minorversion,
5648 nfs4_client_id_uniquifier,
5649 clp->cl_rpcclient->cl_nodename);
5650 clp->cl_owner_id = str;
5651 return 0;
5652}
5653
5654static int
5655nfs4_init_uniform_client_string(struct nfs_client *clp)
5656{
5657 size_t len;
5658 char *str;
5659
5660 if (clp->cl_owner_id != NULL)
5661 return 0;
5662
5663 if (nfs4_client_id_uniquifier[0] != '\0')
5664 return nfs4_init_uniquifier_client_string(clp);
5665
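	/* "Linux NFSv" + 10-digit major + '.' + 10-digit minor + ' ' + nodename + '\0' */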
5666 len = 10 + 10 + 1 + 10 + 1 +
5667 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5668
5669 if (len > NFS4_OPAQUE_LIMIT + 1)
5670 return -EINVAL;
5671
5672 /*
5673 * Since this string is allocated at mount time, and held until the
5674 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5675 * about a memory-reclaim deadlock.
5676 */
5677 str = kmalloc(len, GFP_KERNEL);
5678 if (!str)
5679 return -ENOMEM;
5680
5681 scnprintf(str, len, "Linux NFSv%u.%u %s",
5682 clp->rpc_ops->version, clp->cl_minorversion,
5683 clp->cl_rpcclient->cl_nodename);
5684 clp->cl_owner_id = str;
5685 return 0;
5686}
5687
5688/*
5689 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
5690 * services. Advertise one based on the address family of the
5691 * clientaddr.
5692 */
5693static unsigned int
5694nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
5695{
5696 if (strchr(clp->cl_ipaddr, ':') != NULL)
5697 return scnprintf(buf, len, "tcp6");
5698 else
5699 return scnprintf(buf, len, "tcp");
5700}
5701
5702static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
5703{
5704 struct nfs4_setclientid *sc = calldata;
5705
5706 if (task->tk_status == 0)
5707 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
5708}
5709
5710static const struct rpc_call_ops nfs4_setclientid_ops = {
5711 .rpc_call_done = nfs4_setclientid_done,
5712};
5713
5714/**
5715 * nfs4_proc_setclientid - Negotiate client ID
5716 * @clp: state data structure
5717 * @program: RPC program for NFSv4 callback service
5718 * @port: IP port number for NFSv4 callback service
5719 * @cred: RPC credential to use for this call
5720 * @res: where to place the result
5721 *
5722 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5723 */
5724int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
5725 unsigned short port, struct rpc_cred *cred,
5726 struct nfs4_setclientid_res *res)
5727{
5728 nfs4_verifier sc_verifier;
5729 struct nfs4_setclientid setclientid = {
5730 .sc_verifier = &sc_verifier,
5731 .sc_prog = program,
5732 .sc_clnt = clp,
5733 };
5734 struct rpc_message msg = {
5735 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
5736 .rpc_argp = &setclientid,
5737 .rpc_resp = res,
5738 .rpc_cred = cred,
5739 };
5740 struct rpc_task *task;
5741 struct rpc_task_setup task_setup_data = {
5742 .rpc_client = clp->cl_rpcclient,
5743 .rpc_message = &msg,
5744 .callback_ops = &nfs4_setclientid_ops,
5745 .callback_data = &setclientid,
5746 .flags = RPC_TASK_TIMEOUT,
5747 };
5748 int status;
5749
5750 /* nfs_client_id4 */
5751 nfs4_init_boot_verifier(clp, &sc_verifier);
5752
5753 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
5754 status = nfs4_init_uniform_client_string(clp);
5755 else
5756 status = nfs4_init_nonuniform_client_string(clp);
5757
5758 if (status)
5759 goto out;
5760
5761 /* cb_client4 */
5762 setclientid.sc_netid_len =
5763 nfs4_init_callback_netid(clp,
5764 setclientid.sc_netid,
5765 sizeof(setclientid.sc_netid));
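	/* Universal address: clientaddr followed by the callback port, high octet then low octet */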
5766 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
5767 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
5768 clp->cl_ipaddr, port >> 8, port & 255);
5769
5770 dprintk("NFS call setclientid auth=%s, '%s'\n",
5771 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5772 clp->cl_owner_id);
5773 task = rpc_run_task(&task_setup_data);
5774 if (IS_ERR(task)) {
5775 status = PTR_ERR(task);
5776 goto out;
5777 }
5778 status = task->tk_status;
5779 if (setclientid.sc_cred) {
5780 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
5781 put_rpccred(setclientid.sc_cred);
5782 }
5783 rpc_put_task(task);
5784out:
5785 trace_nfs4_setclientid(clp, status);
5786 dprintk("NFS reply setclientid: %d\n", status);
5787 return status;
5788}
5789
5790/**
5791 * nfs4_proc_setclientid_confirm - Confirm client ID
5792 * @clp: state data structure
5793 * @arg: result of a previous SETCLIENTID
5794 * @cred: RPC credential to use for this call
5795 *
5796 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5797 */
5798int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
5799 struct nfs4_setclientid_res *arg,
5800 struct rpc_cred *cred)
5801{
5802 struct rpc_message msg = {
5803 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
5804 .rpc_argp = arg,
5805 .rpc_cred = cred,
5806 };
5807 int status;
5808
5809 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
5810 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5811 clp->cl_clientid);
5812 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5813 trace_nfs4_setclientid_confirm(clp, status);
5814 dprintk("NFS reply setclientid_confirm: %d\n", status);
5815 return status;
5816}
5817
5818struct nfs4_delegreturndata {
5819 struct nfs4_delegreturnargs args;
5820 struct nfs4_delegreturnres res;
5821 struct nfs_fh fh;
5822 nfs4_stateid stateid;
5823 unsigned long timestamp;
5824 struct {
5825 struct nfs4_layoutreturn_args arg;
5826 struct nfs4_layoutreturn_res res;
5827 struct nfs4_xdr_opaque_data ld_private;
5828 u32 roc_barrier;
5829 bool roc;
5830 } lr;
5831 struct nfs_fattr fattr;
5832 int rpc_status;
5833 struct inode *inode;
5834};
5835
5836static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
5837{
5838 struct nfs4_delegreturndata *data = calldata;
5839 struct nfs4_exception exception = {
5840 .inode = data->inode,
5841 .stateid = &data->stateid,
5842 };
5843
5844 if (!nfs4_sequence_done(task, &data->res.seq_res))
5845 return;
5846
5847 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
5848
5849 /* Handle Layoutreturn errors */
5850 if (data->args.lr_args && task->tk_status != 0) {
5851		switch (data->res.lr_ret) {
5852 default:
5853 data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
5854 break;
5855 case 0:
5856 data->args.lr_args = NULL;
5857 data->res.lr_res = NULL;
5858 break;
5859 case -NFS4ERR_OLD_STATEID:
5860 if (nfs4_refresh_layout_stateid(&data->args.lr_args->stateid,
5861 data->inode))
5862 goto lr_restart;
5863 /* Fallthrough */
5864 case -NFS4ERR_ADMIN_REVOKED:
5865 case -NFS4ERR_DELEG_REVOKED:
5866 case -NFS4ERR_EXPIRED:
5867 case -NFS4ERR_BAD_STATEID:
5868 case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
5869 case -NFS4ERR_WRONG_CRED:
5870 data->args.lr_args = NULL;
5871 data->res.lr_res = NULL;
5872 goto lr_restart;
5873 }
5874 }
5875
5876 switch (task->tk_status) {
5877 case 0:
5878 renew_lease(data->res.server, data->timestamp);
5879 break;
5880 case -NFS4ERR_ADMIN_REVOKED:
5881 case -NFS4ERR_DELEG_REVOKED:
5882 case -NFS4ERR_EXPIRED:
5883 nfs4_free_revoked_stateid(data->res.server,
5884 data->args.stateid,
5885 task->tk_msg.rpc_cred);
5886 /* Fallthrough */
5887 case -NFS4ERR_BAD_STATEID:
5888 case -NFS4ERR_STALE_STATEID:
5889 task->tk_status = 0;
5890 break;
5891 case -NFS4ERR_OLD_STATEID:
5892 if (nfs4_refresh_delegation_stateid(&data->stateid, data->inode))
5893 goto out_restart;
5894 task->tk_status = 0;
5895 break;
5896 case -NFS4ERR_ACCESS:
5897 if (data->args.bitmask) {
5898 data->args.bitmask = NULL;
5899 data->res.fattr = NULL;
5900 goto out_restart;
5901 }
5902 /* Fallthrough */
5903 default:
5904 task->tk_status = nfs4_async_handle_exception(task,
5905 data->res.server, task->tk_status,
5906 &exception);
5907 if (exception.retry)
5908 goto out_restart;
5909 }
5910 data->rpc_status = task->tk_status;
5911 return;
5912lr_restart:
5913 data->res.lr_ret = 0;
5914out_restart:
5915 task->tk_status = 0;
5916 rpc_restart_call_prepare(task);
5917}
5918
5919static void nfs4_delegreturn_release(void *calldata)
5920{
5921 struct nfs4_delegreturndata *data = calldata;
5922 struct inode *inode = data->inode;
5923
5924 if (inode) {
5925 if (data->lr.roc)
5926 pnfs_roc_release(&data->lr.arg, &data->lr.res,
5927 data->res.lr_ret);
5928 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
5929 nfs_iput_and_deactive(inode);
5930 }
5931 kfree(calldata);
5932}
5933
5934static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
5935{
5936 struct nfs4_delegreturndata *d_data;
5937
5938 d_data = (struct nfs4_delegreturndata *)data;
5939
5940 if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task))
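	/* Defer the DELEGRETURN until any layoutreturn in progress
	 * has completed, unless we are issuing one in this compound.
	 */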
5941 return;
5942
5943 nfs4_setup_sequence(d_data->res.server->nfs_client,
5944 &d_data->args.seq_args,
5945 &d_data->res.seq_res,
5946 task);
5947}
5948
5949static const struct rpc_call_ops nfs4_delegreturn_ops = {
5950 .rpc_call_prepare = nfs4_delegreturn_prepare,
5951 .rpc_call_done = nfs4_delegreturn_done,
5952 .rpc_release = nfs4_delegreturn_release,
5953};
5954
5955static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5956{
5957 struct nfs4_delegreturndata *data;
5958 struct nfs_server *server = NFS_SERVER(inode);
5959 struct rpc_task *task;
5960 struct rpc_message msg = {
5961 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
5962 .rpc_cred = cred,
5963 };
5964 struct rpc_task_setup task_setup_data = {
5965 .rpc_client = server->client,
5966 .rpc_message = &msg,
5967 .callback_ops = &nfs4_delegreturn_ops,
5968 .flags = RPC_TASK_ASYNC,
5969 };
5970 int status = 0;
5971
5972 data = kzalloc(sizeof(*data), GFP_NOFS);
5973 if (data == NULL)
5974 return -ENOMEM;
5975 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
5976
5977 nfs4_state_protect(server->nfs_client,
5978 NFS_SP4_MACH_CRED_CLEANUP,
5979 &task_setup_data.rpc_client, &msg);
5980
5981 data->args.fhandle = &data->fh;
5982 data->args.stateid = &data->stateid;
5983 data->args.bitmask = server->cache_consistency_bitmask;
5984 nfs_copy_fh(&data->fh, NFS_FH(inode));
5985 nfs4_stateid_copy(&data->stateid, stateid);
5986 data->res.fattr = &data->fattr;
5987 data->res.server = server;
5988 data->res.lr_ret = -NFS4ERR_NOMATCHING_LAYOUT;
5989 data->lr.arg.ld_private = &data->lr.ld_private;
5990 nfs_fattr_init(data->res.fattr);
5991 data->timestamp = jiffies;
5992 data->rpc_status = 0;
5993 data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, cred);
5994 data->inode = nfs_igrab_and_active(inode);
5995 if (data->inode) {
5996 if (data->lr.roc) {
5997 data->args.lr_args = &data->lr.arg;
5998 data->res.lr_res = &data->lr.res;
5999 }
6000 } else if (data->lr.roc) {
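		/* Could not pin the inode; give back the layoutreturn-on-close reference */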
6001 pnfs_roc_release(&data->lr.arg, &data->lr.res, 0);
6002 data->lr.roc = false;
6003 }
6004
6005 task_setup_data.callback_data = data;
6006 msg.rpc_argp = &data->args;
6007 msg.rpc_resp = &data->res;
6008 task = rpc_run_task(&task_setup_data);
6009 if (IS_ERR(task))
6010 return PTR_ERR(task);
6011 if (!issync)
6012 goto out;
6013 status = rpc_wait_for_completion_task(task);
6014 if (status != 0)
6015 goto out;
6016 status = data->rpc_status;
6017out:
6018 rpc_put_task(task);
6019 return status;
6020}
6021
6022int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
6023{
6024 struct nfs_server *server = NFS_SERVER(inode);
6025 struct nfs4_exception exception = { };
6026 int err;
6027 do {
6028 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
6029 trace_nfs4_delegreturn(inode, stateid, err);
6030 switch (err) {
6031 case -NFS4ERR_STALE_STATEID:
6032 case -NFS4ERR_EXPIRED:
6033 case 0:
6034 return 0;
6035 }
6036 err = nfs4_handle_exception(server, err, &exception);
6037 } while (exception.retry);
6038 return err;
6039}
6040
6041static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6042{
6043 struct inode *inode = state->inode;
6044 struct nfs_server *server = NFS_SERVER(inode);
6045 struct nfs_client *clp = server->nfs_client;
6046 struct nfs_lockt_args arg = {
6047 .fh = NFS_FH(inode),
6048 .fl = request,
6049 };
6050 struct nfs_lockt_res res = {
6051 .denied = request,
6052 };
6053 struct rpc_message msg = {
6054 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
6055 .rpc_argp = &arg,
6056 .rpc_resp = &res,
6057 .rpc_cred = state->owner->so_cred,
6058 };
6059 struct nfs4_lock_state *lsp;
6060 int status;
6061
6062 arg.lock_owner.clientid = clp->cl_clientid;
6063 status = nfs4_set_lock_state(state, request);
6064 if (status != 0)
6065 goto out;
6066 lsp = request->fl_u.nfs4_fl.owner;
6067 arg.lock_owner.id = lsp->ls_seqid.owner_id;
6068 arg.lock_owner.s_dev = server->s_dev;
6069 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
6070 switch (status) {
6071 case 0:
6072 request->fl_type = F_UNLCK;
6073 break;
6074 case -NFS4ERR_DENIED:
6075 status = 0;
6076 }
6077 request->fl_ops->fl_release_private(request);
6078 request->fl_ops = NULL;
6079out:
6080 return status;
6081}
6082
6083static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6084{
6085 struct nfs4_exception exception = { };
6086 int err;
6087
6088 do {
6089 err = _nfs4_proc_getlk(state, cmd, request);
6090 trace_nfs4_get_lock(request, state, cmd, err);
6091 err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
6092 &exception);
6093 } while (exception.retry);
6094 return err;
6095}
6096
6097struct nfs4_unlockdata {
6098 struct nfs_locku_args arg;
6099 struct nfs_locku_res res;
6100 struct nfs4_lock_state *lsp;
6101 struct nfs_open_context *ctx;
6102 struct nfs_lock_context *l_ctx;
6103 struct file_lock fl;
6104 struct nfs_server *server;
6105 unsigned long timestamp;
6106};
6107
6108static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
6109 struct nfs_open_context *ctx,
6110 struct nfs4_lock_state *lsp,
6111 struct nfs_seqid *seqid)
6112{
6113 struct nfs4_unlockdata *p;
6114 struct inode *inode = lsp->ls_state->inode;
6115
6116 p = kzalloc(sizeof(*p), GFP_NOFS);
6117 if (p == NULL)
6118 return NULL;
6119 p->arg.fh = NFS_FH(inode);
6120 p->arg.fl = &p->fl;
6121 p->arg.seqid = seqid;
6122 p->res.seqid = seqid;
6123 p->lsp = lsp;
6124 refcount_inc(&lsp->ls_count);
6125 /* Ensure we don't close file until we're done freeing locks! */
6126 p->ctx = get_nfs_open_context(ctx);
6127 p->l_ctx = nfs_get_lock_context(ctx);
6128 memcpy(&p->fl, fl, sizeof(p->fl));
6129 p->server = NFS_SERVER(inode);
6130 return p;
6131}
6132
6133static void nfs4_locku_release_calldata(void *data)
6134{
6135 struct nfs4_unlockdata *calldata = data;
6136 nfs_free_seqid(calldata->arg.seqid);
6137 nfs4_put_lock_state(calldata->lsp);
6138 nfs_put_lock_context(calldata->l_ctx);
6139 put_nfs_open_context(calldata->ctx);
6140 kfree(calldata);
6141}
6142
6143static void nfs4_locku_done(struct rpc_task *task, void *data)
6144{
6145 struct nfs4_unlockdata *calldata = data;
6146 struct nfs4_exception exception = {
6147 .inode = calldata->lsp->ls_state->inode,
6148 .stateid = &calldata->arg.stateid,
6149 };
6150
6151 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
6152 return;
6153 switch (task->tk_status) {
6154 case 0:
6155 renew_lease(calldata->server, calldata->timestamp);
6156 locks_lock_inode_wait(calldata->lsp->ls_state->inode, &calldata->fl);
6157 if (nfs4_update_lock_stateid(calldata->lsp,
6158 &calldata->res.stateid))
6159 break;
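			/* Fallthrough */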
6160 case -NFS4ERR_ADMIN_REVOKED:
6161 case -NFS4ERR_EXPIRED:
6162 nfs4_free_revoked_stateid(calldata->server,
6163 &calldata->arg.stateid,
6164 task->tk_msg.rpc_cred);
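			/* Fallthrough */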
6165 case -NFS4ERR_BAD_STATEID:
6166 case -NFS4ERR_OLD_STATEID:
6167 case -NFS4ERR_STALE_STATEID:
6168 if (!nfs4_stateid_match(&calldata->arg.stateid,
6169 &calldata->lsp->ls_stateid))
6170 rpc_restart_call_prepare(task);
6171 break;
6172 default:
6173 task->tk_status = nfs4_async_handle_exception(task,
6174 calldata->server, task->tk_status,
6175 &exception);
6176 if (exception.retry)
6177 rpc_restart_call_prepare(task);
6178 }
6179 nfs_release_seqid(calldata->arg.seqid);
6180}
6181
6182static void nfs4_locku_prepare(struct rpc_task *task, void *data)
6183{
6184 struct nfs4_unlockdata *calldata = data;
6185
6186 if (test_bit(NFS_CONTEXT_UNLOCK, &calldata->l_ctx->open_context->flags) &&
6187 nfs_async_iocounter_wait(task, calldata->l_ctx))
6188 return;
6189
6190 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
6191 goto out_wait;
6192 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid);
6193 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
6194 /* Note: exit _without_ running nfs4_locku_done */
6195 goto out_no_action;
6196 }
6197 calldata->timestamp = jiffies;
6198 if (nfs4_setup_sequence(calldata->server->nfs_client,
6199 &calldata->arg.seq_args,
6200 &calldata->res.seq_res,
6201 task) != 0)
6202 nfs_release_seqid(calldata->arg.seqid);
6203 return;
6204out_no_action:
6205 task->tk_action = NULL;
6206out_wait:
6207 nfs4_sequence_done(task, &calldata->res.seq_res);
6208}
6209
6210static const struct rpc_call_ops nfs4_locku_ops = {
6211 .rpc_call_prepare = nfs4_locku_prepare,
6212 .rpc_call_done = nfs4_locku_done,
6213 .rpc_release = nfs4_locku_release_calldata,
6214};
6215
6216static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
6217 struct nfs_open_context *ctx,
6218 struct nfs4_lock_state *lsp,
6219 struct nfs_seqid *seqid)
6220{
6221 struct nfs4_unlockdata *data;
6222 struct rpc_message msg = {
6223 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
6224 .rpc_cred = ctx->cred,
6225 };
6226 struct rpc_task_setup task_setup_data = {
6227 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
6228 .rpc_message = &msg,
6229 .callback_ops = &nfs4_locku_ops,
6230 .workqueue = nfsiod_workqueue,
6231 .flags = RPC_TASK_ASYNC,
6232 };
6233
6234 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
6235 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
6236
6237 /* Ensure this is an unlock - when canceling a lock, the
6238 * canceled lock is passed in, and it won't be an unlock.
6239 */
6240 fl->fl_type = F_UNLCK;
6241 if (fl->fl_flags & FL_CLOSE)
6242 set_bit(NFS_CONTEXT_UNLOCK, &ctx->flags);
6243
6244 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
6245 if (data == NULL) {
6246 nfs_free_seqid(seqid);
6247 return ERR_PTR(-ENOMEM);
6248 }
6249
6250 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
6251 msg.rpc_argp = &data->arg;
6252 msg.rpc_resp = &data->res;
6253 task_setup_data.callback_data = data;
6254 return rpc_run_task(&task_setup_data);
6255}
6256
6257static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
6258{
6259 struct inode *inode = state->inode;
6260 struct nfs4_state_owner *sp = state->owner;
6261 struct nfs_inode *nfsi = NFS_I(inode);
6262 struct nfs_seqid *seqid;
6263 struct nfs4_lock_state *lsp;
6264 struct rpc_task *task;
6265 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
6266 int status = 0;
6267 unsigned char fl_flags = request->fl_flags;
6268
6269 status = nfs4_set_lock_state(state, request);
6270 /* Unlock _before_ we do the RPC call */
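	/* FL_EXISTS makes the VFS return -ENOENT if no matching lock was held */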
6271 request->fl_flags |= FL_EXISTS;
6272 /* Exclude nfs_delegation_claim_locks() */
6273 mutex_lock(&sp->so_delegreturn_mutex);
6274 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
6275 down_read(&nfsi->rwsem);
6276 if (locks_lock_inode_wait(inode, request) == -ENOENT) {
6277 up_read(&nfsi->rwsem);
6278 mutex_unlock(&sp->so_delegreturn_mutex);
6279 goto out;
6280 }
6281 up_read(&nfsi->rwsem);
6282 mutex_unlock(&sp->so_delegreturn_mutex);
6283 if (status != 0)
6284 goto out;
6285 /* Is this a delegated lock? */
6286 lsp = request->fl_u.nfs4_fl.owner;
6287 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
6288 goto out;
6289 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
6290 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
6291 status = -ENOMEM;
6292 if (IS_ERR(seqid))
6293 goto out;
6294 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
6295 status = PTR_ERR(task);
6296 if (IS_ERR(task))
6297 goto out;
6298 status = rpc_wait_for_completion_task(task);
6299 rpc_put_task(task);
6300out:
6301 request->fl_flags = fl_flags;
6302 trace_nfs4_unlock(request, state, F_SETLK, status);
6303 return status;
6304}
6305
6306struct nfs4_lockdata {
6307 struct nfs_lock_args arg;
6308 struct nfs_lock_res res;
6309 struct nfs4_lock_state *lsp;
6310 struct nfs_open_context *ctx;
6311 struct file_lock fl;
6312 unsigned long timestamp;
6313 int rpc_status;
6314 int cancelled;
6315 struct nfs_server *server;
6316};
6317
6318static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
6319 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
6320 gfp_t gfp_mask)
6321{
6322 struct nfs4_lockdata *p;
6323 struct inode *inode = lsp->ls_state->inode;
6324 struct nfs_server *server = NFS_SERVER(inode);
6325 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
6326
6327 p = kzalloc(sizeof(*p), gfp_mask);
6328 if (p == NULL)
6329 return NULL;
6330
6331 p->arg.fh = NFS_FH(inode);
6332 p->arg.fl = &p->fl;
6333 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
6334 if (IS_ERR(p->arg.open_seqid))
6335 goto out_free;
6336 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
6337 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
6338 if (IS_ERR(p->arg.lock_seqid))
6339 goto out_free_seqid;
6340 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
6341 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
6342 p->arg.lock_owner.s_dev = server->s_dev;
6343 p->res.lock_seqid = p->arg.lock_seqid;
6344 p->lsp = lsp;
6345 p->server = server;
6346 refcount_inc(&lsp->ls_count);
6347 p->ctx = get_nfs_open_context(ctx);
6348 memcpy(&p->fl, fl, sizeof(p->fl));
6349 return p;
6350out_free_seqid:
6351 nfs_free_seqid(p->arg.open_seqid);
6352out_free:
6353 kfree(p);
6354 return NULL;
6355}
6356
6357static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
6358{
6359 struct nfs4_lockdata *data = calldata;
6360 struct nfs4_state *state = data->lsp->ls_state;
6361
6362 dprintk("%s: begin!\n", __func__);
6363 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
6364 goto out_wait;
6365 /* Do we need to do an open_to_lock_owner? */
6366 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
6367 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
6368 goto out_release_lock_seqid;
6369 }
6370 nfs4_stateid_copy(&data->arg.open_stateid,
6371 &state->open_stateid);
6372 data->arg.new_lock_owner = 1;
6373 data->res.open_seqid = data->arg.open_seqid;
6374 } else {
6375 data->arg.new_lock_owner = 0;
6376 nfs4_stateid_copy(&data->arg.lock_stateid,
6377 &data->lsp->ls_stateid);
6378 }
6379 if (!nfs4_valid_open_stateid(state)) {
6380 data->rpc_status = -EBADF;
6381 task->tk_action = NULL;
6382 goto out_release_open_seqid;
6383 }
6384 data->timestamp = jiffies;
6385 if (nfs4_setup_sequence(data->server->nfs_client,
6386 &data->arg.seq_args,
6387 &data->res.seq_res,
6388 task) == 0)
6389 return;
6390out_release_open_seqid:
6391 nfs_release_seqid(data->arg.open_seqid);
6392out_release_lock_seqid:
6393 nfs_release_seqid(data->arg.lock_seqid);
6394out_wait:
6395 nfs4_sequence_done(task, &data->res.seq_res);
6396 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
6397}
6398
6399static void nfs4_lock_done(struct rpc_task *task, void *calldata)
6400{
6401 struct nfs4_lockdata *data = calldata;
6402 struct nfs4_lock_state *lsp = data->lsp;
6403
6404 dprintk("%s: begin!\n", __func__);
6405
6406 if (!nfs4_sequence_done(task, &data->res.seq_res))
6407 return;
6408
6409 data->rpc_status = task->tk_status;
6410 switch (task->tk_status) {
6411 case 0:
6412 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
6413 data->timestamp);
6414 if (data->arg.new_lock) {
6415 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
6416 if (locks_lock_inode_wait(lsp->ls_state->inode, &data->fl) < 0) {
6417 rpc_restart_call_prepare(task);
6418 break;
6419 }
6420 }
6421 if (data->arg.new_lock_owner != 0) {
6422 nfs_confirm_seqid(&lsp->ls_seqid, 0);
6423 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
6424 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
6425 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
6426 rpc_restart_call_prepare(task);
6427 break;
6428 case -NFS4ERR_BAD_STATEID:
6429 case -NFS4ERR_OLD_STATEID:
6430 case -NFS4ERR_STALE_STATEID:
6431 case -NFS4ERR_EXPIRED:
6432 if (data->arg.new_lock_owner != 0) {
6433 if (!nfs4_stateid_match(&data->arg.open_stateid,
6434 &lsp->ls_state->open_stateid))
6435 rpc_restart_call_prepare(task);
6436 } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
6437 &lsp->ls_stateid))
6438 rpc_restart_call_prepare(task);
6439 }
6440 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
6441}
6442
6443static void nfs4_lock_release(void *calldata)
6444{
6445 struct nfs4_lockdata *data = calldata;
6446
6447 dprintk("%s: begin!\n", __func__);
6448 nfs_free_seqid(data->arg.open_seqid);
6449 if (data->cancelled) {
6450 struct rpc_task *task;
6451 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
6452 data->arg.lock_seqid);
6453 if (!IS_ERR(task))
6454 rpc_put_task_async(task);
6455 dprintk("%s: cancelling lock!\n", __func__);
6456 } else
6457 nfs_free_seqid(data->arg.lock_seqid);
6458 nfs4_put_lock_state(data->lsp);
6459 put_nfs_open_context(data->ctx);
6460 kfree(data);
6461 dprintk("%s: done!\n", __func__);
6462}
6463
6464static const struct rpc_call_ops nfs4_lock_ops = {
6465 .rpc_call_prepare = nfs4_lock_prepare,
6466 .rpc_call_done = nfs4_lock_done,
6467 .rpc_release = nfs4_lock_release,
6468};
6469
6470static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
6471{
6472 switch (error) {
6473 case -NFS4ERR_ADMIN_REVOKED:
6474 case -NFS4ERR_EXPIRED:
6475 case -NFS4ERR_BAD_STATEID:
6476 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
6477 if (new_lock_owner != 0 ||
6478 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
6479 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
6480 break;
6481 case -NFS4ERR_STALE_STATEID:
6482 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
6483 nfs4_schedule_lease_recovery(server->nfs_client);
6484	}
6485}
6486
6487static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
6488{
6489 struct nfs4_lockdata *data;
6490 struct rpc_task *task;
6491 struct rpc_message msg = {
6492 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
6493 .rpc_cred = state->owner->so_cred,
6494 };
6495 struct rpc_task_setup task_setup_data = {
6496 .rpc_client = NFS_CLIENT(state->inode),
6497 .rpc_message = &msg,
6498 .callback_ops = &nfs4_lock_ops,
6499 .workqueue = nfsiod_workqueue,
6500 .flags = RPC_TASK_ASYNC,
6501 };
6502 int ret;
6503
6504 dprintk("%s: begin!\n", __func__);
6505 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
6506 fl->fl_u.nfs4_fl.owner,
6507 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
6508 if (data == NULL)
6509 return -ENOMEM;
6510 if (IS_SETLKW(cmd))
6511 data->arg.block = 1;
6512 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
6513 msg.rpc_argp = &data->arg;
6514 msg.rpc_resp = &data->res;
6515 task_setup_data.callback_data = data;
6516 if (recovery_type > NFS_LOCK_NEW) {
6517 if (recovery_type == NFS_LOCK_RECLAIM)
6518 data->arg.reclaim = NFS_LOCK_RECLAIM;
6519 nfs4_set_sequence_privileged(&data->arg.seq_args);
6520 } else
6521 data->arg.new_lock = 1;
6522 task = rpc_run_task(&task_setup_data);
6523 if (IS_ERR(task))
6524 return PTR_ERR(task);
6525 ret = rpc_wait_for_completion_task(task);
6526 if (ret == 0) {
6527 ret = data->rpc_status;
6528 if (ret)
6529 nfs4_handle_setlk_error(data->server, data->lsp,
6530 data->arg.new_lock_owner, ret);
6531 } else
6532 data->cancelled = true;
6533 rpc_put_task(task);
6534 dprintk("%s: done, ret = %d!\n", __func__, ret);
6535 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
6536 return ret;
6537}
6538
6539static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
6540{
6541 struct nfs_server *server = NFS_SERVER(state->inode);
6542 struct nfs4_exception exception = {
6543 .inode = state->inode,
6544 };
6545 int err;
6546
6547 do {
6548 /* Cache the lock if possible... */
6549 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
6550 return 0;
6551 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
6552 if (err != -NFS4ERR_DELAY)
6553 break;
6554 nfs4_handle_exception(server, err, &exception);
6555 } while (exception.retry);
6556 return err;
6557}
6558
6559static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
6560{
6561 struct nfs_server *server = NFS_SERVER(state->inode);
6562 struct nfs4_exception exception = {
6563 .inode = state->inode,
6564 };
6565 int err;
6566
6567 err = nfs4_set_lock_state(state, request);
6568 if (err != 0)
6569 return err;
6570 if (!recover_lost_locks) {
6571 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
6572 return 0;
6573 }
6574 do {
6575 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
6576 return 0;
6577 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
6578 switch (err) {
6579 default:
6580 goto out;
6581 case -NFS4ERR_GRACE:
6582 case -NFS4ERR_DELAY:
6583 nfs4_handle_exception(server, err, &exception);
6584 err = 0;
6585 }
6586 } while (exception.retry);
6587out:
6588 return err;
6589}
6590
6591#if defined(CONFIG_NFS_V4_1)
6592static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
6593{
6594 struct nfs4_lock_state *lsp;
6595 int status;
6596
6597 status = nfs4_set_lock_state(state, request);
6598 if (status != 0)
6599 return status;
6600 lsp = request->fl_u.nfs4_fl.owner;
6601 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) ||
6602 test_bit(NFS_LOCK_LOST, &lsp->ls_flags))
6603 return 0;
6604 return nfs4_lock_expired(state, request);
6605}
6606#endif
6607
6608static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6609{
6610 struct nfs_inode *nfsi = NFS_I(state->inode);
6611 struct nfs4_state_owner *sp = state->owner;
6612 unsigned char fl_flags = request->fl_flags;
6613 int status;
6614
6615 request->fl_flags |= FL_ACCESS;
6616 status = locks_lock_inode_wait(state->inode, request);
6617 if (status < 0)
6618 goto out;
6619 mutex_lock(&sp->so_delegreturn_mutex);
6620 down_read(&nfsi->rwsem);
6621 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
6622 /* Yes: cache locks! */
6623 /* ...but avoid races with delegation recall... */
6624 request->fl_flags = fl_flags & ~FL_SLEEP;
6625 status = locks_lock_inode_wait(state->inode, request);
6626 up_read(&nfsi->rwsem);
6627 mutex_unlock(&sp->so_delegreturn_mutex);
6628 goto out;
6629 }
6630 up_read(&nfsi->rwsem);
6631 mutex_unlock(&sp->so_delegreturn_mutex);
6632 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
6633out:
6634 request->fl_flags = fl_flags;
6635 return status;
6636}
6637
6638static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6639{
6640 struct nfs4_exception exception = {
6641 .state = state,
6642 .inode = state->inode,
6643 };
6644 int err;
6645
6646 do {
6647 err = _nfs4_proc_setlk(state, cmd, request);
6648 if (err == -NFS4ERR_DENIED)
6649 err = -EAGAIN;
6650 err = nfs4_handle_exception(NFS_SERVER(state->inode),
6651 err, &exception);
6652 } while (exception.retry);
6653 return err;
6654}
6655
6656#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
6657#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
6658
6659static int
6660nfs4_retry_setlk_simple(struct nfs4_state *state, int cmd,
6661 struct file_lock *request)
6662{
6663 int status = -ERESTARTSYS;
6664 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
6665
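	/* Poll for the lock, backing off exponentially up to NFS4_LOCK_MAXTIMEOUT */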
6666	while (!signalled()) {
6667 status = nfs4_proc_setlk(state, cmd, request);
6668 if ((status != -EAGAIN) || IS_SETLK(cmd))
6669 break;
6670 freezable_schedule_timeout_interruptible(timeout);
6671 timeout *= 2;
6672 timeout = min_t(unsigned long, NFS4_LOCK_MAXTIMEOUT, timeout);
6673 status = -ERESTARTSYS;
6674 }
6675 return status;
6676}
6677
6678#ifdef CONFIG_NFS_V4_1
6679struct nfs4_lock_waiter {
6680 struct task_struct *task;
6681 struct inode *inode;
6682 struct nfs_lowner *owner;
6683 bool notified;
6684};
6685
6686static int
6687nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key)
6688{
6689 int ret;
6690 struct nfs4_lock_waiter *waiter = wait->private;
6691
6692 /* NULL key means to wake up everyone */
6693 if (key) {
6694 struct cb_notify_lock_args *cbnl = key;
6695 struct nfs_lowner *lowner = &cbnl->cbnl_owner,
6696 *wowner = waiter->owner;
6697
6698 /* Only wake if the callback was for the same owner. */
6699 if (lowner->id != wowner->id || lowner->s_dev != wowner->s_dev)
6700 return 0;
6701
6702 /* Make sure it's for the right inode */
6703 if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
6704 return 0;
6705
6706 waiter->notified = true;
6707 }
6708
6709 /* override "private" so we can use default_wake_function */
6710 wait->private = waiter->task;
6711 ret = autoremove_wake_function(wait, mode, flags, key);
6712 wait->private = waiter;
6713 return ret;
6714}
6715
6716static int
6717nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6718{
6719 int status = -ERESTARTSYS;
6720 unsigned long flags;
6721 struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
6722 struct nfs_server *server = NFS_SERVER(state->inode);
6723 struct nfs_client *clp = server->nfs_client;
6724 wait_queue_head_t *q = &clp->cl_lock_waitq;
6725 struct nfs_lowner owner = { .clientid = clp->cl_clientid,
6726 .id = lsp->ls_seqid.owner_id,
6727 .s_dev = server->s_dev };
6728 struct nfs4_lock_waiter waiter = { .task = current,
6729 .inode = state->inode,
6730 .owner = &owner,
6731 .notified = false };
6732 wait_queue_entry_t wait;
6733
6734 /* Don't bother with waitqueue if we don't expect a callback */
6735 if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags))
6736 return nfs4_retry_setlk_simple(state, cmd, request);
6737
6738 init_wait(&wait);
6739 wait.private = &waiter;
6740 wait.func = nfs4_wake_lock_waiter;
6741 add_wait_queue(q, &wait);
6742
6743	while (!signalled()) {
6744 waiter.notified = false;
6745 status = nfs4_proc_setlk(state, cmd, request);
6746 if ((status != -EAGAIN) || IS_SETLK(cmd))
6747 break;
6748
6749 status = -ERESTARTSYS;
6750 spin_lock_irqsave(&q->lock, flags);
6751 if (waiter.notified) {
6752 spin_unlock_irqrestore(&q->lock, flags);
6753 continue;
6754 }
6755 set_current_state(TASK_INTERRUPTIBLE);
6756 spin_unlock_irqrestore(&q->lock, flags);
6757
6758 freezable_schedule_timeout(NFS4_LOCK_MAXTIMEOUT);
6759 }
6760
6761 finish_wait(q, &wait);
6762 return status;
6763}
6764#else /* !CONFIG_NFS_V4_1 */
6765static inline int
6766nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6767{
6768 return nfs4_retry_setlk_simple(state, cmd, request);
6769}
6770#endif
6771
6772static int
6773nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
6774{
6775 struct nfs_open_context *ctx;
6776 struct nfs4_state *state;
6777 int status;
6778
6779 /* verify open state */
6780 ctx = nfs_file_open_context(filp);
6781 state = ctx->state;
6782
6783 if (IS_GETLK(cmd)) {
6784 if (state != NULL)
6785 return nfs4_proc_getlk(state, F_GETLK, request);
6786 return 0;
6787 }
6788
6789 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
6790 return -EINVAL;
6791
6792 if (request->fl_type == F_UNLCK) {
6793 if (state != NULL)
6794 return nfs4_proc_unlck(state, cmd, request);
6795 return 0;
6796 }
6797
6798 if (state == NULL)
6799 return -ENOLCK;
6800
6801 if ((request->fl_flags & FL_POSIX) &&
6802 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
6803 return -ENOLCK;
6804
6805 /*
6806 * Don't rely on the VFS having checked the file open mode,
6807 * since it won't do this for flock() locks.
6808 */
6809 switch (request->fl_type) {
6810 case F_RDLCK:
6811 if (!(filp->f_mode & FMODE_READ))
6812 return -EBADF;
6813 break;
6814 case F_WRLCK:
6815 if (!(filp->f_mode & FMODE_WRITE))
6816 return -EBADF;
6817 }
6818
6819 status = nfs4_set_lock_state(state, request);
6820 if (status != 0)
6821 return status;
6822
6823 return nfs4_retry_setlk(state, cmd, request);
6824}
6825
6826int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
6827{
6828 struct nfs_server *server = NFS_SERVER(state->inode);
6829 int err;
6830
6831 err = nfs4_set_lock_state(state, fl);
6832 if (err != 0)
6833 return err;
6834 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
6835 return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
6836}
6837
6838struct nfs_release_lockowner_data {
6839 struct nfs4_lock_state *lsp;
6840 struct nfs_server *server;
6841 struct nfs_release_lockowner_args args;
6842 struct nfs_release_lockowner_res res;
6843 unsigned long timestamp;
6844};
6845
6846static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
6847{
6848 struct nfs_release_lockowner_data *data = calldata;
6849 struct nfs_server *server = data->server;
6850 nfs4_setup_sequence(server->nfs_client, &data->args.seq_args,
6851 &data->res.seq_res, task);
6852 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6853 data->timestamp = jiffies;
6854}
6855
6856static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
6857{
6858 struct nfs_release_lockowner_data *data = calldata;
6859 struct nfs_server *server = data->server;
6860
6861 nfs40_sequence_done(task, &data->res.seq_res);
6862
6863 switch (task->tk_status) {
6864 case 0:
6865 renew_lease(server, data->timestamp);
6866 break;
6867 case -NFS4ERR_STALE_CLIENTID:
6868 case -NFS4ERR_EXPIRED:
6869 nfs4_schedule_lease_recovery(server->nfs_client);
6870 break;
6871 case -NFS4ERR_LEASE_MOVED:
6872 case -NFS4ERR_DELAY:
6873 if (nfs4_async_handle_error(task, server,
6874 NULL, NULL) == -EAGAIN)
6875 rpc_restart_call_prepare(task);
6876 }
6877}
6878
6879static void nfs4_release_lockowner_release(void *calldata)
6880{
6881 struct nfs_release_lockowner_data *data = calldata;
6882 nfs4_free_lock_state(data->server, data->lsp);
6883 kfree(calldata);
6884}
6885
6886static const struct rpc_call_ops nfs4_release_lockowner_ops = {
6887 .rpc_call_prepare = nfs4_release_lockowner_prepare,
6888 .rpc_call_done = nfs4_release_lockowner_done,
6889 .rpc_release = nfs4_release_lockowner_release,
6890};
6891
6892static void
6893nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
6894{
6895 struct nfs_release_lockowner_data *data;
6896 struct rpc_message msg = {
6897 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
6898 };
6899
6900 if (server->nfs_client->cl_mvops->minor_version != 0)
6901 return;
6902
6903 data = kmalloc(sizeof(*data), GFP_NOFS);
6904 if (!data)
6905 return;
6906 data->lsp = lsp;
6907 data->server = server;
6908 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6909 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
6910 data->args.lock_owner.s_dev = server->s_dev;
6911
6912 msg.rpc_argp = &data->args;
6913 msg.rpc_resp = &data->res;
6914 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
6915 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
6916}
6917
6918#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
6919
6920static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
6921 struct dentry *unused, struct inode *inode,
6922 const char *key, const void *buf,
6923 size_t buflen, int flags)
6924{
6925 return nfs4_proc_set_acl(inode, buf, buflen);
6926}
6927
6928static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
6929 struct dentry *unused, struct inode *inode,
6930 const char *key, void *buf, size_t buflen)
6931{
6932 return nfs4_proc_get_acl(inode, buf, buflen);
6933}
6934
6935static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
6936{
6937 return nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry)));
6938}
6939
6940#ifdef CONFIG_NFS_V4_SECURITY_LABEL
6941
6942static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
6943 struct dentry *unused, struct inode *inode,
6944 const char *key, const void *buf,
6945 size_t buflen, int flags)
6946{
6947 if (security_ismaclabel(key))
6948 return nfs4_set_security_label(inode, buf, buflen);
6949
6950 return -EOPNOTSUPP;
6951}
6952
6953static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler,
6954 struct dentry *unused, struct inode *inode,
6955 const char *key, void *buf, size_t buflen)
6956{
6957 if (security_ismaclabel(key))
6958 return nfs4_get_security_label(inode, buf, buflen);
6959 return -EOPNOTSUPP;
6960}
6961
6962static ssize_t
6963nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
6964{
6965 int len = 0;
6966
6967 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) {
6968 len = security_inode_listsecurity(inode, list, list_len);
6969 if (list_len && len > list_len)
6970 return -ERANGE;
6971 }
6972 return len;
6973}
6974
6975static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
6976 .prefix = XATTR_SECURITY_PREFIX,
6977 .get = nfs4_xattr_get_nfs4_label,
6978 .set = nfs4_xattr_set_nfs4_label,
6979};
6980
6981#else
6982
6983static ssize_t
6984nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
6985{
6986 return 0;
6987}
6988
6989#endif
6990
6991/*
6992 * nfs_fhget will use either the mounted_on_fileid or the fileid
6993 */
6994static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
6995{
6996 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
6997 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
6998 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
6999 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
7000 return;
7001
7002 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
7003 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
7004 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
7005 fattr->nlink = 2;
7006}
7007
7008static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
7009 const struct qstr *name,
7010 struct nfs4_fs_locations *fs_locations,
7011 struct page *page)
7012{
7013 struct nfs_server *server = NFS_SERVER(dir);
7014 u32 bitmask[3];
7015 struct nfs4_fs_locations_arg args = {
7016 .dir_fh = NFS_FH(dir),
7017 .name = name,
7018 .page = page,
7019 .bitmask = bitmask,
7020 };
7021 struct nfs4_fs_locations_res res = {
7022 .fs_locations = fs_locations,
7023 };
7024 struct rpc_message msg = {
7025 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
7026 .rpc_argp = &args,
7027 .rpc_resp = &res,
7028 };
7029 int status;
7030
7031 dprintk("%s: start\n", __func__);
7032
7033 bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
7034 bitmask[1] = nfs4_fattr_bitmap[1];
7035
7036 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
7037 * is not supported */
7038 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
7039 bitmask[0] &= ~FATTR4_WORD0_FILEID;
7040 else
7041 bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
7042
7043 nfs_fattr_init(&fs_locations->fattr);
7044 fs_locations->server = server;
7045 fs_locations->nlocations = 0;
7046 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
7047 dprintk("%s: returned status = %d\n", __func__, status);
7048 return status;
7049}
7050
7051int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
7052 const struct qstr *name,
7053 struct nfs4_fs_locations *fs_locations,
7054 struct page *page)
7055{
7056 struct nfs4_exception exception = { };
7057 int err;
7058 do {
7059 err = _nfs4_proc_fs_locations(client, dir, name,
7060 fs_locations, page);
7061 trace_nfs4_get_fs_locations(dir, name, err);
7062 err = nfs4_handle_exception(NFS_SERVER(dir), err,
7063 &exception);
7064 } while (exception.retry);
7065 return err;
7066}
7067
7068/*
7069 * This operation also signals the server that this client is
7070 * performing migration recovery. The server can stop returning
7071 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
7072 * appended to this compound to identify the client ID which is
7073 * performing recovery.
7074 */
7075static int _nfs40_proc_get_locations(struct inode *inode,
7076 struct nfs4_fs_locations *locations,
7077 struct page *page, struct rpc_cred *cred)
7078{
7079 struct nfs_server *server = NFS_SERVER(inode);
7080 struct rpc_clnt *clnt = server->client;
7081 u32 bitmask[2] = {
7082 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
7083 };
7084 struct nfs4_fs_locations_arg args = {
7085 .clientid = server->nfs_client->cl_clientid,
7086 .fh = NFS_FH(inode),
7087 .page = page,
7088 .bitmask = bitmask,
7089 .migration = 1, /* skip LOOKUP */
7090 .renew = 1, /* append RENEW */
7091 };
7092 struct nfs4_fs_locations_res res = {
7093 .fs_locations = locations,
7094 .migration = 1,
7095 .renew = 1,
7096 };
7097 struct rpc_message msg = {
7098 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
7099 .rpc_argp = &args,
7100 .rpc_resp = &res,
7101 .rpc_cred = cred,
7102 };
7103 unsigned long now = jiffies;
7104 int status;
7105
7106 nfs_fattr_init(&locations->fattr);
7107 locations->server = server;
7108 locations->nlocations = 0;
7109
7110 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
7111 nfs4_set_sequence_privileged(&args.seq_args);
7112 status = nfs4_call_sync_sequence(clnt, server, &msg,
7113 &args.seq_args, &res.seq_res);
7114 if (status)
7115 return status;
7116
7117 renew_lease(server, now);
7118 return 0;
7119}
7120
7121#ifdef CONFIG_NFS_V4_1
7122
7123/*
7124 * This operation also signals the server that this client is
7125 * performing migration recovery. The server can stop asserting
7126 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
7127 * performing this operation is identified in the SEQUENCE
7128 * operation in this compound.
7129 *
7130 * When the client supports GETATTR(fs_locations_info), it can
7131 * be plumbed in here.
7132 */
7133static int _nfs41_proc_get_locations(struct inode *inode,
7134 struct nfs4_fs_locations *locations,
7135 struct page *page, struct rpc_cred *cred)
7136{
7137 struct nfs_server *server = NFS_SERVER(inode);
7138 struct rpc_clnt *clnt = server->client;
7139 u32 bitmask[2] = {
7140 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
7141 };
7142 struct nfs4_fs_locations_arg args = {
7143 .fh = NFS_FH(inode),
7144 .page = page,
7145 .bitmask = bitmask,
7146 .migration = 1, /* skip LOOKUP */
7147 };
7148 struct nfs4_fs_locations_res res = {
7149 .fs_locations = locations,
7150 .migration = 1,
7151 };
7152 struct rpc_message msg = {
7153 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
7154 .rpc_argp = &args,
7155 .rpc_resp = &res,
7156 .rpc_cred = cred,
7157 };
7158 int status;
7159
7160 nfs_fattr_init(&locations->fattr);
7161 locations->server = server;
7162 locations->nlocations = 0;
7163
7164 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
7165 nfs4_set_sequence_privileged(&args.seq_args);
7166 status = nfs4_call_sync_sequence(clnt, server, &msg,
7167 &args.seq_args, &res.seq_res);
7168 if (status == NFS4_OK &&
7169 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
7170 status = -NFS4ERR_LEASE_MOVED;
7171 return status;
7172}
7173
7174#endif /* CONFIG_NFS_V4_1 */
7175
7176/**
7177 * nfs4_proc_get_locations - discover locations for a migrated FSID
7178 * @inode: inode on FSID that is migrating
7179 * @locations: result of query
7180 * @page: buffer
7181 * @cred: credential to use for this operation
7182 *
7183 * Returns NFS4_OK on success, a negative NFS4ERR status code if the
7184 * operation failed, or a negative errno if a local error occurred.
7185 *
7186 * On success, "locations" is filled in, but if the server has
7187 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
7188 * asserted.
7189 *
7190 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
7191 * from this client that require migration recovery.
7192 */
7193int nfs4_proc_get_locations(struct inode *inode,
7194 struct nfs4_fs_locations *locations,
7195 struct page *page, struct rpc_cred *cred)
7196{
7197 struct nfs_server *server = NFS_SERVER(inode);
7198 struct nfs_client *clp = server->nfs_client;
7199 const struct nfs4_mig_recovery_ops *ops =
7200 clp->cl_mvops->mig_recovery_ops;
7201 struct nfs4_exception exception = { };
7202 int status;
7203
7204 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
7205 (unsigned long long)server->fsid.major,
7206 (unsigned long long)server->fsid.minor,
7207 clp->cl_hostname);
7208 nfs_display_fhandle(NFS_FH(inode), __func__);
7209
7210 do {
7211 status = ops->get_locations(inode, locations, page, cred);
7212 if (status != -NFS4ERR_DELAY)
7213 break;
7214 nfs4_handle_exception(server, status, &exception);
7215 } while (exception.retry);
7216 return status;
7217}
7218
7219/*
7220 * This operation also signals the server that this client is
7221 * performing "lease moved" recovery. The server can stop
7222 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
7223 * is appended to this compound to identify the client ID which is
7224 * performing recovery.
7225 */
7226static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
7227{
7228 struct nfs_server *server = NFS_SERVER(inode);
7229 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
7230 struct rpc_clnt *clnt = server->client;
7231 struct nfs4_fsid_present_arg args = {
7232 .fh = NFS_FH(inode),
7233 .clientid = clp->cl_clientid,
7234 .renew = 1, /* append RENEW */
7235 };
7236 struct nfs4_fsid_present_res res = {
7237 .renew = 1,
7238 };
7239 struct rpc_message msg = {
7240 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
7241 .rpc_argp = &args,
7242 .rpc_resp = &res,
7243 .rpc_cred = cred,
7244 };
7245 unsigned long now = jiffies;
7246 int status;
7247
7248 res.fh = nfs_alloc_fhandle();
7249 if (res.fh == NULL)
7250 return -ENOMEM;
7251
7252 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
7253 nfs4_set_sequence_privileged(&args.seq_args);
7254 status = nfs4_call_sync_sequence(clnt, server, &msg,
7255 &args.seq_args, &res.seq_res);
7256 nfs_free_fhandle(res.fh);
7257 if (status)
7258 return status;
7259
7260 do_renew_lease(clp, now);
7261 return 0;
7262}
7263
7264#ifdef CONFIG_NFS_V4_1
7265
7266/*
7267 * This operation also signals the server that this client is
7268 * performing "lease moved" recovery. The server can stop asserting
7269 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
7270 * this operation is identified in the SEQUENCE operation in this
7271 * compound.
7272 */
7273static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
7274{
7275 struct nfs_server *server = NFS_SERVER(inode);
7276 struct rpc_clnt *clnt = server->client;
7277 struct nfs4_fsid_present_arg args = {
7278 .fh = NFS_FH(inode),
7279 };
7280 struct nfs4_fsid_present_res res = {
7281 };
7282 struct rpc_message msg = {
7283 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
7284 .rpc_argp = &args,
7285 .rpc_resp = &res,
7286 .rpc_cred = cred,
7287 };
7288 int status;
7289
7290 res.fh = nfs_alloc_fhandle();
7291 if (res.fh == NULL)
7292 return -ENOMEM;
7293
7294 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
7295 nfs4_set_sequence_privileged(&args.seq_args);
7296 status = nfs4_call_sync_sequence(clnt, server, &msg,
7297 &args.seq_args, &res.seq_res);
7298 nfs_free_fhandle(res.fh);
7299 if (status == NFS4_OK &&
7300 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
7301 status = -NFS4ERR_LEASE_MOVED;
7302 return status;
7303}
7304
7305#endif /* CONFIG_NFS_V4_1 */
7306
7307/**
7308 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
7309 * @inode: inode on FSID to check
7310 * @cred: credential to use for this operation
7311 *
7312 * Server indicates whether the FSID is present, moved, or not
7313 * recognized. This operation is necessary to clear a LEASE_MOVED
7314 * condition for this client ID.
7315 *
7316 * Returns NFS4_OK if the FSID is present on this server,
7317 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
7318 * NFS4ERR code if some error occurred on the server, or a
7319 * negative errno if a local failure occurred.
7320 */
7321int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
7322{
7323 struct nfs_server *server = NFS_SERVER(inode);
7324 struct nfs_client *clp = server->nfs_client;
7325 const struct nfs4_mig_recovery_ops *ops =
7326 clp->cl_mvops->mig_recovery_ops;
7327 struct nfs4_exception exception = { };
7328 int status;
7329
7330 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
7331 (unsigned long long)server->fsid.major,
7332 (unsigned long long)server->fsid.minor,
7333 clp->cl_hostname);
7334 nfs_display_fhandle(NFS_FH(inode), __func__);
7335
7336 do {
7337 status = ops->fsid_present(inode, cred);
7338 if (status != -NFS4ERR_DELAY)
7339 break;
7340 nfs4_handle_exception(server, status, &exception);
7341 } while (exception.retry);
7342 return status;
7343}
7344
7345/*
7346 * If 'use_integrity' is true and the state management nfs_client
7347 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
7348 * and the machine credential as per RFC3530bis and RFC5661 Security
7349 * Considerations sections. Otherwise, just use the user cred with the
7350 * filesystem's rpc_client.
7351 */
7352static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
7353{
7354 int status;
7355 struct nfs4_secinfo_arg args = {
7356 .dir_fh = NFS_FH(dir),
7357 .name = name,
7358 };
7359 struct nfs4_secinfo_res res = {
7360 .flavors = flavors,
7361 };
7362 struct rpc_message msg = {
7363 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
7364 .rpc_argp = &args,
7365 .rpc_resp = &res,
7366 };
7367 struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
7368 struct rpc_cred *cred = NULL;
7369
7370 if (use_integrity) {
7371 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient;
7372 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client);
7373 msg.rpc_cred = cred;
7374 }
7375
7376 dprintk("NFS call secinfo %s\n", name->name);
7377
7378 nfs4_state_protect(NFS_SERVER(dir)->nfs_client,
7379 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
7380
7381 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args,
7382 &res.seq_res, 0);
7383 dprintk("NFS reply secinfo: %d\n", status);
7384
7385 if (cred)
7386 put_rpccred(cred);
7387
7388 return status;
7389}
7390
7391int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
7392 struct nfs4_secinfo_flavors *flavors)
7393{
7394 struct nfs4_exception exception = { };
7395 int err;
7396 do {
7397 err = -NFS4ERR_WRONGSEC;
7398
7399 /* try to use integrity protection with machine cred */
7400 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
7401 err = _nfs4_proc_secinfo(dir, name, flavors, true);
7402
7403 /*
7404 * if unable to use integrity protection, or SECINFO with
7405 * integrity protection returns NFS4ERR_WRONGSEC (which is
7406 * disallowed by spec, but exists in deployed servers) use
7407 * the current filesystem's rpc_client and the user cred.
7408 */
7409 if (err == -NFS4ERR_WRONGSEC)
7410 err = _nfs4_proc_secinfo(dir, name, flavors, false);
7411
7412 trace_nfs4_secinfo(dir, name, err);
7413 err = nfs4_handle_exception(NFS_SERVER(dir), err,
7414 &exception);
7415 } while (exception.retry);
7416 return err;
7417}
7418
7419#ifdef CONFIG_NFS_V4_1
7420/*
 * Check the exchange flags returned by the server for invalid flags: flags
 * outside the valid mask, both the PNFS and NON_PNFS flags set, or none of
 * the NON_PNFS, PNFS, or DS flags set.
7424 */
7425static int nfs4_check_cl_exchange_flags(u32 flags)
7426{
7427 if (flags & ~EXCHGID4_FLAG_MASK_R)
7428 goto out_inval;
7429 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
7430 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
7431 goto out_inval;
7432 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
7433 goto out_inval;
7434 return NFS_OK;
7435out_inval:
7436 return -NFS4ERR_INVAL;
7437}
7438
7439static bool
7440nfs41_same_server_scope(struct nfs41_server_scope *a,
7441 struct nfs41_server_scope *b)
7442{
7443 if (a->server_scope_sz != b->server_scope_sz)
7444 return false;
7445 return memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0;
7446}
7447
7448static void
7449nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
7450{
7451}
7452
7453static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
7454 .rpc_call_done = &nfs4_bind_one_conn_to_session_done,
7455};
7456
7457/*
7458 * nfs4_proc_bind_one_conn_to_session()
7459 *
7460 * The 4.1 client currently uses the same TCP connection for the
7461 * fore and backchannel.
7462 */
7463static
7464int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt,
7465 struct rpc_xprt *xprt,
7466 struct nfs_client *clp,
7467 struct rpc_cred *cred)
7468{
7469 int status;
7470 struct nfs41_bind_conn_to_session_args args = {
7471 .client = clp,
7472 .dir = NFS4_CDFC4_FORE_OR_BOTH,
7473 };
7474 struct nfs41_bind_conn_to_session_res res;
7475 struct rpc_message msg = {
7476 .rpc_proc =
7477 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
7478 .rpc_argp = &args,
7479 .rpc_resp = &res,
7480 .rpc_cred = cred,
7481 };
7482 struct rpc_task_setup task_setup_data = {
7483 .rpc_client = clnt,
7484 .rpc_xprt = xprt,
7485 .callback_ops = &nfs4_bind_one_conn_to_session_ops,
7486 .rpc_message = &msg,
7487 .flags = RPC_TASK_TIMEOUT,
7488 };
7489 struct rpc_task *task;
7490
7491 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
7492 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
7493 args.dir = NFS4_CDFC4_FORE;
7494
7495 /* Do not set the backchannel flag unless this is clnt->cl_xprt */
7496 if (xprt != rcu_access_pointer(clnt->cl_xprt))
7497 args.dir = NFS4_CDFC4_FORE;
7498
7499 task = rpc_run_task(&task_setup_data);
7500 if (!IS_ERR(task)) {
7501 status = task->tk_status;
7502 rpc_put_task(task);
7503 } else
7504 status = PTR_ERR(task);
7505 trace_nfs4_bind_conn_to_session(clp, status);
7506 if (status == 0) {
7507 if (memcmp(res.sessionid.data,
7508 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
7509 dprintk("NFS: %s: Session ID mismatch\n", __func__);
7510 return -EIO;
7511 }
7512 if ((res.dir & args.dir) != res.dir || res.dir == 0) {
7513 dprintk("NFS: %s: Unexpected direction from server\n",
7514 __func__);
7515 return -EIO;
7516 }
7517 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
7518 dprintk("NFS: %s: Server returned RDMA mode = true\n",
7519 __func__);
7520 return -EIO;
7521 }
7522 }
7523
7524 return status;
7525}
7526
7527struct rpc_bind_conn_calldata {
7528 struct nfs_client *clp;
7529 struct rpc_cred *cred;
7530};
7531
7532static int
7533nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt,
7534 struct rpc_xprt *xprt,
7535 void *calldata)
7536{
7537 struct rpc_bind_conn_calldata *p = calldata;
7538
7539 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred);
7540}
7541
7542int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
7543{
7544 struct rpc_bind_conn_calldata data = {
7545 .clp = clp,
7546 .cred = cred,
7547 };
7548 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient,
7549 nfs4_proc_bind_conn_to_session_callback, &data);
7550}
7551
7552/*
7553 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map
7554 * and operations we'd like to see to enable certain features in the allow map
7555 */
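/*
 * Each op map word is a bitmap of NFSv4 operation numbers: words[0] covers
 * operations 0-31 and words[1] covers operations 32-63, hence the
 * "1 << (OP_FOO - 32)" encoding below.  For example, OP_EXCHANGE_ID (42)
 * lands in bit 10 of words[1].
 */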
7556static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
7557 .how = SP4_MACH_CRED,
7558 .enforce.u.words = {
7559 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
7560 1 << (OP_EXCHANGE_ID - 32) |
7561 1 << (OP_CREATE_SESSION - 32) |
7562 1 << (OP_DESTROY_SESSION - 32) |
7563 1 << (OP_DESTROY_CLIENTID - 32)
7564 },
7565 .allow.u.words = {
7566 [0] = 1 << (OP_CLOSE) |
7567 1 << (OP_OPEN_DOWNGRADE) |
7568 1 << (OP_LOCKU) |
7569 1 << (OP_DELEGRETURN) |
7570 1 << (OP_COMMIT),
7571 [1] = 1 << (OP_SECINFO - 32) |
7572 1 << (OP_SECINFO_NO_NAME - 32) |
7573 1 << (OP_LAYOUTRETURN - 32) |
7574 1 << (OP_TEST_STATEID - 32) |
7575 1 << (OP_FREE_STATEID - 32) |
7576 1 << (OP_WRITE - 32)
7577 }
7578};
7579
7580/*
7581 * Select the state protection mode for client `clp' given the server results
7582 * from exchange_id in `sp'.
7583 *
7584 * Returns 0 on success, negative errno otherwise.
7585 */
7586static int nfs4_sp4_select_mode(struct nfs_client *clp,
7587 struct nfs41_state_protection *sp)
7588{
7589 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
7590 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
7591 1 << (OP_EXCHANGE_ID - 32) |
7592 1 << (OP_CREATE_SESSION - 32) |
7593 1 << (OP_DESTROY_SESSION - 32) |
7594 1 << (OP_DESTROY_CLIENTID - 32)
7595 };
7596 unsigned long flags = 0;
7597 unsigned int i;
7598 int ret = 0;
7599
7600 if (sp->how == SP4_MACH_CRED) {
7601 /* Print state protect result */
7602 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
7603 for (i = 0; i <= LAST_NFS4_OP; i++) {
7604 if (test_bit(i, sp->enforce.u.longs))
7605 dfprintk(MOUNT, " enforce op %d\n", i);
7606 if (test_bit(i, sp->allow.u.longs))
7607 dfprintk(MOUNT, " allow op %d\n", i);
7608 }
7609
7610 /* make sure nothing is on enforce list that isn't supported */
7611 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
7612 if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
7613 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
7614 ret = -EINVAL;
7615 goto out;
7616 }
7617 }
7618
7619 /*
7620 * Minimal mode - state operations are allowed to use machine
7621 * credential. Note this already happens by default, so the
7622 * client doesn't have to do anything more than the negotiation.
7623 *
7624 * NOTE: we don't care if EXCHANGE_ID is in the list -
7625 * we're already using the machine cred for exchange_id
7626 * and will never use a different cred.
7627 */
7628 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
7629 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
7630 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
7631 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
7632 dfprintk(MOUNT, "sp4_mach_cred:\n");
7633 dfprintk(MOUNT, " minimal mode enabled\n");
7634 __set_bit(NFS_SP4_MACH_CRED_MINIMAL, &flags);
7635 } else {
7636 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
7637 ret = -EINVAL;
7638 goto out;
7639 }
7640
7641 if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
7642 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) &&
7643 test_bit(OP_DELEGRETURN, sp->allow.u.longs) &&
7644 test_bit(OP_LOCKU, sp->allow.u.longs)) {
7645 dfprintk(MOUNT, " cleanup mode enabled\n");
7646 __set_bit(NFS_SP4_MACH_CRED_CLEANUP, &flags);
7647 }
7648
7649 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) {
7650 dfprintk(MOUNT, " pnfs cleanup mode enabled\n");
7651 __set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP, &flags);
7652 }
7653
7654 if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
7655 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
7656 dfprintk(MOUNT, " secinfo mode enabled\n");
7657 __set_bit(NFS_SP4_MACH_CRED_SECINFO, &flags);
7658 }
7659
7660 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
7661 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
7662 dfprintk(MOUNT, " stateid mode enabled\n");
7663 __set_bit(NFS_SP4_MACH_CRED_STATEID, &flags);
7664 }
7665
7666 if (test_bit(OP_WRITE, sp->allow.u.longs)) {
7667 dfprintk(MOUNT, " write mode enabled\n");
7668 __set_bit(NFS_SP4_MACH_CRED_WRITE, &flags);
7669 }
7670
7671 if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
7672 dfprintk(MOUNT, " commit mode enabled\n");
7673 __set_bit(NFS_SP4_MACH_CRED_COMMIT, &flags);
7674 }
7675 }
7676out:
	clp->cl_sp4_flags = flags;
	return ret;
7679}
7680
7681struct nfs41_exchange_id_data {
7682 struct nfs41_exchange_id_res res;
7683 struct nfs41_exchange_id_args args;
7684};
7685
7686static void nfs4_exchange_id_release(void *data)
7687{
7688 struct nfs41_exchange_id_data *cdata =
7689 (struct nfs41_exchange_id_data *)data;
7690
7691 nfs_put_client(cdata->args.client);
7692 kfree(cdata->res.impl_id);
7693 kfree(cdata->res.server_scope);
7694 kfree(cdata->res.server_owner);
7695 kfree(cdata);
7696}
7697
7698static const struct rpc_call_ops nfs4_exchange_id_call_ops = {
7699 .rpc_release = nfs4_exchange_id_release,
7700};
7701
7702/*
 * nfs4_run_exchange_id()
 *
 * Set up and start an asynchronous EXCHANGE_ID rpc_task.
7706 */
7707static struct rpc_task *
7708nfs4_run_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
7709 u32 sp4_how, struct rpc_xprt *xprt)
7710{
7711 struct rpc_message msg = {
7712 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
7713 .rpc_cred = cred,
7714 };
7715 struct rpc_task_setup task_setup_data = {
7716 .rpc_client = clp->cl_rpcclient,
7717 .callback_ops = &nfs4_exchange_id_call_ops,
7718 .rpc_message = &msg,
7719 .flags = RPC_TASK_TIMEOUT,
7720 };
7721 struct nfs41_exchange_id_data *calldata;
7722 int status;
7723
7724 if (!refcount_inc_not_zero(&clp->cl_count))
7725 return ERR_PTR(-EIO);
7726
7727 status = -ENOMEM;
7728 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7729 if (!calldata)
7730 goto out;
7731
7732 nfs4_init_boot_verifier(clp, &calldata->args.verifier);
7733
7734 status = nfs4_init_uniform_client_string(clp);
7735 if (status)
7736 goto out_calldata;
7737
7738 calldata->res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
7739 GFP_NOFS);
7740 status = -ENOMEM;
7741 if (unlikely(calldata->res.server_owner == NULL))
7742 goto out_calldata;
7743
7744 calldata->res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
7745 GFP_NOFS);
7746 if (unlikely(calldata->res.server_scope == NULL))
7747 goto out_server_owner;
7748
7749 calldata->res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
7750 if (unlikely(calldata->res.impl_id == NULL))
7751 goto out_server_scope;
7752
7753 switch (sp4_how) {
7754 case SP4_NONE:
7755 calldata->args.state_protect.how = SP4_NONE;
7756 break;
7757
7758 case SP4_MACH_CRED:
7759 calldata->args.state_protect = nfs4_sp4_mach_cred_request;
7760 break;
7761
7762 default:
7763 /* unsupported! */
7764 WARN_ON_ONCE(1);
7765 status = -EINVAL;
7766 goto out_impl_id;
7767 }
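	/*
	 * When an explicit transport is supplied we are probing it for
	 * session trunking: reuse the boot verifier saved from the original
	 * EXCHANGE_ID so the probe is not mistaken for a client restart.
	 */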
7768 if (xprt) {
7769 task_setup_data.rpc_xprt = xprt;
7770 task_setup_data.flags |= RPC_TASK_SOFTCONN;
7771 memcpy(calldata->args.verifier.data, clp->cl_confirm.data,
7772 sizeof(calldata->args.verifier.data));
7773 }
7774 calldata->args.client = clp;
7775 calldata->args.flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
7776 EXCHGID4_FLAG_BIND_PRINC_STATEID;
7777#ifdef CONFIG_NFS_V4_1_MIGRATION
7778 calldata->args.flags |= EXCHGID4_FLAG_SUPP_MOVED_MIGR;
7779#endif
7780 msg.rpc_argp = &calldata->args;
7781 msg.rpc_resp = &calldata->res;
7782 task_setup_data.callback_data = calldata;
7783
7784 return rpc_run_task(&task_setup_data);
7785
7786out_impl_id:
7787 kfree(calldata->res.impl_id);
7788out_server_scope:
7789 kfree(calldata->res.server_scope);
7790out_server_owner:
7791 kfree(calldata->res.server_owner);
7792out_calldata:
7793 kfree(calldata);
7794out:
7795 nfs_put_client(clp);
7796 return ERR_PTR(status);
7797}
7798
7799/*
7800 * _nfs4_proc_exchange_id()
7801 *
7802 * Wrapper for EXCHANGE_ID operation.
7803 */
7804static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
7805 u32 sp4_how)
7806{
7807 struct rpc_task *task;
7808 struct nfs41_exchange_id_args *argp;
7809 struct nfs41_exchange_id_res *resp;
7810 int status;
7811
7812 task = nfs4_run_exchange_id(clp, cred, sp4_how, NULL);
7813 if (IS_ERR(task))
7814 return PTR_ERR(task);
7815
7816 argp = task->tk_msg.rpc_argp;
7817 resp = task->tk_msg.rpc_resp;
7818 status = task->tk_status;
7819 if (status != 0)
7820 goto out;
7821
7822 status = nfs4_check_cl_exchange_flags(resp->flags);
7823 if (status != 0)
7824 goto out;
7825
7826 status = nfs4_sp4_select_mode(clp, &resp->state_protect);
7827 if (status != 0)
7828 goto out;
7829
7830 clp->cl_clientid = resp->clientid;
7831 clp->cl_exchange_flags = resp->flags;
7832 clp->cl_seqid = resp->seqid;
7833 /* Client ID is not confirmed */
7834 if (!(resp->flags & EXCHGID4_FLAG_CONFIRMED_R))
7835 clear_bit(NFS4_SESSION_ESTABLISHED,
7836 &clp->cl_session->session_state);
7837
7838 if (clp->cl_serverscope != NULL &&
7839 !nfs41_same_server_scope(clp->cl_serverscope,
7840 resp->server_scope)) {
7841 dprintk("%s: server_scope mismatch detected\n",
7842 __func__);
7843 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
7844 }
7845
7846 swap(clp->cl_serverowner, resp->server_owner);
7847 swap(clp->cl_serverscope, resp->server_scope);
7848 swap(clp->cl_implid, resp->impl_id);
7849
	/* Save the EXCHANGE_ID verifier for later session trunking tests */
7851 memcpy(clp->cl_confirm.data, argp->verifier.data,
7852 sizeof(clp->cl_confirm.data));
7853out:
7854 trace_nfs4_exchange_id(clp, status);
7855 rpc_put_task(task);
7856 return status;
7857}
7858
7859/*
7860 * nfs4_proc_exchange_id()
7861 *
7862 * Returns zero, a negative errno, or a negative NFS4ERR status code.
7863 *
7864 * Since the clientid has expired, all compounds using sessions
7865 * associated with the stale clientid will be returning
7866 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
7867 * be in some phase of session reset.
7868 *
7869 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
7870 */
7871int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
7872{
7873 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
7874 int status;
7875
7876 /* try SP4_MACH_CRED if krb5i/p */
7877 if (authflavor == RPC_AUTH_GSS_KRB5I ||
7878 authflavor == RPC_AUTH_GSS_KRB5P) {
7879 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
7880 if (!status)
7881 return 0;
7882 }
7883
7884 /* try SP4_NONE */
7885 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
7886}
7887
7888/**
 * nfs4_test_session_trunk - test an rpc_xprt for session trunking
7890 *
7891 * This is an add_xprt_test() test function called from
7892 * rpc_clnt_setup_test_and_add_xprt.
7893 *
 * The rpc_xprt_switch is referenced by rpc_clnt_setup_test_and_add_xprt
 * and is dereferenced in nfs4_exchange_id_release.
7896 *
7897 * Upon success, add the new transport to the rpc_clnt
7898 *
7899 * @clnt: struct rpc_clnt to get new transport
7900 * @xprt: the rpc_xprt to test
7901 * @data: call data for _nfs4_proc_exchange_id.
7902 */
7903int nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
7904 void *data)
7905{
7906 struct nfs4_add_xprt_data *adata = (struct nfs4_add_xprt_data *)data;
7907 struct rpc_task *task;
7908 int status;
7909
7910 u32 sp4_how;
7911
7912 dprintk("--> %s try %s\n", __func__,
7913 xprt->address_strings[RPC_DISPLAY_ADDR]);
7914
7915 sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
7916
7917 /* Test connection for session trunking. Async exchange_id call */
7918 task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
7919 if (IS_ERR(task))
7920 return PTR_ERR(task);
7921
7922 status = task->tk_status;
7923 if (status == 0)
7924 status = nfs4_detect_session_trunking(adata->clp,
7925 task->tk_msg.rpc_resp, xprt);
7926
7927 rpc_put_task(task);
7928 return status;
7929}
7930EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
7931
7932static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
7933 struct rpc_cred *cred)
7934{
7935 struct rpc_message msg = {
7936 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
7937 .rpc_argp = clp,
7938 .rpc_cred = cred,
7939 };
7940 int status;
7941
7942 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7943 trace_nfs4_destroy_clientid(clp, status);
7944 if (status)
7945 dprintk("NFS: Got error %d from the server %s on "
7946 "DESTROY_CLIENTID.", status, clp->cl_hostname);
7947 return status;
7948}
7949
7950static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
7951 struct rpc_cred *cred)
7952{
7953 unsigned int loop;
7954 int ret;
7955
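	/*
	 * The server replies NFS4ERR_CLIENTID_BUSY while it still holds
	 * sessions or state for this client ID, and NFS4ERR_DELAY when it
	 * simply wants us to back off; retry a bounded number of times,
	 * sleeping for a second between attempts.
	 */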
7956 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
7957 ret = _nfs4_proc_destroy_clientid(clp, cred);
7958 switch (ret) {
7959 case -NFS4ERR_DELAY:
7960 case -NFS4ERR_CLIENTID_BUSY:
7961 ssleep(1);
7962 break;
7963 default:
7964 return ret;
7965 }
7966 }
7967 return 0;
7968}
7969
7970int nfs4_destroy_clientid(struct nfs_client *clp)
7971{
7972 struct rpc_cred *cred;
7973 int ret = 0;
7974
7975 if (clp->cl_mvops->minor_version < 1)
7976 goto out;
7977 if (clp->cl_exchange_flags == 0)
7978 goto out;
7979 if (clp->cl_preserve_clid)
7980 goto out;
7981 cred = nfs4_get_clid_cred(clp);
7982 ret = nfs4_proc_destroy_clientid(clp, cred);
7983 if (cred)
7984 put_rpccred(cred);
7985 switch (ret) {
7986 case 0:
7987 case -NFS4ERR_STALE_CLIENTID:
7988 clp->cl_exchange_flags = 0;
7989 }
7990out:
7991 return ret;
7992}
7993
7994struct nfs4_get_lease_time_data {
7995 struct nfs4_get_lease_time_args *args;
7996 struct nfs4_get_lease_time_res *res;
7997 struct nfs_client *clp;
7998};
7999
8000static void nfs4_get_lease_time_prepare(struct rpc_task *task,
8001 void *calldata)
8002{
8003 struct nfs4_get_lease_time_data *data =
8004 (struct nfs4_get_lease_time_data *)calldata;
8005
8006 dprintk("--> %s\n", __func__);
	/* just set up the sequence; do not trigger session recovery,
	   since we're invoked from within one */
8009 nfs4_setup_sequence(data->clp,
8010 &data->args->la_seq_args,
8011 &data->res->lr_seq_res,
8012 task);
8013 dprintk("<-- %s\n", __func__);
8014}
8015
8016/*
8017 * Called from nfs4_state_manager thread for session setup, so don't recover
8018 * from sequence operation or clientid errors.
8019 */
8020static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
8021{
8022 struct nfs4_get_lease_time_data *data =
8023 (struct nfs4_get_lease_time_data *)calldata;
8024
8025 dprintk("--> %s\n", __func__);
8026 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
8027 return;
8028 switch (task->tk_status) {
8029 case -NFS4ERR_DELAY:
8030 case -NFS4ERR_GRACE:
8031 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
8032 rpc_delay(task, NFS4_POLL_RETRY_MIN);
8033 task->tk_status = 0;
8034 /* fall through */
8035 case -NFS4ERR_RETRY_UNCACHED_REP:
8036 rpc_restart_call_prepare(task);
8037 return;
8038 }
8039 dprintk("<-- %s\n", __func__);
8040}
8041
8042static const struct rpc_call_ops nfs4_get_lease_time_ops = {
8043 .rpc_call_prepare = nfs4_get_lease_time_prepare,
8044 .rpc_call_done = nfs4_get_lease_time_done,
8045};
8046
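/*
 * Fetch the server's lease time with GET_LEASE_TIME.  The sequence is marked
 * privileged because this is issued by the state manager while the session
 * is still being established.
 */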
8047int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
8048{
8049 struct rpc_task *task;
8050 struct nfs4_get_lease_time_args args;
8051 struct nfs4_get_lease_time_res res = {
8052 .lr_fsinfo = fsinfo,
8053 };
8054 struct nfs4_get_lease_time_data data = {
8055 .args = &args,
8056 .res = &res,
8057 .clp = clp,
8058 };
8059 struct rpc_message msg = {
8060 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
8061 .rpc_argp = &args,
8062 .rpc_resp = &res,
8063 };
8064 struct rpc_task_setup task_setup = {
8065 .rpc_client = clp->cl_rpcclient,
8066 .rpc_message = &msg,
8067 .callback_ops = &nfs4_get_lease_time_ops,
8068 .callback_data = &data,
8069 .flags = RPC_TASK_TIMEOUT,
8070 };
8071 int status;
8072
8073 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
8074 nfs4_set_sequence_privileged(&args.la_seq_args);
8075 task = rpc_run_task(&task_setup);
8076
8077 if (IS_ERR(task))
8078 return PTR_ERR(task);
8079
8080 status = task->tk_status;
8081 rpc_put_task(task);
8082 return status;
8083}
8084
8085/*
 * Initialize the values to be used by the client in CREATE_SESSION.
8087 * If nfs4_init_session set the fore channel request and response sizes,
8088 * use them.
8089 *
8090 * Set the back channel max_resp_sz_cached to zero to force the client to
8091 * always set csa_cachethis to FALSE because the current implementation
8092 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
8093 */
8094static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
8095 struct rpc_clnt *clnt)
8096{
8097 unsigned int max_rqst_sz, max_resp_sz;
8098 unsigned int max_bc_payload = rpc_max_bc_payload(clnt);
8099
8100 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
8101 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
8102
8103 /* Fore channel attributes */
8104 args->fc_attrs.max_rqst_sz = max_rqst_sz;
8105 args->fc_attrs.max_resp_sz = max_resp_sz;
8106 args->fc_attrs.max_ops = NFS4_MAX_OPS;
8107 args->fc_attrs.max_reqs = max_session_slots;
8108
8109 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
8110 "max_ops=%u max_reqs=%u\n",
8111 __func__,
8112 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
8113 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
8114
8115 /* Back channel attributes */
8116 args->bc_attrs.max_rqst_sz = max_bc_payload;
8117 args->bc_attrs.max_resp_sz = max_bc_payload;
8118 args->bc_attrs.max_resp_sz_cached = 0;
8119 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
	args->bc_attrs.max_reqs = max_t(unsigned short, max_session_cb_slots, 1);
8121
8122 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
8123 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
8124 __func__,
8125 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
8126 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
8127 args->bc_attrs.max_reqs);
8128}
8129
8130static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
8131 struct nfs41_create_session_res *res)
8132{
8133 struct nfs4_channel_attrs *sent = &args->fc_attrs;
8134 struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
8135
8136 if (rcvd->max_resp_sz > sent->max_resp_sz)
8137 return -EINVAL;
8138 /*
8139 * Our requested max_ops is the minimum we need; we're not
8140 * prepared to break up compounds into smaller pieces than that.
8141 * So, no point even trying to continue if the server won't
8142 * cooperate:
8143 */
8144 if (rcvd->max_ops < sent->max_ops)
8145 return -EINVAL;
8146 if (rcvd->max_reqs == 0)
8147 return -EINVAL;
8148 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
8149 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
8150 return 0;
8151}
8152
8153static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
8154 struct nfs41_create_session_res *res)
8155{
8156 struct nfs4_channel_attrs *sent = &args->bc_attrs;
8157 struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
8158
8159 if (!(res->flags & SESSION4_BACK_CHAN))
8160 goto out;
8161 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
8162 return -EINVAL;
8163 if (rcvd->max_resp_sz < sent->max_resp_sz)
8164 return -EINVAL;
8165 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
8166 return -EINVAL;
8167 if (rcvd->max_ops > sent->max_ops)
8168 return -EINVAL;
8169 if (rcvd->max_reqs > sent->max_reqs)
8170 return -EINVAL;
8171out:
8172 return 0;
8173}
8174
8175static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
8176 struct nfs41_create_session_res *res)
8177{
8178 int ret;
8179
8180 ret = nfs4_verify_fore_channel_attrs(args, res);
8181 if (ret)
8182 return ret;
8183 return nfs4_verify_back_channel_attrs(args, res);
8184}
8185
8186static void nfs4_update_session(struct nfs4_session *session,
8187 struct nfs41_create_session_res *res)
8188{
8189 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
8190 /* Mark client id and session as being confirmed */
8191 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
8192 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
8193 session->flags = res->flags;
8194 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
8195 if (res->flags & SESSION4_BACK_CHAN)
8196 memcpy(&session->bc_attrs, &res->bc_attrs,
8197 sizeof(session->bc_attrs));
8198}
8199
8200static int _nfs4_proc_create_session(struct nfs_client *clp,
8201 struct rpc_cred *cred)
8202{
8203 struct nfs4_session *session = clp->cl_session;
8204 struct nfs41_create_session_args args = {
8205 .client = clp,
8206 .clientid = clp->cl_clientid,
8207 .seqid = clp->cl_seqid,
8208 .cb_program = NFS4_CALLBACK,
8209 };
8210 struct nfs41_create_session_res res;
8211
8212 struct rpc_message msg = {
8213 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
8214 .rpc_argp = &args,
8215 .rpc_resp = &res,
8216 .rpc_cred = cred,
8217 };
8218 int status;
8219
8220 nfs4_init_channel_attrs(&args, clp->cl_rpcclient);
8221 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
8222
8223 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
8224 trace_nfs4_create_session(clp, status);
8225
8226 switch (status) {
8227 case -NFS4ERR_STALE_CLIENTID:
8228 case -NFS4ERR_DELAY:
8229 case -ETIMEDOUT:
8230 case -EACCES:
8231 case -EAGAIN:
8232 goto out;
	}
8234
8235 clp->cl_seqid++;
8236 if (!status) {
8237 /* Verify the session's negotiated channel_attrs values */
8238 status = nfs4_verify_channel_attrs(&args, &res);
8239 /* Increment the clientid slot sequence id */
8240 if (status)
8241 goto out;
8242 nfs4_update_session(session, &res);
8243 }
8244out:
8245 return status;
8246}
8247
8248/*
8249 * Issues a CREATE_SESSION operation to the server.
8250 * It is the responsibility of the caller to verify the session is
8251 * expired before calling this routine.
8252 */
8253int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
8254{
8255 int status;
8256 unsigned *ptr;
8257 struct nfs4_session *session = clp->cl_session;
8258
8259 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
8260
8261 status = _nfs4_proc_create_session(clp, cred);
8262 if (status)
8263 goto out;
8264
8265 /* Init or reset the session slot tables */
8266 status = nfs4_setup_session_slot_tables(session);
8267 dprintk("slot table setup returned %d\n", status);
8268 if (status)
8269 goto out;
8270
8271 ptr = (unsigned *)&session->sess_id.data[0];
	dprintk("%s client seqid %d sessionid %u:%u:%u:%u\n", __func__,
8273 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
8274out:
8275 dprintk("<-- %s\n", __func__);
8276 return status;
8277}
8278
8279/*
8280 * Issue the over-the-wire RPC DESTROY_SESSION.
8281 * The caller must serialize access to this routine.
8282 */
8283int nfs4_proc_destroy_session(struct nfs4_session *session,
8284 struct rpc_cred *cred)
8285{
8286 struct rpc_message msg = {
8287 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
8288 .rpc_argp = session,
8289 .rpc_cred = cred,
8290 };
8291 int status = 0;
8292
8293 dprintk("--> nfs4_proc_destroy_session\n");
8294
8295 /* session is still being setup */
8296 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
8297 return 0;
8298
8299 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
8300 trace_nfs4_destroy_session(session->clp, status);
8301
8302 if (status)
8303 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
8304 "Session has been destroyed regardless...\n", status);
8305
8306 dprintk("<-- nfs4_proc_destroy_session\n");
8307 return status;
8308}
8309
8310/*
8311 * Renew the cl_session lease.
8312 */
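/*
 * NFSv4.1 has no RENEW operation; a SEQUENCE-only compound issued by the
 * lease renewal machinery keeps the client's lease alive instead.
 */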
8313struct nfs4_sequence_data {
8314 struct nfs_client *clp;
8315 struct nfs4_sequence_args args;
8316 struct nfs4_sequence_res res;
8317};
8318
8319static void nfs41_sequence_release(void *data)
8320{
8321 struct nfs4_sequence_data *calldata = data;
8322 struct nfs_client *clp = calldata->clp;
8323
8324 if (refcount_read(&clp->cl_count) > 1)
8325 nfs4_schedule_state_renewal(clp);
8326 nfs_put_client(clp);
8327 kfree(calldata);
8328}
8329
8330static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
8331{
8332 switch(task->tk_status) {
8333 case -NFS4ERR_DELAY:
8334 rpc_delay(task, NFS4_POLL_RETRY_MAX);
8335 return -EAGAIN;
8336 default:
8337 nfs4_schedule_lease_recovery(clp);
8338 }
8339 return 0;
8340}
8341
8342static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
8343{
8344 struct nfs4_sequence_data *calldata = data;
8345 struct nfs_client *clp = calldata->clp;
8346
8347 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
8348 return;
8349
8350 trace_nfs4_sequence(clp, task->tk_status);
8351 if (task->tk_status < 0) {
8352 dprintk("%s ERROR %d\n", __func__, task->tk_status);
8353 if (refcount_read(&clp->cl_count) == 1)
8354 goto out;
8355
8356 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
8357 rpc_restart_call_prepare(task);
8358 return;
8359 }
8360 }
8361 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
8362out:
8363 dprintk("<-- %s\n", __func__);
8364}
8365
8366static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
8367{
8368 struct nfs4_sequence_data *calldata = data;
8369 struct nfs_client *clp = calldata->clp;
8370 struct nfs4_sequence_args *args;
8371 struct nfs4_sequence_res *res;
8372
8373 args = task->tk_msg.rpc_argp;
8374 res = task->tk_msg.rpc_resp;
8375
8376 nfs4_setup_sequence(clp, args, res, task);
8377}
8378
8379static const struct rpc_call_ops nfs41_sequence_ops = {
8380 .rpc_call_done = nfs41_sequence_call_done,
8381 .rpc_call_prepare = nfs41_sequence_prepare,
8382 .rpc_release = nfs41_sequence_release,
8383};
8384
8385static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
8386 struct rpc_cred *cred,
8387 struct nfs4_slot *slot,
8388 bool is_privileged)
8389{
8390 struct nfs4_sequence_data *calldata;
8391 struct rpc_message msg = {
8392 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
8393 .rpc_cred = cred,
8394 };
8395 struct rpc_task_setup task_setup_data = {
8396 .rpc_client = clp->cl_rpcclient,
8397 .rpc_message = &msg,
8398 .callback_ops = &nfs41_sequence_ops,
8399 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
8400 };
8401 struct rpc_task *ret;
8402
8403 ret = ERR_PTR(-EIO);
8404 if (!refcount_inc_not_zero(&clp->cl_count))
8405 goto out_err;
8406
8407 ret = ERR_PTR(-ENOMEM);
8408 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
8409 if (calldata == NULL)
8410 goto out_put_clp;
8411 nfs4_init_sequence(&calldata->args, &calldata->res, 0);
8412 nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot);
8413 if (is_privileged)
8414 nfs4_set_sequence_privileged(&calldata->args);
8415 msg.rpc_argp = &calldata->args;
8416 msg.rpc_resp = &calldata->res;
8417 calldata->clp = clp;
8418 task_setup_data.callback_data = calldata;
8419
8420 ret = rpc_run_task(&task_setup_data);
8421 if (IS_ERR(ret))
8422 goto out_err;
8423 return ret;
8424out_put_clp:
8425 nfs_put_client(clp);
8426out_err:
8427 nfs41_release_slot(slot);
8428 return ret;
8429}
8430
8431static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
8432{
8433 struct rpc_task *task;
8434 int ret = 0;
8435
8436 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
8437 return -EAGAIN;
8438 task = _nfs41_proc_sequence(clp, cred, NULL, false);
8439 if (IS_ERR(task))
8440 ret = PTR_ERR(task);
8441 else
8442 rpc_put_task_async(task);
8443 dprintk("<-- %s status=%d\n", __func__, ret);
8444 return ret;
8445}
8446
8447static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
8448{
8449 struct rpc_task *task;
8450 int ret;
8451
8452 task = _nfs41_proc_sequence(clp, cred, NULL, true);
8453 if (IS_ERR(task)) {
8454 ret = PTR_ERR(task);
8455 goto out;
8456 }
8457 ret = rpc_wait_for_completion_task(task);
8458 if (!ret)
8459 ret = task->tk_status;
8460 rpc_put_task(task);
8461out:
8462 dprintk("<-- %s status=%d\n", __func__, ret);
8463 return ret;
8464}
8465
8466struct nfs4_reclaim_complete_data {
8467 struct nfs_client *clp;
8468 struct nfs41_reclaim_complete_args arg;
8469 struct nfs41_reclaim_complete_res res;
8470};
8471
8472static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
8473{
8474 struct nfs4_reclaim_complete_data *calldata = data;
8475
8476 nfs4_setup_sequence(calldata->clp,
8477 &calldata->arg.seq_args,
8478 &calldata->res.seq_res,
8479 task);
8480}
8481
8482static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
8483{
8484 switch(task->tk_status) {
8485 case 0:
8486 wake_up_all(&clp->cl_lock_waitq);
8487 /* Fallthrough */
8488 case -NFS4ERR_COMPLETE_ALREADY:
8489 case -NFS4ERR_WRONG_CRED: /* What to do here? */
8490 break;
8491 case -NFS4ERR_DELAY:
8492 rpc_delay(task, NFS4_POLL_RETRY_MAX);
8493 /* fall through */
8494 case -NFS4ERR_RETRY_UNCACHED_REP:
8495 return -EAGAIN;
8496 case -NFS4ERR_BADSESSION:
8497 case -NFS4ERR_DEADSESSION:
8498 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
8499 nfs4_schedule_session_recovery(clp->cl_session,
8500 task->tk_status);
8501 break;
8502 default:
8503 nfs4_schedule_lease_recovery(clp);
8504 }
8505 return 0;
8506}
8507
8508static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
8509{
8510 struct nfs4_reclaim_complete_data *calldata = data;
8511 struct nfs_client *clp = calldata->clp;
8512 struct nfs4_sequence_res *res = &calldata->res.seq_res;
8513
8514 dprintk("--> %s\n", __func__);
8515 if (!nfs41_sequence_done(task, res))
8516 return;
8517
8518 trace_nfs4_reclaim_complete(clp, task->tk_status);
8519 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
8520 rpc_restart_call_prepare(task);
8521 return;
8522 }
8523 dprintk("<-- %s\n", __func__);
8524}
8525
8526static void nfs4_free_reclaim_complete_data(void *data)
8527{
8528 struct nfs4_reclaim_complete_data *calldata = data;
8529
8530 kfree(calldata);
8531}
8532
8533static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
8534 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
8535 .rpc_call_done = nfs4_reclaim_complete_done,
8536 .rpc_release = nfs4_free_reclaim_complete_data,
8537};
8538
8539/*
8540 * Issue a global reclaim complete.
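 * RECLAIM_COMPLETE tells the server that this client has finished reclaiming
 * state after a server restart; arg.one_fs is set to zero below, meaning the
 * reclaim is complete for all filesystems ("global") rather than one FSID.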
8541 */
8542static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
8543 struct rpc_cred *cred)
8544{
8545 struct nfs4_reclaim_complete_data *calldata;
8546 struct rpc_task *task;
8547 struct rpc_message msg = {
8548 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
8549 .rpc_cred = cred,
8550 };
8551 struct rpc_task_setup task_setup_data = {
8552 .rpc_client = clp->cl_rpcclient,
8553 .rpc_message = &msg,
8554 .callback_ops = &nfs4_reclaim_complete_call_ops,
8555 .flags = RPC_TASK_ASYNC,
8556 };
8557 int status = -ENOMEM;
8558
8559 dprintk("--> %s\n", __func__);
8560 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
8561 if (calldata == NULL)
8562 goto out;
8563 calldata->clp = clp;
8564 calldata->arg.one_fs = 0;
8565
8566 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
8567 nfs4_set_sequence_privileged(&calldata->arg.seq_args);
8568 msg.rpc_argp = &calldata->arg;
8569 msg.rpc_resp = &calldata->res;
8570 task_setup_data.callback_data = calldata;
8571 task = rpc_run_task(&task_setup_data);
8572 if (IS_ERR(task)) {
8573 status = PTR_ERR(task);
8574 goto out;
8575 }
8576 status = rpc_wait_for_completion_task(task);
8577 if (status == 0)
8578 status = task->tk_status;
8579 rpc_put_task(task);
8580out:
8581 dprintk("<-- %s status=%d\n", __func__, status);
8582 return status;
8583}
8584
8585static void
8586nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
8587{
8588 struct nfs4_layoutget *lgp = calldata;
8589 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
8590
8591 dprintk("--> %s\n", __func__);
8592 nfs4_setup_sequence(server->nfs_client, &lgp->args.seq_args,
8593 &lgp->res.seq_res, task);
8594 dprintk("<-- %s\n", __func__);
8595}
8596
8597static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
8598{
8599 struct nfs4_layoutget *lgp = calldata;
8600
8601 dprintk("--> %s\n", __func__);
8602 nfs41_sequence_process(task, &lgp->res.seq_res);
8603 dprintk("<-- %s\n", __func__);
8604}
8605
8606static int
8607nfs4_layoutget_handle_exception(struct rpc_task *task,
8608 struct nfs4_layoutget *lgp, struct nfs4_exception *exception)
8609{
8610 struct inode *inode = lgp->args.inode;
8611 struct nfs_server *server = NFS_SERVER(inode);
8612 struct pnfs_layout_hdr *lo;
8613 int nfs4err = task->tk_status;
8614 int err, status = 0;
8615 LIST_HEAD(head);
8616
8617 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
8618
8619 switch (nfs4err) {
8620 case 0:
8621 goto out;
8622
8623 /*
8624 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs
	 * on the file. Set tk_status to -ENODATA to tell the upper layer to
	 * retry the I/O in-band through the MDS.
8627 */
8628 case -NFS4ERR_LAYOUTUNAVAILABLE:
8629 status = -ENODATA;
8630 goto out;
8631 /*
8632 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of
8633 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3).
8634 */
8635 case -NFS4ERR_BADLAYOUT:
8636 status = -EOVERFLOW;
8637 goto out;
8638 /*
8639 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
8640 * (or clients) writing to the same RAID stripe except when
8641 * the minlength argument is 0 (see RFC5661 section 18.43.3).
8642 *
8643 * Treat it like we would RECALLCONFLICT -- we retry for a little
8644 * while, and then eventually give up.
8645 */
8646 case -NFS4ERR_LAYOUTTRYLATER:
8647 if (lgp->args.minlength == 0) {
8648 status = -EOVERFLOW;
8649 goto out;
8650 }
8651 status = -EBUSY;
8652 break;
8653 case -NFS4ERR_RECALLCONFLICT:
8654 status = -ERECALLCONFLICT;
8655 break;
8656 case -NFS4ERR_DELEG_REVOKED:
8657 case -NFS4ERR_ADMIN_REVOKED:
8658 case -NFS4ERR_EXPIRED:
8659 case -NFS4ERR_BAD_STATEID:
8660 exception->timeout = 0;
8661 spin_lock(&inode->i_lock);
8662 lo = NFS_I(inode)->layout;
8663 /* If the open stateid was bad, then recover it. */
8664 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
8665 !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) {
8666 spin_unlock(&inode->i_lock);
8667 exception->state = lgp->args.ctx->state;
8668 exception->stateid = &lgp->args.stateid;
8669 break;
8670 }
8671
8672 /*
8673 * Mark the bad layout state as invalid, then retry
8674 */
8675 pnfs_mark_layout_stateid_invalid(lo, &head);
8676 spin_unlock(&inode->i_lock);
8677 nfs_commit_inode(inode, 0);
8678 pnfs_free_lseg_list(&head);
8679 status = -EAGAIN;
8680 goto out;
8681 }
8682
8683 nfs4_sequence_free_slot(&lgp->res.seq_res);
8684 err = nfs4_handle_exception(server, nfs4err, exception);
8685 if (!status) {
8686 if (exception->retry)
8687 status = -EAGAIN;
8688 else
8689 status = err;
8690 }
8691out:
8692 dprintk("<-- %s\n", __func__);
8693 return status;
8694}
8695
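/*
 * Number of pages needed to hold the largest LAYOUTGET reply the session's
 * fore channel allows; used to size and free lgp->args.layout.pages.
 */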
8696static size_t max_response_pages(struct nfs_server *server)
8697{
8698 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
8699 return nfs_page_array_len(0, max_resp_sz);
8700}
8701
8702static void nfs4_free_pages(struct page **pages, size_t size)
8703{
8704 int i;
8705
8706 if (!pages)
8707 return;
8708
8709 for (i = 0; i < size; i++) {
8710 if (!pages[i])
8711 break;
8712 __free_page(pages[i]);
8713 }
8714 kfree(pages);
8715}
8716
8717static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
8718{
8719 struct page **pages;
8720 int i;
8721
8722 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
8723 if (!pages) {
8724 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
8725 return NULL;
8726 }
8727
8728 for (i = 0; i < size; i++) {
8729 pages[i] = alloc_page(gfp_flags);
8730 if (!pages[i]) {
8731 dprintk("%s: failed to allocate page\n", __func__);
8732 nfs4_free_pages(pages, size);
8733 return NULL;
8734 }
8735 }
8736
8737 return pages;
8738}
8739
8740static void nfs4_layoutget_release(void *calldata)
8741{
8742 struct nfs4_layoutget *lgp = calldata;
8743 struct inode *inode = lgp->args.inode;
8744 struct nfs_server *server = NFS_SERVER(inode);
8745 size_t max_pages = max_response_pages(server);
8746
8747 dprintk("--> %s\n", __func__);
8748 nfs4_sequence_free_slot(&lgp->res.seq_res);
8749 nfs4_free_pages(lgp->args.layout.pages, max_pages);
8750 pnfs_put_layout_hdr(NFS_I(inode)->layout);
8751 put_nfs_open_context(lgp->args.ctx);
8752 kfree(calldata);
8753 dprintk("<-- %s\n", __func__);
8754}
8755
8756static const struct rpc_call_ops nfs4_layoutget_call_ops = {
8757 .rpc_call_prepare = nfs4_layoutget_prepare,
8758 .rpc_call_done = nfs4_layoutget_done,
8759 .rpc_release = nfs4_layoutget_release,
8760};
8761
8762struct pnfs_layout_segment *
8763nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags)
8764{
8765 struct inode *inode = lgp->args.inode;
8766 struct nfs_server *server = NFS_SERVER(inode);
8767 size_t max_pages = max_response_pages(server);
8768 struct rpc_task *task;
8769 struct rpc_message msg = {
8770 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
8771 .rpc_argp = &lgp->args,
8772 .rpc_resp = &lgp->res,
8773 .rpc_cred = lgp->cred,
8774 };
8775 struct rpc_task_setup task_setup_data = {
8776 .rpc_client = server->client,
8777 .rpc_message = &msg,
8778 .callback_ops = &nfs4_layoutget_call_ops,
8779 .callback_data = lgp,
8780 .flags = RPC_TASK_ASYNC,
8781 };
8782 struct pnfs_layout_segment *lseg = NULL;
8783 struct nfs4_exception exception = {
8784 .inode = inode,
8785 .timeout = *timeout,
8786 };
8787 int status = 0;
8788
8789 dprintk("--> %s\n", __func__);
8790
8791 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
8792 pnfs_get_layout_hdr(NFS_I(inode)->layout);
8793
8794 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
8795 if (!lgp->args.layout.pages) {
8796 nfs4_layoutget_release(lgp);
8797 return ERR_PTR(-ENOMEM);
8798 }
8799 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
8800
8801 lgp->res.layoutp = &lgp->args.layout;
8802 lgp->res.seq_res.sr_slot = NULL;
8803 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
8804
8805 task = rpc_run_task(&task_setup_data);
8806 if (IS_ERR(task))
8807 return ERR_CAST(task);
8808 status = rpc_wait_for_completion_task(task);
8809 if (status == 0) {
8810 status = nfs4_layoutget_handle_exception(task, lgp, &exception);
8811 *timeout = exception.timeout;
8812 }
8813
8814 trace_nfs4_layoutget(lgp->args.ctx,
8815 &lgp->args.range,
8816 &lgp->res.range,
8817 &lgp->res.stateid,
8818 status);
8819
8820 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
8821 if (status == 0 && lgp->res.layoutp->len)
8822 lseg = pnfs_layout_process(lgp);
8823 rpc_put_task(task);
8824 dprintk("<-- %s status=%d\n", __func__, status);
8825 if (status)
8826 return ERR_PTR(status);
8827 return lseg;
8828}
8829
8830static void
8831nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
8832{
8833 struct nfs4_layoutreturn *lrp = calldata;
8834
8835 dprintk("--> %s\n", __func__);
8836 nfs4_setup_sequence(lrp->clp,
8837 &lrp->args.seq_args,
8838 &lrp->res.seq_res,
8839 task);
8840}
8841
8842static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
8843{
8844 struct nfs4_layoutreturn *lrp = calldata;
8845 struct nfs_server *server;
8846
8847 dprintk("--> %s\n", __func__);
8848
8849 if (!nfs41_sequence_process(task, &lrp->res.seq_res))
8850 return;
8851
8852 server = NFS_SERVER(lrp->args.inode);
8853 switch (task->tk_status) {
8854 case -NFS4ERR_OLD_STATEID:
8855 if (nfs4_refresh_layout_stateid(&lrp->args.stateid,
8856 lrp->args.inode))
8857 goto out_restart;
8858 /* Fallthrough */
8859 default:
8860 task->tk_status = 0;
8861 /* Fallthrough */
8862 case 0:
8863 break;
8864 case -NFS4ERR_DELAY:
8865 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
8866 break;
8867 goto out_restart;
8868 }
8869 dprintk("<-- %s\n", __func__);
8870 return;
8871out_restart:
8872 task->tk_status = 0;
8873 nfs4_sequence_free_slot(&lrp->res.seq_res);
8874 rpc_restart_call_prepare(task);
8875}
8876
8877static void nfs4_layoutreturn_release(void *calldata)
8878{
8879 struct nfs4_layoutreturn *lrp = calldata;
8880 struct pnfs_layout_hdr *lo = lrp->args.layout;
8881
8882 dprintk("--> %s\n", __func__);
8883 pnfs_layoutreturn_free_lsegs(lo, &lrp->args.stateid, &lrp->args.range,
8884 lrp->res.lrs_present ? &lrp->res.stateid : NULL);
8885 nfs4_sequence_free_slot(&lrp->res.seq_res);
8886 if (lrp->ld_private.ops && lrp->ld_private.ops->free)
8887 lrp->ld_private.ops->free(&lrp->ld_private);
8888 pnfs_put_layout_hdr(lrp->args.layout);
8889 nfs_iput_and_deactive(lrp->inode);
8890 kfree(calldata);
8891 dprintk("<-- %s\n", __func__);
8892}
8893
8894static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
8895 .rpc_call_prepare = nfs4_layoutreturn_prepare,
8896 .rpc_call_done = nfs4_layoutreturn_done,
8897 .rpc_release = nfs4_layoutreturn_release,
8898};
8899
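/*
 * Send LAYOUTRETURN to the server.  In the asynchronous case an active
 * reference is taken on the inode so that it cannot be evicted while the
 * RPC is outstanding; nfs4_layoutreturn_release() drops it again.
 */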
8900int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
8901{
8902 struct rpc_task *task;
8903 struct rpc_message msg = {
8904 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
8905 .rpc_argp = &lrp->args,
8906 .rpc_resp = &lrp->res,
8907 .rpc_cred = lrp->cred,
8908 };
8909 struct rpc_task_setup task_setup_data = {
8910 .rpc_client = NFS_SERVER(lrp->args.inode)->client,
8911 .rpc_message = &msg,
8912 .callback_ops = &nfs4_layoutreturn_call_ops,
8913 .callback_data = lrp,
8914 };
8915 int status = 0;
8916
8917 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client,
8918 NFS_SP4_MACH_CRED_PNFS_CLEANUP,
8919 &task_setup_data.rpc_client, &msg);
8920
8921 dprintk("--> %s\n", __func__);
8922 if (!sync) {
8923 lrp->inode = nfs_igrab_and_active(lrp->args.inode);
8924 if (!lrp->inode) {
8925 nfs4_layoutreturn_release(lrp);
8926 return -EAGAIN;
8927 }
8928 task_setup_data.flags |= RPC_TASK_ASYNC;
8929 }
8930 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
8931 task = rpc_run_task(&task_setup_data);
8932 if (IS_ERR(task))
8933 return PTR_ERR(task);
8934 if (sync)
8935 status = task->tk_status;
8936 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status);
8937 dprintk("<-- %s status=%d\n", __func__, status);
8938 rpc_put_task(task);
8939 return status;
8940}
8941
8942static int
8943_nfs4_proc_getdeviceinfo(struct nfs_server *server,
8944 struct pnfs_device *pdev,
8945 struct rpc_cred *cred)
8946{
8947 struct nfs4_getdeviceinfo_args args = {
8948 .pdev = pdev,
8949 .notify_types = NOTIFY_DEVICEID4_CHANGE |
8950 NOTIFY_DEVICEID4_DELETE,
8951 };
8952 struct nfs4_getdeviceinfo_res res = {
8953 .pdev = pdev,
8954 };
8955 struct rpc_message msg = {
8956 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
8957 .rpc_argp = &args,
8958 .rpc_resp = &res,
8959 .rpc_cred = cred,
8960 };
8961 int status;
8962
8963 dprintk("--> %s\n", __func__);
8964 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
8965 if (res.notification & ~args.notify_types)
8966 dprintk("%s: unsupported notification\n", __func__);
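	/*
	 * Only cache the device info if the server agreed to send every
	 * notification we asked for (device ID change and delete); otherwise
	 * the cached entry could silently go stale.
	 */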
8967 if (res.notification != args.notify_types)
8968 pdev->nocache = 1;
8969
8970 dprintk("<-- %s status=%d\n", __func__, status);
8971
8972 return status;
8973}
8974
8975int nfs4_proc_getdeviceinfo(struct nfs_server *server,
8976 struct pnfs_device *pdev,
8977 struct rpc_cred *cred)
8978{
8979 struct nfs4_exception exception = { };
8980 int err;
8981
8982 do {
8983 err = nfs4_handle_exception(server,
8984 _nfs4_proc_getdeviceinfo(server, pdev, cred),
8985 &exception);
8986 } while (exception.retry);
8987 return err;
8988}
8989EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
8990
8991static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
8992{
8993 struct nfs4_layoutcommit_data *data = calldata;
8994 struct nfs_server *server = NFS_SERVER(data->args.inode);
8995
8996 nfs4_setup_sequence(server->nfs_client,
8997 &data->args.seq_args,
8998 &data->res.seq_res,
8999 task);
9000}
9001
9002static void
9003nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
9004{
9005 struct nfs4_layoutcommit_data *data = calldata;
9006 struct nfs_server *server = NFS_SERVER(data->args.inode);
9007
9008 if (!nfs41_sequence_done(task, &data->res.seq_res))
9009 return;
9010
9011 switch (task->tk_status) { /* Just ignore these failures */
9012 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
9013 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
9014 case -NFS4ERR_BADLAYOUT: /* no layout */
	case -NFS4ERR_GRACE:	/* loca_reclaim is always false */
9016 task->tk_status = 0;
9017 case 0:
9018 break;
9019 default:
9020 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
9021 rpc_restart_call_prepare(task);
9022 return;
9023 }
9024 }
9025}
9026
9027static void nfs4_layoutcommit_release(void *calldata)
9028{
9029 struct nfs4_layoutcommit_data *data = calldata;
9030
9031 pnfs_cleanup_layoutcommit(data);
9032 nfs_post_op_update_inode_force_wcc(data->args.inode,
9033 data->res.fattr);
9034 put_rpccred(data->cred);
9035 nfs_iput_and_deactive(data->inode);
9036 kfree(data);
9037}
9038
9039static const struct rpc_call_ops nfs4_layoutcommit_ops = {
9040 .rpc_call_prepare = nfs4_layoutcommit_prepare,
9041 .rpc_call_done = nfs4_layoutcommit_done,
9042 .rpc_release = nfs4_layoutcommit_release,
9043};
9044
9045int
9046nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
9047{
9048 struct rpc_message msg = {
9049 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
9050 .rpc_argp = &data->args,
9051 .rpc_resp = &data->res,
9052 .rpc_cred = data->cred,
9053 };
9054 struct rpc_task_setup task_setup_data = {
9055 .task = &data->task,
9056 .rpc_client = NFS_CLIENT(data->args.inode),
9057 .rpc_message = &msg,
9058 .callback_ops = &nfs4_layoutcommit_ops,
9059 .callback_data = data,
9060 };
9061 struct rpc_task *task;
9062 int status = 0;
9063
9064 dprintk("NFS: initiating layoutcommit call. sync %d "
9065 "lbw: %llu inode %lu\n", sync,
9066 data->args.lastbytewritten,
9067 data->args.inode->i_ino);
9068
9069 if (!sync) {
9070 data->inode = nfs_igrab_and_active(data->args.inode);
9071 if (data->inode == NULL) {
9072 nfs4_layoutcommit_release(data);
9073 return -EAGAIN;
9074 }
9075 task_setup_data.flags = RPC_TASK_ASYNC;
9076 }
9077 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
9078 task = rpc_run_task(&task_setup_data);
9079 if (IS_ERR(task))
9080 return PTR_ERR(task);
9081 if (sync)
9082 status = task->tk_status;
9083 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status);
9084 dprintk("%s: status %d\n", __func__, status);
9085 rpc_put_task(task);
9086 return status;
9087}
9088
/*
 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
 * possible) as per the RFC3530bis and RFC5661 Security Considerations sections.
9092 */
9093static int
9094_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
9095 struct nfs_fsinfo *info,
9096 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
9097{
9098 struct nfs41_secinfo_no_name_args args = {
9099 .style = SECINFO_STYLE_CURRENT_FH,
9100 };
9101 struct nfs4_secinfo_res res = {
9102 .flavors = flavors,
9103 };
9104 struct rpc_message msg = {
9105 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
9106 .rpc_argp = &args,
9107 .rpc_resp = &res,
9108 };
9109 struct rpc_clnt *clnt = server->client;
9110 struct rpc_cred *cred = NULL;
9111 int status;
9112
9113 if (use_integrity) {
9114 clnt = server->nfs_client->cl_rpcclient;
9115 cred = nfs4_get_clid_cred(server->nfs_client);
9116 msg.rpc_cred = cred;
9117 }
9118
9119 dprintk("--> %s\n", __func__);
9120 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
9121 &res.seq_res, 0);
9122 dprintk("<-- %s status=%d\n", __func__, status);
9123
9124 if (cred)
9125 put_rpccred(cred);
9126
9127 return status;
9128}
9129
9130static int
9131nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
9132 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
9133{
9134 struct nfs4_exception exception = { };
9135 int err;
9136 do {
9137 /* first try using integrity protection */
9138 err = -NFS4ERR_WRONGSEC;
9139
9140 /* try to use integrity protection with machine cred */
9141 if (_nfs4_is_integrity_protected(server->nfs_client))
9142 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
9143 flavors, true);
9144
9145 /*
9146 * if unable to use integrity protection, or SECINFO with
9147 * integrity protection returns NFS4ERR_WRONGSEC (which is
9148 * disallowed by spec, but exists in deployed servers) use
9149 * the current filesystem's rpc_client and the user cred.
9150 */
9151 if (err == -NFS4ERR_WRONGSEC)
9152 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
9153 flavors, false);
9154
9155 switch (err) {
9156 case 0:
9157 case -NFS4ERR_WRONGSEC:
9158 case -ENOTSUPP:
9159 goto out;
9160 default:
9161 err = nfs4_handle_exception(server, err, &exception);
9162 }
9163 } while (exception.retry);
9164out:
9165 return err;
9166}
9167
9168static int
9169nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
9170 struct nfs_fsinfo *info)
9171{
9172 int err;
9173 struct page *page;
9174 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
9175 struct nfs4_secinfo_flavors *flavors;
9176 struct nfs4_secinfo4 *secinfo;
9177 int i;
9178
9179 page = alloc_page(GFP_KERNEL);
9180 if (!page) {
9181 err = -ENOMEM;
9182 goto out;
9183 }
9184
9185 flavors = page_address(page);
9186 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
9187
9188 /*
9189 * Fall back on "guess and check" method if
9190 * the server doesn't support SECINFO_NO_NAME
9191 */
9192 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
9193 err = nfs4_find_root_sec(server, fhandle, info);
9194 goto out_freepage;
9195 }
9196 if (err)
9197 goto out_freepage;
9198
9199 for (i = 0; i < flavors->num_flavors; i++) {
9200 secinfo = &flavors->flavors[i];
9201
9202 switch (secinfo->flavor) {
9203 case RPC_AUTH_NULL:
9204 case RPC_AUTH_UNIX:
9205 case RPC_AUTH_GSS:
9206 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
9207 &secinfo->flavor_info);
9208 break;
9209 default:
9210 flavor = RPC_AUTH_MAXFLAVOR;
9211 break;
9212 }
9213
9214 if (!nfs_auth_info_match(&server->auth_info, flavor))
9215 flavor = RPC_AUTH_MAXFLAVOR;
9216
9217 if (flavor != RPC_AUTH_MAXFLAVOR) {
9218 err = nfs4_lookup_root_sec(server, fhandle,
9219 info, flavor);
9220 if (!err)
9221 break;
9222 }
9223 }
9224
9225 if (flavor == RPC_AUTH_MAXFLAVOR)
9226 err = -EPERM;
9227
9228out_freepage:
9229 put_page(page);
9230 if (err == -EACCES)
9231 return -EPERM;
9232out:
9233 return err;
9234}
9235
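/*
 * TEST_STATEID returns a per-stateid result in res.status even when the
 * compound itself succeeds, so hand that back (negated) to the caller.
 */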
9236static int _nfs41_test_stateid(struct nfs_server *server,
9237 nfs4_stateid *stateid,
9238 struct rpc_cred *cred)
9239{
9240 int status;
9241 struct nfs41_test_stateid_args args = {
9242 .stateid = stateid,
9243 };
9244 struct nfs41_test_stateid_res res;
9245 struct rpc_message msg = {
9246 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
9247 .rpc_argp = &args,
9248 .rpc_resp = &res,
9249 .rpc_cred = cred,
9250 };
9251 struct rpc_clnt *rpc_client = server->client;
9252
9253 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
9254 &rpc_client, &msg);
9255
9256 dprintk("NFS call test_stateid %p\n", stateid);
9257 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
9258 nfs4_set_sequence_privileged(&args.seq_args);
9259 status = nfs4_call_sync_sequence(rpc_client, server, &msg,
9260 &args.seq_args, &res.seq_res);
9261 if (status != NFS_OK) {
9262 dprintk("NFS reply test_stateid: failed, %d\n", status);
9263 return status;
9264 }
9265 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
9266 return -res.status;
9267}
9268
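/*
 * For TEST_STATEID, only transient delays and session errors are retried
 * here; any other error, including a bad or revoked stateid, is returned to
 * the caller to act on.
 */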
9269static void nfs4_handle_delay_or_session_error(struct nfs_server *server,
9270 int err, struct nfs4_exception *exception)
9271{
9272 exception->retry = 0;
9273 switch(err) {
9274 case -NFS4ERR_DELAY:
9275 case -NFS4ERR_RETRY_UNCACHED_REP:
9276 nfs4_handle_exception(server, err, exception);
9277 break;
9278 case -NFS4ERR_BADSESSION:
9279 case -NFS4ERR_BADSLOT:
9280 case -NFS4ERR_BAD_HIGH_SLOT:
9281 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
9282 case -NFS4ERR_DEADSESSION:
9283 nfs4_do_handle_exception(server, err, exception);
9284 }
9285}
9286
9287/**
9288 * nfs41_test_stateid - perform a TEST_STATEID operation
9289 *
9290 * @server: server / transport on which to perform the operation
9291 * @stateid: state ID to test
9292 * @cred: credential
9293 *
9294 * Returns NFS_OK if the server recognizes that "stateid" is valid.
9295 * Otherwise a negative NFS4ERR value is returned if the operation
9296 * failed or the state ID is not currently valid.
9297 */
9298static int nfs41_test_stateid(struct nfs_server *server,
9299 nfs4_stateid *stateid,
9300 struct rpc_cred *cred)
9301{
9302 struct nfs4_exception exception = { };
9303 int err;
9304 do {
9305 err = _nfs41_test_stateid(server, stateid, cred);
9306 nfs4_handle_delay_or_session_error(server, err, &exception);
9307 } while (exception.retry);
9308 return err;
9309}
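
/*
 * Illustrative sketch only (not a real call site): a caller would
 * typically pair TEST_STATEID with FREE_STATEID when probing a
 * possibly-revoked stateid, roughly:
 *
 *	if (nfs41_test_stateid(server, &state->stateid, cred) != NFS_OK)
 *		nfs41_free_stateid(server, &state->stateid, cred, false);
 *
 * The actual users are the nfs41_test_and_free_expired_stateid() path
 * wired up in the minor version ops near the end of this file.
 */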
9310
9311struct nfs_free_stateid_data {
9312 struct nfs_server *server;
9313 struct nfs41_free_stateid_args args;
9314 struct nfs41_free_stateid_res res;
9315};
9316
9317static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
9318{
9319 struct nfs_free_stateid_data *data = calldata;
9320 nfs4_setup_sequence(data->server->nfs_client,
9321 &data->args.seq_args,
9322 &data->res.seq_res,
9323 task);
9324}
9325
9326static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
9327{
9328 struct nfs_free_stateid_data *data = calldata;
9329
9330 nfs41_sequence_done(task, &data->res.seq_res);
9331
9332 switch (task->tk_status) {
9333 case -NFS4ERR_DELAY:
9334 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
9335 rpc_restart_call_prepare(task);
9336 }
9337}
9338
9339static void nfs41_free_stateid_release(void *calldata)
9340{
9341 kfree(calldata);
9342}
9343
9344static const struct rpc_call_ops nfs41_free_stateid_ops = {
9345 .rpc_call_prepare = nfs41_free_stateid_prepare,
9346 .rpc_call_done = nfs41_free_stateid_done,
9347 .rpc_release = nfs41_free_stateid_release,
9348};
9349
9350static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server,
9351 const nfs4_stateid *stateid,
9352 struct rpc_cred *cred,
9353 bool privileged)
9354{
9355 struct rpc_message msg = {
9356 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
9357 .rpc_cred = cred,
9358 };
9359 struct rpc_task_setup task_setup = {
9360 .rpc_client = server->client,
9361 .rpc_message = &msg,
9362 .callback_ops = &nfs41_free_stateid_ops,
9363 .flags = RPC_TASK_ASYNC,
9364 };
9365 struct nfs_free_stateid_data *data;
9366
9367 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
9368 &task_setup.rpc_client, &msg);
9369
9370 dprintk("NFS call free_stateid %p\n", stateid);
9371 data = kmalloc(sizeof(*data), GFP_NOFS);
9372 if (!data)
9373 return ERR_PTR(-ENOMEM);
9374 data->server = server;
9375 nfs4_stateid_copy(&data->args.stateid, stateid);
9376
9377 task_setup.callback_data = data;
9378
9379 msg.rpc_argp = &data->args;
9380 msg.rpc_resp = &data->res;
9381 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
9382 if (privileged)
9383 nfs4_set_sequence_privileged(&data->args.seq_args);
9384
9385 return rpc_run_task(&task_setup);
9386}
9387
9388/**
9389 * nfs41_free_stateid - perform a FREE_STATEID operation
9390 *
9391 * @server: server / transport on which to perform the operation
9392 * @stateid: state ID to release
9393 * @cred: credential
9394 * @is_recovery: set to true if this call needs to be privileged
9395 *
9396 * Note: this function is always asynchronous; it returns 0 once the FREE_STATEID RPC task has been launched, or a negative errno if the task could not be started.
9397 */
9398static int nfs41_free_stateid(struct nfs_server *server,
9399 const nfs4_stateid *stateid,
9400 struct rpc_cred *cred,
9401 bool is_recovery)
9402{
9403 struct rpc_task *task;
9404
9405 task = _nfs41_free_stateid(server, stateid, cred, is_recovery);
9406 if (IS_ERR(task))
9407 return PTR_ERR(task);
9408 rpc_put_task(task);
9409 return 0;
9410}
9411
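/*
 * In-tree user of nfs41_free_stateid(): tell the server to release a lock
 * stateid, then free the client's local lock state.
 */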
9412static void
9413nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
9414{
9415 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
9416
9417 nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
9418 nfs4_free_lock_state(server, lsp);
9419}
9420
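/*
 * NFSv4.1 stateid comparison: the type and "other" fields must match
 * exactly, while a seqid of zero on either side acts as a wildcard that
 * matches any sequence number.
 */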
9421static bool nfs41_match_stateid(const nfs4_stateid *s1,
9422 const nfs4_stateid *s2)
9423{
9424 if (s1->type != s2->type)
9425 return false;
9426
9427 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
9428 return false;
9429
9430 if (s1->seqid == s2->seqid)
9431 return true;
9432
9433 return s1->seqid == 0 || s2->seqid == 0;
9434}
9435
9436#endif /* CONFIG_NFS_V4_1 */
9437
9438static bool nfs4_match_stateid(const nfs4_stateid *s1,
9439 const nfs4_stateid *s2)
9440{
9441 return nfs4_stateid_match(s1, s2);
9442}
9443
9444
9445static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
9446 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
9447 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
9448 .recover_open = nfs4_open_reclaim,
9449 .recover_lock = nfs4_lock_reclaim,
9450 .establish_clid = nfs4_init_clientid,
9451 .detect_trunking = nfs40_discover_server_trunking,
9452};
9453
9454#if defined(CONFIG_NFS_V4_1)
9455static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
9456 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
9457 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
9458 .recover_open = nfs4_open_reclaim,
9459 .recover_lock = nfs4_lock_reclaim,
9460 .establish_clid = nfs41_init_clientid,
9461 .reclaim_complete = nfs41_proc_reclaim_complete,
9462 .detect_trunking = nfs41_discover_server_trunking,
9463};
9464#endif /* CONFIG_NFS_V4_1 */
9465
9466static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
9467 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
9468 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
9469 .recover_open = nfs40_open_expired,
9470 .recover_lock = nfs4_lock_expired,
9471 .establish_clid = nfs4_init_clientid,
9472};
9473
9474#if defined(CONFIG_NFS_V4_1)
9475static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
9476 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
9477 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
9478 .recover_open = nfs41_open_expired,
9479 .recover_lock = nfs41_lock_expired,
9480 .establish_clid = nfs41_init_clientid,
9481};
9482#endif /* CONFIG_NFS_V4_1 */
9483
9484static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
9485 .sched_state_renewal = nfs4_proc_async_renew,
9486 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
9487 .renew_lease = nfs4_proc_renew,
9488};
9489
9490#if defined(CONFIG_NFS_V4_1)
9491static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
9492 .sched_state_renewal = nfs41_proc_async_sequence,
9493 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
9494 .renew_lease = nfs4_proc_sequence,
9495};
9496#endif
9497
9498static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
9499 .get_locations = _nfs40_proc_get_locations,
9500 .fsid_present = _nfs40_proc_fsid_present,
9501};
9502
9503#if defined(CONFIG_NFS_V4_1)
9504static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
9505 .get_locations = _nfs41_proc_get_locations,
9506 .fsid_present = _nfs41_proc_fsid_present,
9507};
9508#endif /* CONFIG_NFS_V4_1 */
9509
9510static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
9511 .minor_version = 0,
9512 .init_caps = NFS_CAP_READDIRPLUS
9513 | NFS_CAP_ATOMIC_OPEN
9514 | NFS_CAP_POSIX_LOCK,
9515 .init_client = nfs40_init_client,
9516 .shutdown_client = nfs40_shutdown_client,
9517 .match_stateid = nfs4_match_stateid,
9518 .find_root_sec = nfs4_find_root_sec,
9519 .free_lock_state = nfs4_release_lockowner,
9520 .test_and_free_expired = nfs40_test_and_free_expired_stateid,
9521 .alloc_seqid = nfs_alloc_seqid,
9522 .call_sync_ops = &nfs40_call_sync_ops,
9523 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
9524 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
9525 .state_renewal_ops = &nfs40_state_renewal_ops,
9526 .mig_recovery_ops = &nfs40_mig_recovery_ops,
9527};
9528
9529#if defined(CONFIG_NFS_V4_1)
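/*
 * NFSv4.1 sessions provide exactly-once execution semantics, so the
 * open/lock owner seqids used by NFSv4.0 are unnecessary; the v4.1 and
 * v4.2 minor version ops plug in this stub instead of nfs_alloc_seqid().
 */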
9530static struct nfs_seqid *
9531nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
9532{
9533 return NULL;
9534}
9535
9536static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
9537 .minor_version = 1,
9538 .init_caps = NFS_CAP_READDIRPLUS
9539 | NFS_CAP_ATOMIC_OPEN
9540 | NFS_CAP_POSIX_LOCK
9541 | NFS_CAP_STATEID_NFSV41
9542 | NFS_CAP_ATOMIC_OPEN_V1,
9543 .init_client = nfs41_init_client,
9544 .shutdown_client = nfs41_shutdown_client,
9545 .match_stateid = nfs41_match_stateid,
9546 .find_root_sec = nfs41_find_root_sec,
9547 .free_lock_state = nfs41_free_lock_state,
9548 .test_and_free_expired = nfs41_test_and_free_expired_stateid,
9549 .alloc_seqid = nfs_alloc_no_seqid,
9550 .session_trunk = nfs4_test_session_trunk,
9551 .call_sync_ops = &nfs41_call_sync_ops,
9552 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
9553 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
9554 .state_renewal_ops = &nfs41_state_renewal_ops,
9555 .mig_recovery_ops = &nfs41_mig_recovery_ops,
9556};
9557#endif
9558
9559#if defined(CONFIG_NFS_V4_2)
9560static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
9561 .minor_version = 2,
9562 .init_caps = NFS_CAP_READDIRPLUS
9563 | NFS_CAP_ATOMIC_OPEN
9564 | NFS_CAP_POSIX_LOCK
9565 | NFS_CAP_STATEID_NFSV41
9566 | NFS_CAP_ATOMIC_OPEN_V1
9567 | NFS_CAP_ALLOCATE
9568 | NFS_CAP_COPY
9569 | NFS_CAP_DEALLOCATE
9570 | NFS_CAP_SEEK
9571 | NFS_CAP_LAYOUTSTATS
9572 | NFS_CAP_CLONE,
9573 .init_client = nfs41_init_client,
9574 .shutdown_client = nfs41_shutdown_client,
9575 .match_stateid = nfs41_match_stateid,
9576 .find_root_sec = nfs41_find_root_sec,
9577 .free_lock_state = nfs41_free_lock_state,
9578 .call_sync_ops = &nfs41_call_sync_ops,
9579 .test_and_free_expired = nfs41_test_and_free_expired_stateid,
9580 .alloc_seqid = nfs_alloc_no_seqid,
9581 .session_trunk = nfs4_test_session_trunk,
9582 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
9583 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
9584 .state_renewal_ops = &nfs41_state_renewal_ops,
9585 .mig_recovery_ops = &nfs41_mig_recovery_ops,
9586};
9587#endif
9588
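/*
 * Dispatch table indexed by the NFSv4 minor version in use for a mount;
 * the v4.1 and v4.2 entries are compiled in only when the corresponding
 * Kconfig options are enabled.
 */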
9589const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
9590 [0] = &nfs_v4_0_minor_ops,
9591#if defined(CONFIG_NFS_V4_1)
9592 [1] = &nfs_v4_1_minor_ops,
9593#endif
9594#if defined(CONFIG_NFS_V4_2)
9595 [2] = &nfs_v4_2_minor_ops,
9596#endif
9597};
9598
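/*
 * Concatenate the xattr names produced by the registered xattr handlers
 * with the NFSv4 security label name (when labels are supported), and
 * return the combined list length or a negative error.
 */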
9599static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
9600{
9601 ssize_t error, error2;
9602
9603 error = generic_listxattr(dentry, list, size);
9604 if (error < 0)
9605 return error;
9606 if (list) {
9607 list += error;
9608 size -= error;
9609 }
9610
9611 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size);
9612 if (error2 < 0)
9613 return error2;
9614 return error + error2;
9615}
9616
9617static const struct inode_operations nfs4_dir_inode_operations = {
9618 .create = nfs_create,
9619 .lookup = nfs_lookup,
9620 .atomic_open = nfs_atomic_open,
9621 .link = nfs_link,
9622 .unlink = nfs_unlink,
9623 .symlink = nfs_symlink,
9624 .mkdir = nfs_mkdir,
9625 .rmdir = nfs_rmdir,
9626 .mknod = nfs_mknod,
9627 .rename = nfs_rename,
9628 .permission = nfs_permission,
9629 .getattr = nfs_getattr,
9630 .setattr = nfs_setattr,
9631 .listxattr = nfs4_listxattr,
9632};
9633
9634static const struct inode_operations nfs4_file_inode_operations = {
9635 .permission = nfs_permission,
9636 .getattr = nfs_getattr,
9637 .setattr = nfs_setattr,
9638 .listxattr = nfs4_listxattr,
9639};
9640
9641const struct nfs_rpc_ops nfs_v4_clientops = {
9642 .version = 4, /* protocol version */
9643 .dentry_ops = &nfs4_dentry_operations,
9644 .dir_inode_ops = &nfs4_dir_inode_operations,
9645 .file_inode_ops = &nfs4_file_inode_operations,
9646 .file_ops = &nfs4_file_operations,
9647 .getroot = nfs4_proc_get_root,
9648 .submount = nfs4_submount,
9649 .try_mount = nfs4_try_mount,
9650 .getattr = nfs4_proc_getattr,
9651 .setattr = nfs4_proc_setattr,
9652 .lookup = nfs4_proc_lookup,
9653 .lookupp = nfs4_proc_lookupp,
9654 .access = nfs4_proc_access,
9655 .readlink = nfs4_proc_readlink,
9656 .create = nfs4_proc_create,
9657 .remove = nfs4_proc_remove,
9658 .unlink_setup = nfs4_proc_unlink_setup,
9659 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
9660 .unlink_done = nfs4_proc_unlink_done,
9661 .rename_setup = nfs4_proc_rename_setup,
9662 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
9663 .rename_done = nfs4_proc_rename_done,
9664 .link = nfs4_proc_link,
9665 .symlink = nfs4_proc_symlink,
9666 .mkdir = nfs4_proc_mkdir,
9667 .rmdir = nfs4_proc_rmdir,
9668 .readdir = nfs4_proc_readdir,
9669 .mknod = nfs4_proc_mknod,
9670 .statfs = nfs4_proc_statfs,
9671 .fsinfo = nfs4_proc_fsinfo,
9672 .pathconf = nfs4_proc_pathconf,
9673 .set_capabilities = nfs4_server_capabilities,
9674 .decode_dirent = nfs4_decode_dirent,
9675 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
9676 .read_setup = nfs4_proc_read_setup,
9677 .read_done = nfs4_read_done,
9678 .write_setup = nfs4_proc_write_setup,
9679 .write_done = nfs4_write_done,
9680 .commit_setup = nfs4_proc_commit_setup,
9681 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
9682 .commit_done = nfs4_commit_done,
9683 .lock = nfs4_proc_lock,
9684 .clear_acl_cache = nfs4_zap_acl_attr,
9685 .close_context = nfs4_close_context,
9686 .open_context = nfs4_atomic_open,
9687 .have_delegation = nfs4_have_delegation,
9688 .alloc_client = nfs4_alloc_client,
9689 .init_client = nfs4_init_client,
9690 .free_client = nfs4_free_client,
9691 .create_server = nfs4_create_server,
9692 .clone_server = nfs_clone_server,
9693};
9694
9695static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
9696 .name = XATTR_NAME_NFSV4_ACL,
9697 .list = nfs4_xattr_list_nfs4_acl,
9698 .get = nfs4_xattr_get_nfs4_acl,
9699 .set = nfs4_xattr_set_nfs4_acl,
9700};
9701
9702const struct xattr_handler *nfs4_xattr_handlers[] = {
9703 &nfs4_xattr_nfs4_acl_handler,
9704#ifdef CONFIG_NFS_V4_SECURITY_LABEL
9705 &nfs4_xattr_nfs4_label_handler,
9706#endif
9707 NULL
9708};
9709
9710/*
9711 * Local variables:
9712 * c-basic-offset: 8
9713 * End:
9714 */
1/*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#include <linux/mm.h>
39#include <linux/delay.h>
40#include <linux/errno.h>
41#include <linux/string.h>
42#include <linux/ratelimit.h>
43#include <linux/printk.h>
44#include <linux/slab.h>
45#include <linux/sunrpc/clnt.h>
46#include <linux/sunrpc/gss_api.h>
47#include <linux/nfs.h>
48#include <linux/nfs4.h>
49#include <linux/nfs_fs.h>
50#include <linux/nfs_page.h>
51#include <linux/nfs_mount.h>
52#include <linux/namei.h>
53#include <linux/mount.h>
54#include <linux/module.h>
55#include <linux/nfs_idmap.h>
56#include <linux/sunrpc/bc_xprt.h>
57#include <linux/xattr.h>
58#include <linux/utsname.h>
59#include <linux/freezer.h>
60
61#include "nfs4_fs.h"
62#include "delegation.h"
63#include "internal.h"
64#include "iostat.h"
65#include "callback.h"
66#include "pnfs.h"
67#include "netns.h"
68
69#define NFSDBG_FACILITY NFSDBG_PROC
70
71#define NFS4_POLL_RETRY_MIN (HZ/10)
72#define NFS4_POLL_RETRY_MAX (15*HZ)
73
74#define NFS4_MAX_LOOP_ON_RECOVER (10)
75
76static unsigned short max_session_slots = NFS4_DEF_SLOT_TABLE_SIZE;
77
78struct nfs4_opendata;
79static int _nfs4_proc_open(struct nfs4_opendata *data);
80static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
81static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
82static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
83static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
84static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *);
85static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
86static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
87 struct nfs_fattr *fattr, struct iattr *sattr,
88 struct nfs4_state *state);
89#ifdef CONFIG_NFS_V4_1
90static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *);
91static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *);
92#endif
93/* Prevent leaks of NFSv4 errors into userland */
94static int nfs4_map_errors(int err)
95{
96 if (err >= -1000)
97 return err;
98 switch (err) {
99 case -NFS4ERR_RESOURCE:
100 return -EREMOTEIO;
101 case -NFS4ERR_WRONGSEC:
102 return -EPERM;
103 case -NFS4ERR_BADOWNER:
104 case -NFS4ERR_BADNAME:
105 return -EINVAL;
106 case -NFS4ERR_SHARE_DENIED:
107 return -EACCES;
108 case -NFS4ERR_MINOR_VERS_MISMATCH:
109 return -EPROTONOSUPPORT;
110 default:
111 dprintk("%s could not handle NFSv4 error %d\n",
112 __func__, -err);
113 break;
114 }
115 return -EIO;
116}
117
118/*
119 * This is our standard bitmap for GETATTR requests.
120 */
121const u32 nfs4_fattr_bitmap[3] = {
122 FATTR4_WORD0_TYPE
123 | FATTR4_WORD0_CHANGE
124 | FATTR4_WORD0_SIZE
125 | FATTR4_WORD0_FSID
126 | FATTR4_WORD0_FILEID,
127 FATTR4_WORD1_MODE
128 | FATTR4_WORD1_NUMLINKS
129 | FATTR4_WORD1_OWNER
130 | FATTR4_WORD1_OWNER_GROUP
131 | FATTR4_WORD1_RAWDEV
132 | FATTR4_WORD1_SPACE_USED
133 | FATTR4_WORD1_TIME_ACCESS
134 | FATTR4_WORD1_TIME_METADATA
135 | FATTR4_WORD1_TIME_MODIFY
136};
137
138static const u32 nfs4_pnfs_open_bitmap[3] = {
139 FATTR4_WORD0_TYPE
140 | FATTR4_WORD0_CHANGE
141 | FATTR4_WORD0_SIZE
142 | FATTR4_WORD0_FSID
143 | FATTR4_WORD0_FILEID,
144 FATTR4_WORD1_MODE
145 | FATTR4_WORD1_NUMLINKS
146 | FATTR4_WORD1_OWNER
147 | FATTR4_WORD1_OWNER_GROUP
148 | FATTR4_WORD1_RAWDEV
149 | FATTR4_WORD1_SPACE_USED
150 | FATTR4_WORD1_TIME_ACCESS
151 | FATTR4_WORD1_TIME_METADATA
152 | FATTR4_WORD1_TIME_MODIFY,
153 FATTR4_WORD2_MDSTHRESHOLD
154};
155
156const u32 nfs4_statfs_bitmap[2] = {
157 FATTR4_WORD0_FILES_AVAIL
158 | FATTR4_WORD0_FILES_FREE
159 | FATTR4_WORD0_FILES_TOTAL,
160 FATTR4_WORD1_SPACE_AVAIL
161 | FATTR4_WORD1_SPACE_FREE
162 | FATTR4_WORD1_SPACE_TOTAL
163};
164
165const u32 nfs4_pathconf_bitmap[2] = {
166 FATTR4_WORD0_MAXLINK
167 | FATTR4_WORD0_MAXNAME,
168 0
169};
170
171const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
172 | FATTR4_WORD0_MAXREAD
173 | FATTR4_WORD0_MAXWRITE
174 | FATTR4_WORD0_LEASE_TIME,
175 FATTR4_WORD1_TIME_DELTA
176 | FATTR4_WORD1_FS_LAYOUT_TYPES,
177 FATTR4_WORD2_LAYOUT_BLKSIZE
178};
179
180const u32 nfs4_fs_locations_bitmap[2] = {
181 FATTR4_WORD0_TYPE
182 | FATTR4_WORD0_CHANGE
183 | FATTR4_WORD0_SIZE
184 | FATTR4_WORD0_FSID
185 | FATTR4_WORD0_FILEID
186 | FATTR4_WORD0_FS_LOCATIONS,
187 FATTR4_WORD1_MODE
188 | FATTR4_WORD1_NUMLINKS
189 | FATTR4_WORD1_OWNER
190 | FATTR4_WORD1_OWNER_GROUP
191 | FATTR4_WORD1_RAWDEV
192 | FATTR4_WORD1_SPACE_USED
193 | FATTR4_WORD1_TIME_ACCESS
194 | FATTR4_WORD1_TIME_METADATA
195 | FATTR4_WORD1_TIME_MODIFY
196 | FATTR4_WORD1_MOUNTED_ON_FILEID
197};
198
199static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
200 struct nfs4_readdir_arg *readdir)
201{
202 __be32 *start, *p;
203
204 BUG_ON(readdir->count < 80);
205 if (cookie > 2) {
206 readdir->cookie = cookie;
207 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
208 return;
209 }
210
211 readdir->cookie = 0;
212 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
213 if (cookie == 2)
214 return;
215
216 /*
217 * NFSv4 servers do not return entries for '.' and '..'
218 * Therefore, we fake these entries here. We let '.'
219 * have cookie 0 and '..' have cookie 1. Note that
220 * when talking to the server, we always send cookie 0
221 * instead of 1 or 2.
222 */
223 start = p = kmap_atomic(*readdir->pages);
224
225 if (cookie == 0) {
226 *p++ = xdr_one; /* next */
227 *p++ = xdr_zero; /* cookie, first word */
228 *p++ = xdr_one; /* cookie, second word */
229 *p++ = xdr_one; /* entry len */
230 memcpy(p, ".\0\0\0", 4); /* entry */
231 p++;
232 *p++ = xdr_one; /* bitmap length */
233 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
234 *p++ = htonl(8); /* attribute buffer length */
235 p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_inode));
236 }
237
238 *p++ = xdr_one; /* next */
239 *p++ = xdr_zero; /* cookie, first word */
240 *p++ = xdr_two; /* cookie, second word */
241 *p++ = xdr_two; /* entry len */
242 memcpy(p, "..\0\0", 4); /* entry */
243 p++;
244 *p++ = xdr_one; /* bitmap length */
245 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
246 *p++ = htonl(8); /* attribute buffer length */
247 p = xdr_encode_hyper(p, NFS_FILEID(dentry->d_parent->d_inode));
248
249 readdir->pgbase = (char *)p - (char *)start;
250 readdir->count -= readdir->pgbase;
251 kunmap_atomic(start);
252}
253
254static int nfs4_wait_clnt_recover(struct nfs_client *clp)
255{
256 int res;
257
258 might_sleep();
259
260 res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
261 nfs_wait_bit_killable, TASK_KILLABLE);
262 return res;
263}
264
265static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
266{
267 int res = 0;
268
269 might_sleep();
270
271 if (*timeout <= 0)
272 *timeout = NFS4_POLL_RETRY_MIN;
273 if (*timeout > NFS4_POLL_RETRY_MAX)
274 *timeout = NFS4_POLL_RETRY_MAX;
275 freezable_schedule_timeout_killable(*timeout);
276 if (fatal_signal_pending(current))
277 res = -ERESTARTSYS;
278 *timeout <<= 1;
279 return res;
280}
281
282/* This is the error handling routine for processes that are allowed
283 * to sleep.
284 */
285static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
286{
287 struct nfs_client *clp = server->nfs_client;
288 struct nfs4_state *state = exception->state;
289 struct inode *inode = exception->inode;
290 int ret = errorcode;
291
292 exception->retry = 0;
293 switch(errorcode) {
294 case 0:
295 return 0;
296 case -NFS4ERR_OPENMODE:
297 if (inode && nfs_have_delegation(inode, FMODE_READ)) {
298 nfs_inode_return_delegation(inode);
299 exception->retry = 1;
300 return 0;
301 }
302 if (state == NULL)
303 break;
304 nfs4_schedule_stateid_recovery(server, state);
305 goto wait_on_recovery;
306 case -NFS4ERR_DELEG_REVOKED:
307 case -NFS4ERR_ADMIN_REVOKED:
308 case -NFS4ERR_BAD_STATEID:
309 if (state == NULL)
310 break;
311 nfs_remove_bad_delegation(state->inode);
312 nfs4_schedule_stateid_recovery(server, state);
313 goto wait_on_recovery;
314 case -NFS4ERR_EXPIRED:
315 if (state != NULL)
316 nfs4_schedule_stateid_recovery(server, state);
317 case -NFS4ERR_STALE_STATEID:
318 case -NFS4ERR_STALE_CLIENTID:
319 nfs4_schedule_lease_recovery(clp);
320 goto wait_on_recovery;
321#if defined(CONFIG_NFS_V4_1)
322 case -NFS4ERR_BADSESSION:
323 case -NFS4ERR_BADSLOT:
324 case -NFS4ERR_BAD_HIGH_SLOT:
325 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
326 case -NFS4ERR_DEADSESSION:
327 case -NFS4ERR_SEQ_FALSE_RETRY:
328 case -NFS4ERR_SEQ_MISORDERED:
329 dprintk("%s ERROR: %d Reset session\n", __func__,
330 errorcode);
331 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
332 exception->retry = 1;
333 break;
334#endif /* defined(CONFIG_NFS_V4_1) */
335 case -NFS4ERR_FILE_OPEN:
336 if (exception->timeout > HZ) {
337 /* We have retried a decent amount, time to
338 * fail
339 */
340 ret = -EBUSY;
341 break;
342 }
343 case -NFS4ERR_GRACE:
344 case -NFS4ERR_DELAY:
345 case -EKEYEXPIRED:
346 ret = nfs4_delay(server->client, &exception->timeout);
347 if (ret != 0)
348 break;
349 case -NFS4ERR_RETRY_UNCACHED_REP:
350 case -NFS4ERR_OLD_STATEID:
351 exception->retry = 1;
352 break;
353 case -NFS4ERR_BADOWNER:
354 /* The following works around a Linux server bug! */
355 case -NFS4ERR_BADNAME:
356 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
357 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
358 exception->retry = 1;
359 printk(KERN_WARNING "NFS: v4 server %s "
360 "does not accept raw "
361 "uid/gids. "
362 "Reenabling the idmapper.\n",
363 server->nfs_client->cl_hostname);
364 }
365 }
366 /* We failed to handle the error */
367 return nfs4_map_errors(ret);
368wait_on_recovery:
369 ret = nfs4_wait_clnt_recover(clp);
370 if (ret == 0)
371 exception->retry = 1;
372 return ret;
373}
374
375
376static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
377{
378 spin_lock(&clp->cl_lock);
379 if (time_before(clp->cl_last_renewal,timestamp))
380 clp->cl_last_renewal = timestamp;
381 spin_unlock(&clp->cl_lock);
382}
383
384static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
385{
386 do_renew_lease(server->nfs_client, timestamp);
387}
388
389#if defined(CONFIG_NFS_V4_1)
390
391/*
392 * nfs4_free_slot - free a slot and efficiently update slot table.
393 *
394 * freeing a slot is trivially done by clearing its respective bit
395 * in the bitmap.
396 * If the freed slotid equals highest_used_slotid we want to update it
397 * so that the server would be able to size down the slot table if needed,
398 * otherwise we know that the highest_used_slotid is still in use.
399 * When updating highest_used_slotid there may be "holes" in the bitmap
400 * so we need to scan down from highest_used_slotid to 0 looking for the now
401 * highest slotid in use.
402 * If none found, highest_used_slotid is set to NFS4_NO_SLOT.
403 *
404 * Must be called while holding tbl->slot_tbl_lock
405 */
406static void
407nfs4_free_slot(struct nfs4_slot_table *tbl, u32 slotid)
408{
409 BUG_ON(slotid >= NFS4_MAX_SLOT_TABLE);
410 /* clear used bit in bitmap */
411 __clear_bit(slotid, tbl->used_slots);
412
413 /* update highest_used_slotid when it is freed */
414 if (slotid == tbl->highest_used_slotid) {
415 slotid = find_last_bit(tbl->used_slots, tbl->max_slots);
416 if (slotid < tbl->max_slots)
417 tbl->highest_used_slotid = slotid;
418 else
419 tbl->highest_used_slotid = NFS4_NO_SLOT;
420 }
421 dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
422 slotid, tbl->highest_used_slotid);
423}
424
425bool nfs4_set_task_privileged(struct rpc_task *task, void *dummy)
426{
427 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
428 return true;
429}
430
431/*
432 * Signal state manager thread if session fore channel is drained
433 */
434static void nfs4_check_drain_fc_complete(struct nfs4_session *ses)
435{
436 if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
437 rpc_wake_up_first(&ses->fc_slot_table.slot_tbl_waitq,
438 nfs4_set_task_privileged, NULL);
439 return;
440 }
441
442 if (ses->fc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
443 return;
444
445 dprintk("%s COMPLETE: Session Fore Channel Drained\n", __func__);
446 complete(&ses->fc_slot_table.complete);
447}
448
449/*
450 * Signal state manager thread if session back channel is drained
451 */
452void nfs4_check_drain_bc_complete(struct nfs4_session *ses)
453{
454 if (!test_bit(NFS4_SESSION_DRAINING, &ses->session_state) ||
455 ses->bc_slot_table.highest_used_slotid != NFS4_NO_SLOT)
456 return;
457 dprintk("%s COMPLETE: Session Back Channel Drained\n", __func__);
458 complete(&ses->bc_slot_table.complete);
459}
460
461static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
462{
463 struct nfs4_slot_table *tbl;
464
465 tbl = &res->sr_session->fc_slot_table;
466 if (!res->sr_slot) {
467 /* just wake up the next guy waiting since
468 * we may have not consumed a slot after all */
469 dprintk("%s: No slot\n", __func__);
470 return;
471 }
472
473 spin_lock(&tbl->slot_tbl_lock);
474 nfs4_free_slot(tbl, res->sr_slot - tbl->slots);
475 nfs4_check_drain_fc_complete(res->sr_session);
476 spin_unlock(&tbl->slot_tbl_lock);
477 res->sr_slot = NULL;
478}
479
480static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
481{
482 unsigned long timestamp;
483 struct nfs_client *clp;
484
485 /*
486 * sr_status remains 1 if an RPC level error occurred. The server
487 * may or may not have processed the sequence operation..
488 * Proceed as if the server received and processed the sequence
489 * operation.
490 */
491 if (res->sr_status == 1)
492 res->sr_status = NFS_OK;
493
494 /* don't increment the sequence number if the task wasn't sent */
495 if (!RPC_WAS_SENT(task))
496 goto out;
497
498 /* Check the SEQUENCE operation status */
499 switch (res->sr_status) {
500 case 0:
501 /* Update the slot's sequence and clientid lease timer */
502 ++res->sr_slot->seq_nr;
503 timestamp = res->sr_renewal_time;
504 clp = res->sr_session->clp;
505 do_renew_lease(clp, timestamp);
506 /* Check sequence flags */
507 if (res->sr_status_flags != 0)
508 nfs4_schedule_lease_recovery(clp);
509 break;
510 case -NFS4ERR_DELAY:
511 /* The server detected a resend of the RPC call and
512 * returned NFS4ERR_DELAY as per Section 2.10.6.2
513 * of RFC5661.
514 */
515 dprintk("%s: slot=%td seq=%d: Operation in progress\n",
516 __func__,
517 res->sr_slot - res->sr_session->fc_slot_table.slots,
518 res->sr_slot->seq_nr);
519 goto out_retry;
520 default:
521 /* Just update the slot sequence no. */
522 ++res->sr_slot->seq_nr;
523 }
524out:
525 /* The session may be reset by one of the error handlers. */
526 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
527 nfs41_sequence_free_slot(res);
528 return 1;
529out_retry:
530 if (!rpc_restart_call(task))
531 goto out;
532 rpc_delay(task, NFS4_POLL_RETRY_MAX);
533 return 0;
534}
535
536static int nfs4_sequence_done(struct rpc_task *task,
537 struct nfs4_sequence_res *res)
538{
539 if (res->sr_session == NULL)
540 return 1;
541 return nfs41_sequence_done(task, res);
542}
543
544/*
545 * nfs4_find_slot - efficiently look for a free slot
546 *
547 * nfs4_find_slot looks for an unset bit in the used_slots bitmap.
548 * If found, we mark the slot as used, update the highest_used_slotid,
549 * and respectively set up the sequence operation args.
550 * The slot number is returned if found, or NFS4_NO_SLOT otherwise.
551 *
552 * Note: must be called with under the slot_tbl_lock.
553 */
554static u32
555nfs4_find_slot(struct nfs4_slot_table *tbl)
556{
557 u32 slotid;
558 u32 ret_id = NFS4_NO_SLOT;
559
560 dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
561 __func__, tbl->used_slots[0], tbl->highest_used_slotid,
562 tbl->max_slots);
563 slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slots);
564 if (slotid >= tbl->max_slots)
565 goto out;
566 __set_bit(slotid, tbl->used_slots);
567 if (slotid > tbl->highest_used_slotid ||
568 tbl->highest_used_slotid == NFS4_NO_SLOT)
569 tbl->highest_used_slotid = slotid;
570 ret_id = slotid;
571out:
572 dprintk("<-- %s used_slots=%04lx highest_used=%d slotid=%d \n",
573 __func__, tbl->used_slots[0], tbl->highest_used_slotid, ret_id);
574 return ret_id;
575}
576
577static void nfs41_init_sequence(struct nfs4_sequence_args *args,
578 struct nfs4_sequence_res *res, int cache_reply)
579{
580 args->sa_session = NULL;
581 args->sa_cache_this = 0;
582 if (cache_reply)
583 args->sa_cache_this = 1;
584 res->sr_session = NULL;
585 res->sr_slot = NULL;
586}
587
588int nfs41_setup_sequence(struct nfs4_session *session,
589 struct nfs4_sequence_args *args,
590 struct nfs4_sequence_res *res,
591 struct rpc_task *task)
592{
593 struct nfs4_slot *slot;
594 struct nfs4_slot_table *tbl;
595 u32 slotid;
596
597 dprintk("--> %s\n", __func__);
598 /* slot already allocated? */
599 if (res->sr_slot != NULL)
600 return 0;
601
602 tbl = &session->fc_slot_table;
603
604 spin_lock(&tbl->slot_tbl_lock);
605 if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
606 !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
607 /* The state manager will wait until the slot table is empty */
608 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
609 spin_unlock(&tbl->slot_tbl_lock);
610 dprintk("%s session is draining\n", __func__);
611 return -EAGAIN;
612 }
613
614 if (!rpc_queue_empty(&tbl->slot_tbl_waitq) &&
615 !rpc_task_has_priority(task, RPC_PRIORITY_PRIVILEGED)) {
616 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
617 spin_unlock(&tbl->slot_tbl_lock);
618 dprintk("%s enforce FIFO order\n", __func__);
619 return -EAGAIN;
620 }
621
622 slotid = nfs4_find_slot(tbl);
623 if (slotid == NFS4_NO_SLOT) {
624 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
625 spin_unlock(&tbl->slot_tbl_lock);
626 dprintk("<-- %s: no free slots\n", __func__);
627 return -EAGAIN;
628 }
629 spin_unlock(&tbl->slot_tbl_lock);
630
631 rpc_task_set_priority(task, RPC_PRIORITY_NORMAL);
632 slot = tbl->slots + slotid;
633 args->sa_session = session;
634 args->sa_slotid = slotid;
635
636 dprintk("<-- %s slotid=%d seqid=%d\n", __func__, slotid, slot->seq_nr);
637
638 res->sr_session = session;
639 res->sr_slot = slot;
640 res->sr_renewal_time = jiffies;
641 res->sr_status_flags = 0;
642 /*
643 * sr_status is only set in decode_sequence, and so will remain
644 * set to 1 if an rpc level failure occurs.
645 */
646 res->sr_status = 1;
647 return 0;
648}
649EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
650
651int nfs4_setup_sequence(const struct nfs_server *server,
652 struct nfs4_sequence_args *args,
653 struct nfs4_sequence_res *res,
654 struct rpc_task *task)
655{
656 struct nfs4_session *session = nfs4_get_session(server);
657 int ret = 0;
658
659 if (session == NULL)
660 goto out;
661
662 dprintk("--> %s clp %p session %p sr_slot %td\n",
663 __func__, session->clp, session, res->sr_slot ?
664 res->sr_slot - session->fc_slot_table.slots : -1);
665
666 ret = nfs41_setup_sequence(session, args, res, task);
667out:
668 dprintk("<-- %s status=%d\n", __func__, ret);
669 return ret;
670}
671
672struct nfs41_call_sync_data {
673 const struct nfs_server *seq_server;
674 struct nfs4_sequence_args *seq_args;
675 struct nfs4_sequence_res *seq_res;
676};
677
678static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
679{
680 struct nfs41_call_sync_data *data = calldata;
681
682 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
683
684 if (nfs4_setup_sequence(data->seq_server, data->seq_args,
685 data->seq_res, task))
686 return;
687 rpc_call_start(task);
688}
689
690static void nfs41_call_priv_sync_prepare(struct rpc_task *task, void *calldata)
691{
692 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
693 nfs41_call_sync_prepare(task, calldata);
694}
695
696static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
697{
698 struct nfs41_call_sync_data *data = calldata;
699
700 nfs41_sequence_done(task, data->seq_res);
701}
702
703static const struct rpc_call_ops nfs41_call_sync_ops = {
704 .rpc_call_prepare = nfs41_call_sync_prepare,
705 .rpc_call_done = nfs41_call_sync_done,
706};
707
708static const struct rpc_call_ops nfs41_call_priv_sync_ops = {
709 .rpc_call_prepare = nfs41_call_priv_sync_prepare,
710 .rpc_call_done = nfs41_call_sync_done,
711};
712
713static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
714 struct nfs_server *server,
715 struct rpc_message *msg,
716 struct nfs4_sequence_args *args,
717 struct nfs4_sequence_res *res,
718 int privileged)
719{
720 int ret;
721 struct rpc_task *task;
722 struct nfs41_call_sync_data data = {
723 .seq_server = server,
724 .seq_args = args,
725 .seq_res = res,
726 };
727 struct rpc_task_setup task_setup = {
728 .rpc_client = clnt,
729 .rpc_message = msg,
730 .callback_ops = &nfs41_call_sync_ops,
731 .callback_data = &data
732 };
733
734 if (privileged)
735 task_setup.callback_ops = &nfs41_call_priv_sync_ops;
736 task = rpc_run_task(&task_setup);
737 if (IS_ERR(task))
738 ret = PTR_ERR(task);
739 else {
740 ret = task->tk_status;
741 rpc_put_task(task);
742 }
743 return ret;
744}
745
746int _nfs4_call_sync_session(struct rpc_clnt *clnt,
747 struct nfs_server *server,
748 struct rpc_message *msg,
749 struct nfs4_sequence_args *args,
750 struct nfs4_sequence_res *res,
751 int cache_reply)
752{
753 nfs41_init_sequence(args, res, cache_reply);
754 return nfs4_call_sync_sequence(clnt, server, msg, args, res, 0);
755}
756
757#else
758static inline
759void nfs41_init_sequence(struct nfs4_sequence_args *args,
760 struct nfs4_sequence_res *res, int cache_reply)
761{
762}
763
764static int nfs4_sequence_done(struct rpc_task *task,
765 struct nfs4_sequence_res *res)
766{
767 return 1;
768}
769#endif /* CONFIG_NFS_V4_1 */
770
771int _nfs4_call_sync(struct rpc_clnt *clnt,
772 struct nfs_server *server,
773 struct rpc_message *msg,
774 struct nfs4_sequence_args *args,
775 struct nfs4_sequence_res *res,
776 int cache_reply)
777{
778 nfs41_init_sequence(args, res, cache_reply);
779 return rpc_call_sync(clnt, msg, 0);
780}
781
782static inline
783int nfs4_call_sync(struct rpc_clnt *clnt,
784 struct nfs_server *server,
785 struct rpc_message *msg,
786 struct nfs4_sequence_args *args,
787 struct nfs4_sequence_res *res,
788 int cache_reply)
789{
790 return server->nfs_client->cl_mvops->call_sync(clnt, server, msg,
791 args, res, cache_reply);
792}
793
794static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
795{
796 struct nfs_inode *nfsi = NFS_I(dir);
797
798 spin_lock(&dir->i_lock);
799 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
800 if (!cinfo->atomic || cinfo->before != dir->i_version)
801 nfs_force_lookup_revalidate(dir);
802 dir->i_version = cinfo->after;
803 spin_unlock(&dir->i_lock);
804}
805
806struct nfs4_opendata {
807 struct kref kref;
808 struct nfs_openargs o_arg;
809 struct nfs_openres o_res;
810 struct nfs_open_confirmargs c_arg;
811 struct nfs_open_confirmres c_res;
812 struct nfs4_string owner_name;
813 struct nfs4_string group_name;
814 struct nfs_fattr f_attr;
815 struct dentry *dir;
816 struct dentry *dentry;
817 struct nfs4_state_owner *owner;
818 struct nfs4_state *state;
819 struct iattr attrs;
820 unsigned long timestamp;
821 unsigned int rpc_done : 1;
822 int rpc_status;
823 int cancelled;
824};
825
826
827static void nfs4_init_opendata_res(struct nfs4_opendata *p)
828{
829 p->o_res.f_attr = &p->f_attr;
830 p->o_res.seqid = p->o_arg.seqid;
831 p->c_res.seqid = p->c_arg.seqid;
832 p->o_res.server = p->o_arg.server;
833 nfs_fattr_init(&p->f_attr);
834 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
835}
836
837static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
838 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
839 const struct iattr *attrs,
840 gfp_t gfp_mask)
841{
842 struct dentry *parent = dget_parent(dentry);
843 struct inode *dir = parent->d_inode;
844 struct nfs_server *server = NFS_SERVER(dir);
845 struct nfs4_opendata *p;
846
847 p = kzalloc(sizeof(*p), gfp_mask);
848 if (p == NULL)
849 goto err;
850 p->o_arg.seqid = nfs_alloc_seqid(&sp->so_seqid, gfp_mask);
851 if (p->o_arg.seqid == NULL)
852 goto err_free;
853 nfs_sb_active(dentry->d_sb);
854 p->dentry = dget(dentry);
855 p->dir = parent;
856 p->owner = sp;
857 atomic_inc(&sp->so_count);
858 p->o_arg.fh = NFS_FH(dir);
859 p->o_arg.open_flags = flags;
860 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
861 p->o_arg.clientid = server->nfs_client->cl_clientid;
862 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
863 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
864 p->o_arg.name = &dentry->d_name;
865 p->o_arg.server = server;
866 p->o_arg.bitmask = server->attr_bitmask;
867 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
868 p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
869 if (attrs != NULL && attrs->ia_valid != 0) {
870 __be32 verf[2];
871
872 p->o_arg.u.attrs = &p->attrs;
873 memcpy(&p->attrs, attrs, sizeof(p->attrs));
874
875 verf[0] = jiffies;
876 verf[1] = current->pid;
877 memcpy(p->o_arg.u.verifier.data, verf,
878 sizeof(p->o_arg.u.verifier.data));
879 }
880 p->c_arg.fh = &p->o_res.fh;
881 p->c_arg.stateid = &p->o_res.stateid;
882 p->c_arg.seqid = p->o_arg.seqid;
883 nfs4_init_opendata_res(p);
884 kref_init(&p->kref);
885 return p;
886err_free:
887 kfree(p);
888err:
889 dput(parent);
890 return NULL;
891}
892
893static void nfs4_opendata_free(struct kref *kref)
894{
895 struct nfs4_opendata *p = container_of(kref,
896 struct nfs4_opendata, kref);
897 struct super_block *sb = p->dentry->d_sb;
898
899 nfs_free_seqid(p->o_arg.seqid);
900 if (p->state != NULL)
901 nfs4_put_open_state(p->state);
902 nfs4_put_state_owner(p->owner);
903 dput(p->dir);
904 dput(p->dentry);
905 nfs_sb_deactive(sb);
906 nfs_fattr_free_names(&p->f_attr);
907 kfree(p);
908}
909
910static void nfs4_opendata_put(struct nfs4_opendata *p)
911{
912 if (p != NULL)
913 kref_put(&p->kref, nfs4_opendata_free);
914}
915
916static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
917{
918 int ret;
919
920 ret = rpc_wait_for_completion_task(task);
921 return ret;
922}
923
924static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
925{
926 int ret = 0;
927
928 if (open_mode & (O_EXCL|O_TRUNC))
929 goto out;
930 switch (mode & (FMODE_READ|FMODE_WRITE)) {
931 case FMODE_READ:
932 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
933 && state->n_rdonly != 0;
934 break;
935 case FMODE_WRITE:
936 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
937 && state->n_wronly != 0;
938 break;
939 case FMODE_READ|FMODE_WRITE:
940 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
941 && state->n_rdwr != 0;
942 }
943out:
944 return ret;
945}
946
947static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
948{
949 if (delegation == NULL)
950 return 0;
951 if ((delegation->type & fmode) != fmode)
952 return 0;
953 if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
954 return 0;
955 nfs_mark_delegation_referenced(delegation);
956 return 1;
957}
958
959static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
960{
961 switch (fmode) {
962 case FMODE_WRITE:
963 state->n_wronly++;
964 break;
965 case FMODE_READ:
966 state->n_rdonly++;
967 break;
968 case FMODE_READ|FMODE_WRITE:
969 state->n_rdwr++;
970 }
971 nfs4_state_set_mode_locked(state, state->state | fmode);
972}
973
974static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
975{
976 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
977 nfs4_stateid_copy(&state->stateid, stateid);
978 nfs4_stateid_copy(&state->open_stateid, stateid);
979 switch (fmode) {
980 case FMODE_READ:
981 set_bit(NFS_O_RDONLY_STATE, &state->flags);
982 break;
983 case FMODE_WRITE:
984 set_bit(NFS_O_WRONLY_STATE, &state->flags);
985 break;
986 case FMODE_READ|FMODE_WRITE:
987 set_bit(NFS_O_RDWR_STATE, &state->flags);
988 }
989}
990
991static void nfs_set_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
992{
993 write_seqlock(&state->seqlock);
994 nfs_set_open_stateid_locked(state, stateid, fmode);
995 write_sequnlock(&state->seqlock);
996}
997
998static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
999{
1000 /*
1001 * Protect the call to nfs4_state_set_mode_locked and
1002 * serialise the stateid update
1003 */
1004 write_seqlock(&state->seqlock);
1005 if (deleg_stateid != NULL) {
1006 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1007 set_bit(NFS_DELEGATED_STATE, &state->flags);
1008 }
1009 if (open_stateid != NULL)
1010 nfs_set_open_stateid_locked(state, open_stateid, fmode);
1011 write_sequnlock(&state->seqlock);
1012 spin_lock(&state->owner->so_lock);
1013 update_open_stateflags(state, fmode);
1014 spin_unlock(&state->owner->so_lock);
1015}
1016
1017static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
1018{
1019 struct nfs_inode *nfsi = NFS_I(state->inode);
1020 struct nfs_delegation *deleg_cur;
1021 int ret = 0;
1022
1023 fmode &= (FMODE_READ|FMODE_WRITE);
1024
1025 rcu_read_lock();
1026 deleg_cur = rcu_dereference(nfsi->delegation);
1027 if (deleg_cur == NULL)
1028 goto no_delegation;
1029
1030 spin_lock(&deleg_cur->lock);
1031 if (nfsi->delegation != deleg_cur ||
1032 (deleg_cur->type & fmode) != fmode)
1033 goto no_delegation_unlock;
1034
1035 if (delegation == NULL)
1036 delegation = &deleg_cur->stateid;
1037 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1038 goto no_delegation_unlock;
1039
1040 nfs_mark_delegation_referenced(deleg_cur);
1041 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
1042 ret = 1;
1043no_delegation_unlock:
1044 spin_unlock(&deleg_cur->lock);
1045no_delegation:
1046 rcu_read_unlock();
1047
1048 if (!ret && open_stateid != NULL) {
1049 __update_open_stateid(state, open_stateid, NULL, fmode);
1050 ret = 1;
1051 }
1052
1053 return ret;
1054}
1055
1056
1057static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1058{
1059 struct nfs_delegation *delegation;
1060
1061 rcu_read_lock();
1062 delegation = rcu_dereference(NFS_I(inode)->delegation);
1063 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1064 rcu_read_unlock();
1065 return;
1066 }
1067 rcu_read_unlock();
1068 nfs_inode_return_delegation(inode);
1069}
1070
1071static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1072{
1073 struct nfs4_state *state = opendata->state;
1074 struct nfs_inode *nfsi = NFS_I(state->inode);
1075 struct nfs_delegation *delegation;
1076 int open_mode = opendata->o_arg.open_flags & (O_EXCL|O_TRUNC);
1077 fmode_t fmode = opendata->o_arg.fmode;
1078 nfs4_stateid stateid;
1079 int ret = -EAGAIN;
1080
1081 for (;;) {
1082 if (can_open_cached(state, fmode, open_mode)) {
1083 spin_lock(&state->owner->so_lock);
1084 if (can_open_cached(state, fmode, open_mode)) {
1085 update_open_stateflags(state, fmode);
1086 spin_unlock(&state->owner->so_lock);
1087 goto out_return_state;
1088 }
1089 spin_unlock(&state->owner->so_lock);
1090 }
1091 rcu_read_lock();
1092 delegation = rcu_dereference(nfsi->delegation);
1093 if (!can_open_delegated(delegation, fmode)) {
1094 rcu_read_unlock();
1095 break;
1096 }
1097 /* Save the delegation */
1098 nfs4_stateid_copy(&stateid, &delegation->stateid);
1099 rcu_read_unlock();
1100 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1101 if (ret != 0)
1102 goto out;
1103 ret = -EAGAIN;
1104
1105 /* Try to update the stateid using the delegation */
1106 if (update_open_stateid(state, NULL, &stateid, fmode))
1107 goto out_return_state;
1108 }
1109out:
1110 return ERR_PTR(ret);
1111out_return_state:
1112 atomic_inc(&state->count);
1113 return state;
1114}
1115
1116static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1117{
1118 struct inode *inode;
1119 struct nfs4_state *state = NULL;
1120 struct nfs_delegation *delegation;
1121 int ret;
1122
1123 if (!data->rpc_done) {
1124 state = nfs4_try_open_cached(data);
1125 goto out;
1126 }
1127
1128 ret = -EAGAIN;
1129 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1130 goto err;
1131 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr);
1132 ret = PTR_ERR(inode);
1133 if (IS_ERR(inode))
1134 goto err;
1135 ret = -ENOMEM;
1136 state = nfs4_get_open_state(inode, data->owner);
1137 if (state == NULL)
1138 goto err_put_inode;
1139 if (data->o_res.delegation_type != 0) {
1140 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
1141 int delegation_flags = 0;
1142
1143 rcu_read_lock();
1144 delegation = rcu_dereference(NFS_I(inode)->delegation);
1145 if (delegation)
1146 delegation_flags = delegation->flags;
1147 rcu_read_unlock();
1148 if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
1149 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1150 "returning a delegation for "
1151 "OPEN(CLAIM_DELEGATE_CUR)\n",
1152 clp->cl_hostname);
1153 } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1154 nfs_inode_set_delegation(state->inode,
1155 data->owner->so_cred,
1156 &data->o_res);
1157 else
1158 nfs_inode_reclaim_delegation(state->inode,
1159 data->owner->so_cred,
1160 &data->o_res);
1161 }
1162
1163 update_open_stateid(state, &data->o_res.stateid, NULL,
1164 data->o_arg.fmode);
1165 iput(inode);
1166out:
1167 return state;
1168err_put_inode:
1169 iput(inode);
1170err:
1171 return ERR_PTR(ret);
1172}
1173
1174static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1175{
1176 struct nfs_inode *nfsi = NFS_I(state->inode);
1177 struct nfs_open_context *ctx;
1178
1179 spin_lock(&state->inode->i_lock);
1180 list_for_each_entry(ctx, &nfsi->open_files, list) {
1181 if (ctx->state != state)
1182 continue;
1183 get_nfs_open_context(ctx);
1184 spin_unlock(&state->inode->i_lock);
1185 return ctx;
1186 }
1187 spin_unlock(&state->inode->i_lock);
1188 return ERR_PTR(-ENOENT);
1189}
1190
1191static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx, struct nfs4_state *state)
1192{
1193 struct nfs4_opendata *opendata;
1194
1195 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0, NULL, GFP_NOFS);
1196 if (opendata == NULL)
1197 return ERR_PTR(-ENOMEM);
1198 opendata->state = state;
1199 atomic_inc(&state->count);
1200 return opendata;
1201}
1202
1203static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmode, struct nfs4_state **res)
1204{
1205 struct nfs4_state *newstate;
1206 int ret;
1207
1208 opendata->o_arg.open_flags = 0;
1209 opendata->o_arg.fmode = fmode;
1210 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1211 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1212 nfs4_init_opendata_res(opendata);
1213 ret = _nfs4_recover_proc_open(opendata);
1214 if (ret != 0)
1215 return ret;
1216 newstate = nfs4_opendata_to_nfs4_state(opendata);
1217 if (IS_ERR(newstate))
1218 return PTR_ERR(newstate);
1219 nfs4_close_state(newstate, fmode);
1220 *res = newstate;
1221 return 0;
1222}
1223
1224static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1225{
1226 struct nfs4_state *newstate;
1227 int ret;
1228
1229 /* memory barrier prior to reading state->n_* */
1230 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1231 smp_rmb();
1232 if (state->n_rdwr != 0) {
1233 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1234 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE, &newstate);
1235 if (ret != 0)
1236 return ret;
1237 if (newstate != state)
1238 return -ESTALE;
1239 }
1240 if (state->n_wronly != 0) {
1241 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1242 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE, &newstate);
1243 if (ret != 0)
1244 return ret;
1245 if (newstate != state)
1246 return -ESTALE;
1247 }
1248 if (state->n_rdonly != 0) {
1249 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1250 ret = nfs4_open_recover_helper(opendata, FMODE_READ, &newstate);
1251 if (ret != 0)
1252 return ret;
1253 if (newstate != state)
1254 return -ESTALE;
1255 }
1256 /*
1257 * We may have performed cached opens for all three recoveries.
1258 * Check if we need to update the current stateid.
1259 */
1260 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1261 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1262 write_seqlock(&state->seqlock);
1263 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1264 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1265 write_sequnlock(&state->seqlock);
1266 }
1267 return 0;
1268}
1269
1270/*
1271 * OPEN_RECLAIM:
1272 * reclaim state on the server after a reboot.
1273 */
1274static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1275{
1276 struct nfs_delegation *delegation;
1277 struct nfs4_opendata *opendata;
1278 fmode_t delegation_type = 0;
1279 int status;
1280
1281 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1282 if (IS_ERR(opendata))
1283 return PTR_ERR(opendata);
1284 opendata->o_arg.claim = NFS4_OPEN_CLAIM_PREVIOUS;
1285 opendata->o_arg.fh = NFS_FH(state->inode);
1286 rcu_read_lock();
1287 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1288 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1289 delegation_type = delegation->type;
1290 rcu_read_unlock();
1291 opendata->o_arg.u.delegation_type = delegation_type;
1292 status = nfs4_open_recover(opendata, state);
1293 nfs4_opendata_put(opendata);
1294 return status;
1295}
1296
1297static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1298{
1299 struct nfs_server *server = NFS_SERVER(state->inode);
1300 struct nfs4_exception exception = { };
1301 int err;
1302 do {
1303 err = _nfs4_do_open_reclaim(ctx, state);
1304 if (err != -NFS4ERR_DELAY)
1305 break;
1306 nfs4_handle_exception(server, err, &exception);
1307 } while (exception.retry);
1308 return err;
1309}
1310
1311static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1312{
1313 struct nfs_open_context *ctx;
1314 int ret;
1315
1316 ctx = nfs4_state_find_open_context(state);
1317 if (IS_ERR(ctx))
1318 return PTR_ERR(ctx);
1319 ret = nfs4_do_open_reclaim(ctx, state);
1320 put_nfs_open_context(ctx);
1321 return ret;
1322}
1323
1324static int _nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1325{
1326 struct nfs4_opendata *opendata;
1327 int ret;
1328
1329 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1330 if (IS_ERR(opendata))
1331 return PTR_ERR(opendata);
1332 opendata->o_arg.claim = NFS4_OPEN_CLAIM_DELEGATE_CUR;
1333 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1334 ret = nfs4_open_recover(opendata, state);
1335 nfs4_opendata_put(opendata);
1336 return ret;
1337}
1338
1339int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
1340{
1341 struct nfs4_exception exception = { };
1342 struct nfs_server *server = NFS_SERVER(state->inode);
1343 int err;
1344 do {
1345 err = _nfs4_open_delegation_recall(ctx, state, stateid);
1346 switch (err) {
1347 case 0:
1348 case -ENOENT:
1349 case -ESTALE:
1350 goto out;
1351 case -NFS4ERR_BADSESSION:
1352 case -NFS4ERR_BADSLOT:
1353 case -NFS4ERR_BAD_HIGH_SLOT:
1354 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1355 case -NFS4ERR_DEADSESSION:
1356 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
1357 goto out;
1358 case -NFS4ERR_STALE_CLIENTID:
1359 case -NFS4ERR_STALE_STATEID:
1360 case -NFS4ERR_EXPIRED:
1361 /* Don't recall a delegation if it was lost */
1362 nfs4_schedule_lease_recovery(server->nfs_client);
1363 goto out;
1364 case -ERESTARTSYS:
1365 /*
1366 * The show must go on: exit, but mark the
1367 * stateid as needing recovery.
1368 */
1369 case -NFS4ERR_DELEG_REVOKED:
1370 case -NFS4ERR_ADMIN_REVOKED:
1371 case -NFS4ERR_BAD_STATEID:
1372 nfs_inode_find_state_and_recover(state->inode,
1373 stateid);
1374 nfs4_schedule_stateid_recovery(server, state);
1375 case -EKEYEXPIRED:
1376 /*
1377 * User RPCSEC_GSS context has expired.
1378 * We cannot recover this stateid now, so
1379 * skip it and allow recovery thread to
1380 * proceed.
1381 */
1382 case -ENOMEM:
1383 err = 0;
1384 goto out;
1385 }
1386 err = nfs4_handle_exception(server, err, &exception);
1387 } while (exception.retry);
1388out:
1389 return err;
1390}
1391
1392static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1393{
1394 struct nfs4_opendata *data = calldata;
1395
1396 data->rpc_status = task->tk_status;
1397 if (data->rpc_status == 0) {
1398 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
1399 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1400 renew_lease(data->o_res.server, data->timestamp);
1401 data->rpc_done = 1;
1402 }
1403}
1404
1405static void nfs4_open_confirm_release(void *calldata)
1406{
1407 struct nfs4_opendata *data = calldata;
1408 struct nfs4_state *state = NULL;
1409
1410 /* If this request hasn't been cancelled, do nothing */
1411 if (data->cancelled == 0)
1412 goto out_free;
1413 /* In case of error, no cleanup! */
1414 if (!data->rpc_done)
1415 goto out_free;
1416 state = nfs4_opendata_to_nfs4_state(data);
1417 if (!IS_ERR(state))
1418 nfs4_close_state(state, data->o_arg.fmode);
1419out_free:
1420 nfs4_opendata_put(data);
1421}
1422
1423static const struct rpc_call_ops nfs4_open_confirm_ops = {
1424 .rpc_call_done = nfs4_open_confirm_done,
1425 .rpc_release = nfs4_open_confirm_release,
1426};
1427
1428/*
1429 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
1430 */
1431static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
1432{
1433 struct nfs_server *server = NFS_SERVER(data->dir->d_inode);
1434 struct rpc_task *task;
1435 struct rpc_message msg = {
1436 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
1437 .rpc_argp = &data->c_arg,
1438 .rpc_resp = &data->c_res,
1439 .rpc_cred = data->owner->so_cred,
1440 };
1441 struct rpc_task_setup task_setup_data = {
1442 .rpc_client = server->client,
1443 .rpc_message = &msg,
1444 .callback_ops = &nfs4_open_confirm_ops,
1445 .callback_data = data,
1446 .workqueue = nfsiod_workqueue,
1447 .flags = RPC_TASK_ASYNC,
1448 };
1449 int status;
1450
1451 kref_get(&data->kref);
1452 data->rpc_done = 0;
1453 data->rpc_status = 0;
1454 data->timestamp = jiffies;
1455 task = rpc_run_task(&task_setup_data);
1456 if (IS_ERR(task))
1457 return PTR_ERR(task);
1458 status = nfs4_wait_for_completion_rpc_task(task);
1459 if (status != 0) {
1460 data->cancelled = 1;
1461 smp_wmb();
1462 } else
1463 status = data->rpc_status;
1464 rpc_put_task(task);
1465 return status;
1466}
1467
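/*
 * Decide whether the OPEN actually needs to go on the wire: a cached
 * open or a usable delegation lets us skip the RPC entirely. Otherwise
 * fill in the clientid, switch to OPEN_NOATTR for reboot reclaims, and
 * set up the session sequence before starting the call.
 */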
1468static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
1469{
1470 struct nfs4_opendata *data = calldata;
1471 struct nfs4_state_owner *sp = data->owner;
1472
1473 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
1474 return;
1475 /*
1476 * Check if we still need to send an OPEN call, or if we can use
1477 * a delegation instead.
1478 */
1479 if (data->state != NULL) {
1480 struct nfs_delegation *delegation;
1481
1482 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
1483 goto out_no_action;
1484 rcu_read_lock();
1485 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
1486 if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
1487 can_open_delegated(delegation, data->o_arg.fmode))
1488 goto unlock_no_action;
1489 rcu_read_unlock();
1490 }
1491 /* Update client id. */
1492 data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid;
1493 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) {
1494 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
1495 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
1496 }
1497 data->timestamp = jiffies;
1498 if (nfs4_setup_sequence(data->o_arg.server,
1499 &data->o_arg.seq_args,
1500 &data->o_res.seq_res, task))
1501 return;
1502 rpc_call_start(task);
1503 return;
1504unlock_no_action:
1505 rcu_read_unlock();
1506out_no_action:
1507 task->tk_action = NULL;
1508
1509}
1510
1511static void nfs4_recover_open_prepare(struct rpc_task *task, void *calldata)
1512{
1513 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
1514 nfs4_open_prepare(task, calldata);
1515}
1516
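/*
 * OPEN completion: reject anything that is not a regular file, renew
 * the lease, and mark the open-owner seqid as confirmed when the
 * server does not require an OPEN_CONFIRM.
 */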
1517static void nfs4_open_done(struct rpc_task *task, void *calldata)
1518{
1519 struct nfs4_opendata *data = calldata;
1520
1521 data->rpc_status = task->tk_status;
1522
1523 if (!nfs4_sequence_done(task, &data->o_res.seq_res))
1524 return;
1525
1526 if (task->tk_status == 0) {
1527 switch (data->o_res.f_attr->mode & S_IFMT) {
1528 case S_IFREG:
1529 break;
1530 case S_IFLNK:
1531 data->rpc_status = -ELOOP;
1532 break;
1533 case S_IFDIR:
1534 data->rpc_status = -EISDIR;
1535 break;
1536 default:
1537 data->rpc_status = -ENOTDIR;
1538 }
1539 renew_lease(data->o_res.server, data->timestamp);
1540 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
1541 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1542 }
1543 data->rpc_done = 1;
1544}
1545
1546static void nfs4_open_release(void *calldata)
1547{
1548 struct nfs4_opendata *data = calldata;
1549 struct nfs4_state *state = NULL;
1550
1551 /* If this request hasn't been cancelled, do nothing */
1552 if (data->cancelled == 0)
1553 goto out_free;
1554 /* In case of error, no cleanup! */
1555 if (data->rpc_status != 0 || !data->rpc_done)
1556 goto out_free;
1557 /* In case we need an open_confirm, no cleanup! */
1558 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
1559 goto out_free;
1560 state = nfs4_opendata_to_nfs4_state(data);
1561 if (!IS_ERR(state))
1562 nfs4_close_state(state, data->o_arg.fmode);
1563out_free:
1564 nfs4_opendata_put(data);
1565}
1566
1567static const struct rpc_call_ops nfs4_open_ops = {
1568 .rpc_call_prepare = nfs4_open_prepare,
1569 .rpc_call_done = nfs4_open_done,
1570 .rpc_release = nfs4_open_release,
1571};
1572
1573static const struct rpc_call_ops nfs4_recover_open_ops = {
1574 .rpc_call_prepare = nfs4_recover_open_prepare,
1575 .rpc_call_done = nfs4_open_done,
1576 .rpc_release = nfs4_open_release,
1577};
1578
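/*
 * Set up and run the OPEN RPC asynchronously, then wait for it to
 * complete. If the wait is interrupted, mark the request as cancelled
 * so that the release callback cleans up any state the server created.
 */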
1579static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
1580{
1581 struct inode *dir = data->dir->d_inode;
1582 struct nfs_server *server = NFS_SERVER(dir);
1583 struct nfs_openargs *o_arg = &data->o_arg;
1584 struct nfs_openres *o_res = &data->o_res;
1585 struct rpc_task *task;
1586 struct rpc_message msg = {
1587 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
1588 .rpc_argp = o_arg,
1589 .rpc_resp = o_res,
1590 .rpc_cred = data->owner->so_cred,
1591 };
1592 struct rpc_task_setup task_setup_data = {
1593 .rpc_client = server->client,
1594 .rpc_message = &msg,
1595 .callback_ops = &nfs4_open_ops,
1596 .callback_data = data,
1597 .workqueue = nfsiod_workqueue,
1598 .flags = RPC_TASK_ASYNC,
1599 };
1600 int status;
1601
1602 nfs41_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
1603 kref_get(&data->kref);
1604 data->rpc_done = 0;
1605 data->rpc_status = 0;
1606 data->cancelled = 0;
1607 if (isrecover)
1608 task_setup_data.callback_ops = &nfs4_recover_open_ops;
1609 task = rpc_run_task(&task_setup_data);
1610 if (IS_ERR(task))
1611 return PTR_ERR(task);
1612 status = nfs4_wait_for_completion_rpc_task(task);
1613 if (status != 0) {
1614 data->cancelled = 1;
1615 smp_wmb();
1616 } else
1617 status = data->rpc_status;
1618 rpc_put_task(task);
1619
1620 return status;
1621}
1622
1623static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
1624{
1625 struct inode *dir = data->dir->d_inode;
1626 struct nfs_openres *o_res = &data->o_res;
1627 int status;
1628
1629 status = nfs4_run_open_task(data, 1);
1630 if (status != 0 || !data->rpc_done)
1631 return status;
1632
1633 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
1634
1635 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1636 status = _nfs4_proc_open_confirm(data);
1637 if (status != 0)
1638 return status;
1639 }
1640
1641 return status;
1642}
1643
1644/*
1645 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
1646 */
1647static int _nfs4_proc_open(struct nfs4_opendata *data)
1648{
1649 struct inode *dir = data->dir->d_inode;
1650 struct nfs_server *server = NFS_SERVER(dir);
1651 struct nfs_openargs *o_arg = &data->o_arg;
1652 struct nfs_openres *o_res = &data->o_res;
1653 int status;
1654
1655 status = nfs4_run_open_task(data, 0);
1656 if (!data->rpc_done)
1657 return status;
1658 if (status != 0) {
1659 if (status == -NFS4ERR_BADNAME &&
1660 !(o_arg->open_flags & O_CREAT))
1661 return -ENOENT;
1662 return status;
1663 }
1664
1665 nfs_fattr_map_and_free_names(server, &data->f_attr);
1666
1667 if (o_arg->open_flags & O_CREAT)
1668 update_changeattr(dir, &o_res->cinfo);
1669 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
1670 server->caps &= ~NFS_CAP_POSIX_LOCK;
1671 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1672 status = _nfs4_proc_open_confirm(data);
1673 if (status != 0)
1674 return status;
1675 }
1676 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
1677 _nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr);
1678 return 0;
1679}
1680
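/*
 * Wait for lease recovery to complete, kicking the state manager for
 * as long as the lease is marked expired or in need of a check.
 * Gives up with -EIO after a bounded number of attempts.
 */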
1681static int nfs4_client_recover_expired_lease(struct nfs_client *clp)
1682{
1683 unsigned int loop;
1684 int ret;
1685
1686 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
1687 ret = nfs4_wait_clnt_recover(clp);
1688 if (ret != 0)
1689 break;
1690 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
1691 !test_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state))
1692 break;
1693 nfs4_schedule_state_manager(clp);
1694 ret = -EIO;
1695 }
1696 return ret;
1697}
1698
1699static int nfs4_recover_expired_lease(struct nfs_server *server)
1700{
1701 return nfs4_client_recover_expired_lease(server->nfs_client);
1702}
1703
1704/*
1705 * OPEN_EXPIRED:
1706 * reclaim state on the server after a network partition.
1707 * Assumes caller holds the appropriate lock
1708 */
1709static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1710{
1711 struct nfs4_opendata *opendata;
1712 int ret;
1713
1714 opendata = nfs4_open_recoverdata_alloc(ctx, state);
1715 if (IS_ERR(opendata))
1716 return PTR_ERR(opendata);
1717 ret = nfs4_open_recover(opendata, state);
1718 if (ret == -ESTALE)
1719 d_drop(ctx->dentry);
1720 nfs4_opendata_put(opendata);
1721 return ret;
1722}
1723
1724static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
1725{
1726 struct nfs_server *server = NFS_SERVER(state->inode);
1727 struct nfs4_exception exception = { };
1728 int err;
1729
1730 do {
1731 err = _nfs4_open_expired(ctx, state);
1732 switch (err) {
1733 default:
1734 goto out;
1735 case -NFS4ERR_GRACE:
1736 case -NFS4ERR_DELAY:
1737 nfs4_handle_exception(server, err, &exception);
1738 err = 0;
1739 }
1740 } while (exception.retry);
1741out:
1742 return err;
1743}
1744
1745static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1746{
1747 struct nfs_open_context *ctx;
1748 int ret;
1749
1750 ctx = nfs4_state_find_open_context(state);
1751 if (IS_ERR(ctx))
1752 return PTR_ERR(ctx);
1753 ret = nfs4_do_open_expired(ctx, state);
1754 put_nfs_open_context(ctx);
1755 return ret;
1756}
1757
1758#if defined(CONFIG_NFS_V4_1)
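/*
 * If any of the given state flags are set, ask the server whether the
 * stateid is still valid. If it is not, free it on the server and
 * clear the flags so that the state gets re-established.
 */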
1759static int nfs41_check_expired_stateid(struct nfs4_state *state, nfs4_stateid *stateid, unsigned int flags)
1760{
1761 int status = NFS_OK;
1762 struct nfs_server *server = NFS_SERVER(state->inode);
1763
1764 if (state->flags & flags) {
1765 status = nfs41_test_stateid(server, stateid);
1766 if (status != NFS_OK) {
1767 nfs41_free_stateid(server, stateid);
1768 state->flags &= ~flags;
1769 }
1770 }
1771 return status;
1772}
1773
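/*
 * NFSv4.1 open state recovery: test both the delegation stateid and
 * the open stateid against the server, and fall back to a full reclaim
 * OPEN only if one of them turned out to be invalid.
 */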
1774static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
1775{
1776 int deleg_status, open_status;
1777 int deleg_flags = 1 << NFS_DELEGATED_STATE;
1778 int open_flags = (1 << NFS_O_RDONLY_STATE) | (1 << NFS_O_WRONLY_STATE) | (1 << NFS_O_RDWR_STATE);
1779
1780 deleg_status = nfs41_check_expired_stateid(state, &state->stateid, deleg_flags);
1781 open_status = nfs41_check_expired_stateid(state, &state->open_stateid, open_flags);
1782
1783 if ((deleg_status == NFS_OK) && (open_status == NFS_OK))
1784 return NFS_OK;
1785 return nfs4_open_expired(sp, state);
1786}
1787#endif
1788
1789/*
1790 * on an EXCLUSIVE create, the server should send back a bitmask with FATTR4-*
1791 * fields corresponding to attributes that were used to store the verifier.
1792 * Make sure we clobber those fields in the later setattr call
1793 */
1794static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct iattr *sattr)
1795{
1796 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
1797 !(sattr->ia_valid & ATTR_ATIME_SET))
1798 sattr->ia_valid |= ATTR_ATIME;
1799
1800 if ((opendata->o_res.attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
1801 !(sattr->ia_valid & ATTR_MTIME_SET))
1802 sattr->ia_valid |= ATTR_MTIME;
1803}
1804
1805/*
1806 * Returns a referenced nfs4_state
1807 */
1808static int _nfs4_do_open(struct inode *dir,
1809 struct dentry *dentry,
1810 fmode_t fmode,
1811 int flags,
1812 struct iattr *sattr,
1813 struct rpc_cred *cred,
1814 struct nfs4_state **res,
1815 struct nfs4_threshold **ctx_th)
1816{
1817 struct nfs4_state_owner *sp;
1818 struct nfs4_state *state = NULL;
1819 struct nfs_server *server = NFS_SERVER(dir);
1820 struct nfs4_opendata *opendata;
1821 int status;
1822
1823 /* Protect against reboot recovery conflicts */
1824 status = -ENOMEM;
1825 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
1826 if (sp == NULL) {
1827 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
1828 goto out_err;
1829 }
1830 status = nfs4_recover_expired_lease(server);
1831 if (status != 0)
1832 goto err_put_state_owner;
1833 if (dentry->d_inode != NULL)
1834 nfs4_return_incompatible_delegation(dentry->d_inode, fmode);
1835 status = -ENOMEM;
1836 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr, GFP_KERNEL);
1837 if (opendata == NULL)
1838 goto err_put_state_owner;
1839
1840 if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
1841 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
1842 if (!opendata->f_attr.mdsthreshold)
1843 goto err_opendata_put;
1844 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
1845 }
1846 if (dentry->d_inode != NULL)
1847 opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
1848
1849 status = _nfs4_proc_open(opendata);
1850 if (status != 0)
1851 goto err_opendata_put;
1852
1853 state = nfs4_opendata_to_nfs4_state(opendata);
1854 status = PTR_ERR(state);
1855 if (IS_ERR(state))
1856 goto err_opendata_put;
1857 if (server->caps & NFS_CAP_POSIX_LOCK)
1858 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
1859
1860 if (opendata->o_arg.open_flags & O_EXCL) {
1861 nfs4_exclusive_attrset(opendata, sattr);
1862
1863 nfs_fattr_init(opendata->o_res.f_attr);
1864 status = nfs4_do_setattr(state->inode, cred,
1865 opendata->o_res.f_attr, sattr,
1866 state);
1867 if (status == 0)
1868 nfs_setattr_update_inode(state->inode, sattr);
1869 nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
1870 }
1871
1872 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
1873 *ctx_th = opendata->f_attr.mdsthreshold;
1874 else
1875 kfree(opendata->f_attr.mdsthreshold);
1876 opendata->f_attr.mdsthreshold = NULL;
1877
1878 nfs4_opendata_put(opendata);
1879 nfs4_put_state_owner(sp);
1880 *res = state;
1881 return 0;
1882err_opendata_put:
1883 kfree(opendata->f_attr.mdsthreshold);
1884 nfs4_opendata_put(opendata);
1885err_put_state_owner:
1886 nfs4_put_state_owner(sp);
1887out_err:
1888 *res = NULL;
1889 return status;
1890}
1891
1892
1893static struct nfs4_state *nfs4_do_open(struct inode *dir,
1894 struct dentry *dentry,
1895 fmode_t fmode,
1896 int flags,
1897 struct iattr *sattr,
1898 struct rpc_cred *cred,
1899 struct nfs4_threshold **ctx_th)
1900{
1901 struct nfs4_exception exception = { };
1902 struct nfs4_state *res;
1903 int status;
1904
1905 fmode &= FMODE_READ|FMODE_WRITE;
1906 do {
1907 status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred,
1908 &res, ctx_th);
1909 if (status == 0)
1910 break;
1911 /* NOTE: BAD_SEQID means the server and client disagree about the
1912 * book-keeping w.r.t. state-changing operations
1913 * (OPEN/CLOSE/LOCK/LOCKU...)
1914 * It is actually a sign of a bug on the client or on the server.
1915 *
1916 * If we receive a BAD_SEQID error in the particular case of
1917 * doing an OPEN, we assume that nfs_increment_open_seqid() will
1918 * have unhashed the old state_owner for us, and that we can
1919 * therefore safely retry using a new one. We should still warn
1920 * the user though...
1921 */
1922 if (status == -NFS4ERR_BAD_SEQID) {
1923 pr_warn_ratelimited("NFS: v4 server %s "
1924 " returned a bad sequence-id error!\n",
1925 NFS_SERVER(dir)->nfs_client->cl_hostname);
1926 exception.retry = 1;
1927 continue;
1928 }
1929 /*
1930 * BAD_STATEID on OPEN means that the server cancelled our
1931 * state before it received the OPEN_CONFIRM.
1932 * Recover by retrying the request as per the discussion
1933 * on Page 181 of RFC3530.
1934 */
1935 if (status == -NFS4ERR_BAD_STATEID) {
1936 exception.retry = 1;
1937 continue;
1938 }
1939 if (status == -EAGAIN) {
1940 /* We must have found a delegation */
1941 exception.retry = 1;
1942 continue;
1943 }
1944 res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
1945 status, &exception));
1946 } while (exception.retry);
1947 return res;
1948}
1949
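/*
 * Issue the SETATTR. The stateid is taken from the open state if we
 * have one, otherwise from a write delegation, and failing that the
 * zero stateid is used. Success against an open file renews the lease.
 */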
1950static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
1951 struct nfs_fattr *fattr, struct iattr *sattr,
1952 struct nfs4_state *state)
1953{
1954 struct nfs_server *server = NFS_SERVER(inode);
1955 struct nfs_setattrargs arg = {
1956 .fh = NFS_FH(inode),
1957 .iap = sattr,
1958 .server = server,
1959 .bitmask = server->attr_bitmask,
1960 };
1961 struct nfs_setattrres res = {
1962 .fattr = fattr,
1963 .server = server,
1964 };
1965 struct rpc_message msg = {
1966 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
1967 .rpc_argp = &arg,
1968 .rpc_resp = &res,
1969 .rpc_cred = cred,
1970 };
1971 unsigned long timestamp = jiffies;
1972 int status;
1973
1974 nfs_fattr_init(fattr);
1975
1976 if (state != NULL) {
1977 nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
1978 current->files, current->tgid);
1979 } else if (nfs4_copy_delegation_stateid(&arg.stateid, inode,
1980 FMODE_WRITE)) {
1981 /* Use that stateid */
1982 } else
1983 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
1984
1985 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
1986 if (status == 0 && state != NULL)
1987 renew_lease(server, timestamp);
1988 return status;
1989}
1990
1991static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
1992 struct nfs_fattr *fattr, struct iattr *sattr,
1993 struct nfs4_state *state)
1994{
1995 struct nfs_server *server = NFS_SERVER(inode);
1996 struct nfs4_exception exception = {
1997 .state = state,
1998 .inode = inode,
1999 };
2000 int err;
2001 do {
2002 err = _nfs4_do_setattr(inode, cred, fattr, sattr, state);
2003 switch (err) {
2004 case -NFS4ERR_OPENMODE:
2005 if (state && !(state->state & FMODE_WRITE)) {
2006 err = -EBADF;
2007 if (sattr->ia_valid & ATTR_OPEN)
2008 err = -EACCES;
2009 goto out;
2010 }
2011 }
2012 err = nfs4_handle_exception(server, err, &exception);
2013 } while (exception.retry);
2014out:
2015 return err;
2016}
2017
2018struct nfs4_closedata {
2019 struct inode *inode;
2020 struct nfs4_state *state;
2021 struct nfs_closeargs arg;
2022 struct nfs_closeres res;
2023 struct nfs_fattr fattr;
2024 unsigned long timestamp;
2025 bool roc;
2026 u32 roc_barrier;
2027};
2028
2029static void nfs4_free_closedata(void *data)
2030{
2031 struct nfs4_closedata *calldata = data;
2032 struct nfs4_state_owner *sp = calldata->state->owner;
2033 struct super_block *sb = calldata->state->inode->i_sb;
2034
2035 if (calldata->roc)
2036 pnfs_roc_release(calldata->state->inode);
2037 nfs4_put_open_state(calldata->state);
2038 nfs_free_seqid(calldata->arg.seqid);
2039 nfs4_put_state_owner(sp);
2040 nfs_sb_deactive(sb);
2041 kfree(calldata);
2042}
2043
2044static void nfs4_close_clear_stateid_flags(struct nfs4_state *state,
2045 fmode_t fmode)
2046{
2047 spin_lock(&state->owner->so_lock);
2048 if (!(fmode & FMODE_READ))
2049 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2050 if (!(fmode & FMODE_WRITE))
2051 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2052 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2053 spin_unlock(&state->owner->so_lock);
2054}
2055
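/*
 * CLOSE/OPEN_DOWNGRADE completion: on success record the new open
 * stateid and renew the lease. Stale or expired stateid errors are
 * ignored when the file is being closed completely; anything else goes
 * through the generic async error handler.
 */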
2056static void nfs4_close_done(struct rpc_task *task, void *data)
2057{
2058 struct nfs4_closedata *calldata = data;
2059 struct nfs4_state *state = calldata->state;
2060 struct nfs_server *server = NFS_SERVER(calldata->inode);
2061
2062 dprintk("%s: begin!\n", __func__);
2063 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
2064 return;
2065 /* We are done with the inode, and in the process of freeing
2066 * the state_owner. We keep this data around to process errors.
2067 */
2068 switch (task->tk_status) {
2069 case 0:
2070 if (calldata->roc)
2071 pnfs_roc_set_barrier(state->inode,
2072 calldata->roc_barrier);
2073 nfs_set_open_stateid(state, &calldata->res.stateid, 0);
2074 renew_lease(server, calldata->timestamp);
2075 nfs4_close_clear_stateid_flags(state,
2076 calldata->arg.fmode);
2077 break;
2078 case -NFS4ERR_STALE_STATEID:
2079 case -NFS4ERR_OLD_STATEID:
2080 case -NFS4ERR_BAD_STATEID:
2081 case -NFS4ERR_EXPIRED:
2082 if (calldata->arg.fmode == 0)
2083 break;
2084 default:
2085 if (nfs4_async_handle_error(task, server, state) == -EAGAIN)
2086 rpc_restart_call_prepare(task);
2087 }
2088 nfs_release_seqid(calldata->arg.seqid);
2089 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2090 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2091}
2092
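/*
 * Work out what this CLOSE request should actually do: based on the
 * remaining opens for the state, either downgrade the share mode
 * (OPEN_DOWNGRADE), send a full CLOSE, or skip the RPC entirely.
 */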
2093static void nfs4_close_prepare(struct rpc_task *task, void *data)
2094{
2095 struct nfs4_closedata *calldata = data;
2096 struct nfs4_state *state = calldata->state;
2097 int call_close = 0;
2098
2099 dprintk("%s: begin!\n", __func__);
2100 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2101 return;
2102
2103 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
2104 calldata->arg.fmode = FMODE_READ|FMODE_WRITE;
2105 spin_lock(&state->owner->so_lock);
2106 /* Calculate the change in open mode */
2107 if (state->n_rdwr == 0) {
2108 if (state->n_rdonly == 0) {
2109 call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
2110 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
2111 calldata->arg.fmode &= ~FMODE_READ;
2112 }
2113 if (state->n_wronly == 0) {
2114 call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
2115 call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
2116 calldata->arg.fmode &= ~FMODE_WRITE;
2117 }
2118 }
2119 spin_unlock(&state->owner->so_lock);
2120
2121 if (!call_close) {
2122 /* Note: exit _without_ calling nfs4_close_done */
2123 task->tk_action = NULL;
2124 goto out;
2125 }
2126
2127 if (calldata->arg.fmode == 0) {
2128 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2129 if (calldata->roc &&
2130 pnfs_roc_drain(calldata->inode, &calldata->roc_barrier)) {
2131 rpc_sleep_on(&NFS_SERVER(calldata->inode)->roc_rpcwaitq,
2132 task, NULL);
2133 goto out;
2134 }
2135 }
2136
2137 nfs_fattr_init(calldata->res.fattr);
2138 calldata->timestamp = jiffies;
2139 if (nfs4_setup_sequence(NFS_SERVER(calldata->inode),
2140 &calldata->arg.seq_args,
2141 &calldata->res.seq_res,
2142 task))
2143 goto out;
2144 rpc_call_start(task);
2145out:
2146 dprintk("%s: done!\n", __func__);
2147}
2148
2149static const struct rpc_call_ops nfs4_close_ops = {
2150 .rpc_call_prepare = nfs4_close_prepare,
2151 .rpc_call_done = nfs4_close_done,
2152 .rpc_release = nfs4_free_closedata,
2153};
2154
2155/*
2156 * It is possible for data to be read/written from a mem-mapped file
2157 * after the sys_close call (which hits the vfs layer as a flush).
2158 * This means that we can't safely call nfsv4 close on a file until
2159 * the inode is cleared. This in turn means that we are not good
2160 * NFSv4 citizens - we do not indicate to the server to update the file's
2161 * share state even when we are done with one of the three share
2162 * stateids in the inode.
2163 *
2164 * NOTE: Caller must be holding the sp->so_owner semaphore!
2165 */
2166int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc)
2167{
2168 struct nfs_server *server = NFS_SERVER(state->inode);
2169 struct nfs4_closedata *calldata;
2170 struct nfs4_state_owner *sp = state->owner;
2171 struct rpc_task *task;
2172 struct rpc_message msg = {
2173 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
2174 .rpc_cred = state->owner->so_cred,
2175 };
2176 struct rpc_task_setup task_setup_data = {
2177 .rpc_client = server->client,
2178 .rpc_message = &msg,
2179 .callback_ops = &nfs4_close_ops,
2180 .workqueue = nfsiod_workqueue,
2181 .flags = RPC_TASK_ASYNC,
2182 };
2183 int status = -ENOMEM;
2184
2185 calldata = kzalloc(sizeof(*calldata), gfp_mask);
2186 if (calldata == NULL)
2187 goto out;
2188 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
2189 calldata->inode = state->inode;
2190 calldata->state = state;
2191 calldata->arg.fh = NFS_FH(state->inode);
2192 calldata->arg.stateid = &state->open_stateid;
2193 /* Serialization for the sequence id */
2194 calldata->arg.seqid = nfs_alloc_seqid(&state->owner->so_seqid, gfp_mask);
2195 if (calldata->arg.seqid == NULL)
2196 goto out_free_calldata;
2197 calldata->arg.fmode = 0;
2198 calldata->arg.bitmask = server->cache_consistency_bitmask;
2199 calldata->res.fattr = &calldata->fattr;
2200 calldata->res.seqid = calldata->arg.seqid;
2201 calldata->res.server = server;
2202 calldata->roc = roc;
2203 nfs_sb_active(calldata->inode->i_sb);
2204
2205 msg.rpc_argp = &calldata->arg;
2206 msg.rpc_resp = &calldata->res;
2207 task_setup_data.callback_data = calldata;
2208 task = rpc_run_task(&task_setup_data);
2209 if (IS_ERR(task))
2210 return PTR_ERR(task);
2211 status = 0;
2212 if (wait)
2213 status = rpc_wait_for_completion_task(task);
2214 rpc_put_task(task);
2215 return status;
2216out_free_calldata:
2217 kfree(calldata);
2218out:
2219 if (roc)
2220 pnfs_roc_release(state->inode);
2221 nfs4_put_open_state(state);
2222 nfs4_put_state_owner(sp);
2223 return status;
2224}
2225
2226static struct inode *
2227nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags, struct iattr *attr)
2228{
2229 struct nfs4_state *state;
2230
2231 /* Protect against concurrent sillydeletes */
2232 state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr,
2233 ctx->cred, &ctx->mdsthreshold);
2234 if (IS_ERR(state))
2235 return ERR_CAST(state);
2236 ctx->state = state;
2237 return igrab(state->inode);
2238}
2239
2240static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
2241{
2242 if (ctx->state == NULL)
2243 return;
2244 if (is_sync)
2245 nfs4_close_sync(ctx->state, ctx->mode);
2246 else
2247 nfs4_close_state(ctx->state, ctx->mode);
2248}
2249
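/*
 * Probe which attributes and features the server supports and
 * translate them into NFS_CAP_* flags. Also derive the bitmask used
 * for cache consistency revalidation after metadata-changing
 * operations.
 */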
2250static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2251{
2252 struct nfs4_server_caps_arg args = {
2253 .fhandle = fhandle,
2254 };
2255 struct nfs4_server_caps_res res = {};
2256 struct rpc_message msg = {
2257 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
2258 .rpc_argp = &args,
2259 .rpc_resp = &res,
2260 };
2261 int status;
2262
2263 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2264 if (status == 0) {
2265 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
2266 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
2267 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
2268 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
2269 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
2270 NFS_CAP_CTIME|NFS_CAP_MTIME);
2271 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL)
2272 server->caps |= NFS_CAP_ACLS;
2273 if (res.has_links != 0)
2274 server->caps |= NFS_CAP_HARDLINKS;
2275 if (res.has_symlinks != 0)
2276 server->caps |= NFS_CAP_SYMLINKS;
2277 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
2278 server->caps |= NFS_CAP_FILEID;
2279 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
2280 server->caps |= NFS_CAP_MODE;
2281 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
2282 server->caps |= NFS_CAP_NLINK;
2283 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
2284 server->caps |= NFS_CAP_OWNER;
2285 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
2286 server->caps |= NFS_CAP_OWNER_GROUP;
2287 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
2288 server->caps |= NFS_CAP_ATIME;
2289 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
2290 server->caps |= NFS_CAP_CTIME;
2291 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
2292 server->caps |= NFS_CAP_MTIME;
2293
2294 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
2295 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
2296 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
2297 server->acl_bitmask = res.acl_bitmask;
2298 server->fh_expire_type = res.fh_expire_type;
2299 }
2300
2301 return status;
2302}
2303
2304int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
2305{
2306 struct nfs4_exception exception = { };
2307 int err;
2308 do {
2309 err = nfs4_handle_exception(server,
2310 _nfs4_server_capabilities(server, fhandle),
2311 &exception);
2312 } while (exception.retry);
2313 return err;
2314}
2315
2316static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2317 struct nfs_fsinfo *info)
2318{
2319 struct nfs4_lookup_root_arg args = {
2320 .bitmask = nfs4_fattr_bitmap,
2321 };
2322 struct nfs4_lookup_res res = {
2323 .server = server,
2324 .fattr = info->fattr,
2325 .fh = fhandle,
2326 };
2327 struct rpc_message msg = {
2328 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
2329 .rpc_argp = &args,
2330 .rpc_resp = &res,
2331 };
2332
2333 nfs_fattr_init(info->fattr);
2334 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2335}
2336
2337static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
2338 struct nfs_fsinfo *info)
2339{
2340 struct nfs4_exception exception = { };
2341 int err;
2342 do {
2343 err = _nfs4_lookup_root(server, fhandle, info);
2344 switch (err) {
2345 case 0:
2346 case -NFS4ERR_WRONGSEC:
2347 goto out;
2348 default:
2349 err = nfs4_handle_exception(server, err, &exception);
2350 }
2351 } while (exception.retry);
2352out:
2353 return err;
2354}
2355
2356static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2357 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
2358{
2359 struct rpc_auth *auth;
2360 int ret;
2361
2362 auth = rpcauth_create(flavor, server->client);
2363 if (IS_ERR(auth)) {
2364 ret = -EIO;
2365 goto out;
2366 }
2367 ret = nfs4_lookup_root(server, fhandle, info);
2368out:
2369 return ret;
2370}
2371
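/*
 * No security flavour was specified for the mount, so try each GSS
 * pseudoflavor known to the kernel and then AUTH_NULL until the server
 * accepts one for looking up the root filehandle.
 */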
2372static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2373 struct nfs_fsinfo *info)
2374{
2375 int i, len, status = 0;
2376 rpc_authflavor_t flav_array[NFS_MAX_SECFLAVORS];
2377
2378 len = gss_mech_list_pseudoflavors(&flav_array[0]);
2379 flav_array[len] = RPC_AUTH_NULL;
2380 len += 1;
2381
2382 for (i = 0; i < len; i++) {
2383 status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]);
2384 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
2385 continue;
2386 break;
2387 }
2388 /*
2389 * -EACCES could mean that the user doesn't have correct permissions
2390 * to access the mount. It could also mean that we tried to mount
2391 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
2392 * existing mount programs don't handle -EACCES very well so it should
2393 * be mapped to -EPERM instead.
2394 */
2395 if (status == -EACCES)
2396 status = -EPERM;
2397 return status;
2398}
2399
2400/*
2401 * get the file handle for the "/" directory on the server
2402 */
2403int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
2404 struct nfs_fsinfo *info)
2405{
2406 int minor_version = server->nfs_client->cl_minorversion;
2407 int status = nfs4_lookup_root(server, fhandle, info);
2408 if ((status == -NFS4ERR_WRONGSEC) && !(server->flags & NFS_MOUNT_SECFLAVOUR))
2409 /*
2410 * A status of -NFS4ERR_WRONGSEC will be mapped to -EPERM
2411 * by nfs4_map_errors() as this function exits.
2412 */
2413 status = nfs_v4_minor_ops[minor_version]->find_root_sec(server, fhandle, info);
2414 if (status == 0)
2415 status = nfs4_server_capabilities(server, fhandle);
2416 if (status == 0)
2417 status = nfs4_do_fsinfo(server, fhandle, info);
2418 return nfs4_map_errors(status);
2419}
2420
2421static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
2422 struct nfs_fsinfo *info)
2423{
2424 int error;
2425 struct nfs_fattr *fattr = info->fattr;
2426
2427 error = nfs4_server_capabilities(server, mntfh);
2428 if (error < 0) {
2429 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
2430 return error;
2431 }
2432
2433 error = nfs4_proc_getattr(server, mntfh, fattr);
2434 if (error < 0) {
2435 dprintk("nfs4_get_root: getattr error = %d\n", -error);
2436 return error;
2437 }
2438
2439 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
2440 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
2441 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
2442
2443 return error;
2444}
2445
2446/*
2447 * Get locations and (maybe) other attributes of a referral.
2448 * Note that we'll actually follow the referral later when
2449 * we detect fsid mismatch in inode revalidation
2450 */
2451static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
2452 const struct qstr *name, struct nfs_fattr *fattr,
2453 struct nfs_fh *fhandle)
2454{
2455 int status = -ENOMEM;
2456 struct page *page = NULL;
2457 struct nfs4_fs_locations *locations = NULL;
2458
2459 page = alloc_page(GFP_KERNEL);
2460 if (page == NULL)
2461 goto out;
2462 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
2463 if (locations == NULL)
2464 goto out;
2465
2466 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
2467 if (status != 0)
2468 goto out;
2469 /* Make sure server returned a different fsid for the referral */
2470 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
2471 dprintk("%s: server did not return a different fsid for"
2472 " a referral at %s\n", __func__, name->name);
2473 status = -EIO;
2474 goto out;
2475 }
2476 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
2477 nfs_fixup_referral_attributes(&locations->fattr);
2478
2479 /* replace the lookup nfs_fattr with the locations nfs_fattr */
2480 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
2481 memset(fhandle, 0, sizeof(struct nfs_fh));
2482out:
2483 if (page)
2484 __free_page(page);
2485 kfree(locations);
2486 return status;
2487}
2488
2489static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2490{
2491 struct nfs4_getattr_arg args = {
2492 .fh = fhandle,
2493 .bitmask = server->attr_bitmask,
2494 };
2495 struct nfs4_getattr_res res = {
2496 .fattr = fattr,
2497 .server = server,
2498 };
2499 struct rpc_message msg = {
2500 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
2501 .rpc_argp = &args,
2502 .rpc_resp = &res,
2503 };
2504
2505 nfs_fattr_init(fattr);
2506 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2507}
2508
2509static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2510{
2511 struct nfs4_exception exception = { };
2512 int err;
2513 do {
2514 err = nfs4_handle_exception(server,
2515 _nfs4_proc_getattr(server, fhandle, fattr),
2516 &exception);
2517 } while (exception.retry);
2518 return err;
2519}
2520
2521/*
2522 * The file is not closed if it is opened due to a request to change
2523 * the size of the file. The open call will not be needed once the
2524 * VFS layer lookup-intents are implemented.
2525 *
2526 * Close is called when the inode is destroyed.
2527 * If we haven't opened the file for O_WRONLY, we
2528 * need to open it in the size_change case to obtain a stateid.
2529 *
2530 * Got race?
2531 * Because OPEN is always done by name in nfsv4, it is
2532 * possible that we opened a different file by the same
2533 * name. We can recognize this race condition, but we
2534 * can't do anything about it besides returning an error.
2535 *
2536 * This will be fixed with VFS changes (lookup-intent).
2537 */
2538static int
2539nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
2540 struct iattr *sattr)
2541{
2542 struct inode *inode = dentry->d_inode;
2543 struct rpc_cred *cred = NULL;
2544 struct nfs4_state *state = NULL;
2545 int status;
2546
2547 if (pnfs_ld_layoutret_on_setattr(inode))
2548 pnfs_return_layout(inode);
2549
2550 nfs_fattr_init(fattr);
2551
2552 /* Deal with open(O_TRUNC) */
2553 if (sattr->ia_valid & ATTR_OPEN)
2554 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
2555
2556 /* Optimization: if the end result is no change, don't RPC */
2557 if ((sattr->ia_valid & ~(ATTR_FILE)) == 0)
2558 return 0;
2559
2560 /* Search for an existing open(O_WRITE) file */
2561 if (sattr->ia_valid & ATTR_FILE) {
2562 struct nfs_open_context *ctx;
2563
2564 ctx = nfs_file_open_context(sattr->ia_file);
2565 if (ctx) {
2566 cred = ctx->cred;
2567 state = ctx->state;
2568 }
2569 }
2570
2571 status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
2572 if (status == 0)
2573 nfs_setattr_update_inode(inode, sattr);
2574 return status;
2575}
2576
2577static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
2578 const struct qstr *name, struct nfs_fh *fhandle,
2579 struct nfs_fattr *fattr)
2580{
2581 struct nfs_server *server = NFS_SERVER(dir);
2582 int status;
2583 struct nfs4_lookup_arg args = {
2584 .bitmask = server->attr_bitmask,
2585 .dir_fh = NFS_FH(dir),
2586 .name = name,
2587 };
2588 struct nfs4_lookup_res res = {
2589 .server = server,
2590 .fattr = fattr,
2591 .fh = fhandle,
2592 };
2593 struct rpc_message msg = {
2594 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
2595 .rpc_argp = &args,
2596 .rpc_resp = &res,
2597 };
2598
2599 nfs_fattr_init(fattr);
2600
2601 dprintk("NFS call lookup %s\n", name->name);
2602 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
2603 dprintk("NFS reply lookup: %d\n", status);
2604 return status;
2605}
2606
2607static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
2608{
2609 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
2610 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
2611 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
2612 fattr->nlink = 2;
2613}
2614
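/*
 * LOOKUP with error fixups: NFS4ERR_BADNAME becomes -ENOENT,
 * NFS4ERR_MOVED triggers a referral lookup, and NFS4ERR_WRONGSEC makes
 * us retry with a client that uses a different security flavour. A
 * newly created client is returned to the caller on success and shut
 * down on failure.
 */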
2615static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
2616 struct qstr *name, struct nfs_fh *fhandle,
2617 struct nfs_fattr *fattr)
2618{
2619 struct nfs4_exception exception = { };
2620 struct rpc_clnt *client = *clnt;
2621 int err;
2622 do {
2623 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr);
2624 switch (err) {
2625 case -NFS4ERR_BADNAME:
2626 err = -ENOENT;
2627 goto out;
2628 case -NFS4ERR_MOVED:
2629 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
2630 goto out;
2631 case -NFS4ERR_WRONGSEC:
2632 err = -EPERM;
2633 if (client != *clnt)
2634 goto out;
2635
2636 client = nfs4_create_sec_client(client, dir, name);
2637 if (IS_ERR(client))
2638 return PTR_ERR(client);
2639
2640 exception.retry = 1;
2641 break;
2642 default:
2643 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
2644 }
2645 } while (exception.retry);
2646
2647out:
2648 if (err == 0)
2649 *clnt = client;
2650 else if (client != *clnt)
2651 rpc_shutdown_client(client);
2652
2653 return err;
2654}
2655
2656static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
2657 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2658{
2659 int status;
2660 struct rpc_clnt *client = NFS_CLIENT(dir);
2661
2662 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
2663 if (client != NFS_CLIENT(dir)) {
2664 rpc_shutdown_client(client);
2665 nfs_fixup_secinfo_attributes(fattr);
2666 }
2667 return status;
2668}
2669
2670struct rpc_clnt *
2671nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
2672 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2673{
2674 int status;
2675 struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir));
2676
2677 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr);
2678 if (status < 0) {
2679 rpc_shutdown_client(client);
2680 return ERR_PTR(status);
2681 }
2682 return client;
2683}
2684
2685static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2686{
2687 struct nfs_server *server = NFS_SERVER(inode);
2688 struct nfs4_accessargs args = {
2689 .fh = NFS_FH(inode),
2690 .bitmask = server->cache_consistency_bitmask,
2691 };
2692 struct nfs4_accessres res = {
2693 .server = server,
2694 };
2695 struct rpc_message msg = {
2696 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
2697 .rpc_argp = &args,
2698 .rpc_resp = &res,
2699 .rpc_cred = entry->cred,
2700 };
2701 int mode = entry->mask;
2702 int status;
2703
2704 /*
2705 * Determine which access bits we want to ask for...
2706 */
2707 if (mode & MAY_READ)
2708 args.access |= NFS4_ACCESS_READ;
2709 if (S_ISDIR(inode->i_mode)) {
2710 if (mode & MAY_WRITE)
2711 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
2712 if (mode & MAY_EXEC)
2713 args.access |= NFS4_ACCESS_LOOKUP;
2714 } else {
2715 if (mode & MAY_WRITE)
2716 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
2717 if (mode & MAY_EXEC)
2718 args.access |= NFS4_ACCESS_EXECUTE;
2719 }
2720
2721 res.fattr = nfs_alloc_fattr();
2722 if (res.fattr == NULL)
2723 return -ENOMEM;
2724
2725 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
2726 if (!status) {
2727 entry->mask = 0;
2728 if (res.access & NFS4_ACCESS_READ)
2729 entry->mask |= MAY_READ;
2730 if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
2731 entry->mask |= MAY_WRITE;
2732 if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
2733 entry->mask |= MAY_EXEC;
2734 nfs_refresh_inode(inode, res.fattr);
2735 }
2736 nfs_free_fattr(res.fattr);
2737 return status;
2738}
2739
2740static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
2741{
2742 struct nfs4_exception exception = { };
2743 int err;
2744 do {
2745 err = nfs4_handle_exception(NFS_SERVER(inode),
2746 _nfs4_proc_access(inode, entry),
2747 &exception);
2748 } while (exception.retry);
2749 return err;
2750}
2751
2752/*
2753 * TODO: For the time being, we don't try to get any attributes
2754 * along with any of the zero-copy operations READ, READDIR,
2755 * READLINK, WRITE.
2756 *
2757 * In the case of the first three, we want to put the GETATTR
2758 * after the read-type operation -- this is because it is hard
2759 * to predict the length of a GETATTR response in v4, and thus
2760 * align the READ data correctly. This means that the GETATTR
2761 * may end up partially falling into the page cache, and we should
2762 * shift it into the 'tail' of the xdr_buf before processing.
2763 * To do this efficiently, we need to know the total length
2764 * of data received, which doesn't seem to be available outside
2765 * of the RPC layer.
2766 *
2767 * In the case of WRITE, we also want to put the GETATTR after
2768 * the operation -- in this case because we want to make sure
2769 * we get the post-operation mtime and size. This means that
2770 * we can't use xdr_encode_pages() as written: we need a variant
2771 * of it which would leave room in the 'tail' iovec.
2772 *
2773 * Both of these changes to the XDR layer would in fact be quite
2774 * minor, but I decided to leave them for a subsequent patch.
2775 */
2776static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
2777 unsigned int pgbase, unsigned int pglen)
2778{
2779 struct nfs4_readlink args = {
2780 .fh = NFS_FH(inode),
2781 .pgbase = pgbase,
2782 .pglen = pglen,
2783 .pages = &page,
2784 };
2785 struct nfs4_readlink_res res;
2786 struct rpc_message msg = {
2787 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
2788 .rpc_argp = &args,
2789 .rpc_resp = &res,
2790 };
2791
2792 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
2793}
2794
2795static int nfs4_proc_readlink(struct inode *inode, struct page *page,
2796 unsigned int pgbase, unsigned int pglen)
2797{
2798 struct nfs4_exception exception = { };
2799 int err;
2800 do {
2801 err = nfs4_handle_exception(NFS_SERVER(inode),
2802 _nfs4_proc_readlink(inode, page, pgbase, pglen),
2803 &exception);
2804 } while (exception.retry);
2805 return err;
2806}
2807
2808/*
2809 * Got race?
2810 * We will need to arrange for the VFS layer to provide an atomic open.
2811 * Until then, this create/open method is prone to inefficiency and race
2812 * conditions due to the lookup, create, and open VFS calls from sys_open()
2813 * placed on the wire.
2814 *
2815 * Given the above sorry state of affairs, I'm simply sending an OPEN.
2816 * The file will be opened again in the subsequent VFS open call
2817 * (nfs4_proc_file_open).
2818 *
2819 * The open for read will just hang around to be used by any process that
2820 * opens the file O_RDONLY. This will all be resolved with the VFS changes.
2821 */
2822
2823static int
2824nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
2825 int flags, struct nfs_open_context *ctx)
2826{
2827 struct dentry *de = dentry;
2828 struct nfs4_state *state;
2829 struct rpc_cred *cred = NULL;
2830 fmode_t fmode = 0;
2831 int status = 0;
2832
2833 if (ctx != NULL) {
2834 cred = ctx->cred;
2835 de = ctx->dentry;
2836 fmode = ctx->mode;
2837 }
2838 sattr->ia_mode &= ~current_umask();
2839 state = nfs4_do_open(dir, de, fmode, flags, sattr, cred, NULL);
2840 d_drop(dentry);
2841 if (IS_ERR(state)) {
2842 status = PTR_ERR(state);
2843 goto out;
2844 }
2845 d_add(dentry, igrab(state->inode));
2846 nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
2847 if (ctx != NULL)
2848 ctx->state = state;
2849 else
2850 nfs4_close_sync(state, fmode);
2851out:
2852 return status;
2853}
2854
2855static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
2856{
2857 struct nfs_server *server = NFS_SERVER(dir);
2858 struct nfs_removeargs args = {
2859 .fh = NFS_FH(dir),
2860 .name = *name,
2861 };
2862 struct nfs_removeres res = {
2863 .server = server,
2864 };
2865 struct rpc_message msg = {
2866 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
2867 .rpc_argp = &args,
2868 .rpc_resp = &res,
2869 };
2870 int status;
2871
2872 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
2873 if (status == 0)
2874 update_changeattr(dir, &res.cinfo);
2875 return status;
2876}
2877
2878static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
2879{
2880 struct nfs4_exception exception = { };
2881 int err;
2882 do {
2883 err = nfs4_handle_exception(NFS_SERVER(dir),
2884 _nfs4_proc_remove(dir, name),
2885 &exception);
2886 } while (exception.retry);
2887 return err;
2888}
2889
2890static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
2891{
2892 struct nfs_server *server = NFS_SERVER(dir);
2893 struct nfs_removeargs *args = msg->rpc_argp;
2894 struct nfs_removeres *res = msg->rpc_resp;
2895
2896 res->server = server;
2897 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
2898 nfs41_init_sequence(&args->seq_args, &res->seq_res, 1);
2899}
2900
2901static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
2902{
2903 if (nfs4_setup_sequence(NFS_SERVER(data->dir),
2904 &data->args.seq_args,
2905 &data->res.seq_res,
2906 task))
2907 return;
2908 rpc_call_start(task);
2909}
2910
2911static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
2912{
2913 struct nfs_removeres *res = task->tk_msg.rpc_resp;
2914
2915 if (!nfs4_sequence_done(task, &res->seq_res))
2916 return 0;
2917 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
2918 return 0;
2919 update_changeattr(dir, &res->cinfo);
2920 return 1;
2921}
2922
2923static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
2924{
2925 struct nfs_server *server = NFS_SERVER(dir);
2926 struct nfs_renameargs *arg = msg->rpc_argp;
2927 struct nfs_renameres *res = msg->rpc_resp;
2928
2929 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
2930 res->server = server;
2931 nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1);
2932}
2933
2934static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
2935{
2936 if (nfs4_setup_sequence(NFS_SERVER(data->old_dir),
2937 &data->args.seq_args,
2938 &data->res.seq_res,
2939 task))
2940 return;
2941 rpc_call_start(task);
2942}
2943
2944static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
2945 struct inode *new_dir)
2946{
2947 struct nfs_renameres *res = task->tk_msg.rpc_resp;
2948
2949 if (!nfs4_sequence_done(task, &res->seq_res))
2950 return 0;
2951 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
2952 return 0;
2953
2954 update_changeattr(old_dir, &res->old_cinfo);
2955 update_changeattr(new_dir, &res->new_cinfo);
2956 return 1;
2957}
2958
2959static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
2960 struct inode *new_dir, struct qstr *new_name)
2961{
2962 struct nfs_server *server = NFS_SERVER(old_dir);
2963 struct nfs_renameargs arg = {
2964 .old_dir = NFS_FH(old_dir),
2965 .new_dir = NFS_FH(new_dir),
2966 .old_name = old_name,
2967 .new_name = new_name,
2968 };
2969 struct nfs_renameres res = {
2970 .server = server,
2971 };
2972 struct rpc_message msg = {
2973 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME],
2974 .rpc_argp = &arg,
2975 .rpc_resp = &res,
2976 };
2977 int status;
2978
2979 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2980 if (!status) {
2981 update_changeattr(old_dir, &res.old_cinfo);
2982 update_changeattr(new_dir, &res.new_cinfo);
2983 }
2984 return status;
2985}
2986
2987static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
2988 struct inode *new_dir, struct qstr *new_name)
2989{
2990 struct nfs4_exception exception = { };
2991 int err;
2992 do {
2993 err = nfs4_handle_exception(NFS_SERVER(old_dir),
2994 _nfs4_proc_rename(old_dir, old_name,
2995 new_dir, new_name),
2996 &exception);
2997 } while (exception.retry);
2998 return err;
2999}
3000
3001static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3002{
3003 struct nfs_server *server = NFS_SERVER(inode);
3004 struct nfs4_link_arg arg = {
3005 .fh = NFS_FH(inode),
3006 .dir_fh = NFS_FH(dir),
3007 .name = name,
3008 .bitmask = server->attr_bitmask,
3009 };
3010 struct nfs4_link_res res = {
3011 .server = server,
3012 };
3013 struct rpc_message msg = {
3014 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
3015 .rpc_argp = &arg,
3016 .rpc_resp = &res,
3017 };
3018 int status = -ENOMEM;
3019
3020 res.fattr = nfs_alloc_fattr();
3021 if (res.fattr == NULL)
3022 goto out;
3023
3024 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3025 if (!status) {
3026 update_changeattr(dir, &res.cinfo);
3027 nfs_post_op_update_inode(inode, res.fattr);
3028 }
3029out:
3030 nfs_free_fattr(res.fattr);
3031 return status;
3032}
3033
3034static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
3035{
3036 struct nfs4_exception exception = { };
3037 int err;
3038 do {
3039 err = nfs4_handle_exception(NFS_SERVER(inode),
3040 _nfs4_proc_link(inode, dir, name),
3041 &exception);
3042 } while (exception.retry);
3043 return err;
3044}
3045
3046struct nfs4_createdata {
3047 struct rpc_message msg;
3048 struct nfs4_create_arg arg;
3049 struct nfs4_create_res res;
3050 struct nfs_fh fh;
3051 struct nfs_fattr fattr;
3052};
3053
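/*
 * Helper bundling the CREATE arguments, results and RPC message.
 * Callers adjust msg.rpc_proc and the ftype/union members for symlinks
 * and device nodes before handing the data to nfs4_do_create().
 */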
3054static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3055 struct qstr *name, struct iattr *sattr, u32 ftype)
3056{
3057 struct nfs4_createdata *data;
3058
3059 data = kzalloc(sizeof(*data), GFP_KERNEL);
3060 if (data != NULL) {
3061 struct nfs_server *server = NFS_SERVER(dir);
3062
3063 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
3064 data->msg.rpc_argp = &data->arg;
3065 data->msg.rpc_resp = &data->res;
3066 data->arg.dir_fh = NFS_FH(dir);
3067 data->arg.server = server;
3068 data->arg.name = name;
3069 data->arg.attrs = sattr;
3070 data->arg.ftype = ftype;
3071 data->arg.bitmask = server->attr_bitmask;
3072 data->res.server = server;
3073 data->res.fh = &data->fh;
3074 data->res.fattr = &data->fattr;
3075 nfs_fattr_init(data->res.fattr);
3076 }
3077 return data;
3078}
3079
3080static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
3081{
3082 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
3083 &data->arg.seq_args, &data->res.seq_res, 1);
3084 if (status == 0) {
3085 update_changeattr(dir, &data->res.dir_cinfo);
3086 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
3087 }
3088 return status;
3089}
3090
3091static void nfs4_free_createdata(struct nfs4_createdata *data)
3092{
3093 kfree(data);
3094}
3095
3096static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3097 struct page *page, unsigned int len, struct iattr *sattr)
3098{
3099 struct nfs4_createdata *data;
3100 int status = -ENAMETOOLONG;
3101
3102 if (len > NFS4_MAXPATHLEN)
3103 goto out;
3104
3105 status = -ENOMEM;
3106 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
3107 if (data == NULL)
3108 goto out;
3109
3110 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
3111 data->arg.u.symlink.pages = &page;
3112 data->arg.u.symlink.len = len;
3113
3114 status = nfs4_do_create(dir, dentry, data);
3115
3116 nfs4_free_createdata(data);
3117out:
3118 return status;
3119}
3120
3121static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
3122 struct page *page, unsigned int len, struct iattr *sattr)
3123{
3124 struct nfs4_exception exception = { };
3125 int err;
3126 do {
3127 err = nfs4_handle_exception(NFS_SERVER(dir),
3128 _nfs4_proc_symlink(dir, dentry, page,
3129 len, sattr),
3130 &exception);
3131 } while (exception.retry);
3132 return err;
3133}
3134
3135static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3136 struct iattr *sattr)
3137{
3138 struct nfs4_createdata *data;
3139 int status = -ENOMEM;
3140
3141 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
3142 if (data == NULL)
3143 goto out;
3144
3145 status = nfs4_do_create(dir, dentry, data);
3146
3147 nfs4_free_createdata(data);
3148out:
3149 return status;
3150}
3151
3152static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
3153 struct iattr *sattr)
3154{
3155 struct nfs4_exception exception = { };
3156 int err;
3157
3158 sattr->ia_mode &= ~current_umask();
3159 do {
3160 err = nfs4_handle_exception(NFS_SERVER(dir),
3161 _nfs4_proc_mkdir(dir, dentry, sattr),
3162 &exception);
3163 } while (exception.retry);
3164 return err;
3165}
3166
3167static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3168 u64 cookie, struct page **pages, unsigned int count, int plus)
3169{
3170 struct inode *dir = dentry->d_inode;
3171 struct nfs4_readdir_arg args = {
3172 .fh = NFS_FH(dir),
3173 .pages = pages,
3174 .pgbase = 0,
3175 .count = count,
3176 .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
3177 .plus = plus,
3178 };
3179 struct nfs4_readdir_res res;
3180 struct rpc_message msg = {
3181 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
3182 .rpc_argp = &args,
3183 .rpc_resp = &res,
3184 .rpc_cred = cred,
3185 };
3186 int status;
3187
3188 dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
3189 dentry->d_parent->d_name.name,
3190 dentry->d_name.name,
3191 (unsigned long long)cookie);
3192 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
3193 res.pgbase = args.pgbase;
3194 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
3195 if (status >= 0) {
3196 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
3197 status += args.pgbase;
3198 }
3199
3200 nfs_invalidate_atime(dir);
3201
3202 dprintk("%s: returns %d\n", __func__, status);
3203 return status;
3204}
3205
3206static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
3207 u64 cookie, struct page **pages, unsigned int count, int plus)
3208{
3209 struct nfs4_exception exception = { };
3210 int err;
3211 do {
3212 err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
3213 _nfs4_proc_readdir(dentry, cred, cookie,
3214 pages, count, plus),
3215 &exception);
3216 } while (exception.retry);
3217 return err;
3218}
3219
3220static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3221 struct iattr *sattr, dev_t rdev)
3222{
3223 struct nfs4_createdata *data;
3224 int mode = sattr->ia_mode;
3225 int status = -ENOMEM;
3226
3227 BUG_ON(!(sattr->ia_valid & ATTR_MODE));
3228 BUG_ON(!S_ISFIFO(mode) && !S_ISBLK(mode) && !S_ISCHR(mode) && !S_ISSOCK(mode));
3229
3230 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
3231 if (data == NULL)
3232 goto out;
3233
3234 if (S_ISFIFO(mode))
3235 data->arg.ftype = NF4FIFO;
3236 else if (S_ISBLK(mode)) {
3237 data->arg.ftype = NF4BLK;
3238 data->arg.u.device.specdata1 = MAJOR(rdev);
3239 data->arg.u.device.specdata2 = MINOR(rdev);
3240 }
3241 else if (S_ISCHR(mode)) {
3242 data->arg.ftype = NF4CHR;
3243 data->arg.u.device.specdata1 = MAJOR(rdev);
3244 data->arg.u.device.specdata2 = MINOR(rdev);
3245 }
3246
3247 status = nfs4_do_create(dir, dentry, data);
3248
3249 nfs4_free_createdata(data);
3250out:
3251 return status;
3252}
3253
3254static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
3255 struct iattr *sattr, dev_t rdev)
3256{
3257 struct nfs4_exception exception = { };
3258 int err;
3259
3260 sattr->ia_mode &= ~current_umask();
3261 do {
3262 err = nfs4_handle_exception(NFS_SERVER(dir),
3263 _nfs4_proc_mknod(dir, dentry, sattr, rdev),
3264 &exception);
3265 } while (exception.retry);
3266 return err;
3267}
3268
3269static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
3270 struct nfs_fsstat *fsstat)
3271{
3272 struct nfs4_statfs_arg args = {
3273 .fh = fhandle,
3274 .bitmask = server->attr_bitmask,
3275 };
3276 struct nfs4_statfs_res res = {
3277 .fsstat = fsstat,
3278 };
3279 struct rpc_message msg = {
3280 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
3281 .rpc_argp = &args,
3282 .rpc_resp = &res,
3283 };
3284
3285 nfs_fattr_init(fsstat->fattr);
3286 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3287}
3288
3289static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
3290{
3291 struct nfs4_exception exception = { };
3292 int err;
3293 do {
3294 err = nfs4_handle_exception(server,
3295 _nfs4_proc_statfs(server, fhandle, fsstat),
3296 &exception);
3297 } while (exception.retry);
3298 return err;
3299}
3300
3301static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
3302 struct nfs_fsinfo *fsinfo)
3303{
3304 struct nfs4_fsinfo_arg args = {
3305 .fh = fhandle,
3306 .bitmask = server->attr_bitmask,
3307 };
3308 struct nfs4_fsinfo_res res = {
3309 .fsinfo = fsinfo,
3310 };
3311 struct rpc_message msg = {
3312 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
3313 .rpc_argp = &args,
3314 .rpc_resp = &res,
3315 };
3316
3317 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3318}
3319
3320static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3321{
3322 struct nfs4_exception exception = { };
3323 int err;
3324
3325 do {
3326 err = nfs4_handle_exception(server,
3327 _nfs4_do_fsinfo(server, fhandle, fsinfo),
3328 &exception);
3329 } while (exception.retry);
3330 return err;
3331}
3332
3333static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
3334{
3335 nfs_fattr_init(fsinfo->fattr);
3336 return nfs4_do_fsinfo(server, fhandle, fsinfo);
3337}
3338
3339static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3340 struct nfs_pathconf *pathconf)
3341{
3342 struct nfs4_pathconf_arg args = {
3343 .fh = fhandle,
3344 .bitmask = server->attr_bitmask,
3345 };
3346 struct nfs4_pathconf_res res = {
3347 .pathconf = pathconf,
3348 };
3349 struct rpc_message msg = {
3350 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
3351 .rpc_argp = &args,
3352 .rpc_resp = &res,
3353 };
3354
3355 /* None of the pathconf attributes are mandatory to implement */
3356 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
3357 memset(pathconf, 0, sizeof(*pathconf));
3358 return 0;
3359 }
3360
3361 nfs_fattr_init(pathconf->fattr);
3362 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3363}
3364
3365static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3366 struct nfs_pathconf *pathconf)
3367{
3368 struct nfs4_exception exception = { };
3369 int err;
3370
3371 do {
3372 err = nfs4_handle_exception(server,
3373 _nfs4_proc_pathconf(server, fhandle, pathconf),
3374 &exception);
3375 } while (exception.retry);
3376 return err;
3377}
3378
3379void __nfs4_read_done_cb(struct nfs_read_data *data)
3380{
3381 nfs_invalidate_atime(data->header->inode);
3382}
3383
3384static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
3385{
3386 struct nfs_server *server = NFS_SERVER(data->header->inode);
3387
3388 if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
3389 rpc_restart_call_prepare(task);
3390 return -EAGAIN;
3391 }
3392
3393 __nfs4_read_done_cb(data);
3394 if (task->tk_status > 0)
3395 renew_lease(server, data->timestamp);
3396 return 0;
3397}
3398
3399static int nfs4_read_done(struct rpc_task *task, struct nfs_read_data *data)
3400{
3402 dprintk("--> %s\n", __func__);
3403
3404 if (!nfs4_sequence_done(task, &data->res.seq_res))
3405 return -EAGAIN;
3406
3407 return data->read_done_cb ? data->read_done_cb(task, data) :
3408 nfs4_read_done_cb(task, data);
3409}
3410
3411static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
3412{
3413 data->timestamp = jiffies;
3414 data->read_done_cb = nfs4_read_done_cb;
3415 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
3416 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
3417}
3418
3419static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
3420{
3421 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
3422 &data->args.seq_args,
3423 &data->res.seq_res,
3424 task))
3425 return;
3426 rpc_call_start(task);
3427}
3428
3429static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
3430{
3431 struct inode *inode = data->header->inode;
3432
3433 if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
3434 rpc_restart_call_prepare(task);
3435 return -EAGAIN;
3436 }
3437 if (task->tk_status >= 0) {
3438 renew_lease(NFS_SERVER(inode), data->timestamp);
3439 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
3440 }
3441 return 0;
3442}
3443
3444static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
3445{
3446 if (!nfs4_sequence_done(task, &data->res.seq_res))
3447 return -EAGAIN;
3448 return data->write_done_cb ? data->write_done_cb(task, data) :
3449 nfs4_write_done_cb(task, data);
3450}
3451
3452static
3453bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data)
3454{
3455 const struct nfs_pgio_header *hdr = data->header;
3456
3457 /* Don't request attributes for pNFS or O_DIRECT writes */
3458 if (data->ds_clp != NULL || hdr->dreq != NULL)
3459 return false;
3460 /* Otherwise, request attributes if and only if we don't hold
3461 * a delegation
3462 */
3463 return nfs_have_delegation(hdr->inode, FMODE_READ) == 0;
3464}
3465
3466static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
3467{
3468 struct nfs_server *server = NFS_SERVER(data->header->inode);
3469
3470 if (!nfs4_write_need_cache_consistency_data(data)) {
3471 data->args.bitmask = NULL;
3472 data->res.fattr = NULL;
3473 } else
3474 data->args.bitmask = server->cache_consistency_bitmask;
3475
3476 if (!data->write_done_cb)
3477 data->write_done_cb = nfs4_write_done_cb;
3478 data->res.server = server;
3479 data->timestamp = jiffies;
3480
3481 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
3482 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
3483}
3484
3485static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
3486{
3487 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
3488 &data->args.seq_args,
3489 &data->res.seq_res,
3490 task))
3491 return;
3492 rpc_call_start(task);
3493}
3494
3495static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
3496{
3497 if (nfs4_setup_sequence(NFS_SERVER(data->inode),
3498 &data->args.seq_args,
3499 &data->res.seq_res,
3500 task))
3501 return;
3502 rpc_call_start(task);
3503}
3504
3505static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
3506{
3507 struct inode *inode = data->inode;
3508
3509 if (nfs4_async_handle_error(task, NFS_SERVER(inode), NULL) == -EAGAIN) {
3510 rpc_restart_call_prepare(task);
3511 return -EAGAIN;
3512 }
3513 return 0;
3514}
3515
3516static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
3517{
3518 if (!nfs4_sequence_done(task, &data->res.seq_res))
3519 return -EAGAIN;
3520 return data->commit_done_cb(task, data);
3521}
3522
3523static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
3524{
3525 struct nfs_server *server = NFS_SERVER(data->inode);
3526
3527 if (data->commit_done_cb == NULL)
3528 data->commit_done_cb = nfs4_commit_done_cb;
3529 data->res.server = server;
3530 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
3531 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
3532}
3533
3534struct nfs4_renewdata {
3535 struct nfs_client *client;
3536 unsigned long timestamp;
3537};
3538
3539/*
3540 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
3541 * standalone procedure for queueing an asynchronous RENEW.
3542 */
3543static void nfs4_renew_release(void *calldata)
3544{
3545 struct nfs4_renewdata *data = calldata;
3546 struct nfs_client *clp = data->client;
3547
3548 if (atomic_read(&clp->cl_count) > 1)
3549 nfs4_schedule_state_renewal(clp);
3550 nfs_put_client(clp);
3551 kfree(data);
3552}
3553
3554static void nfs4_renew_done(struct rpc_task *task, void *calldata)
3555{
3556 struct nfs4_renewdata *data = calldata;
3557 struct nfs_client *clp = data->client;
3558 unsigned long timestamp = data->timestamp;
3559
3560 if (task->tk_status < 0) {
3561 /* Unless we're shutting down, schedule state recovery! */
3562 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
3563 return;
3564 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
3565 nfs4_schedule_lease_recovery(clp);
3566 return;
3567 }
3568 nfs4_schedule_path_down_recovery(clp);
3569 }
3570 do_renew_lease(clp, timestamp);
3571}
3572
3573static const struct rpc_call_ops nfs4_renew_ops = {
3574 .rpc_call_done = nfs4_renew_done,
3575 .rpc_release = nfs4_renew_release,
3576};
3577
3578static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
3579{
3580 struct rpc_message msg = {
3581 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3582 .rpc_argp = clp,
3583 .rpc_cred = cred,
3584 };
3585 struct nfs4_renewdata *data;
3586
3587 if (renew_flags == 0)
3588 return 0;
3589 if (!atomic_inc_not_zero(&clp->cl_count))
3590 return -EIO;
3591 data = kmalloc(sizeof(*data), GFP_NOFS);
3592 if (data == NULL)
3593 return -ENOMEM;
3594 data->client = clp;
3595 data->timestamp = jiffies;
3596 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_SOFT,
3597 &nfs4_renew_ops, data);
3598}
3599
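/*
 * Synchronous RENEW, used by the state manager.  The timestamp is sampled
 * before the RPC is sent so that the lease is recorded as renewed from the
 * moment the request went out, not from when the reply arrived.
 */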
3600static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
3601{
3602 struct rpc_message msg = {
3603 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
3604 .rpc_argp = clp,
3605 .rpc_cred = cred,
3606 };
3607 unsigned long now = jiffies;
3608 int status;
3609
3610 status = rpc_call_sync(clp->cl_rpcclient, &msg, 0);
3611 if (status < 0)
3612 return status;
3613 do_renew_lease(clp, now);
3614 return 0;
3615}
3616
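/*
 * ACLs are only usable if the server advertises the ACL attribute and
 * supports both ALLOW and DENY ACEs.
 */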
3617static inline int nfs4_server_supports_acls(struct nfs_server *server)
3618{
3619 return (server->caps & NFS_CAP_ACLS)
3620 && (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3621 && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
3622}
3623
3624/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
3625 * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
3626 * the stack.
3627 */
3628#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
3629
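/*
 * Copy @buflen bytes of @buf into freshly allocated pages.  Returns the
 * number of pages allocated, or -ENOMEM after releasing any pages that
 * were already allocated.
 */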
3630static int buf_to_pages_noslab(const void *buf, size_t buflen,
3631 struct page **pages, unsigned int *pgbase)
3632{
3633 struct page *newpage, **spages;
3634 int rc = 0;
3635 size_t len;
3636 spages = pages;
3637
3638 do {
3639 len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
3640 newpage = alloc_page(GFP_KERNEL);
3641
3642 if (newpage == NULL)
3643 goto unwind;
3644 memcpy(page_address(newpage), buf, len);
3645 buf += len;
3646 buflen -= len;
3647 *pages++ = newpage;
3648 rc++;
3649 } while (buflen != 0);
3650
3651 return rc;
3652
3653unwind:
3654 for(; rc > 0; rc--)
3655 __free_page(spages[rc-1]);
3656 return -ENOMEM;
3657}
3658
3659struct nfs4_cached_acl {
3660 int cached;
3661 size_t len;
3662 char data[0];
3663};
3664
3665static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
3666{
3667 struct nfs_inode *nfsi = NFS_I(inode);
3668
3669 spin_lock(&inode->i_lock);
3670 kfree(nfsi->nfs4_acl);
3671 nfsi->nfs4_acl = acl;
3672 spin_unlock(&inode->i_lock);
3673}
3674
3675static void nfs4_zap_acl_attr(struct inode *inode)
3676{
3677 nfs4_set_cached_acl(inode, NULL);
3678}
3679
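/*
 * Read the ACL from the per-inode cache.  Returns the ACL length (copying
 * the data when @buf is non-NULL and the data is cached), -ERANGE when the
 * supplied buffer is too small, or -ENOENT when the cache cannot satisfy
 * the request.
 */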
3680static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
3681{
3682 struct nfs_inode *nfsi = NFS_I(inode);
3683 struct nfs4_cached_acl *acl;
3684 int ret = -ENOENT;
3685
3686 spin_lock(&inode->i_lock);
3687 acl = nfsi->nfs4_acl;
3688 if (acl == NULL)
3689 goto out;
3690 if (buf == NULL) /* user is just asking for length */
3691 goto out_len;
3692 if (acl->cached == 0)
3693 goto out;
3694 ret = -ERANGE; /* see getxattr(2) man page */
3695 if (acl->len > buflen)
3696 goto out;
3697 memcpy(buf, acl->data, acl->len);
3698out_len:
3699 ret = acl->len;
3700out:
3701 spin_unlock(&inode->i_lock);
3702 return ret;
3703}
3704
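/*
 * Cache the ACL that was just read from the server.  ACLs that fit in a
 * page are cached in full; for larger ACLs only the length is remembered,
 * which is still enough to answer a buffer-sizing (NULL buf) getxattr call.
 */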
3705static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
3706{
3707 struct nfs4_cached_acl *acl;
3708
3709 if (pages && acl_len <= PAGE_SIZE) {
3710 acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL);
3711 if (acl == NULL)
3712 goto out;
3713 acl->cached = 1;
3714 _copy_from_pages(acl->data, pages, pgbase, acl_len);
3715 } else {
3716 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
3717 if (acl == NULL)
3718 goto out;
3719 acl->cached = 0;
3720 }
3721 acl->len = acl_len;
3722out:
3723 nfs4_set_cached_acl(inode, acl);
3724}
3725
3726/*
3727 * The getxattr API returns the required buffer length when called with a
3728 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
3729 * the required buf. On a NULL buf, we send a page of data to the server
3730 * guessing that the ACL request can be serviced by a page. If so, we cache
3731 * up to the page of ACL data, and the 2nd call to getxattr is serviced by
3732 * the cache. If not, we throw away the page and cache only the required
3733 * length. The next getxattr call will then produce another round trip to
3734 * the server, this time with the input buf of the required size.
3735 */
3736static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3737{
3738 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
3739 struct nfs_getaclargs args = {
3740 .fh = NFS_FH(inode),
3741 .acl_pages = pages,
3742 .acl_len = buflen,
3743 };
3744 struct nfs_getaclres res = {
3745 .acl_len = buflen,
3746 };
3747 struct rpc_message msg = {
3748 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
3749 .rpc_argp = &args,
3750 .rpc_resp = &res,
3751 };
3752 int ret = -ENOMEM, npages, i, acl_len = 0;
3753
3754 npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3755 /* As long as we're doing a round trip to the server anyway,
3756 * let's be prepared for a page of acl data. */
3757 if (npages == 0)
3758 npages = 1;
3759
3760 /* Add an extra page to handle the bitmap returned */
3761 npages++;
3762
3763 for (i = 0; i < npages; i++) {
3764 pages[i] = alloc_page(GFP_KERNEL);
3765 if (!pages[i])
3766 goto out_free;
3767 }
3768
3769 /* for decoding across pages */
3770 res.acl_scratch = alloc_page(GFP_KERNEL);
3771 if (!res.acl_scratch)
3772 goto out_free;
3773
3774 args.acl_len = npages * PAGE_SIZE;
3775 args.acl_pgbase = 0;
3776
3777	/* Let decode_getacl know not to fail if the ACL data is larger than
3778 * the page we send as a guess */
3779 if (buf == NULL)
3780 res.acl_flags |= NFS4_ACL_LEN_REQUEST;
3781
3782 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
3783 __func__, buf, buflen, npages, args.acl_len);
3784 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
3785 &msg, &args.seq_args, &res.seq_res, 0);
3786 if (ret)
3787 goto out_free;
3788
3789 acl_len = res.acl_len - res.acl_data_offset;
3790 if (acl_len > args.acl_len)
3791 nfs4_write_cached_acl(inode, NULL, 0, acl_len);
3792 else
3793 nfs4_write_cached_acl(inode, pages, res.acl_data_offset,
3794 acl_len);
3795 if (buf) {
3796 ret = -ERANGE;
3797 if (acl_len > buflen)
3798 goto out_free;
3799 _copy_from_pages(buf, pages, res.acl_data_offset,
3800 acl_len);
3801 }
3802 ret = acl_len;
3803out_free:
3804 for (i = 0; i < npages; i++)
3805 if (pages[i])
3806 __free_page(pages[i]);
3807 if (res.acl_scratch)
3808 __free_page(res.acl_scratch);
3809 return ret;
3810}
3811
3812static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
3813{
3814 struct nfs4_exception exception = { };
3815 ssize_t ret;
3816 do {
3817 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
3818 if (ret >= 0)
3819 break;
3820 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
3821 } while (exception.retry);
3822 return ret;
3823}
3824
3825static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
3826{
3827 struct nfs_server *server = NFS_SERVER(inode);
3828 int ret;
3829
3830 if (!nfs4_server_supports_acls(server))
3831 return -EOPNOTSUPP;
3832 ret = nfs_revalidate_inode(server, inode);
3833 if (ret < 0)
3834 return ret;
3835 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
3836 nfs_zap_acl_cache(inode);
3837 ret = nfs4_read_cached_acl(inode, buf, buflen);
3838 if (ret != -ENOENT)
3839		/* -ENOENT means the cache could not satisfy the request: either
3840		 * no ACL is cached, or only its length is cached and the caller
3841		 * asked for the data */
3841 return ret;
3842 return nfs4_get_acl_uncached(inode, buf, buflen);
3843}
3844
3845static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3846{
3847 struct nfs_server *server = NFS_SERVER(inode);
3848 struct page *pages[NFS4ACL_MAXPAGES];
3849 struct nfs_setaclargs arg = {
3850 .fh = NFS_FH(inode),
3851 .acl_pages = pages,
3852 .acl_len = buflen,
3853 };
3854 struct nfs_setaclres res;
3855 struct rpc_message msg = {
3856 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
3857 .rpc_argp = &arg,
3858 .rpc_resp = &res,
3859 };
3860 int ret, i;
3861
3862 if (!nfs4_server_supports_acls(server))
3863 return -EOPNOTSUPP;
3864 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
3865 if (i < 0)
3866 return i;
3867 nfs_inode_return_delegation(inode);
3868 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3869
3870 /*
3871 * Free each page after tx, so the only ref left is
3872 * held by the network stack
3873 */
3874 for (; i > 0; i--)
3875 put_page(pages[i-1]);
3876
3877 /*
3878	 * An ACL update can result in inode attribute changes,
3879	 * so mark the attribute cache invalid.
3880 */
3881 spin_lock(&inode->i_lock);
3882 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
3883 spin_unlock(&inode->i_lock);
3884 nfs_access_zap_cache(inode);
3885 nfs_zap_acl_cache(inode);
3886 return ret;
3887}
3888
3889static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
3890{
3891 struct nfs4_exception exception = { };
3892 int err;
3893 do {
3894 err = nfs4_handle_exception(NFS_SERVER(inode),
3895 __nfs4_proc_set_acl(inode, buf, buflen),
3896 &exception);
3897 } while (exception.retry);
3898 return err;
3899}
3900
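/*
 * Map an error from an asynchronous RPC onto the appropriate recovery
 * action.  Returns -EAGAIN when the caller should restart the call, either
 * after state, session or lease recovery has been scheduled or after an
 * NFS4ERR_DELAY style backoff; returns 0 when the (mapped) error should be
 * reported to the caller.  @state may be NULL for operations that are not
 * associated with an open or lock state.
 */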
3901static int
3902nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, struct nfs4_state *state)
3903{
3904 struct nfs_client *clp = server->nfs_client;
3905
3906 if (task->tk_status >= 0)
3907 return 0;
3908 switch(task->tk_status) {
3909 case -NFS4ERR_DELEG_REVOKED:
3910 case -NFS4ERR_ADMIN_REVOKED:
3911 case -NFS4ERR_BAD_STATEID:
3912 if (state == NULL)
3913 break;
3914 nfs_remove_bad_delegation(state->inode);
3915 case -NFS4ERR_OPENMODE:
3916 if (state == NULL)
3917 break;
3918 nfs4_schedule_stateid_recovery(server, state);
3919 goto wait_on_recovery;
3920 case -NFS4ERR_EXPIRED:
3921 if (state != NULL)
3922 nfs4_schedule_stateid_recovery(server, state);
3923 case -NFS4ERR_STALE_STATEID:
3924 case -NFS4ERR_STALE_CLIENTID:
3925 nfs4_schedule_lease_recovery(clp);
3926 goto wait_on_recovery;
3927#if defined(CONFIG_NFS_V4_1)
3928 case -NFS4ERR_BADSESSION:
3929 case -NFS4ERR_BADSLOT:
3930 case -NFS4ERR_BAD_HIGH_SLOT:
3931 case -NFS4ERR_DEADSESSION:
3932 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
3933 case -NFS4ERR_SEQ_FALSE_RETRY:
3934 case -NFS4ERR_SEQ_MISORDERED:
3935 dprintk("%s ERROR %d, Reset session\n", __func__,
3936 task->tk_status);
3937 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
3938 task->tk_status = 0;
3939 return -EAGAIN;
3940#endif /* CONFIG_NFS_V4_1 */
3941 case -NFS4ERR_DELAY:
3942 nfs_inc_server_stats(server, NFSIOS_DELAY);
3943 case -NFS4ERR_GRACE:
3944 case -EKEYEXPIRED:
3945 rpc_delay(task, NFS4_POLL_RETRY_MAX);
3946 task->tk_status = 0;
3947 return -EAGAIN;
3948 case -NFS4ERR_RETRY_UNCACHED_REP:
3949 case -NFS4ERR_OLD_STATEID:
3950 task->tk_status = 0;
3951 return -EAGAIN;
3952 }
3953 task->tk_status = nfs4_map_errors(task->tk_status);
3954 return 0;
3955wait_on_recovery:
3956 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
3957 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
3958 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
3959 task->tk_status = 0;
3960 return -EAGAIN;
3961}
3962
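/*
 * Build the boot verifier sent in SETCLIENTID and EXCHANGE_ID.  It is
 * normally derived from the recorded boot time; while state is being
 * purged, a deliberately impossible timestamp is substituted so that the
 * verifier can never collide with one generated from a real boot time.
 */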
3963static void nfs4_init_boot_verifier(const struct nfs_client *clp,
3964 nfs4_verifier *bootverf)
3965{
3966 __be32 verf[2];
3967
3968 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
3969 /* An impossible timestamp guarantees this value
3970 * will never match a generated boot time. */
3971 verf[0] = 0;
3972 verf[1] = (__be32)(NSEC_PER_SEC + 1);
3973 } else {
3974 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
3975 verf[0] = (__be32)nn->boot_time.tv_sec;
3976 verf[1] = (__be32)nn->boot_time.tv_nsec;
3977 }
3978 memcpy(bootverf->data, verf, sizeof(bootverf->data));
3979}
3980
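/*
 * Establish a clientid.  The client identifier string is built from the
 * local address, the server address, the transport protocol, the auth
 * flavour and a uniquifier.  A first NFS4ERR_CLID_INUSE reply causes the
 * call to be retried after sleeping for one lease period; a second
 * collision bumps cl_id_uniquifier for future attempts and returns the
 * error to the caller.
 */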
3981int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
3982 unsigned short port, struct rpc_cred *cred,
3983 struct nfs4_setclientid_res *res)
3984{
3985 nfs4_verifier sc_verifier;
3986 struct nfs4_setclientid setclientid = {
3987 .sc_verifier = &sc_verifier,
3988 .sc_prog = program,
3989 .sc_cb_ident = clp->cl_cb_ident,
3990 };
3991 struct rpc_message msg = {
3992 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
3993 .rpc_argp = &setclientid,
3994 .rpc_resp = res,
3995 .rpc_cred = cred,
3996 };
3997 int loop = 0;
3998 int status;
3999
4000 nfs4_init_boot_verifier(clp, &sc_verifier);
4001
4002 for(;;) {
4003 rcu_read_lock();
4004 setclientid.sc_name_len = scnprintf(setclientid.sc_name,
4005 sizeof(setclientid.sc_name), "%s/%s %s %s %u",
4006 clp->cl_ipaddr,
4007 rpc_peeraddr2str(clp->cl_rpcclient,
4008 RPC_DISPLAY_ADDR),
4009 rpc_peeraddr2str(clp->cl_rpcclient,
4010 RPC_DISPLAY_PROTO),
4011 clp->cl_rpcclient->cl_auth->au_ops->au_name,
4012 clp->cl_id_uniquifier);
4013 setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
4014 sizeof(setclientid.sc_netid),
4015 rpc_peeraddr2str(clp->cl_rpcclient,
4016 RPC_DISPLAY_NETID));
4017 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
4018 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
4019 clp->cl_ipaddr, port >> 8, port & 255);
4020 rcu_read_unlock();
4021
4022 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4023 if (status != -NFS4ERR_CLID_INUSE)
4024 break;
4025 if (loop != 0) {
4026 ++clp->cl_id_uniquifier;
4027 break;
4028 }
4029 ++loop;
4030 ssleep(clp->cl_lease_time / HZ + 1);
4031 }
4032 return status;
4033}
4034
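/*
 * Confirm the clientid.  On success the lease time reported by the server
 * is converted to jiffies and recorded, together with the renewal
 * timestamp, under cl_lock.
 */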
4035int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
4036 struct nfs4_setclientid_res *arg,
4037 struct rpc_cred *cred)
4038{
4039 struct nfs_fsinfo fsinfo;
4040 struct rpc_message msg = {
4041 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
4042 .rpc_argp = arg,
4043 .rpc_resp = &fsinfo,
4044 .rpc_cred = cred,
4045 };
4046 unsigned long now;
4047 int status;
4048
4049 now = jiffies;
4050 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4051 if (status == 0) {
4052 spin_lock(&clp->cl_lock);
4053 clp->cl_lease_time = fsinfo.lease_time * HZ;
4054 clp->cl_last_renewal = now;
4055 spin_unlock(&clp->cl_lock);
4056 }
4057 return status;
4058}
4059
4060struct nfs4_delegreturndata {
4061 struct nfs4_delegreturnargs args;
4062 struct nfs4_delegreturnres res;
4063 struct nfs_fh fh;
4064 nfs4_stateid stateid;
4065 unsigned long timestamp;
4066 struct nfs_fattr fattr;
4067 int rpc_status;
4068};
4069
4070static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
4071{
4072 struct nfs4_delegreturndata *data = calldata;
4073
4074 if (!nfs4_sequence_done(task, &data->res.seq_res))
4075 return;
4076
4077 switch (task->tk_status) {
4078 case -NFS4ERR_STALE_STATEID:
4079 case -NFS4ERR_EXPIRED:
4080 case 0:
4081 renew_lease(data->res.server, data->timestamp);
4082 break;
4083 default:
4084 if (nfs4_async_handle_error(task, data->res.server, NULL) ==
4085 -EAGAIN) {
4086 rpc_restart_call_prepare(task);
4087 return;
4088 }
4089 }
4090 data->rpc_status = task->tk_status;
4091}
4092
4093static void nfs4_delegreturn_release(void *calldata)
4094{
4095 kfree(calldata);
4096}
4097
4098#if defined(CONFIG_NFS_V4_1)
4099static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
4100{
4101 struct nfs4_delegreturndata *d_data;
4102
4103 d_data = (struct nfs4_delegreturndata *)data;
4104
4105 if (nfs4_setup_sequence(d_data->res.server,
4106 &d_data->args.seq_args,
4107 &d_data->res.seq_res, task))
4108 return;
4109 rpc_call_start(task);
4110}
4111#endif /* CONFIG_NFS_V4_1 */
4112
4113static const struct rpc_call_ops nfs4_delegreturn_ops = {
4114#if defined(CONFIG_NFS_V4_1)
4115 .rpc_call_prepare = nfs4_delegreturn_prepare,
4116#endif /* CONFIG_NFS_V4_1 */
4117 .rpc_call_done = nfs4_delegreturn_done,
4118 .rpc_release = nfs4_delegreturn_release,
4119};
4120
4121static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
4122{
4123 struct nfs4_delegreturndata *data;
4124 struct nfs_server *server = NFS_SERVER(inode);
4125 struct rpc_task *task;
4126 struct rpc_message msg = {
4127 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
4128 .rpc_cred = cred,
4129 };
4130 struct rpc_task_setup task_setup_data = {
4131 .rpc_client = server->client,
4132 .rpc_message = &msg,
4133 .callback_ops = &nfs4_delegreturn_ops,
4134 .flags = RPC_TASK_ASYNC,
4135 };
4136 int status = 0;
4137
4138 data = kzalloc(sizeof(*data), GFP_NOFS);
4139 if (data == NULL)
4140 return -ENOMEM;
4141 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4142 data->args.fhandle = &data->fh;
4143 data->args.stateid = &data->stateid;
4144 data->args.bitmask = server->cache_consistency_bitmask;
4145 nfs_copy_fh(&data->fh, NFS_FH(inode));
4146 nfs4_stateid_copy(&data->stateid, stateid);
4147 data->res.fattr = &data->fattr;
4148 data->res.server = server;
4149 nfs_fattr_init(data->res.fattr);
4150 data->timestamp = jiffies;
4151 data->rpc_status = 0;
4152
4153 task_setup_data.callback_data = data;
4154 msg.rpc_argp = &data->args;
4155 msg.rpc_resp = &data->res;
4156 task = rpc_run_task(&task_setup_data);
4157 if (IS_ERR(task))
4158 return PTR_ERR(task);
4159 if (!issync)
4160 goto out;
4161 status = nfs4_wait_for_completion_rpc_task(task);
4162 if (status != 0)
4163 goto out;
4164 status = data->rpc_status;
4165 if (status == 0)
4166 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
4167 else
4168 nfs_refresh_inode(inode, &data->fattr);
4169out:
4170 rpc_put_task(task);
4171 return status;
4172}
4173
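/*
 * NFS4ERR_STALE_STATEID and NFS4ERR_EXPIRED are treated as success here:
 * either way the delegation no longer exists on the server, so there is
 * nothing left to return.
 */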
4174int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
4175{
4176 struct nfs_server *server = NFS_SERVER(inode);
4177 struct nfs4_exception exception = { };
4178 int err;
4179 do {
4180 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
4181 switch (err) {
4182 case -NFS4ERR_STALE_STATEID:
4183 case -NFS4ERR_EXPIRED:
4184 case 0:
4185 return 0;
4186 }
4187 err = nfs4_handle_exception(server, err, &exception);
4188 } while (exception.retry);
4189 return err;
4190}
4191
4192#define NFS4_LOCK_MINTIMEOUT (1 * HZ)
4193#define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
4194
4195/*
4196 * sleep, with exponential backoff, and retry the LOCK operation.
4197 */
4198static unsigned long
4199nfs4_set_lock_task_retry(unsigned long timeout)
4200{
4201 freezable_schedule_timeout_killable(timeout);
4202 timeout <<= 1;
4203 if (timeout > NFS4_LOCK_MAXTIMEOUT)
4204 return NFS4_LOCK_MAXTIMEOUT;
4205 return timeout;
4206}
4207
4208static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4209{
4210 struct inode *inode = state->inode;
4211 struct nfs_server *server = NFS_SERVER(inode);
4212 struct nfs_client *clp = server->nfs_client;
4213 struct nfs_lockt_args arg = {
4214 .fh = NFS_FH(inode),
4215 .fl = request,
4216 };
4217 struct nfs_lockt_res res = {
4218 .denied = request,
4219 };
4220 struct rpc_message msg = {
4221 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
4222 .rpc_argp = &arg,
4223 .rpc_resp = &res,
4224 .rpc_cred = state->owner->so_cred,
4225 };
4226 struct nfs4_lock_state *lsp;
4227 int status;
4228
4229 arg.lock_owner.clientid = clp->cl_clientid;
4230 status = nfs4_set_lock_state(state, request);
4231 if (status != 0)
4232 goto out;
4233 lsp = request->fl_u.nfs4_fl.owner;
4234 arg.lock_owner.id = lsp->ls_seqid.owner_id;
4235 arg.lock_owner.s_dev = server->s_dev;
4236 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4237 switch (status) {
4238 case 0:
4239 request->fl_type = F_UNLCK;
4240 break;
4241 case -NFS4ERR_DENIED:
4242 status = 0;
4243 }
4244 request->fl_ops->fl_release_private(request);
4245out:
4246 return status;
4247}
4248
4249static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4250{
4251 struct nfs4_exception exception = { };
4252 int err;
4253
4254 do {
4255 err = nfs4_handle_exception(NFS_SERVER(state->inode),
4256 _nfs4_proc_getlk(state, cmd, request),
4257 &exception);
4258 } while (exception.retry);
4259 return err;
4260}
4261
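/*
 * Apply the lock locally through the VFS: POSIX locks go through
 * posix_lock_file_wait(), flock() locks through flock_lock_file_wait().
 * A request that is neither is a programming error and triggers BUG().
 */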
4262static int do_vfs_lock(struct file *file, struct file_lock *fl)
4263{
4264 int res = 0;
4265 switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) {
4266 case FL_POSIX:
4267 res = posix_lock_file_wait(file, fl);
4268 break;
4269 case FL_FLOCK:
4270 res = flock_lock_file_wait(file, fl);
4271 break;
4272 default:
4273 BUG();
4274 }
4275 return res;
4276}
4277
4278struct nfs4_unlockdata {
4279 struct nfs_locku_args arg;
4280 struct nfs_locku_res res;
4281 struct nfs4_lock_state *lsp;
4282 struct nfs_open_context *ctx;
4283 struct file_lock fl;
4284 const struct nfs_server *server;
4285 unsigned long timestamp;
4286};
4287
4288static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
4289 struct nfs_open_context *ctx,
4290 struct nfs4_lock_state *lsp,
4291 struct nfs_seqid *seqid)
4292{
4293 struct nfs4_unlockdata *p;
4294 struct inode *inode = lsp->ls_state->inode;
4295
4296 p = kzalloc(sizeof(*p), GFP_NOFS);
4297 if (p == NULL)
4298 return NULL;
4299 p->arg.fh = NFS_FH(inode);
4300 p->arg.fl = &p->fl;
4301 p->arg.seqid = seqid;
4302 p->res.seqid = seqid;
4303 p->arg.stateid = &lsp->ls_stateid;
4304 p->lsp = lsp;
4305 atomic_inc(&lsp->ls_count);
4306 /* Ensure we don't close file until we're done freeing locks! */
4307 p->ctx = get_nfs_open_context(ctx);
4308 memcpy(&p->fl, fl, sizeof(p->fl));
4309 p->server = NFS_SERVER(inode);
4310 return p;
4311}
4312
4313static void nfs4_locku_release_calldata(void *data)
4314{
4315 struct nfs4_unlockdata *calldata = data;
4316 nfs_free_seqid(calldata->arg.seqid);
4317 nfs4_put_lock_state(calldata->lsp);
4318 put_nfs_open_context(calldata->ctx);
4319 kfree(calldata);
4320}
4321
4322static void nfs4_locku_done(struct rpc_task *task, void *data)
4323{
4324 struct nfs4_unlockdata *calldata = data;
4325
4326 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
4327 return;
4328 switch (task->tk_status) {
4329 case 0:
4330 nfs4_stateid_copy(&calldata->lsp->ls_stateid,
4331 &calldata->res.stateid);
4332 renew_lease(calldata->server, calldata->timestamp);
4333 break;
4334 case -NFS4ERR_BAD_STATEID:
4335 case -NFS4ERR_OLD_STATEID:
4336 case -NFS4ERR_STALE_STATEID:
4337 case -NFS4ERR_EXPIRED:
4338 break;
4339 default:
4340 if (nfs4_async_handle_error(task, calldata->server, NULL) == -EAGAIN)
4341 rpc_restart_call_prepare(task);
4342 }
4343}
4344
4345static void nfs4_locku_prepare(struct rpc_task *task, void *data)
4346{
4347 struct nfs4_unlockdata *calldata = data;
4348
4349 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
4350 return;
4351 if ((calldata->lsp->ls_flags & NFS_LOCK_INITIALIZED) == 0) {
4352 /* Note: exit _without_ running nfs4_locku_done */
4353 task->tk_action = NULL;
4354 return;
4355 }
4356 calldata->timestamp = jiffies;
4357 if (nfs4_setup_sequence(calldata->server,
4358 &calldata->arg.seq_args,
4359 &calldata->res.seq_res, task))
4360 return;
4361 rpc_call_start(task);
4362}
4363
4364static const struct rpc_call_ops nfs4_locku_ops = {
4365 .rpc_call_prepare = nfs4_locku_prepare,
4366 .rpc_call_done = nfs4_locku_done,
4367 .rpc_release = nfs4_locku_release_calldata,
4368};
4369
4370static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
4371 struct nfs_open_context *ctx,
4372 struct nfs4_lock_state *lsp,
4373 struct nfs_seqid *seqid)
4374{
4375 struct nfs4_unlockdata *data;
4376 struct rpc_message msg = {
4377 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
4378 .rpc_cred = ctx->cred,
4379 };
4380 struct rpc_task_setup task_setup_data = {
4381 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
4382 .rpc_message = &msg,
4383 .callback_ops = &nfs4_locku_ops,
4384 .workqueue = nfsiod_workqueue,
4385 .flags = RPC_TASK_ASYNC,
4386 };
4387
4388 /* Ensure this is an unlock - when canceling a lock, the
4389 * canceled lock is passed in, and it won't be an unlock.
4390 */
4391 fl->fl_type = F_UNLCK;
4392
4393 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
4394 if (data == NULL) {
4395 nfs_free_seqid(seqid);
4396 return ERR_PTR(-ENOMEM);
4397 }
4398
4399 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
4400 msg.rpc_argp = &data->arg;
4401 msg.rpc_resp = &data->res;
4402 task_setup_data.callback_data = data;
4403 return rpc_run_task(&task_setup_data);
4404}
4405
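/*
 * Release the lock locally before sending LOCKU.  FL_EXISTS makes the VFS
 * report -ENOENT when no matching lock was ever recorded, in which case no
 * RPC is needed; delegated locks are likewise dropped without contacting
 * the server.
 */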
4406static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
4407{
4408 struct nfs_inode *nfsi = NFS_I(state->inode);
4409 struct nfs_seqid *seqid;
4410 struct nfs4_lock_state *lsp;
4411 struct rpc_task *task;
4412 int status = 0;
4413 unsigned char fl_flags = request->fl_flags;
4414
4415 status = nfs4_set_lock_state(state, request);
4416 /* Unlock _before_ we do the RPC call */
4417 request->fl_flags |= FL_EXISTS;
4418 down_read(&nfsi->rwsem);
4419 if (do_vfs_lock(request->fl_file, request) == -ENOENT) {
4420 up_read(&nfsi->rwsem);
4421 goto out;
4422 }
4423 up_read(&nfsi->rwsem);
4424 if (status != 0)
4425 goto out;
4426 /* Is this a delegated lock? */
4427 if (test_bit(NFS_DELEGATED_STATE, &state->flags))
4428 goto out;
4429 lsp = request->fl_u.nfs4_fl.owner;
4430 seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
4431 status = -ENOMEM;
4432 if (seqid == NULL)
4433 goto out;
4434 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
4435 status = PTR_ERR(task);
4436 if (IS_ERR(task))
4437 goto out;
4438 status = nfs4_wait_for_completion_rpc_task(task);
4439 rpc_put_task(task);
4440out:
4441 request->fl_flags = fl_flags;
4442 return status;
4443}
4444
4445struct nfs4_lockdata {
4446 struct nfs_lock_args arg;
4447 struct nfs_lock_res res;
4448 struct nfs4_lock_state *lsp;
4449 struct nfs_open_context *ctx;
4450 struct file_lock fl;
4451 unsigned long timestamp;
4452 int rpc_status;
4453 int cancelled;
4454 struct nfs_server *server;
4455};
4456
4457static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
4458 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
4459 gfp_t gfp_mask)
4460{
4461 struct nfs4_lockdata *p;
4462 struct inode *inode = lsp->ls_state->inode;
4463 struct nfs_server *server = NFS_SERVER(inode);
4464
4465 p = kzalloc(sizeof(*p), gfp_mask);
4466 if (p == NULL)
4467 return NULL;
4468
4469 p->arg.fh = NFS_FH(inode);
4470 p->arg.fl = &p->fl;
4471 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
4472 if (p->arg.open_seqid == NULL)
4473 goto out_free;
4474 p->arg.lock_seqid = nfs_alloc_seqid(&lsp->ls_seqid, gfp_mask);
4475 if (p->arg.lock_seqid == NULL)
4476 goto out_free_seqid;
4477 p->arg.lock_stateid = &lsp->ls_stateid;
4478 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
4479 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
4480 p->arg.lock_owner.s_dev = server->s_dev;
4481 p->res.lock_seqid = p->arg.lock_seqid;
4482 p->lsp = lsp;
4483 p->server = server;
4484 atomic_inc(&lsp->ls_count);
4485 p->ctx = get_nfs_open_context(ctx);
4486 memcpy(&p->fl, fl, sizeof(p->fl));
4487 return p;
4488out_free_seqid:
4489 nfs_free_seqid(p->arg.open_seqid);
4490out_free:
4491 kfree(p);
4492 return NULL;
4493}
4494
4495static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
4496{
4497 struct nfs4_lockdata *data = calldata;
4498 struct nfs4_state *state = data->lsp->ls_state;
4499
4500 dprintk("%s: begin!\n", __func__);
4501 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
4502 return;
4503 /* Do we need to do an open_to_lock_owner? */
4504 if (!(data->arg.lock_seqid->sequence->flags & NFS_SEQID_CONFIRMED)) {
4505 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0)
4506 return;
4507 data->arg.open_stateid = &state->stateid;
4508 data->arg.new_lock_owner = 1;
4509 data->res.open_seqid = data->arg.open_seqid;
4510 } else
4511 data->arg.new_lock_owner = 0;
4512 data->timestamp = jiffies;
4513 if (nfs4_setup_sequence(data->server,
4514 &data->arg.seq_args,
4515 &data->res.seq_res, task))
4516 return;
4517 rpc_call_start(task);
4518 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
4519}
4520
4521static void nfs4_recover_lock_prepare(struct rpc_task *task, void *calldata)
4522{
4523 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
4524 nfs4_lock_prepare(task, calldata);
4525}
4526
4527static void nfs4_lock_done(struct rpc_task *task, void *calldata)
4528{
4529 struct nfs4_lockdata *data = calldata;
4530
4531 dprintk("%s: begin!\n", __func__);
4532
4533 if (!nfs4_sequence_done(task, &data->res.seq_res))
4534 return;
4535
4536 data->rpc_status = task->tk_status;
4537 if (data->arg.new_lock_owner != 0) {
4538 if (data->rpc_status == 0)
4539 nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
4540 else
4541 goto out;
4542 }
4543 if (data->rpc_status == 0) {
4544 nfs4_stateid_copy(&data->lsp->ls_stateid, &data->res.stateid);
4545 data->lsp->ls_flags |= NFS_LOCK_INITIALIZED;
4546 renew_lease(NFS_SERVER(data->ctx->dentry->d_inode), data->timestamp);
4547 }
4548out:
4549 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
4550}
4551
4552static void nfs4_lock_release(void *calldata)
4553{
4554 struct nfs4_lockdata *data = calldata;
4555
4556 dprintk("%s: begin!\n", __func__);
4557 nfs_free_seqid(data->arg.open_seqid);
4558 if (data->cancelled != 0) {
4559 struct rpc_task *task;
4560 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
4561 data->arg.lock_seqid);
4562 if (!IS_ERR(task))
4563 rpc_put_task_async(task);
4564 dprintk("%s: cancelling lock!\n", __func__);
4565 } else
4566 nfs_free_seqid(data->arg.lock_seqid);
4567 nfs4_put_lock_state(data->lsp);
4568 put_nfs_open_context(data->ctx);
4569 kfree(data);
4570 dprintk("%s: done!\n", __func__);
4571}
4572
4573static const struct rpc_call_ops nfs4_lock_ops = {
4574 .rpc_call_prepare = nfs4_lock_prepare,
4575 .rpc_call_done = nfs4_lock_done,
4576 .rpc_release = nfs4_lock_release,
4577};
4578
4579static const struct rpc_call_ops nfs4_recover_lock_ops = {
4580 .rpc_call_prepare = nfs4_recover_lock_prepare,
4581 .rpc_call_done = nfs4_lock_done,
4582 .rpc_release = nfs4_lock_release,
4583};
4584
4585static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
4586{
4587 switch (error) {
4588 case -NFS4ERR_ADMIN_REVOKED:
4589 case -NFS4ERR_BAD_STATEID:
4590 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4591 if (new_lock_owner != 0 ||
4592 (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
4593 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
4594 break;
4595 case -NFS4ERR_STALE_STATEID:
4596 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4597 case -NFS4ERR_EXPIRED:
4598 nfs4_schedule_lease_recovery(server->nfs_client);
4599	}
4600}
4601
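/*
 * Send a LOCK request.  recovery_type distinguishes new locks
 * (NFS_LOCK_NEW) from reclaim and expired-lease recovery; recovery
 * requests use nfs4_recover_lock_ops, whose prepare callback raises the
 * task to RPC_PRIORITY_PRIVILEGED.  If the wait for completion is
 * interrupted, data->cancelled is set and nfs4_lock_release() will undo
 * the lock with a matching LOCKU.
 */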
4602static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
4603{
4604 struct nfs4_lockdata *data;
4605 struct rpc_task *task;
4606 struct rpc_message msg = {
4607 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
4608 .rpc_cred = state->owner->so_cred,
4609 };
4610 struct rpc_task_setup task_setup_data = {
4611 .rpc_client = NFS_CLIENT(state->inode),
4612 .rpc_message = &msg,
4613 .callback_ops = &nfs4_lock_ops,
4614 .workqueue = nfsiod_workqueue,
4615 .flags = RPC_TASK_ASYNC,
4616 };
4617 int ret;
4618
4619 dprintk("%s: begin!\n", __func__);
4620 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
4621 fl->fl_u.nfs4_fl.owner,
4622 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
4623 if (data == NULL)
4624 return -ENOMEM;
4625 if (IS_SETLKW(cmd))
4626 data->arg.block = 1;
4627 if (recovery_type > NFS_LOCK_NEW) {
4628 if (recovery_type == NFS_LOCK_RECLAIM)
4629 data->arg.reclaim = NFS_LOCK_RECLAIM;
4630 task_setup_data.callback_ops = &nfs4_recover_lock_ops;
4631 }
4632 nfs41_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
4633 msg.rpc_argp = &data->arg;
4634 msg.rpc_resp = &data->res;
4635 task_setup_data.callback_data = data;
4636 task = rpc_run_task(&task_setup_data);
4637 if (IS_ERR(task))
4638 return PTR_ERR(task);
4639 ret = nfs4_wait_for_completion_rpc_task(task);
4640 if (ret == 0) {
4641 ret = data->rpc_status;
4642 if (ret)
4643 nfs4_handle_setlk_error(data->server, data->lsp,
4644 data->arg.new_lock_owner, ret);
4645 } else
4646 data->cancelled = 1;
4647 rpc_put_task(task);
4648 dprintk("%s: done, ret = %d!\n", __func__, ret);
4649 return ret;
4650}
4651
4652static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
4653{
4654 struct nfs_server *server = NFS_SERVER(state->inode);
4655 struct nfs4_exception exception = {
4656 .inode = state->inode,
4657 };
4658 int err;
4659
4660 do {
4661 /* Cache the lock if possible... */
4662 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4663 return 0;
4664 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
4665 if (err != -NFS4ERR_DELAY)
4666 break;
4667 nfs4_handle_exception(server, err, &exception);
4668 } while (exception.retry);
4669 return err;
4670}
4671
4672static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
4673{
4674 struct nfs_server *server = NFS_SERVER(state->inode);
4675 struct nfs4_exception exception = {
4676 .inode = state->inode,
4677 };
4678 int err;
4679
4680 err = nfs4_set_lock_state(state, request);
4681 if (err != 0)
4682 return err;
4683 do {
4684 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
4685 return 0;
4686 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
4687 switch (err) {
4688 default:
4689 goto out;
4690 case -NFS4ERR_GRACE:
4691 case -NFS4ERR_DELAY:
4692 nfs4_handle_exception(server, err, &exception);
4693 err = 0;
4694 }
4695 } while (exception.retry);
4696out:
4697 return err;
4698}
4699
4700#if defined(CONFIG_NFS_V4_1)
4701static int nfs41_check_expired_locks(struct nfs4_state *state)
4702{
4703 int status, ret = NFS_OK;
4704 struct nfs4_lock_state *lsp;
4705 struct nfs_server *server = NFS_SERVER(state->inode);
4706
4707 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
4708 if (lsp->ls_flags & NFS_LOCK_INITIALIZED) {
4709 status = nfs41_test_stateid(server, &lsp->ls_stateid);
4710 if (status != NFS_OK) {
4711 nfs41_free_stateid(server, &lsp->ls_stateid);
4712 lsp->ls_flags &= ~NFS_LOCK_INITIALIZED;
4713 ret = status;
4714 }
4715 }
4716	}
4717
4718 return ret;
4719}
4720
4721static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
4722{
4723 int status = NFS_OK;
4724
4725 if (test_bit(LK_STATE_IN_USE, &state->flags))
4726 status = nfs41_check_expired_locks(state);
4727 if (status == NFS_OK)
4728 return status;
4729 return nfs4_lock_expired(state, request);
4730}
4731#endif
4732
4733static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4734{
4735 struct nfs_inode *nfsi = NFS_I(state->inode);
4736 unsigned char fl_flags = request->fl_flags;
4737 int status = -ENOLCK;
4738
4739 if ((fl_flags & FL_POSIX) &&
4740 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
4741 goto out;
4742 /* Is this a delegated open? */
4743 status = nfs4_set_lock_state(state, request);
4744 if (status != 0)
4745 goto out;
4746 request->fl_flags |= FL_ACCESS;
4747 status = do_vfs_lock(request->fl_file, request);
4748 if (status < 0)
4749 goto out;
4750 down_read(&nfsi->rwsem);
4751 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
4752 /* Yes: cache locks! */
4753 /* ...but avoid races with delegation recall... */
4754 request->fl_flags = fl_flags & ~FL_SLEEP;
4755 status = do_vfs_lock(request->fl_file, request);
4756 goto out_unlock;
4757 }
4758 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
4759 if (status != 0)
4760 goto out_unlock;
4761 /* Note: we always want to sleep here! */
4762 request->fl_flags = fl_flags | FL_SLEEP;
4763 if (do_vfs_lock(request->fl_file, request) < 0)
4764 printk(KERN_WARNING "NFS: %s: VFS is out of sync with lock "
4765 "manager!\n", __func__);
4766out_unlock:
4767 up_read(&nfsi->rwsem);
4768out:
4769 request->fl_flags = fl_flags;
4770 return status;
4771}
4772
4773static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
4774{
4775 struct nfs4_exception exception = {
4776 .state = state,
4777 .inode = state->inode,
4778 };
4779 int err;
4780
4781 do {
4782 err = _nfs4_proc_setlk(state, cmd, request);
4783 if (err == -NFS4ERR_DENIED)
4784 err = -EAGAIN;
4785 err = nfs4_handle_exception(NFS_SERVER(state->inode),
4786 err, &exception);
4787 } while (exception.retry);
4788 return err;
4789}
4790
4791static int
4792nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
4793{
4794 struct nfs_open_context *ctx;
4795 struct nfs4_state *state;
4796 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
4797 int status;
4798
4799 /* verify open state */
4800 ctx = nfs_file_open_context(filp);
4801 state = ctx->state;
4802
4803 if (request->fl_start < 0 || request->fl_end < 0)
4804 return -EINVAL;
4805
4806 if (IS_GETLK(cmd)) {
4807 if (state != NULL)
4808 return nfs4_proc_getlk(state, F_GETLK, request);
4809 return 0;
4810 }
4811
4812 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
4813 return -EINVAL;
4814
4815 if (request->fl_type == F_UNLCK) {
4816 if (state != NULL)
4817 return nfs4_proc_unlck(state, cmd, request);
4818 return 0;
4819 }
4820
4821 if (state == NULL)
4822 return -ENOLCK;
4823 /*
4824 * Don't rely on the VFS having checked the file open mode,
4825 * since it won't do this for flock() locks.
4826 */
4827 switch (request->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) {
4828 case F_RDLCK:
4829 if (!(filp->f_mode & FMODE_READ))
4830 return -EBADF;
4831 break;
4832 case F_WRLCK:
4833 if (!(filp->f_mode & FMODE_WRITE))
4834 return -EBADF;
4835 }
4836
4837 do {
4838 status = nfs4_proc_setlk(state, cmd, request);
4839 if ((status != -EAGAIN) || IS_SETLK(cmd))
4840 break;
4841 timeout = nfs4_set_lock_task_retry(timeout);
4842 status = -ERESTARTSYS;
4843 if (signalled())
4844 break;
4845 } while(status < 0);
4846 return status;
4847}
4848
4849int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
4850{
4851 struct nfs_server *server = NFS_SERVER(state->inode);
4852 struct nfs4_exception exception = { };
4853 int err;
4854
4855 err = nfs4_set_lock_state(state, fl);
4856 if (err != 0)
4857 goto out;
4858 do {
4859 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
4860 switch (err) {
4861 default:
4862 printk(KERN_ERR "NFS: %s: unhandled error "
4863 "%d.\n", __func__, err);
4864 case 0:
4865 case -ESTALE:
4866 goto out;
4867 case -NFS4ERR_EXPIRED:
4868 nfs4_schedule_stateid_recovery(server, state);
4869 case -NFS4ERR_STALE_CLIENTID:
4870 case -NFS4ERR_STALE_STATEID:
4871 nfs4_schedule_lease_recovery(server->nfs_client);
4872 goto out;
4873 case -NFS4ERR_BADSESSION:
4874 case -NFS4ERR_BADSLOT:
4875 case -NFS4ERR_BAD_HIGH_SLOT:
4876 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4877 case -NFS4ERR_DEADSESSION:
4878 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
4879 goto out;
4880 case -ERESTARTSYS:
4881 /*
4882 * The show must go on: exit, but mark the
4883 * stateid as needing recovery.
4884 */
4885 case -NFS4ERR_DELEG_REVOKED:
4886 case -NFS4ERR_ADMIN_REVOKED:
4887 case -NFS4ERR_BAD_STATEID:
4888 case -NFS4ERR_OPENMODE:
4889 nfs4_schedule_stateid_recovery(server, state);
4890 err = 0;
4891 goto out;
4892 case -EKEYEXPIRED:
4893 /*
4894 * User RPCSEC_GSS context has expired.
4895 * We cannot recover this stateid now, so
4896 * skip it and allow recovery thread to
4897 * proceed.
4898 */
4899 err = 0;
4900 goto out;
4901 case -ENOMEM:
4902 case -NFS4ERR_DENIED:
4903 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
4904 err = 0;
4905 goto out;
4906 case -NFS4ERR_DELAY:
4907 break;
4908 }
4909 err = nfs4_handle_exception(server, err, &exception);
4910 } while (exception.retry);
4911out:
4912 return err;
4913}
4914
4915struct nfs_release_lockowner_data {
4916 struct nfs4_lock_state *lsp;
4917 struct nfs_server *server;
4918 struct nfs_release_lockowner_args args;
4919};
4920
4921static void nfs4_release_lockowner_release(void *calldata)
4922{
4923 struct nfs_release_lockowner_data *data = calldata;
4924 nfs4_free_lock_state(data->server, data->lsp);
4925 kfree(calldata);
4926}
4927
4928static const struct rpc_call_ops nfs4_release_lockowner_ops = {
4929 .rpc_release = nfs4_release_lockowner_release,
4930};
4931
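/*
 * RELEASE_LOCKOWNER exists only in NFSv4.0, hence the minor version check.
 * The RPC is fired off asynchronously; the lock state is freed from the
 * release callback once the call completes.
 */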
4932int nfs4_release_lockowner(struct nfs4_lock_state *lsp)
4933{
4934 struct nfs_server *server = lsp->ls_state->owner->so_server;
4935 struct nfs_release_lockowner_data *data;
4936 struct rpc_message msg = {
4937 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
4938 };
4939
4940 if (server->nfs_client->cl_mvops->minor_version != 0)
4941 return -EINVAL;
4942 data = kmalloc(sizeof(*data), GFP_NOFS);
4943 if (!data)
4944 return -ENOMEM;
4945 data->lsp = lsp;
4946 data->server = server;
4947 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
4948 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
4949 data->args.lock_owner.s_dev = server->s_dev;
4950 msg.rpc_argp = &data->args;
4951 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
4952 return 0;
4953}
4954
4955#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
4956
4957static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry, const char *key,
4958 const void *buf, size_t buflen,
4959 int flags, int type)
4960{
4961 if (strcmp(key, "") != 0)
4962 return -EINVAL;
4963
4964 return nfs4_proc_set_acl(dentry->d_inode, buf, buflen);
4965}
4966
4967static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry, const char *key,
4968 void *buf, size_t buflen, int type)
4969{
4970 if (strcmp(key, "") != 0)
4971 return -EINVAL;
4972
4973 return nfs4_proc_get_acl(dentry->d_inode, buf, buflen);
4974}
4975
4976static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry, char *list,
4977 size_t list_len, const char *name,
4978 size_t name_len, int type)
4979{
4980 size_t len = sizeof(XATTR_NAME_NFSV4_ACL);
4981
4982 if (!nfs4_server_supports_acls(NFS_SERVER(dentry->d_inode)))
4983 return 0;
4984
4985 if (list && len <= list_len)
4986 memcpy(list, XATTR_NAME_NFSV4_ACL, len);
4987 return len;
4988}
4989
4990/*
4991 * nfs_fhget will use either the mounted_on_fileid or the fileid
4992 */
4993static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
4994{
4995 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
4996 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
4997 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
4998 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
4999 return;
5000
5001 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
5002 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
5003 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
5004 fattr->nlink = 2;
5005}
5006
5007static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
5008 const struct qstr *name,
5009 struct nfs4_fs_locations *fs_locations,
5010 struct page *page)
5011{
5012 struct nfs_server *server = NFS_SERVER(dir);
5013 u32 bitmask[2] = {
5014 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
5015 };
5016 struct nfs4_fs_locations_arg args = {
5017 .dir_fh = NFS_FH(dir),
5018 .name = name,
5019 .page = page,
5020 .bitmask = bitmask,
5021 };
5022 struct nfs4_fs_locations_res res = {
5023 .fs_locations = fs_locations,
5024 };
5025 struct rpc_message msg = {
5026 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
5027 .rpc_argp = &args,
5028 .rpc_resp = &res,
5029 };
5030 int status;
5031
5032 dprintk("%s: start\n", __func__);
5033
5034 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
5035 * is not supported */
5036 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
5037 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
5038 else
5039 bitmask[0] |= FATTR4_WORD0_FILEID;
5040
5041 nfs_fattr_init(&fs_locations->fattr);
5042 fs_locations->server = server;
5043 fs_locations->nlocations = 0;
5044 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
5045 dprintk("%s: returned status = %d\n", __func__, status);
5046 return status;
5047}
5048
5049int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
5050 const struct qstr *name,
5051 struct nfs4_fs_locations *fs_locations,
5052 struct page *page)
5053{
5054 struct nfs4_exception exception = { };
5055 int err;
5056 do {
5057 err = nfs4_handle_exception(NFS_SERVER(dir),
5058 _nfs4_proc_fs_locations(client, dir, name, fs_locations, page),
5059 &exception);
5060 } while (exception.retry);
5061 return err;
5062}
5063
5064static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors)
5065{
5066 int status;
5067 struct nfs4_secinfo_arg args = {
5068 .dir_fh = NFS_FH(dir),
5069 .name = name,
5070 };
5071 struct nfs4_secinfo_res res = {
5072 .flavors = flavors,
5073 };
5074 struct rpc_message msg = {
5075 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
5076 .rpc_argp = &args,
5077 .rpc_resp = &res,
5078 };
5079
5080 dprintk("NFS call secinfo %s\n", name->name);
5081 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
5082 dprintk("NFS reply secinfo: %d\n", status);
5083 return status;
5084}
5085
5086int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
5087 struct nfs4_secinfo_flavors *flavors)
5088{
5089 struct nfs4_exception exception = { };
5090 int err;
5091 do {
5092 err = nfs4_handle_exception(NFS_SERVER(dir),
5093 _nfs4_proc_secinfo(dir, name, flavors),
5094 &exception);
5095 } while (exception.retry);
5096 return err;
5097}
5098
5099#ifdef CONFIG_NFS_V4_1
5100/*
5101 * Check the exchange flags returned by the server for invalid flags:
5102 * flags outside the allowed mask, both the PNFS and NON_PNFS flags set,
5103 * or none of the NON_PNFS, PNFS, or DS flags set.
5104 */
5105static int nfs4_check_cl_exchange_flags(u32 flags)
5106{
5107 if (flags & ~EXCHGID4_FLAG_MASK_R)
5108 goto out_inval;
5109 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
5110 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
5111 goto out_inval;
5112 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
5113 goto out_inval;
5114 return NFS_OK;
5115out_inval:
5116 return -NFS4ERR_INVAL;
5117}
5118
5119static bool
5120nfs41_same_server_scope(struct nfs41_server_scope *a,
5121 struct nfs41_server_scope *b)
5122{
5123 if (a->server_scope_sz == b->server_scope_sz &&
5124 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
5125 return true;
5126
5127 return false;
5128}
5129
5130/*
5131 * nfs4_proc_bind_conn_to_session()
5132 *
5133 * The 4.1 client currently uses the same TCP connection for the
5134 * fore and backchannel.
5135 */
5136int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
5137{
5138 int status;
5139 struct nfs41_bind_conn_to_session_res res;
5140 struct rpc_message msg = {
5141 .rpc_proc =
5142 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
5143 .rpc_argp = clp,
5144 .rpc_resp = &res,
5145 .rpc_cred = cred,
5146 };
5147
5148 dprintk("--> %s\n", __func__);
5149 BUG_ON(clp == NULL);
5150
5151 res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
5152 if (unlikely(res.session == NULL)) {
5153 status = -ENOMEM;
5154 goto out;
5155 }
5156
5157 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5158 if (status == 0) {
5159 if (memcmp(res.session->sess_id.data,
5160 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
5161 dprintk("NFS: %s: Session ID mismatch\n", __func__);
5162 status = -EIO;
5163 goto out_session;
5164 }
5165 if (res.dir != NFS4_CDFS4_BOTH) {
5166 dprintk("NFS: %s: Unexpected direction from server\n",
5167 __func__);
5168 status = -EIO;
5169 goto out_session;
5170 }
5171 if (res.use_conn_in_rdma_mode) {
5172 dprintk("NFS: %s: Server returned RDMA mode = true\n",
5173 __func__);
5174 status = -EIO;
5175 goto out_session;
5176 }
5177 }
5178out_session:
5179 kfree(res.session);
5180out:
5181 dprintk("<-- %s status= %d\n", __func__, status);
5182 return status;
5183}
5184
5185/*
5186 * nfs4_proc_exchange_id()
5187 *
5188 * Since the clientid has expired, all compounds using sessions
5189 * associated with the stale clientid will return NFS4ERR_BADSESSION
5190 * in the sequence operation, and will therefore be in some phase of
5191 * session reset.
5192 */
5193int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
5194{
5195 nfs4_verifier verifier;
5196 struct nfs41_exchange_id_args args = {
5197 .verifier = &verifier,
5198 .client = clp,
5199 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
5200 };
5201 struct nfs41_exchange_id_res res = {
5202 0
5203 };
5204 int status;
5205 struct rpc_message msg = {
5206 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
5207 .rpc_argp = &args,
5208 .rpc_resp = &res,
5209 .rpc_cred = cred,
5210 };
5211
5212 dprintk("--> %s\n", __func__);
5213 BUG_ON(clp == NULL);
5214
5215 nfs4_init_boot_verifier(clp, &verifier);
5216
5217 args.id_len = scnprintf(args.id, sizeof(args.id),
5218 "%s/%s/%u",
5219 clp->cl_ipaddr,
5220 clp->cl_rpcclient->cl_nodename,
5221 clp->cl_rpcclient->cl_auth->au_flavor);
5222
5223 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
5224 GFP_NOFS);
5225 if (unlikely(res.server_owner == NULL)) {
5226 status = -ENOMEM;
5227 goto out;
5228 }
5229
5230 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
5231 GFP_NOFS);
5232 if (unlikely(res.server_scope == NULL)) {
5233 status = -ENOMEM;
5234 goto out_server_owner;
5235 }
5236
5237 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
5238 if (unlikely(res.impl_id == NULL)) {
5239 status = -ENOMEM;
5240 goto out_server_scope;
5241 }
5242
5243 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5244 if (status == 0)
5245 status = nfs4_check_cl_exchange_flags(res.flags);
5246
5247 if (status == 0) {
5248 clp->cl_clientid = res.clientid;
5249 clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R);
5250 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R))
5251 clp->cl_seqid = res.seqid;
5252
5253 kfree(clp->cl_serverowner);
5254 clp->cl_serverowner = res.server_owner;
5255 res.server_owner = NULL;
5256
5257 /* use the most recent implementation id */
5258 kfree(clp->cl_implid);
5259 clp->cl_implid = res.impl_id;
5260
5261 if (clp->cl_serverscope != NULL &&
5262 !nfs41_same_server_scope(clp->cl_serverscope,
5263 res.server_scope)) {
5264 dprintk("%s: server_scope mismatch detected\n",
5265 __func__);
5266 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
5267 kfree(clp->cl_serverscope);
5268 clp->cl_serverscope = NULL;
5269 }
5270
5271 if (clp->cl_serverscope == NULL) {
5272 clp->cl_serverscope = res.server_scope;
5273 goto out;
5274 }
5275 } else
5276 kfree(res.impl_id);
5277
5278out_server_owner:
5279 kfree(res.server_owner);
5280out_server_scope:
5281 kfree(res.server_scope);
5282out:
5283 if (clp->cl_implid != NULL)
5284 dprintk("%s: Server Implementation ID: "
5285 "domain: %s, name: %s, date: %llu,%u\n",
5286 __func__, clp->cl_implid->domain, clp->cl_implid->name,
5287 clp->cl_implid->date.seconds,
5288 clp->cl_implid->date.nseconds);
5289 dprintk("<-- %s status= %d\n", __func__, status);
5290 return status;
5291}
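
/*
 * Minimal sketch (not built) of the order in which the call above is meant to
 * be used when (re)establishing NFSv4.1 state: EXCHANGE_ID first, then
 * CREATE_SESSION. example_establish_lease() is a hypothetical helper, not the
 * actual recovery path driven by the state manager.
 */
#if 0
static int example_establish_lease(struct nfs_client *clp)
{
	struct rpc_cred *cred = nfs4_get_exchange_id_cred(clp);
	int status;

	status = nfs4_proc_exchange_id(clp, cred);
	if (status == 0)
		status = nfs4_proc_create_session(clp, cred);
	if (cred)
		put_rpccred(cred);
	return status;
}
#endif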
5292
5293static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
5294 struct rpc_cred *cred)
5295{
5296 struct rpc_message msg = {
5297 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
5298 .rpc_argp = clp,
5299 .rpc_cred = cred,
5300 };
5301 int status;
5302
5303 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5304 if (status)
5305 dprintk("NFS: Got error %d from the server %s on "
5306 "DESTROY_CLIENTID.", status, clp->cl_hostname);
5307 return status;
5308}
5309
5310static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
5311 struct rpc_cred *cred)
5312{
5313 unsigned int loop;
5314 int ret;
5315
5316 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
5317 ret = _nfs4_proc_destroy_clientid(clp, cred);
5318 switch (ret) {
5319 case -NFS4ERR_DELAY:
5320 case -NFS4ERR_CLIENTID_BUSY:
5321 ssleep(1);
5322 break;
5323 default:
5324 return ret;
5325 }
5326 }
5327 return 0;
5328}
5329
5330int nfs4_destroy_clientid(struct nfs_client *clp)
5331{
5332 struct rpc_cred *cred;
5333 int ret = 0;
5334
5335 if (clp->cl_mvops->minor_version < 1)
5336 goto out;
5337 if (clp->cl_exchange_flags == 0)
5338 goto out;
5339 cred = nfs4_get_exchange_id_cred(clp);
5340 ret = nfs4_proc_destroy_clientid(clp, cred);
5341 if (cred)
5342 put_rpccred(cred);
5343 switch (ret) {
5344 case 0:
5345 case -NFS4ERR_STALE_CLIENTID:
5346 clp->cl_exchange_flags = 0;
5347 }
5348out:
5349 return ret;
5350}
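
/*
 * Minimal sketch (not built): on teardown the session has to go away before
 * the clientid can be destroyed (the server returns NFS4ERR_CLIENTID_BUSY
 * while sessions or other state remain, which is why the loop above retries
 * on it). The helper below is hypothetical; the real shutdown path lives in
 * the client setup/teardown code.
 */
#if 0
static void example_shutdown_v4_1_state(struct nfs_client *clp)
{
	if (clp->cl_session != NULL)
		nfs4_destroy_session(clp->cl_session);
	nfs4_destroy_clientid(clp);
}
#endif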
5351
5352struct nfs4_get_lease_time_data {
5353 struct nfs4_get_lease_time_args *args;
5354 struct nfs4_get_lease_time_res *res;
5355 struct nfs_client *clp;
5356};
5357
5358static void nfs4_get_lease_time_prepare(struct rpc_task *task,
5359 void *calldata)
5360{
5361 int ret;
5362 struct nfs4_get_lease_time_data *data =
5363 (struct nfs4_get_lease_time_data *)calldata;
5364
5365 dprintk("--> %s\n", __func__);
5366 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
5367 /* just set up the sequence; do not trigger session recovery,
5368 since we're invoked from within one */
5369 ret = nfs41_setup_sequence(data->clp->cl_session,
5370 &data->args->la_seq_args,
5371 &data->res->lr_seq_res, task);
5372
5373 BUG_ON(ret == -EAGAIN);
5374 rpc_call_start(task);
5375 dprintk("<-- %s\n", __func__);
5376}
5377
5378/*
5379 * Called from nfs4_state_manager thread for session setup, so don't recover
5380 * from sequence operation or clientid errors.
5381 */
5382static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
5383{
5384 struct nfs4_get_lease_time_data *data =
5385 (struct nfs4_get_lease_time_data *)calldata;
5386
5387 dprintk("--> %s\n", __func__);
5388 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
5389 return;
5390 switch (task->tk_status) {
5391 case -NFS4ERR_DELAY:
5392 case -NFS4ERR_GRACE:
5393 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
5394 rpc_delay(task, NFS4_POLL_RETRY_MIN);
5395 task->tk_status = 0;
5396 /* fall through */
5397 case -NFS4ERR_RETRY_UNCACHED_REP:
5398 rpc_restart_call_prepare(task);
5399 return;
5400 }
5401 dprintk("<-- %s\n", __func__);
5402}
5403
5404static const struct rpc_call_ops nfs4_get_lease_time_ops = {
5405 .rpc_call_prepare = nfs4_get_lease_time_prepare,
5406 .rpc_call_done = nfs4_get_lease_time_done,
5407};
5408
5409int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
5410{
5411 struct rpc_task *task;
5412 struct nfs4_get_lease_time_args args;
5413 struct nfs4_get_lease_time_res res = {
5414 .lr_fsinfo = fsinfo,
5415 };
5416 struct nfs4_get_lease_time_data data = {
5417 .args = &args,
5418 .res = &res,
5419 .clp = clp,
5420 };
5421 struct rpc_message msg = {
5422 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
5423 .rpc_argp = &args,
5424 .rpc_resp = &res,
5425 };
5426 struct rpc_task_setup task_setup = {
5427 .rpc_client = clp->cl_rpcclient,
5428 .rpc_message = &msg,
5429 .callback_ops = &nfs4_get_lease_time_ops,
5430 .callback_data = &data,
5431 .flags = RPC_TASK_TIMEOUT,
5432 };
5433 int status;
5434
5435 nfs41_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
5436 dprintk("--> %s\n", __func__);
5437 task = rpc_run_task(&task_setup);
5438
5439 if (IS_ERR(task))
5440 status = PTR_ERR(task);
5441 else {
5442 status = task->tk_status;
5443 rpc_put_task(task);
5444 }
5445 dprintk("<-- %s return %d\n", __func__, status);
5446
5447 return status;
5448}
5449
5450static struct nfs4_slot *nfs4_alloc_slots(u32 max_slots, gfp_t gfp_flags)
5451{
5452 return kcalloc(max_slots, sizeof(struct nfs4_slot), gfp_flags);
5453}
5454
5455static void nfs4_add_and_init_slots(struct nfs4_slot_table *tbl,
5456 struct nfs4_slot *new,
5457 u32 max_slots,
5458 u32 ivalue)
5459{
5460 struct nfs4_slot *old = NULL;
5461 u32 i;
5462
5463 spin_lock(&tbl->slot_tbl_lock);
5464 if (new) {
5465 old = tbl->slots;
5466 tbl->slots = new;
5467 tbl->max_slots = max_slots;
5468 }
5469 tbl->highest_used_slotid = -1; /* no slot is currently used */
5470 for (i = 0; i < tbl->max_slots; i++)
5471 tbl->slots[i].seq_nr = ivalue;
5472 spin_unlock(&tbl->slot_tbl_lock);
5473 kfree(old);
5474}
5475
5476/*
5477 * (re)Initialise a slot table
5478 */
5479static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl, u32 max_reqs,
5480 u32 ivalue)
5481{
5482 struct nfs4_slot *new = NULL;
5483 int ret = -ENOMEM;
5484
5485 dprintk("--> %s: max_reqs=%u, tbl->max_slots %d\n", __func__,
5486 max_reqs, tbl->max_slots);
5487
5488 /* Does the newly negotiated max_reqs match the existing slot table? */
5489 if (max_reqs != tbl->max_slots) {
5490 new = nfs4_alloc_slots(max_reqs, GFP_NOFS);
5491 if (!new)
5492 goto out;
5493 }
5494 ret = 0;
5495
5496 nfs4_add_and_init_slots(tbl, new, max_reqs, ivalue);
5497 dprintk("%s: tbl=%p slots=%p max_slots=%d\n", __func__,
5498 tbl, tbl->slots, tbl->max_slots);
5499out:
5500 dprintk("<-- %s: return %d\n", __func__, ret);
5501 return ret;
5502}
5503
5504/* Destroy the slot table */
5505static void nfs4_destroy_slot_tables(struct nfs4_session *session)
5506{
5507 if (session->fc_slot_table.slots != NULL) {
5508 kfree(session->fc_slot_table.slots);
5509 session->fc_slot_table.slots = NULL;
5510 }
5511 if (session->bc_slot_table.slots != NULL) {
5512 kfree(session->bc_slot_table.slots);
5513 session->bc_slot_table.slots = NULL;
5514 }
5515 return;
5516}
5517
5518/*
5519 * Initialize or reset the forechannel and backchannel tables
5520 */
5521static int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
5522{
5523 struct nfs4_slot_table *tbl;
5524 int status;
5525
5526 dprintk("--> %s\n", __func__);
5527 /* Fore channel */
5528 tbl = &ses->fc_slot_table;
5529 status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
5530 if (status) /* -ENOMEM */
5531 return status;
5532 /* Back channel */
5533 tbl = &ses->bc_slot_table;
5534 status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
5535 if (status && tbl->slots == NULL)
5536 /* Fore and back channel share a connection so get
5537 * both slot tables or neither */
5538 nfs4_destroy_slot_tables(ses);
5539 return status;
5540}
5541
5542struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
5543{
5544 struct nfs4_session *session;
5545 struct nfs4_slot_table *tbl;
5546
5547 session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
5548 if (!session)
5549 return NULL;
5550
5551 tbl = &session->fc_slot_table;
5552 tbl->highest_used_slotid = NFS4_NO_SLOT;
5553 spin_lock_init(&tbl->slot_tbl_lock);
5554 rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, "ForeChannel Slot table");
5555 init_completion(&tbl->complete);
5556
5557 tbl = &session->bc_slot_table;
5558 tbl->highest_used_slotid = NFS4_NO_SLOT;
5559 spin_lock_init(&tbl->slot_tbl_lock);
5560 rpc_init_wait_queue(&tbl->slot_tbl_waitq, "BackChannel Slot table");
5561 init_completion(&tbl->complete);
5562
5563 session->session_state = 1<<NFS4_SESSION_INITING;
5564
5565 session->clp = clp;
5566 return session;
5567}
5568
5569void nfs4_destroy_session(struct nfs4_session *session)
5570{
5571 struct rpc_xprt *xprt;
5572 struct rpc_cred *cred;
5573
5574 cred = nfs4_get_exchange_id_cred(session->clp);
5575 nfs4_proc_destroy_session(session, cred);
5576 if (cred)
5577 put_rpccred(cred);
5578
5579 rcu_read_lock();
5580 xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
5581 rcu_read_unlock();
5582 dprintk("%s Destroy backchannel for xprt %p\n",
5583 __func__, xprt);
5584 xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
5585 nfs4_destroy_slot_tables(session);
5586 kfree(session);
5587}
5588
5589/*
5590 * Initialize the values to be used by the client in CREATE_SESSION.
5591 * If nfs4_init_session has set the fore channel request and response
5592 * sizes, use them.
5593 *
5594 * Set the back channel max_resp_sz_cached to zero to force the client to
5595 * always set csa_cachethis to FALSE because the current implementation
5596 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
5597 */
5598static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
5599{
5600 struct nfs4_session *session = args->client->cl_session;
5601 unsigned int mxrqst_sz = session->fc_attrs.max_rqst_sz,
5602 mxresp_sz = session->fc_attrs.max_resp_sz;
5603
5604 if (mxrqst_sz == 0)
5605 mxrqst_sz = NFS_MAX_FILE_IO_SIZE;
5606 if (mxresp_sz == 0)
5607 mxresp_sz = NFS_MAX_FILE_IO_SIZE;
5608 /* Fore channel attributes */
5609 args->fc_attrs.max_rqst_sz = mxrqst_sz;
5610 args->fc_attrs.max_resp_sz = mxresp_sz;
5611 args->fc_attrs.max_ops = NFS4_MAX_OPS;
5612 args->fc_attrs.max_reqs = max_session_slots;
5613
5614 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
5615 "max_ops=%u max_reqs=%u\n",
5616 __func__,
5617 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
5618 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
5619
5620 /* Back channel attributes */
5621 args->bc_attrs.max_rqst_sz = PAGE_SIZE;
5622 args->bc_attrs.max_resp_sz = PAGE_SIZE;
5623 args->bc_attrs.max_resp_sz_cached = 0;
5624 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
5625 args->bc_attrs.max_reqs = 1;
5626
5627 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
5628 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
5629 __func__,
5630 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
5631 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
5632 args->bc_attrs.max_reqs);
5633}
5634
5635static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5636{
5637 struct nfs4_channel_attrs *sent = &args->fc_attrs;
5638 struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
5639
5640 if (rcvd->max_resp_sz > sent->max_resp_sz)
5641 return -EINVAL;
5642 /*
5643 * Our requested max_ops is the minimum we need; we're not
5644 * prepared to break up compounds into smaller pieces than that.
5645 * So, no point even trying to continue if the server won't
5646 * cooperate:
5647 */
5648 if (rcvd->max_ops < sent->max_ops)
5649 return -EINVAL;
5650 if (rcvd->max_reqs == 0)
5651 return -EINVAL;
5652 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
5653 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
5654 return 0;
5655}
5656
5657static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
5658{
5659 struct nfs4_channel_attrs *sent = &args->bc_attrs;
5660 struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
5661
5662 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
5663 return -EINVAL;
5664 if (rcvd->max_resp_sz < sent->max_resp_sz)
5665 return -EINVAL;
5666 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
5667 return -EINVAL;
5668 /* These would render the backchannel useless: */
5669 if (rcvd->max_ops != sent->max_ops)
5670 return -EINVAL;
5671 if (rcvd->max_reqs != sent->max_reqs)
5672 return -EINVAL;
5673 return 0;
5674}
5675
5676static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
5677 struct nfs4_session *session)
5678{
5679 int ret;
5680
5681 ret = nfs4_verify_fore_channel_attrs(args, session);
5682 if (ret)
5683 return ret;
5684 return nfs4_verify_back_channel_attrs(args, session);
5685}
5686
5687static int _nfs4_proc_create_session(struct nfs_client *clp,
5688 struct rpc_cred *cred)
5689{
5690 struct nfs4_session *session = clp->cl_session;
5691 struct nfs41_create_session_args args = {
5692 .client = clp,
5693 .cb_program = NFS4_CALLBACK,
5694 };
5695 struct nfs41_create_session_res res = {
5696 .client = clp,
5697 };
5698 struct rpc_message msg = {
5699 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
5700 .rpc_argp = &args,
5701 .rpc_resp = &res,
5702 .rpc_cred = cred,
5703 };
5704 int status;
5705
5706 nfs4_init_channel_attrs(&args);
5707 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
5708
5709 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5710
5711 if (!status)
5712 /* Verify the session's negotiated channel_attrs values */
5713 status = nfs4_verify_channel_attrs(&args, session);
5714 if (!status) {
5715 /* Increment the clientid slot sequence id */
5716 clp->cl_seqid++;
5717 }
5718
5719 return status;
5720}
5721
5722/*
5723 * Issues a CREATE_SESSION operation to the server.
5724 * It is the responsibility of the caller to verify the session is
5725 * expired before calling this routine.
5726 */
5727int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
5728{
5729 int status;
5730 unsigned *ptr;
5731 struct nfs4_session *session = clp->cl_session;
5732
5733 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
5734
5735 status = _nfs4_proc_create_session(clp, cred);
5736 if (status)
5737 goto out;
5738
5739 /* Init or reset the session slot tables */
5740 status = nfs4_setup_session_slot_tables(session);
5741 dprintk("slot table setup returned %d\n", status);
5742 if (status)
5743 goto out;
5744
5745 ptr = (unsigned *)&session->sess_id.data[0];
5746 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
5747 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
5748out:
5749 dprintk("<-- %s\n", __func__);
5750 return status;
5751}
5752
5753/*
5754 * Issue the over-the-wire RPC DESTROY_SESSION.
5755 * The caller must serialize access to this routine.
5756 */
5757int nfs4_proc_destroy_session(struct nfs4_session *session,
5758 struct rpc_cred *cred)
5759{
5760 struct rpc_message msg = {
5761 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
5762 .rpc_argp = session,
5763 .rpc_cred = cred,
5764 };
5765 int status = 0;
5766
5767 dprintk("--> nfs4_proc_destroy_session\n");
5768
5769 /* session is still being setup */
5770 if (session->clp->cl_cons_state != NFS_CS_READY)
5771 return status;
5772
5773 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5774
5775 if (status)
5776 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
5777 "Session has been destroyed regardless...\n", status);
5778
5779 dprintk("<-- nfs4_proc_destroy_session\n");
5780 return status;
5781}
5782
5783/*
5784 * With sessions, the client is not marked ready until after a
5785 * successful EXCHANGE_ID and CREATE_SESSION.
5786 *
5787 * Map cl_cons_state errors to EPROTONOSUPPORT to indicate that
5788 * other versions of NFS can be tried.
5789 */
5790static int nfs41_check_session_ready(struct nfs_client *clp)
5791{
5792 int ret;
5793
5794 if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
5795 ret = nfs4_client_recover_expired_lease(clp);
5796 if (ret)
5797 return ret;
5798 }
5799 if (clp->cl_cons_state < NFS_CS_READY)
5800 return -EPROTONOSUPPORT;
5801 smp_rmb();
5802 return 0;
5803}
5804
5805int nfs4_init_session(struct nfs_server *server)
5806{
5807 struct nfs_client *clp = server->nfs_client;
5808 struct nfs4_session *session;
5809 unsigned int rsize, wsize;
5810
5811 if (!nfs4_has_session(clp))
5812 return 0;
5813
5814 session = clp->cl_session;
5815 spin_lock(&clp->cl_lock);
5816 if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
5817
5818 rsize = server->rsize;
5819 if (rsize == 0)
5820 rsize = NFS_MAX_FILE_IO_SIZE;
5821 wsize = server->wsize;
5822 if (wsize == 0)
5823 wsize = NFS_MAX_FILE_IO_SIZE;
5824
5825 session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
5826 session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
5827 }
5828 spin_unlock(&clp->cl_lock);
5829
5830 return nfs41_check_session_ready(clp);
5831}
5832
5833int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
5834{
5835 struct nfs4_session *session = clp->cl_session;
5836 int ret;
5837
5838 spin_lock(&clp->cl_lock);
5839 if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
5840 /*
5841 * Do not set NFS_CS_CHECK_LEASE_TIME; instead, set the
5842 * DS lease to be equal to the MDS lease.
5843 */
5844 clp->cl_lease_time = lease_time;
5845 clp->cl_last_renewal = jiffies;
5846 }
5847 spin_unlock(&clp->cl_lock);
5848
5849 ret = nfs41_check_session_ready(clp);
5850 if (ret)
5851 return ret;
5852 /* Test for the DS role */
5853 if (!is_ds_client(clp))
5854 return -ENODEV;
5855 return 0;
5856}
5857EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
5858
5859
5860/*
5861 * Renew the cl_session lease.
5862 */
5863struct nfs4_sequence_data {
5864 struct nfs_client *clp;
5865 struct nfs4_sequence_args args;
5866 struct nfs4_sequence_res res;
5867};
5868
5869static void nfs41_sequence_release(void *data)
5870{
5871 struct nfs4_sequence_data *calldata = data;
5872 struct nfs_client *clp = calldata->clp;
5873
5874 if (atomic_read(&clp->cl_count) > 1)
5875 nfs4_schedule_state_renewal(clp);
5876 nfs_put_client(clp);
5877 kfree(calldata);
5878}
5879
5880static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
5881{
5882 switch(task->tk_status) {
5883 case -NFS4ERR_DELAY:
5884 rpc_delay(task, NFS4_POLL_RETRY_MAX);
5885 return -EAGAIN;
5886 default:
5887 nfs4_schedule_lease_recovery(clp);
5888 }
5889 return 0;
5890}
5891
5892static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
5893{
5894 struct nfs4_sequence_data *calldata = data;
5895 struct nfs_client *clp = calldata->clp;
5896
5897 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
5898 return;
5899
5900 if (task->tk_status < 0) {
5901 dprintk("%s ERROR %d\n", __func__, task->tk_status);
5902 if (atomic_read(&clp->cl_count) == 1)
5903 goto out;
5904
5905 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
5906 rpc_restart_call_prepare(task);
5907 return;
5908 }
5909 }
5910 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
5911out:
5912 dprintk("<-- %s\n", __func__);
5913}
5914
5915static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
5916{
5917 struct nfs4_sequence_data *calldata = data;
5918 struct nfs_client *clp = calldata->clp;
5919 struct nfs4_sequence_args *args;
5920 struct nfs4_sequence_res *res;
5921
5922 args = task->tk_msg.rpc_argp;
5923 res = task->tk_msg.rpc_resp;
5924
5925 if (nfs41_setup_sequence(clp->cl_session, args, res, task))
5926 return;
5927 rpc_call_start(task);
5928}
5929
5930static const struct rpc_call_ops nfs41_sequence_ops = {
5931 .rpc_call_done = nfs41_sequence_call_done,
5932 .rpc_call_prepare = nfs41_sequence_prepare,
5933 .rpc_release = nfs41_sequence_release,
5934};
5935
5936static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5937{
5938 struct nfs4_sequence_data *calldata;
5939 struct rpc_message msg = {
5940 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
5941 .rpc_cred = cred,
5942 };
5943 struct rpc_task_setup task_setup_data = {
5944 .rpc_client = clp->cl_rpcclient,
5945 .rpc_message = &msg,
5946 .callback_ops = &nfs41_sequence_ops,
5947 .flags = RPC_TASK_ASYNC | RPC_TASK_SOFT,
5948 };
5949
5950 if (!atomic_inc_not_zero(&clp->cl_count))
5951 return ERR_PTR(-EIO);
5952 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
5953 if (calldata == NULL) {
5954 nfs_put_client(clp);
5955 return ERR_PTR(-ENOMEM);
5956 }
5957 nfs41_init_sequence(&calldata->args, &calldata->res, 0);
5958 msg.rpc_argp = &calldata->args;
5959 msg.rpc_resp = &calldata->res;
5960 calldata->clp = clp;
5961 task_setup_data.callback_data = calldata;
5962
5963 return rpc_run_task(&task_setup_data);
5964}
5965
5966static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
5967{
5968 struct rpc_task *task;
5969 int ret = 0;
5970
5971 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
5972 return 0;
5973 task = _nfs41_proc_sequence(clp, cred);
5974 if (IS_ERR(task))
5975 ret = PTR_ERR(task);
5976 else
5977 rpc_put_task_async(task);
5978 dprintk("<-- %s status=%d\n", __func__, ret);
5979 return ret;
5980}
5981
5982static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5983{
5984 struct rpc_task *task;
5985 int ret;
5986
5987 task = _nfs41_proc_sequence(clp, cred);
5988 if (IS_ERR(task)) {
5989 ret = PTR_ERR(task);
5990 goto out;
5991 }
5992 ret = rpc_wait_for_completion_task(task);
5993 if (!ret) {
5994 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
5995
5996 if (task->tk_status == 0)
5997 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
5998 ret = task->tk_status;
5999 }
6000 rpc_put_task(task);
6001out:
6002 dprintk("<-- %s status=%d\n", __func__, ret);
6003 return ret;
6004}
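
/*
 * Minimal sketch (not built): with sessions, a bare SEQUENCE compound doubles
 * as the lease renewal, so a synchronous "renew" amounts to running
 * nfs4_proc_sequence() under a suitable credential. The helper and its
 * credential handling are assumptions for illustration only.
 */
#if 0
static int example_renew_session_lease(struct nfs_client *clp)
{
	struct rpc_cred *cred = nfs4_get_exchange_id_cred(clp);
	int ret = -ENOMEM;

	if (cred != NULL) {
		ret = nfs4_proc_sequence(clp, cred);
		put_rpccred(cred);
	}
	return ret;
}
#endif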
6005
6006struct nfs4_reclaim_complete_data {
6007 struct nfs_client *clp;
6008 struct nfs41_reclaim_complete_args arg;
6009 struct nfs41_reclaim_complete_res res;
6010};
6011
6012static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
6013{
6014 struct nfs4_reclaim_complete_data *calldata = data;
6015
6016 rpc_task_set_priority(task, RPC_PRIORITY_PRIVILEGED);
6017 if (nfs41_setup_sequence(calldata->clp->cl_session,
6018 &calldata->arg.seq_args,
6019 &calldata->res.seq_res, task))
6020 return;
6021
6022 rpc_call_start(task);
6023}
6024
6025static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
6026{
6027 switch(task->tk_status) {
6028 case 0:
6029 case -NFS4ERR_COMPLETE_ALREADY:
6030 case -NFS4ERR_WRONG_CRED: /* What to do here? */
6031 break;
6032 case -NFS4ERR_DELAY:
6033 rpc_delay(task, NFS4_POLL_RETRY_MAX);
6034 /* fall through */
6035 case -NFS4ERR_RETRY_UNCACHED_REP:
6036 return -EAGAIN;
6037 default:
6038 nfs4_schedule_lease_recovery(clp);
6039 }
6040 return 0;
6041}
6042
6043static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
6044{
6045 struct nfs4_reclaim_complete_data *calldata = data;
6046 struct nfs_client *clp = calldata->clp;
6047 struct nfs4_sequence_res *res = &calldata->res.seq_res;
6048
6049 dprintk("--> %s\n", __func__);
6050 if (!nfs41_sequence_done(task, res))
6051 return;
6052
6053 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
6054 rpc_restart_call_prepare(task);
6055 return;
6056 }
6057 dprintk("<-- %s\n", __func__);
6058}
6059
6060static void nfs4_free_reclaim_complete_data(void *data)
6061{
6062 struct nfs4_reclaim_complete_data *calldata = data;
6063
6064 kfree(calldata);
6065}
6066
6067static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
6068 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
6069 .rpc_call_done = nfs4_reclaim_complete_done,
6070 .rpc_release = nfs4_free_reclaim_complete_data,
6071};
6072
6073/*
6074 * Issue a global reclaim complete.
6075 */
6076static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
6077{
6078 struct nfs4_reclaim_complete_data *calldata;
6079 struct rpc_task *task;
6080 struct rpc_message msg = {
6081 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
6082 };
6083 struct rpc_task_setup task_setup_data = {
6084 .rpc_client = clp->cl_rpcclient,
6085 .rpc_message = &msg,
6086 .callback_ops = &nfs4_reclaim_complete_call_ops,
6087 .flags = RPC_TASK_ASYNC,
6088 };
6089 int status = -ENOMEM;
6090
6091 dprintk("--> %s\n", __func__);
6092 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
6093 if (calldata == NULL)
6094 goto out;
6095 calldata->clp = clp;
6096 calldata->arg.one_fs = 0;
6097
6098 nfs41_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
6099 msg.rpc_argp = &calldata->arg;
6100 msg.rpc_resp = &calldata->res;
6101 task_setup_data.callback_data = calldata;
6102 task = rpc_run_task(&task_setup_data);
6103 if (IS_ERR(task)) {
6104 status = PTR_ERR(task);
6105 goto out;
6106 }
6107 status = nfs4_wait_for_completion_rpc_task(task);
6108 if (status == 0)
6109 status = task->tk_status;
6110 rpc_put_task(task);
6111 return status;
6112out:
6113 dprintk("<-- %s status=%d\n", __func__, status);
6114 return status;
6115}
6116
6117static void
6118nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
6119{
6120 struct nfs4_layoutget *lgp = calldata;
6121 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6122
6123 dprintk("--> %s\n", __func__);
6124 /* Note there is a race here, where a CB_LAYOUTRECALL can come in
6125 * right now covering the LAYOUTGET we are about to send.
6126 * However, that is not so catastrophic, and there seems
6127 * to be no way to prevent it completely.
6128 */
6129 if (nfs4_setup_sequence(server, &lgp->args.seq_args,
6130 &lgp->res.seq_res, task))
6131 return;
6132 if (pnfs_choose_layoutget_stateid(&lgp->args.stateid,
6133 NFS_I(lgp->args.inode)->layout,
6134 lgp->args.ctx->state)) {
6135 rpc_exit(task, NFS4_OK);
6136 return;
6137 }
6138 rpc_call_start(task);
6139}
6140
6141static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
6142{
6143 struct nfs4_layoutget *lgp = calldata;
6144 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6145
6146 dprintk("--> %s\n", __func__);
6147
6148 if (!nfs4_sequence_done(task, &lgp->res.seq_res))
6149 return;
6150
6151 switch (task->tk_status) {
6152 case 0:
6153 break;
6154 case -NFS4ERR_LAYOUTTRYLATER:
6155 case -NFS4ERR_RECALLCONFLICT:
6156 task->tk_status = -NFS4ERR_DELAY;
6157 /* Fall through */
6158 default:
6159 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6160 rpc_restart_call_prepare(task);
6161 return;
6162 }
6163 }
6164 dprintk("<-- %s\n", __func__);
6165}
6166
6167static size_t max_response_pages(struct nfs_server *server)
6168{
6169 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
6170 return nfs_page_array_len(0, max_resp_sz);
6171}
6172
6173static void nfs4_free_pages(struct page **pages, size_t size)
6174{
6175 int i;
6176
6177 if (!pages)
6178 return;
6179
6180 for (i = 0; i < size; i++) {
6181 if (!pages[i])
6182 break;
6183 __free_page(pages[i]);
6184 }
6185 kfree(pages);
6186}
6187
6188static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
6189{
6190 struct page **pages;
6191 int i;
6192
6193 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
6194 if (!pages) {
6195 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
6196 return NULL;
6197 }
6198
6199 for (i = 0; i < size; i++) {
6200 pages[i] = alloc_page(gfp_flags);
6201 if (!pages[i]) {
6202 dprintk("%s: failed to allocate page\n", __func__);
6203 nfs4_free_pages(pages, size);
6204 return NULL;
6205 }
6206 }
6207
6208 return pages;
6209}
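
/*
 * Minimal sketch (not built): the two helpers above are used as a pair, sized
 * by max_response_pages(), exactly as nfs4_proc_layoutget() and
 * nfs4_layoutget_release() do below; the helper here only illustrates that
 * pairing.
 */
#if 0
static int example_layout_page_array(struct nfs_server *server, gfp_t gfp_flags)
{
	size_t max_pages = max_response_pages(server);
	struct page **pages = nfs4_alloc_pages(max_pages, gfp_flags);

	if (pages == NULL)
		return -ENOMEM;
	/* ... hand pages/max_pages to the LAYOUTGET arguments here ... */
	nfs4_free_pages(pages, max_pages);
	return 0;
}
#endif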
6210
6211static void nfs4_layoutget_release(void *calldata)
6212{
6213 struct nfs4_layoutget *lgp = calldata;
6214 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6215 size_t max_pages = max_response_pages(server);
6216
6217 dprintk("--> %s\n", __func__);
6218 nfs4_free_pages(lgp->args.layout.pages, max_pages);
6219 put_nfs_open_context(lgp->args.ctx);
6220 kfree(calldata);
6221 dprintk("<-- %s\n", __func__);
6222}
6223
6224static const struct rpc_call_ops nfs4_layoutget_call_ops = {
6225 .rpc_call_prepare = nfs4_layoutget_prepare,
6226 .rpc_call_done = nfs4_layoutget_done,
6227 .rpc_release = nfs4_layoutget_release,
6228};
6229
6230int nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
6231{
6232 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
6233 size_t max_pages = max_response_pages(server);
6234 struct rpc_task *task;
6235 struct rpc_message msg = {
6236 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
6237 .rpc_argp = &lgp->args,
6238 .rpc_resp = &lgp->res,
6239 };
6240 struct rpc_task_setup task_setup_data = {
6241 .rpc_client = server->client,
6242 .rpc_message = &msg,
6243 .callback_ops = &nfs4_layoutget_call_ops,
6244 .callback_data = lgp,
6245 .flags = RPC_TASK_ASYNC,
6246 };
6247 int status = 0;
6248
6249 dprintk("--> %s\n", __func__);
6250
6251 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
6252 if (!lgp->args.layout.pages) {
6253 nfs4_layoutget_release(lgp);
6254 return -ENOMEM;
6255 }
6256 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
6257
6258 lgp->res.layoutp = &lgp->args.layout;
6259 lgp->res.seq_res.sr_slot = NULL;
6260 nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
6261 task = rpc_run_task(&task_setup_data);
6262 if (IS_ERR(task))
6263 return PTR_ERR(task);
6264 status = nfs4_wait_for_completion_rpc_task(task);
6265 if (status == 0)
6266 status = task->tk_status;
6267 if (status == 0)
6268 status = pnfs_layout_process(lgp);
6269 rpc_put_task(task);
6270 dprintk("<-- %s status=%d\n", __func__, status);
6271 return status;
6272}
6273
6274static void
6275nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
6276{
6277 struct nfs4_layoutreturn *lrp = calldata;
6278
6279 dprintk("--> %s\n", __func__);
6280 if (nfs41_setup_sequence(lrp->clp->cl_session, &lrp->args.seq_args,
6281 &lrp->res.seq_res, task))
6282 return;
6283 rpc_call_start(task);
6284}
6285
6286static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
6287{
6288 struct nfs4_layoutreturn *lrp = calldata;
6289 struct nfs_server *server;
6290 struct pnfs_layout_hdr *lo = lrp->args.layout;
6291
6292 dprintk("--> %s\n", __func__);
6293
6294 if (!nfs4_sequence_done(task, &lrp->res.seq_res))
6295 return;
6296
6297 server = NFS_SERVER(lrp->args.inode);
6298 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6299 rpc_restart_call_prepare(task);
6300 return;
6301 }
6302 spin_lock(&lo->plh_inode->i_lock);
6303 if (task->tk_status == 0 && lrp->res.lrs_present)
6304 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
6305 lo->plh_block_lgets--;
6306 spin_unlock(&lo->plh_inode->i_lock);
6307 dprintk("<-- %s\n", __func__);
6308}
6309
6310static void nfs4_layoutreturn_release(void *calldata)
6311{
6312 struct nfs4_layoutreturn *lrp = calldata;
6313
6314 dprintk("--> %s\n", __func__);
6315 put_layout_hdr(lrp->args.layout);
6316 kfree(calldata);
6317 dprintk("<-- %s\n", __func__);
6318}
6319
6320static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
6321 .rpc_call_prepare = nfs4_layoutreturn_prepare,
6322 .rpc_call_done = nfs4_layoutreturn_done,
6323 .rpc_release = nfs4_layoutreturn_release,
6324};
6325
6326int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp)
6327{
6328 struct rpc_task *task;
6329 struct rpc_message msg = {
6330 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
6331 .rpc_argp = &lrp->args,
6332 .rpc_resp = &lrp->res,
6333 };
6334 struct rpc_task_setup task_setup_data = {
6335 .rpc_client = lrp->clp->cl_rpcclient,
6336 .rpc_message = &msg,
6337 .callback_ops = &nfs4_layoutreturn_call_ops,
6338 .callback_data = lrp,
6339 };
6340 int status;
6341
6342 dprintk("--> %s\n", __func__);
6343 nfs41_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
6344 task = rpc_run_task(&task_setup_data);
6345 if (IS_ERR(task))
6346 return PTR_ERR(task);
6347 status = task->tk_status;
6348 dprintk("<-- %s status=%d\n", __func__, status);
6349 rpc_put_task(task);
6350 return status;
6351}
6352
6353/*
6354 * Retrieve the list of Data Server devices from the MDS.
6355 */
6356static int _nfs4_getdevicelist(struct nfs_server *server,
6357 const struct nfs_fh *fh,
6358 struct pnfs_devicelist *devlist)
6359{
6360 struct nfs4_getdevicelist_args args = {
6361 .fh = fh,
6362 .layoutclass = server->pnfs_curr_ld->id,
6363 };
6364 struct nfs4_getdevicelist_res res = {
6365 .devlist = devlist,
6366 };
6367 struct rpc_message msg = {
6368 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICELIST],
6369 .rpc_argp = &args,
6370 .rpc_resp = &res,
6371 };
6372 int status;
6373
6374 dprintk("--> %s\n", __func__);
6375 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
6376 &res.seq_res, 0);
6377 dprintk("<-- %s status=%d\n", __func__, status);
6378 return status;
6379}
6380
6381int nfs4_proc_getdevicelist(struct nfs_server *server,
6382 const struct nfs_fh *fh,
6383 struct pnfs_devicelist *devlist)
6384{
6385 struct nfs4_exception exception = { };
6386 int err;
6387
6388 do {
6389 err = nfs4_handle_exception(server,
6390 _nfs4_getdevicelist(server, fh, devlist),
6391 &exception);
6392 } while (exception.retry);
6393
6394 dprintk("%s: err=%d, num_devs=%u\n", __func__,
6395 err, devlist->num_devs);
6396
6397 return err;
6398}
6399EXPORT_SYMBOL_GPL(nfs4_proc_getdevicelist);
6400
6401static int
6402_nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
6403{
6404 struct nfs4_getdeviceinfo_args args = {
6405 .pdev = pdev,
6406 };
6407 struct nfs4_getdeviceinfo_res res = {
6408 .pdev = pdev,
6409 };
6410 struct rpc_message msg = {
6411 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
6412 .rpc_argp = &args,
6413 .rpc_resp = &res,
6414 };
6415 int status;
6416
6417 dprintk("--> %s\n", __func__);
6418 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
6419 dprintk("<-- %s status=%d\n", __func__, status);
6420
6421 return status;
6422}
6423
6424int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *pdev)
6425{
6426 struct nfs4_exception exception = { };
6427 int err;
6428
6429 do {
6430 err = nfs4_handle_exception(server,
6431 _nfs4_proc_getdeviceinfo(server, pdev),
6432 &exception);
6433 } while (exception.retry);
6434 return err;
6435}
6436EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
6437
6438static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
6439{
6440 struct nfs4_layoutcommit_data *data = calldata;
6441 struct nfs_server *server = NFS_SERVER(data->args.inode);
6442
6443 if (nfs4_setup_sequence(server, &data->args.seq_args,
6444 &data->res.seq_res, task))
6445 return;
6446 rpc_call_start(task);
6447}
6448
6449static void
6450nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
6451{
6452 struct nfs4_layoutcommit_data *data = calldata;
6453 struct nfs_server *server = NFS_SERVER(data->args.inode);
6454
6455 if (!nfs4_sequence_done(task, &data->res.seq_res))
6456 return;
6457
6458 switch (task->tk_status) { /* Just ignore these failures */
6459 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
6460 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
6461 case -NFS4ERR_BADLAYOUT: /* no layout */
6462 case -NFS4ERR_GRACE: /* loca_reclaim is always false */
6463 task->tk_status = 0;
6464 break;
6465 case 0:
6466 nfs_post_op_update_inode_force_wcc(data->args.inode,
6467 data->res.fattr);
6468 break;
6469 default:
6470 if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) {
6471 rpc_restart_call_prepare(task);
6472 return;
6473 }
6474 }
6475}
6476
6477static void nfs4_layoutcommit_release(void *calldata)
6478{
6479 struct nfs4_layoutcommit_data *data = calldata;
6480 struct pnfs_layout_segment *lseg, *tmp;
6481 unsigned long *bitlock = &NFS_I(data->args.inode)->flags;
6482
6483 pnfs_cleanup_layoutcommit(data);
6484 /* Matched by references in pnfs_set_layoutcommit */
6485 list_for_each_entry_safe(lseg, tmp, &data->lseg_list, pls_lc_list) {
6486 list_del_init(&lseg->pls_lc_list);
6487 if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT,
6488 &lseg->pls_flags))
6489 put_lseg(lseg);
6490 }
6491
6492 clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
6493 smp_mb__after_clear_bit();
6494 wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
6495
6496 put_rpccred(data->cred);
6497 kfree(data);
6498}
6499
6500static const struct rpc_call_ops nfs4_layoutcommit_ops = {
6501 .rpc_call_prepare = nfs4_layoutcommit_prepare,
6502 .rpc_call_done = nfs4_layoutcommit_done,
6503 .rpc_release = nfs4_layoutcommit_release,
6504};
6505
6506int
6507nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
6508{
6509 struct rpc_message msg = {
6510 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
6511 .rpc_argp = &data->args,
6512 .rpc_resp = &data->res,
6513 .rpc_cred = data->cred,
6514 };
6515 struct rpc_task_setup task_setup_data = {
6516 .task = &data->task,
6517 .rpc_client = NFS_CLIENT(data->args.inode),
6518 .rpc_message = &msg,
6519 .callback_ops = &nfs4_layoutcommit_ops,
6520 .callback_data = data,
6521 .flags = RPC_TASK_ASYNC,
6522 };
6523 struct rpc_task *task;
6524 int status = 0;
6525
6526 dprintk("NFS: %4d initiating layoutcommit call. sync %d "
6527 "lbw: %llu inode %lu\n",
6528 data->task.tk_pid, sync,
6529 data->args.lastbytewritten,
6530 data->args.inode->i_ino);
6531
6532 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
6533 task = rpc_run_task(&task_setup_data);
6534 if (IS_ERR(task))
6535 return PTR_ERR(task);
6536 if (!sync)
6537 goto out;
6538 status = nfs4_wait_for_completion_rpc_task(task);
6539 if (status != 0)
6540 goto out;
6541 status = task->tk_status;
6542out:
6543 dprintk("%s: status %d\n", __func__, status);
6544 rpc_put_task(task);
6545 return status;
6546}
6547
6548static int
6549_nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
6550 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
6551{
6552 struct nfs41_secinfo_no_name_args args = {
6553 .style = SECINFO_STYLE_CURRENT_FH,
6554 };
6555 struct nfs4_secinfo_res res = {
6556 .flavors = flavors,
6557 };
6558 struct rpc_message msg = {
6559 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
6560 .rpc_argp = &args,
6561 .rpc_resp = &res,
6562 };
6563 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
6564}
6565
6566static int
6567nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
6568 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
6569{
6570 struct nfs4_exception exception = { };
6571 int err;
6572 do {
6573 err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6574 switch (err) {
6575 case 0:
6576 case -NFS4ERR_WRONGSEC:
6577 case -NFS4ERR_NOTSUPP:
6578 goto out;
6579 default:
6580 err = nfs4_handle_exception(server, err, &exception);
6581 }
6582 } while (exception.retry);
6583out:
6584 return err;
6585}
6586
6587static int
6588nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
6589 struct nfs_fsinfo *info)
6590{
6591 int err;
6592 struct page *page;
6593 rpc_authflavor_t flavor;
6594 struct nfs4_secinfo_flavors *flavors;
6595
6596 page = alloc_page(GFP_KERNEL);
6597 if (!page) {
6598 err = -ENOMEM;
6599 goto out;
6600 }
6601
6602 flavors = page_address(page);
6603 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
6604
6605 /*
6606 * Fall back on "guess and check" method if
6607 * the server doesn't support SECINFO_NO_NAME
6608 */
6609 if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
6610 err = nfs4_find_root_sec(server, fhandle, info);
6611 goto out_freepage;
6612 }
6613 if (err)
6614 goto out_freepage;
6615
6616 flavor = nfs_find_best_sec(flavors);
6617 if (err == 0)
6618 err = nfs4_lookup_root_sec(server, fhandle, info, flavor);
6619
6620out_freepage:
6621 put_page(page);
6622 if (err == -EACCES)
6623 return -EPERM;
6624out:
6625 return err;
6626}
6627
6628static int _nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6629{
6630 int status;
6631 struct nfs41_test_stateid_args args = {
6632 .stateid = stateid,
6633 };
6634 struct nfs41_test_stateid_res res;
6635 struct rpc_message msg = {
6636 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
6637 .rpc_argp = &args,
6638 .rpc_resp = &res,
6639 };
6640
6641 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
6642 status = nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
6643
6644 if (status == NFS_OK)
6645 return res.status;
6646 return status;
6647}
6648
6649static int nfs41_test_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6650{
6651 struct nfs4_exception exception = { };
6652 int err;
6653 do {
6654 err = nfs4_handle_exception(server,
6655 _nfs41_test_stateid(server, stateid),
6656 &exception);
6657 } while (exception.retry);
6658 return err;
6659}
6660
6661static int _nfs4_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6662{
6663 struct nfs41_free_stateid_args args = {
6664 .stateid = stateid,
6665 };
6666 struct nfs41_free_stateid_res res;
6667 struct rpc_message msg = {
6668 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
6669 .rpc_argp = &args,
6670 .rpc_resp = &res,
6671 };
6672
6673 nfs41_init_sequence(&args.seq_args, &res.seq_res, 0);
6674 return nfs4_call_sync_sequence(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
6675}
6676
6677static int nfs41_free_stateid(struct nfs_server *server, nfs4_stateid *stateid)
6678{
6679 struct nfs4_exception exception = { };
6680 int err;
6681 do {
6682 err = nfs4_handle_exception(server,
6683 _nfs4_free_stateid(server, stateid),
6684 &exception);
6685 } while (exception.retry);
6686 return err;
6687}
6688
6689static bool nfs41_match_stateid(const nfs4_stateid *s1,
6690 const nfs4_stateid *s2)
6691{
6692 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
6693 return false;
6694
6695 if (s1->seqid == s2->seqid)
6696 return true;
6697 if (s1->seqid == 0 || s2->seqid == 0)
6698 return true;
6699
6700 return false;
6701}
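
/*
 * Minimal sketch (not built): nfs41_match_stateid() treats a zero seqid as a
 * wildcard, so two stateids with the same "other" field match whenever either
 * seqid is zero. The sample values below are assumptions for illustration.
 */
#if 0
static void example_match_stateid(void)
{
	nfs4_stateid a, b;

	memset(&a, 0, sizeof(a));
	memset(&b, 0, sizeof(b));
	a.seqid = cpu_to_be32(3);

	/* b.seqid == 0 acts as a wildcard: match */
	WARN_ON(!nfs41_match_stateid(&a, &b));

	/* differing non-zero seqids with equal "other": no match */
	b.seqid = cpu_to_be32(4);
	WARN_ON(nfs41_match_stateid(&a, &b));
}
#endif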
6702
6703#endif /* CONFIG_NFS_V4_1 */
6704
6705static bool nfs4_match_stateid(const nfs4_stateid *s1,
6706 const nfs4_stateid *s2)
6707{
6708 return nfs4_stateid_match(s1, s2);
6709}
6710
6711
6712static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
6713 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
6714 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
6715 .recover_open = nfs4_open_reclaim,
6716 .recover_lock = nfs4_lock_reclaim,
6717 .establish_clid = nfs4_init_clientid,
6718 .get_clid_cred = nfs4_get_setclientid_cred,
6719};
6720
6721#if defined(CONFIG_NFS_V4_1)
6722static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
6723 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
6724 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
6725 .recover_open = nfs4_open_reclaim,
6726 .recover_lock = nfs4_lock_reclaim,
6727 .establish_clid = nfs41_init_clientid,
6728 .get_clid_cred = nfs4_get_exchange_id_cred,
6729 .reclaim_complete = nfs41_proc_reclaim_complete,
6730};
6731#endif /* CONFIG_NFS_V4_1 */
6732
6733static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
6734 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
6735 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
6736 .recover_open = nfs4_open_expired,
6737 .recover_lock = nfs4_lock_expired,
6738 .establish_clid = nfs4_init_clientid,
6739 .get_clid_cred = nfs4_get_setclientid_cred,
6740};
6741
6742#if defined(CONFIG_NFS_V4_1)
6743static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
6744 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
6745 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
6746 .recover_open = nfs41_open_expired,
6747 .recover_lock = nfs41_lock_expired,
6748 .establish_clid = nfs41_init_clientid,
6749 .get_clid_cred = nfs4_get_exchange_id_cred,
6750};
6751#endif /* CONFIG_NFS_V4_1 */
6752
6753static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
6754 .sched_state_renewal = nfs4_proc_async_renew,
6755 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
6756 .renew_lease = nfs4_proc_renew,
6757};
6758
6759#if defined(CONFIG_NFS_V4_1)
6760static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
6761 .sched_state_renewal = nfs41_proc_async_sequence,
6762 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
6763 .renew_lease = nfs4_proc_sequence,
6764};
6765#endif
6766
6767static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
6768 .minor_version = 0,
6769 .call_sync = _nfs4_call_sync,
6770 .match_stateid = nfs4_match_stateid,
6771 .find_root_sec = nfs4_find_root_sec,
6772 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
6773 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
6774 .state_renewal_ops = &nfs40_state_renewal_ops,
6775};
6776
6777#if defined(CONFIG_NFS_V4_1)
6778static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
6779 .minor_version = 1,
6780 .call_sync = _nfs4_call_sync_session,
6781 .match_stateid = nfs41_match_stateid,
6782 .find_root_sec = nfs41_find_root_sec,
6783 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
6784 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
6785 .state_renewal_ops = &nfs41_state_renewal_ops,
6786};
6787#endif
6788
6789const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
6790 [0] = &nfs_v4_0_minor_ops,
6791#if defined(CONFIG_NFS_V4_1)
6792 [1] = &nfs_v4_1_minor_ops,
6793#endif
6794};
6795
6796static const struct inode_operations nfs4_file_inode_operations = {
6797 .permission = nfs_permission,
6798 .getattr = nfs_getattr,
6799 .setattr = nfs_setattr,
6800 .getxattr = generic_getxattr,
6801 .setxattr = generic_setxattr,
6802 .listxattr = generic_listxattr,
6803 .removexattr = generic_removexattr,
6804};
6805
6806const struct nfs_rpc_ops nfs_v4_clientops = {
6807 .version = 4, /* protocol version */
6808 .dentry_ops = &nfs4_dentry_operations,
6809 .dir_inode_ops = &nfs4_dir_inode_operations,
6810 .file_inode_ops = &nfs4_file_inode_operations,
6811 .file_ops = &nfs4_file_operations,
6812 .getroot = nfs4_proc_get_root,
6813 .submount = nfs4_submount,
6814 .getattr = nfs4_proc_getattr,
6815 .setattr = nfs4_proc_setattr,
6816 .lookup = nfs4_proc_lookup,
6817 .access = nfs4_proc_access,
6818 .readlink = nfs4_proc_readlink,
6819 .create = nfs4_proc_create,
6820 .remove = nfs4_proc_remove,
6821 .unlink_setup = nfs4_proc_unlink_setup,
6822 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
6823 .unlink_done = nfs4_proc_unlink_done,
6824 .rename = nfs4_proc_rename,
6825 .rename_setup = nfs4_proc_rename_setup,
6826 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
6827 .rename_done = nfs4_proc_rename_done,
6828 .link = nfs4_proc_link,
6829 .symlink = nfs4_proc_symlink,
6830 .mkdir = nfs4_proc_mkdir,
6831 .rmdir = nfs4_proc_remove,
6832 .readdir = nfs4_proc_readdir,
6833 .mknod = nfs4_proc_mknod,
6834 .statfs = nfs4_proc_statfs,
6835 .fsinfo = nfs4_proc_fsinfo,
6836 .pathconf = nfs4_proc_pathconf,
6837 .set_capabilities = nfs4_server_capabilities,
6838 .decode_dirent = nfs4_decode_dirent,
6839 .read_setup = nfs4_proc_read_setup,
6840 .read_rpc_prepare = nfs4_proc_read_rpc_prepare,
6841 .read_done = nfs4_read_done,
6842 .write_setup = nfs4_proc_write_setup,
6843 .write_rpc_prepare = nfs4_proc_write_rpc_prepare,
6844 .write_done = nfs4_write_done,
6845 .commit_setup = nfs4_proc_commit_setup,
6846 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
6847 .commit_done = nfs4_commit_done,
6848 .lock = nfs4_proc_lock,
6849 .clear_acl_cache = nfs4_zap_acl_attr,
6850 .close_context = nfs4_close_context,
6851 .open_context = nfs4_atomic_open,
6852 .init_client = nfs4_init_client,
6853};
6854
6855static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
6856 .prefix = XATTR_NAME_NFSV4_ACL,
6857 .list = nfs4_xattr_list_nfs4_acl,
6858 .get = nfs4_xattr_get_nfs4_acl,
6859 .set = nfs4_xattr_set_nfs4_acl,
6860};
6861
6862const struct xattr_handler *nfs4_xattr_handlers[] = {
6863 &nfs4_xattr_nfs4_acl_handler,
6864 NULL
6865};
6866
6867module_param(max_session_slots, ushort, 0644);
6868MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 "
6869 "requests the client will negotiate");
6870
6871/*
6872 * Local variables:
6873 * c-basic-offset: 8
6874 * End:
6875 */