/*
*  Copyright (c) 2001 The Regents of the University of Michigan.
*  All rights reserved.
*
*  Kendrick Smith <kmsmith@umich.edu>
*  Andy Adamson <kandros@umich.edu>
*
*  Redistribution and use in source and binary forms, with or without
*  modification, are permitted provided that the following conditions
*  are met:
*
*  1. Redistributions of source code must retain the above copyright
*     notice, this list of conditions and the following disclaimer.
*  2. Redistributions in binary form must reproduce the above copyright
*     notice, this list of conditions and the following disclaimer in the
*     documentation and/or other materials provided with the distribution.
*  3. Neither the name of the University nor the names of its
*     contributors may be used to endorse or promote products derived
*     from this software without specific prior written permission.
*
*  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
*  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
*  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
*  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
*  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
*  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
*  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
*  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
*  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/jhash.h>
#include <linux/string_helpers.h>
#include <linux/fsnotify.h>
#include <linux/rhashtable.h>
#include <linux/nfs_ssc.h>

#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"
#include "pnfs.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY	NFSDDBG_PROC

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};
static const stateid_t close_stateid = {
	.si_generation = 0xffffffffU,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))

/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
void nfsd4_end_grace(struct nfsd_net *nn);
static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
static void nfsd4_file_hash_remove(struct nfs4_file *fi);

/* Locking: */

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

enum nfsd4_st_mutex_lock_subclass {
	OPEN_STATEID_MUTEX = 0,
	LOCK_STATEID_MUTEX = 1,
};

/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

/*
 * A waitqueue where a writer to clients/#/ctl destroying a client can
 * wait for cl_rpc_users to drop to 0 and then for the client to be
 * unhashed.
 */
static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);

static struct kmem_cache *client_slab;
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
static struct kmem_cache *odstate_slab;

static void free_session(struct nfsd4_session *);

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;

static struct workqueue_struct *laundry_wq;

int nfsd4_create_laundry_wq(void)
{
	int rc = 0;

	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
	if (laundry_wq == NULL)
		rc = -ENOMEM;
	return rc;
}

void nfsd4_destroy_laundry_wq(void)
{
	destroy_workqueue(laundry_wq);
}

static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static void nfsd4_dec_courtesy_client_count(struct nfsd_net *nn,
					struct nfs4_client *clp)
{
	if (clp->cl_state != NFSD4_ACTIVE)
		atomic_add_unless(&nn->nfsd_courtesy_clients, -1, 0);
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_rpc_users);
	nfsd4_dec_courtesy_client_count(nn, clp);
	clp->cl_state = NFSD4_ACTIVE;
	return nfs_ok;
}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = ktime_get_boottime_seconds();
	nfsd4_dec_courtesy_client_count(nn, clp);
	clp->cl_state = NFSD4_ACTIVE;
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (!atomic_dec_and_test(&clp->cl_rpc_users))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
}

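/*
 * As put_client_renew_locked(), but takes nn->client_lock itself via
 * atomic_dec_and_lock(), so the lock is only acquired when the last
 * RPC user goes away.
 */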
static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	else
		wake_up_all(&expiry_wq);
	spin_unlock(&nn->client_lock);
}

static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}

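/*
 * Search the lockowner's list of blocked locks for one matching the
 * given filehandle.  If found, dequeue it from both the per-owner list
 * and the LRU, cancel the pending VFS block, and return it.
 */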
static struct nfsd4_blocked_lock *
find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *cur, *found = NULL;

	spin_lock(&nn->blocked_locks_lock);
	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
		if (fh_match(fh, &cur->nbl_fh)) {
			list_del_init(&cur->nbl_list);
			WARN_ON(list_empty(&cur->nbl_lru));
			list_del_init(&cur->nbl_lru);
			found = cur;
			break;
		}
	}
	spin_unlock(&nn->blocked_locks_lock);
	if (found)
		locks_delete_block(&found->nbl_lock);
	return found;
}

static struct nfsd4_blocked_lock *
find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
			struct nfsd_net *nn)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = find_blocked_lock(lo, fh, nn);
	if (!nbl) {
		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
		if (nbl) {
			INIT_LIST_HEAD(&nbl->nbl_list);
			INIT_LIST_HEAD(&nbl->nbl_lru);
			fh_copy_shallow(&nbl->nbl_fh, fh);
			locks_init_lock(&nbl->nbl_lock);
			kref_init(&nbl->nbl_kref);
			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
					&nfsd4_cb_notify_lock_ops,
					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
		}
	}
	return nbl;
}

static void
free_nbl(struct kref *kref)
{
	struct nfsd4_blocked_lock *nbl;

	nbl = container_of(kref, struct nfsd4_blocked_lock, nbl_kref);
	kfree(nbl);
}

static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
	locks_delete_block(&nbl->nbl_lock);
	locks_release_private(&nbl->nbl_lock);
	kref_put(&nbl->nbl_kref, free_nbl);
}

static void
remove_blocked_locks(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_blocked_lock *nbl;
	LIST_HEAD(reaplist);

	/* Dequeue all blocked locks */
	spin_lock(&nn->blocked_locks_lock);
	while (!list_empty(&lo->lo_blocked)) {
		nbl = list_first_entry(&lo->lo_blocked,
					struct nfsd4_blocked_lock,
					nbl_list);
		list_del_init(&nbl->nbl_list);
		WARN_ON(list_empty(&nbl->nbl_lru));
		list_move(&nbl->nbl_lru, &reaplist);
	}
	spin_unlock(&nn->blocked_locks_lock);

	/* Now free them */
	while (!list_empty(&reaplist)) {
		nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
					nbl_lru);
		list_del_init(&nbl->nbl_lru);
		free_blocked_lock(nbl);
	}
}

static void
nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
					struct nfsd4_blocked_lock, nbl_cb);
	locks_delete_block(&nbl->nbl_lock);
}

static int
nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
{
	trace_nfsd_cb_notify_lock_done(&zero_stateid, task);

	/*
	 * Since this is just an optimization, we don't try very hard if it
	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
	 * just quit trying on anything else.
	 */
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
		rpc_delay(task, 1 * HZ);
		return 0;
	default:
		return 1;
	}
}

static void
nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
{
	struct nfsd4_blocked_lock *nbl = container_of(cb,
					struct nfsd4_blocked_lock, nbl_cb);

	free_blocked_lock(nbl);
}

static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
	.prepare	= nfsd4_cb_notify_lock_prepare,
	.done		= nfsd4_cb_notify_lock_done,
	.release	= nfsd4_cb_notify_lock_release,
};

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation in
 * https://datatracker.ietf.org/doc/html/rfc7530#section-16.19.4 that
 * the server return an error if the client attempts to downgrade to a
 * combination of share bits not explicable by closing some of its
 * previous opens.
 *
 * This enforcement is arguably incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 *
 * But you could also argue that our current code is already overkill,
 * since it only exists to return NFS4ERR_INVAL on incorrect client
 * behavior.
 */
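/*
 * Collapse a bitmap of share bits (bits 1-3: READ, WRITE, BOTH) into a
 * single NFS4_SHARE_ACCESS_* union of the modes that are set.
 */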
static unsigned int
bmap_to_share_mode(unsigned long bmap)
{
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= mask;
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~mask;
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << access;

	return (bool)(stp->st_access_bmap & mask);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= mask;
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~mask;
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	unsigned char mask = 1 << deny;

	return (bool)(stp->st_deny_bmap & mask);
}

static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}

static inline int
access_permit_read(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}

static inline int
access_permit_write(struct nfs4_ol_stateid *stp)
{
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}

static inline struct nfs4_stateowner *
nfs4_get_stateowner(struct nfs4_stateowner *sop)
{
	atomic_inc(&sop->so_count);
	return sop;
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
}

static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_stateowner *so;

	lockdep_assert_held(&clp->cl_lock);

	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner))
			return openowner(nfs4_get_stateowner(so));
	}
	return NULL;
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	spin_lock(&clp->cl_lock);
	oo = find_openstateowner_str_locked(hashval, open, clp);
	spin_unlock(&clp->cl_lock);
	return oo;
}

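/*
 * Simple polynomial hash (multiply-by-37) over an opaque byte string;
 * used for hashing owner and client names.
 */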
static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;

	u32 x = 0;
	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}

static void nfsd4_free_file_rcu(struct rcu_head *rcu)
{
	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);

	kmem_cache_free(file_slab, fp);
}

void
put_nfs4_file(struct nfs4_file *fi)
{
	if (refcount_dec_and_test(&fi->fi_ref)) {
		nfsd4_file_hash_remove(fi);
		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
	}
}

static struct nfsd_file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
	if (f->fi_fds[oflag])
		return nfsd_file_get(f->fi_fds[oflag]);
	return NULL;
}

static struct nfsd_file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_WRONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct nfsd_file *
find_writeable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct nfsd_file *
find_readable_file_locked(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_RDONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct nfsd_file *
find_readable_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

struct nfsd_file *
find_any_file(struct nfs4_file *f)
{
	struct nfsd_file *ret;

	if (!f)
		return NULL;
	spin_lock(&f->fi_lock);
	ret = __nfs4_get_fd(f, O_RDWR);
	if (!ret) {
		ret = __nfs4_get_fd(f, O_WRONLY);
		if (!ret)
			ret = __nfs4_get_fd(f, O_RDONLY);
	}
	spin_unlock(&f->fi_lock);
	return ret;
}

static struct nfsd_file *find_any_file_locked(struct nfs4_file *f)
{
	lockdep_assert_held(&f->fi_lock);

	if (f->fi_fds[O_RDWR])
		return f->fi_fds[O_RDWR];
	if (f->fi_fds[O_WRONLY])
		return f->fi_fds[O_WRONLY];
	if (f->fi_fds[O_RDONLY])
		return f->fi_fds[O_RDONLY];
	return NULL;
}

static struct nfsd_file *find_deleg_file_locked(struct nfs4_file *f)
{
	lockdep_assert_held(&f->fi_lock);

	if (f->fi_deleg_file)
		return f->fi_deleg_file;
	return NULL;
}

static atomic_long_t num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS		8
#define OWNER_HASH_SIZE		(1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK		(OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	return ret & OWNER_HASH_MASK;
}

static struct rhltable nfs4_file_rhltable ____cacheline_aligned_in_smp;

static const struct rhashtable_params nfs4_file_rhash_params = {
	.key_len		= sizeof_field(struct nfs4_file, fi_inode),
	.key_offset		= offsetof(struct nfs4_file, fi_inode),
	.head_offset		= offsetof(struct nfs4_file, fi_rlist),

	/*
	 * Start with a single page hash table to reduce resizing churn
	 * on light workloads.
	 */
	.min_size		= 256,
	.automatic_shrinking	= true,
};

/*
 * Check if courtesy clients have conflicting access and resolve it if possible
 *
 * access:	is op_share_access if share_access is true.
 *		Check if access mode, op_share_access, would conflict with
 *		the current deny mode of the file 'fp'.
 * access:	is op_share_deny if share_access is false.
 *		Check if the deny mode, op_share_deny, would conflict with
 *		current access of the file 'fp'.
 * stp:		skip checking this entry.
 * new_stp:	normal open, not open upgrade.
 *
 * Function returns:
 *	false - access/deny mode conflict with normal client.
 *	true  - no conflict or conflict with courtesy client(s) is resolved.
 */
static bool
nfs4_resolve_deny_conflicts_locked(struct nfs4_file *fp, bool new_stp,
		struct nfs4_ol_stateid *stp, u32 access, bool share_access)
{
	struct nfs4_ol_stateid *st;
	bool resolvable = true;
	unsigned char bmap;
	struct nfsd_net *nn;
	struct nfs4_client *clp;

	lockdep_assert_held(&fp->fi_lock);
	list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
		/* ignore lock stateid */
		if (st->st_openstp)
			continue;
		if (st == stp && new_stp)
			continue;
		/* check file access against deny mode or vice versa */
		bmap = share_access ? st->st_deny_bmap : st->st_access_bmap;
		if (!(access & bmap_to_share_mode(bmap)))
			continue;
		clp = st->st_stid.sc_client;
		if (try_to_expire_client(clp))
			continue;
		resolvable = false;
		break;
	}
	if (resolvable) {
		clp = stp->st_stid.sc_client;
		nn = net_generic(clp->net, nfsd_net_id);
		mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
	}
	return resolvable;
}

static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	lockdep_assert_held(&fp->fi_lock);

	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}

static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
	/* Common case is that there is no deny mode. */
	if (deny) {
		/* Does this deny mode make sense? */
		if (deny & ~NFS4_SHARE_DENY_BOTH)
			return nfserr_inval;

		if ((deny & NFS4_SHARE_DENY_READ) &&
		    atomic_read(&fp->fi_access[O_RDONLY]))
			return nfserr_share_denied;

		if ((deny & NFS4_SHARE_DENY_WRITE) &&
		    atomic_read(&fp->fi_access[O_WRONLY]))
			return nfserr_share_denied;
	}
	return nfs_ok;
}

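/*
 * Drop one reference on the given open mode.  When the last reference
 * for that mode goes away, detach the corresponding nfsd_file (and the
 * O_RDWR file too, if the other mode also has no users) under fi_lock,
 * then release the file(s) after the lock is dropped.
 */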
static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct nfsd_file *f1 = NULL;
		struct nfsd_file *f2 = NULL;

		swap(f1, fp->fi_fds[oflag]);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			swap(f2, fp->fi_fds[O_RDWR]);
		spin_unlock(&fp->fi_lock);
		if (f1)
			nfsd_file_put(f1);
		if (f2)
			nfsd_file_put(f2);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}

/*
 * Allocate a new open/delegation state counter. This is needed for
 * pNFS for proper return on close semantics.
 *
 * Note that we only allocate it for pNFS-enabled exports, otherwise
 * all pointers to struct nfs4_clnt_odstate are always NULL.
 */
static struct nfs4_clnt_odstate *
alloc_clnt_odstate(struct nfs4_client *clp)
{
	struct nfs4_clnt_odstate *co;

	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
	if (co) {
		co->co_client = clp;
		refcount_set(&co->co_odcount, 1);
	}
	return co;
}

static void
hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp = co->co_file;

	lockdep_assert_held(&fp->fi_lock);
	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
}

static inline void
get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	if (co)
		refcount_inc(&co->co_odcount);
}

static void
put_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	struct nfs4_file *fp;

	if (!co)
		return;

	fp = co->co_file;
	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
		list_del(&co->co_perfile);
		spin_unlock(&fp->fi_lock);

		nfsd4_return_all_file_layouts(co->co_client, fp);
		kmem_cache_free(odstate_slab, co);
	}
}

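/*
 * Look up this client's odstate on the file; if one is already hashed,
 * take a reference on it and return it.  Otherwise hash 'new' and
 * return that instead.  The caller owns a reference either way.
 */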
static struct nfs4_clnt_odstate *
find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
{
	struct nfs4_clnt_odstate *co;
	struct nfs4_client *cl;

	if (!new)
		return NULL;

	cl = new->co_client;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
		if (co->co_client == cl) {
			get_clnt_odstate(co);
			goto out;
		}
	}
	co = new;
	co->co_file = fp;
	hash_clnt_odstate_locked(new);
out:
	spin_unlock(&fp->fi_lock);
	return co;
}

struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
				  void (*sc_free)(struct nfs4_stid *))
{
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	/* Reserving 0 for start of file in nfsdfs "states" file: */
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
	if (new_id < 0)
		goto out_free;

	stid->sc_free = sc_free;
	stid->sc_client = cl;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	refcount_set(&stid->sc_count, 1);
	spin_lock_init(&stid->sc_lock);
	INIT_LIST_HEAD(&stid->sc_cp_list);

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}

/*
 * Create a unique stateid_t to represent each COPY.
 */
static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
			      unsigned char cs_type)
{
	int new_id;

	stid->cs_stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
	stid->cs_stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
	stid->cs_type = cs_type;

	idr_preload(GFP_KERNEL);
	spin_lock(&nn->s2s_cp_lock);
	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
	stid->cs_stid.si_opaque.so_id = new_id;
	stid->cs_stid.si_generation = 1;
	spin_unlock(&nn->s2s_cp_lock);
	idr_preload_end();
	if (new_id < 0)
		return 0;
	return 1;
}

int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
{
	return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
}

struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
						     struct nfs4_stid *p_stid)
{
	struct nfs4_cpntf_state *cps;

	cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
	if (!cps)
		return NULL;
	cps->cpntf_time = ktime_get_boottime_seconds();
	refcount_set(&cps->cp_stateid.cs_count, 1);
	if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
		goto out_free;
	spin_lock(&nn->s2s_cp_lock);
	list_add(&cps->cp_list, &p_stid->sc_cp_list);
	spin_unlock(&nn->s2s_cp_lock);
	return cps;
out_free:
	kfree(cps);
	return NULL;
}

void nfs4_free_copy_state(struct nfsd4_copy *copy)
{
	struct nfsd_net *nn;

	WARN_ON_ONCE(copy->cp_stateid.cs_type != NFS4_COPY_STID);
	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	idr_remove(&nn->s2s_cp_stateids,
		   copy->cp_stateid.cs_stid.si_opaque.so_id);
	spin_unlock(&nn->s2s_cp_lock);
}

static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
{
	struct nfs4_cpntf_state *cps;
	struct nfsd_net *nn;

	nn = net_generic(net, nfsd_net_id);
	spin_lock(&nn->s2s_cp_lock);
	while (!list_empty(&stid->sc_cp_list)) {
		cps = list_first_entry(&stid->sc_cp_list,
				       struct nfs4_cpntf_state, cp_list);
		_free_cpntf_state_locked(nn, cps);
	}
	spin_unlock(&nn->s2s_cp_lock);
}

static struct nfs4_ol_stateid *nfs4_alloc_open_stateid(struct nfs4_client *clp)
{
	struct nfs4_stid *stid;

	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
	if (!stid)
		return NULL;

	return openlockstateid(stid);
}

static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	struct nfs4_delegation *dp = delegstateid(stid);

	WARN_ON_ONCE(!list_empty(&stid->sc_cp_list));
	WARN_ON_ONCE(!list_empty(&dp->dl_perfile));
	WARN_ON_ONCE(!list_empty(&dp->dl_perclnt));
	WARN_ON_ONCE(!list_empty(&dp->dl_recall_lru));
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
}

/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
 * low 3 bytes as hash-table indices.
 *
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static DEFINE_SPINLOCK(blocked_delegations_lock);
static struct bloom_pair {
	int	entries, old_entries;
	time64_t swap_time;
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;

static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (ktime_get_seconds() - bd->swap_time > 30) {
		spin_lock(&blocked_delegations_lock);
		if (ktime_get_seconds() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1-bd->new;
			bd->swap_time = ktime_get_seconds();
		}
		spin_unlock(&blocked_delegations_lock);
	}
	hash = jhash(&fh->fh_raw, fh->fh_size, 0);
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = jhash(&fh->fh_raw, fh->fh_size, 0);

	spin_lock(&blocked_delegations_lock);
	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = ktime_get_seconds();
	bd->entries += 1;
	spin_unlock(&blocked_delegations_lock);
}

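/*
 * Allocate and initialize a delegation stateid.  Fails if the global
 * max_delegations limit has been reached or if delegations on this
 * filehandle are currently blocked by the bloom filters above.
 */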
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
		 struct nfs4_clnt_odstate *odstate)
{
	struct nfs4_delegation *dp;
	long n;

	dprintk("NFSD alloc_init_deleg\n");
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
	if (delegation_blocked(&fp->fi_fhandle))
		goto out_dec;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
	if (dp == NULL)
		goto out_dec;

	/*
	 * delegation seqid's are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_clnt_odstate = odstate;
	get_clnt_odstate(odstate);
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
	dp->dl_retries = 1;
	dp->dl_recalled = false;
	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
	get_nfs4_file(fp);
	dp->dl_stid.sc_file = fp;
	return dp;
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
}

void
nfs4_put_stid(struct nfs4_stid *s)
{
	struct nfs4_file *fp = s->sc_file;
	struct nfs4_client *clp = s->sc_client;

	might_lock(&clp->cl_lock);

	if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
		return;
	}
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	nfs4_free_cpntf_statelist(clp->net, s);
	spin_unlock(&clp->cl_lock);
	s->sc_free(s);
	if (fp)
		put_nfs4_file(fp);
}

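/*
 * Bump the stateid's generation and copy the result into the reply.
 * Seqid 0 has special meaning in 4.1, so skip it when the counter
 * wraps.
 */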
void
nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
{
	stateid_t *src = &stid->sc_stateid;

	spin_lock(&stid->sc_lock);
	if (unlikely(++src->si_generation == 0))
		src->si_generation = 1;
	memcpy(dst, src, sizeof(*dst));
	spin_unlock(&stid->sc_lock);
}

static void put_deleg_file(struct nfs4_file *fp)
{
	struct nfsd_file *nf = NULL;

	spin_lock(&fp->fi_lock);
	if (--fp->fi_delegees == 0)
		swap(nf, fp->fi_deleg_file);
	spin_unlock(&fp->fi_lock);

	if (nf)
		nfsd_file_put(nf);
}

static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;
	struct nfsd_file *nf = fp->fi_deleg_file;

	WARN_ON_ONCE(!fp->fi_delegees);

	vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
	put_deleg_file(fp);
}

static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
{
	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_unlock_deleg_lease(dp);
	nfs4_put_stid(&dp->dl_stid);
}

void nfs4_unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}

/**
 * nfs4_delegation_exists - Discover if this delegation already exists
 * @clp:     a pointer to the nfs4_client we're granting a delegation to
 * @fp:      a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: true iff an existing delegation is found
 */
static bool
nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
{
	struct nfs4_delegation *searchdp = NULL;
	struct nfs4_client *searchclp = NULL;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
		searchclp = searchdp->dl_stid.sc_client;
		if (clp == searchclp) {
			return true;
		}
	}
	return false;
}

/**
 * hash_delegation_locked - Add a delegation to the appropriate lists
 * @dp:     a pointer to the nfs4_delegation we are adding.
 * @fp:     a pointer to the nfs4_file we're granting a delegation on
 *
 * Return:
 *      On success: 0 if the delegation was successfully hashed.
 *
 *      On error: -EAGAIN if one was previously granted to this
 *                 nfs4_client for this nfs4_file. Delegation is not hashed.
 *
 */
static int
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	lockdep_assert_held(&state_lock);
	lockdep_assert_held(&fp->fi_lock);

	if (nfs4_delegation_exists(clp, fp))
		return -EAGAIN;
	refcount_inc(&dp->dl_stid.sc_count);
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &clp->cl_delegations);
	return 0;
}

static bool delegation_hashed(struct nfs4_delegation *dp)
{
	return !(list_empty(&dp->dl_perfile));
}

static bool
unhash_delegation_locked(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	lockdep_assert_held(&state_lock);

	if (!delegation_hashed(dp))
		return false;

	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
	/* Ensure that deleg break won't try to requeue it */
	++dp->dl_time;
	spin_lock(&fp->fi_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_recall_lru);
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
	return true;
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	bool unhashed;

	spin_lock(&state_lock);
	unhashed = unhash_delegation_locked(dp);
	spin_unlock(&state_lock);
	if (unhashed)
		destroy_unhashed_deleg(dp);
}

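/*
 * For 4.1+ clients, move the revoked stateid onto cl_revoked so the
 * client can discover it and free it with FREE_STATEID; 4.0 has no
 * equivalent, so the delegation is simply destroyed.
 */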
static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	WARN_ON(!list_empty(&dp->dl_recall_lru));

	trace_nfsd_stid_revoke(&dp->dl_stid);

	if (clp->cl_minorversion) {
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		refcount_inc(&dp->dl_stid.sc_count);
		spin_lock(&clp->cl_lock);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
	}
	destroy_unhashed_deleg(dp);
}

/*
 * SETCLIENTID state
 */

static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(struct xdr_netobj name)
{
	return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
}

/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *stp;

	spin_lock(&fp->fi_lock);
	fp->fi_share_deny = 0;
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
	spin_unlock(&fp->fi_lock);
}

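/*
 * Clear any deny bits in st_deny_bmap that are not covered by the new
 * deny mode, then recompute the file-wide deny union if anything
 * changed.
 */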
static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	int i;
	bool change = false;

	for (i = 1; i < 4; i++) {
		if ((i & deny) != i) {
			change = true;
			clear_deny(i, stp);
		}
	}

	/* Recalculate per-file deny mode if there was a change */
	if (change)
		recalculate_deny_mode(stp->st_stid.sc_file);
}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;
	struct nfs4_file *fp = stp->st_stid.sc_file;

	if (fp && stp->st_deny_bmap != 0)
		recalculate_deny_mode(fp);

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_stid.sc_file, i);
		clear_access(i, stp);
	}
}

static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
{
	kfree(sop->so_owner.data);
	sop->so_ops->so_free(sop);
}

static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
	struct nfs4_client *clp = sop->so_client;

	might_lock(&clp->cl_lock);

	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
		return;
	sop->so_ops->so_unhash(sop);
	spin_unlock(&clp->cl_lock);
	nfs4_free_stateowner(sop);
}

static bool
nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
{
	return list_empty(&stp->st_perfile);
}

static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_file *fp = stp->st_stid.sc_file;

	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

	if (list_empty(&stp->st_perfile))
		return false;

	spin_lock(&fp->fi_lock);
	list_del_init(&stp->st_perfile);
	spin_unlock(&fp->fi_lock);
	list_del(&stp->st_perstateowner);
	return true;
}

static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);

	put_clnt_odstate(stp->st_clnt_odstate);
	release_all_access(stp);
	if (stp->st_stateowner)
		nfs4_put_stateowner(stp->st_stateowner);
	WARN_ON(!list_empty(&stid->sc_cp_list));
	kmem_cache_free(stateid_slab, stid);
}

static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
{
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
	struct nfsd_file *nf;

	nf = find_any_file(stp->st_stid.sc_file);
	if (nf) {
		get_file(nf->nf_file);
		filp_close(nf->nf_file, (fl_owner_t)lo);
		nfsd_file_put(nf);
	}
	nfs4_free_ol_stateid(stid);
}

/*
 * Put the persistent reference to an already unhashed generic stateid, while
 * holding the cl_lock. If it's the last reference, then put it onto the
 * reaplist for later destruction.
 */
static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
				  struct list_head *reaplist)
{
	struct nfs4_stid *s = &stp->st_stid;
	struct nfs4_client *clp = s->sc_client;

	lockdep_assert_held(&clp->cl_lock);

	WARN_ON_ONCE(!list_empty(&stp->st_locks));

	if (!refcount_dec_and_test(&s->sc_count)) {
		wake_up_all(&close_wq);
		return;
	}

	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
	list_add(&stp->st_locks, reaplist);
}

static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	if (!unhash_ol_stateid(stp))
		return false;
	list_del_init(&stp->st_locks);
	nfs4_unhash_stid(&stp->st_stid);
	return true;
}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_client *clp = stp->st_stid.sc_client;
	bool unhashed;

	spin_lock(&clp->cl_lock);
	unhashed = unhash_lock_stateid(stp);
	spin_unlock(&clp->cl_lock);
	if (unhashed)
		nfs4_put_stid(&stp->st_stid);
}

static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
{
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&lo->lo_owner.so_strhash);
}

/*
 * Free a list of generic stateids that were collected earlier after being
 * fully unhashed.
 */
static void
free_ol_stateid_reaplist(struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_file *fp;

	might_sleep();

	while (!list_empty(reaplist)) {
		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
				       st_locks);
		list_del(&stp->st_locks);
		fp = stp->st_stid.sc_file;
		stp->st_stid.sc_free(&stp->st_stid);
		if (fp)
			put_nfs4_file(fp);
	}
}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
				       struct list_head *reaplist)
{
	struct nfs4_ol_stateid *stp;

	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				struct nfs4_ol_stateid, st_locks);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, reaplist);
	}
}

static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
				struct list_head *reaplist)
{
	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);

	if (!unhash_ol_stateid(stp))
		return false;
	release_open_stateid_locks(stp, reaplist);
	return true;
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	LIST_HEAD(reaplist);

	spin_lock(&stp->st_stid.sc_client->cl_lock);
	if (unhash_open_stateid(stp, &reaplist))
		put_ol_stateid_locked(stp, &reaplist);
	spin_unlock(&stp->st_stid.sc_client->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
}

static void unhash_openowner_locked(struct nfs4_openowner *oo)
{
	struct nfs4_client *clp = oo->oo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);

	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
					  nfsd_net_id);
	struct nfs4_ol_stateid *s;

	spin_lock(&nn->client_lock);
	s = oo->oo_last_closed_stid;
	if (s) {
		list_del_init(&oo->oo_close_lru);
		oo->oo_last_closed_stid = NULL;
	}
	spin_unlock(&nn->client_lock);
	if (s)
		nfs4_put_stid(&s->st_stid);
}

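/*
 * Unhash the openowner and all of its open stateids (and their lock
 * stateids) under cl_lock, then free everything on the reaplist once
 * the lock has been dropped.
 */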
static void release_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
	struct nfs4_client *clp = oo->oo_owner.so_client;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);

	spin_lock(&clp->cl_lock);
	unhash_openowner_locked(oo);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		if (unhash_open_stateid(stp, &reaplist))
			put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	release_last_closed_stateid(oo);
	nfs4_put_stateowner(&oo->oo_owner);
}

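/*
 * A sessionid embeds the clientid plus a sequence number; hash
 * sessions by that sequence number.
 */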
static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		nfsd4_cstate_clear_replay(cstate);
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
	return;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}

/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
		free_svc_cred(&ses->se_slots[i]->sl_cred);
		kfree(ses->se_slots[i]);
	}
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
	u32 size;

	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
}

/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 slotsize = slot_bytes(ca);
	u32 num = ca->maxreqs;
	unsigned long avail, total_avail;
	unsigned int scale_factor;

	spin_lock(&nfsd_drc_lock);
	if (nfsd_drc_max_mem > nfsd_drc_mem_used)
		total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
	else
		/* We have handed out more space than we chose in
		 * set_max_drc() to allow.  That isn't really a
		 * problem as long as that doesn't make us think we
		 * have lots more due to integer overflow.
		 */
		total_avail = 0;
	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
	/*
	 * Never use more than a fraction of the remaining memory,
	 * unless it's the only way to give this client a slot.
	 * The chosen fraction is either 1/8 or 1/number of threads,
	 * whichever is smaller.  This ensures there are adequate
	 * slots to support multiple clients per thread.
	 * Give the client one slot even if that would require
	 * over-allocation--it is better than failure.
	 */
	scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);

	avail = clamp_t(unsigned long, avail, slotsize,
			total_avail/scale_factor);
	num = min_t(int, num, avail / slotsize);
	num = max_t(int, num, 1);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}

static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
	int slotsize = slot_bytes(ca);

	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
	spin_unlock(&nfsd_drc_lock);
}

static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
					   struct nfsd4_channel_attrs *battrs)
{
	int numslots = fattrs->maxreqs;
	int slotsize = slot_bytes(fattrs);
	struct nfsd4_session *new;
	int i;

	BUILD_BUG_ON(struct_size(new, se_slots, NFSD_MAX_SLOTS_PER_SESSION)
		     > PAGE_SIZE);

	new = kzalloc(struct_size(new, se_slots, numslots), GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}

	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}

static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}

static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	trace_nfsd_cb_lost(clp);

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	nfsd4_probe_callback(clp);
	spin_unlock(&clp->cl_lock);
}

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}

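/*
 * Attach a connection to the session and register for notification of
 * transport close; if registration fails the transport is already
 * gone, so run the "lost" callback by hand.
 */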
static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	/* We may have gained or lost a callback channel: */
	nfsd4_probe_callback_sync(ses->se_client);
}

static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (cses->flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;
	return alloc_conn(rqstp, dir);
}

/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}

static void __free_session(struct nfsd4_session *ses)
{
	free_session_slots(ses);
	kfree(ses);
}

static void free_session(struct nfsd4_session *ses)
{
	nfsd4_del_conns(ses);
	nfsd4_put_drc_mem(&ses->se_fchannel);
	__free_session(ses);
}

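/*
 * Fill in a freshly allocated session: generate its sessionid, hash it
 * into the per-net sessionid table, and link it onto the client's list
 * of sessions.
 */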
1974static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1975{
1976 int idx;
1977 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1978
1979 new->se_client = clp;
1980 gen_sessionid(new);
1981
1982 INIT_LIST_HEAD(&new->se_conns);
1983
1984 new->se_cb_seq_nr = 1;
1985 new->se_flags = cses->flags;
1986 new->se_cb_prog = cses->callback_prog;
1987 new->se_cb_sec = cses->cb_sec;
1988 atomic_set(&new->se_ref, 0);
1989 idx = hash_sessionid(&new->se_sessionid);
1990 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1991 spin_lock(&clp->cl_lock);
1992 list_add(&new->se_perclnt, &clp->cl_sessions);
1993 spin_unlock(&clp->cl_lock);
1994
1995 {
1996 struct sockaddr *sa = svc_addr(rqstp);
1997 /*
1998 * This is a little silly; with sessions there's no real
1999 * use for the callback address. Use the peer address
2000 * as a reasonable default for now, but consider fixing
2001 * the rpc client not to require an address in the
2002 * future:
2003 */
2004 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
2005 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
2006 }
2007}
2008
/* caller must hold client_lock */
static struct nfsd4_session *
__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{
	struct nfsd4_session *elem;
	int idx;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	dump_sessionid(__func__, sessionid);
	idx = hash_sessionid(sessionid);
	/* Search in the appropriate list */
	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
		if (!memcmp(elem->se_sessionid.data, sessionid->data,
			    NFS4_MAX_SESSIONID_LEN)) {
			return elem;
		}
	}

	dprintk("%s: session not found\n", __func__);
	return NULL;
}

static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
		__be32 *ret)
{
	struct nfsd4_session *session;
	__be32 status = nfserr_badsession;

	session = __find_in_sessionid_hashtbl(sessionid, net);
	if (!session)
		goto out;
	status = nfsd4_get_session_locked(session);
	if (status)
		session = NULL;
out:
	*ret = status;
	return session;
}

/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	list_del(&ses->se_hash);
	spin_lock(&ses->se_client->cl_lock);
	list_del(&ses->se_perclnt);
	spin_unlock(&ses->se_client->cl_lock);
}

/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static int
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
	/*
	 * We're assuming the clid was not given out from a boot
	 * precisely 2^32 (about 136 years) before this one.  That seems
	 * a safe assumption:
	 */
	if (clid->cl_boot == (u32)nn->boot_time)
		return 0;
	trace_nfsd_clid_stale(clid);
	return 1;
}

/*
 * XXX Should we use a slab cache ?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name,
				struct nfsd_net *nn)
{
	struct nfs4_client *clp;
	int i;

	if (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) {
		mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
		return NULL;
	}
	clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
	if (clp == NULL)
		return NULL;
	xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
	if (clp->cl_name.data == NULL)
		goto err_no_name;
	clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
						 sizeof(struct list_head),
						 GFP_KERNEL);
	if (!clp->cl_ownerstr_hashtbl)
		goto err_no_hashtbl;
	for (i = 0; i < OWNER_HASH_SIZE; i++)
		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
	INIT_LIST_HEAD(&clp->cl_sessions);
	idr_init(&clp->cl_stateids);
	atomic_set(&clp->cl_rpc_users, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	clp->cl_state = NFSD4_ACTIVE;
	atomic_inc(&nn->nfs4_client_count);
	atomic_set(&clp->cl_delegs_in_recall, 0);
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_revoked);
#ifdef CONFIG_NFSD_PNFS
	INIT_LIST_HEAD(&clp->cl_lo_states);
#endif
	INIT_LIST_HEAD(&clp->async_copies);
	spin_lock_init(&clp->async_lock);
	spin_lock_init(&clp->cl_lock);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
	return clp;
err_no_hashtbl:
	kfree(clp->cl_name.data);
err_no_name:
	kmem_cache_free(client_slab, clp);
	return NULL;
}

static void __free_client(struct kref *k)
{
	struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
	struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);

	free_svc_cred(&clp->cl_cred);
	kfree(clp->cl_ownerstr_hashtbl);
	kfree(clp->cl_name.data);
	kfree(clp->cl_nii_domain.data);
	kfree(clp->cl_nii_name.data);
	idr_destroy(&clp->cl_stateids);
	kfree(clp->cl_ra);
	kmem_cache_free(client_slab, clp);
}

static void drop_client(struct nfs4_client *clp)
{
	kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
}

static void
free_client(struct nfs4_client *clp)
{
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				se_perclnt);
		list_del(&ses->se_perclnt);
		WARN_ON_ONCE(atomic_read(&ses->se_ref));
		free_session(ses);
	}
	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
	if (clp->cl_nfsd_dentry) {
		nfsd_client_rmdir(clp->cl_nfsd_dentry);
		clp->cl_nfsd_dentry = NULL;
		wake_up_all(&expiry_wq);
	}
	drop_client(clp);
}

/* must be called under the client_lock */
static void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfsd4_session *ses;

	lockdep_assert_held(&nn->client_lock);

	/* Mark the client as expired! */
	clp->cl_time = 0;
	/* Make it invisible */
	if (!list_empty(&clp->cl_idhash)) {
		list_del_init(&clp->cl_idhash);
		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
		else
			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	}
	list_del_init(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}

static void
unhash_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	unhash_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

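/*
 * Caller must hold nn->client_lock.  Refuses to expire a client that
 * still has outstanding RPC users; the caller gets nfserr_jukebox and
 * is expected to retry later.
 */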
static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
	if (atomic_read(&clp->cl_rpc_users))
		return nfserr_jukebox;
	unhash_client_locked(clp);
	return nfs_ok;
}

static void
__destroy_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	int i;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		destroy_unhashed_deleg(dp);
	}
	while (!list_empty(&clp->cl_revoked)) {
		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		nfs4_put_stid(&dp->dl_stid);
	}
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		nfs4_get_stateowner(&oo->oo_owner);
		release_openowner(oo);
	}
	for (i = 0; i < OWNER_HASH_SIZE; i++) {
		struct nfs4_stateowner *so, *tmp;

		list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
					 so_strhash) {
			/* Should be no openowners at this point */
			WARN_ON_ONCE(so->so_is_open_owner);
			remove_blocked_locks(lockowner(so));
		}
	}
	nfsd4_return_all_client_layouts(clp);
	nfsd4_shutdown_copy(clp);
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	atomic_add_unless(&nn->nfs4_client_count, -1, 0);
	nfsd4_dec_courtesy_client_count(nn, clp);
	free_client(clp);
	wake_up_all(&expiry_wq);
}

static void
destroy_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	__destroy_client(clp);
}

static void inc_reclaim_complete(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!nn->track_reclaim_completes)
		return;
	if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
		return;
	if (atomic_inc_return(&nn->nr_reclaim_complete) ==
			nn->reclaim_str_hashtbl_size) {
		printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
				clp->net->ns.inum);
		nfsd4_end_grace(nn);
	}
}

static void expire_client(struct nfs4_client *clp)
{
	unhash_client(clp);
	nfsd4_client_record_remove(clp);
	__destroy_client(clp);
}

static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));
}

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
	target->cl_clientid.cl_id = source->cl_clientid.cl_id;
}

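/*
 * Deep-copies the principal strings and takes references on the group
 * info and GSS mech.  Note that on -ENOMEM any strings that were
 * already duplicated are left in place in @target; the callers here
 * release them via free_svc_cred().
 */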
static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
	target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
	target->cr_raw_principal = kstrdup(source->cr_raw_principal,
								GFP_KERNEL);
	target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
	if ((source->cr_principal && !target->cr_principal) ||
	    (source->cr_raw_principal && !target->cr_raw_principal) ||
	    (source->cr_targ_princ && !target->cr_targ_princ))
		return -ENOMEM;

	target->cr_flavor = source->cr_flavor;
	target->cr_uid = source->cr_uid;
	target->cr_gid = source->cr_gid;
	target->cr_group_info = source->cr_group_info;
	get_group_info(target->cr_group_info);
	target->cr_gss_mech = source->cr_gss_mech;
	if (source->cr_gss_mech)
		gss_mech_get(source->cr_gss_mech);
	return 0;
}

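/*
 * Total order on opaque blobs: shorter sorts first, ties are broken by
 * memcmp().  Used as the comparison function for the per-net client
 * name rb-trees (conf_name_tree/unconf_name_tree).
 */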
static int
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
	if (o1->len < o2->len)
		return -1;
	if (o1->len > o2->len)
		return 1;
	return memcmp(o1->data, o2->data, o1->len);
}

static int
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}

static int
same_clid(clientid_t *cl1, clientid_t *cl2)
{
	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}

static bool groups_equal(struct group_info *g1, struct group_info *g2)
{
	int i;

	if (g1->ngroups != g2->ngroups)
		return false;
	for (i = 0; i < g1->ngroups; i++)
		if (!gid_eq(g1->gid[i], g2->gid[i]))
			return false;
	return true;
}

/*
 * RFC 3530 language requires clid_inuse be returned when the
 * "principal" associated with a request differs from that previously
 * used.  We use the uid, gids, and gss principal string as our best
 * approximation.  We also don't want to allow non-gss use of a client
 * established using gss: in theory cr_principal should catch that
 * change, but in practice cr_principal can be null even in the gss case
 * since gssd doesn't always pass down a principal string.
 */
static bool is_gss_cred(struct svc_cred *cr)
{
	/* Is cr_flavor one of the gss "pseudoflavors"?: */
	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
}


static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
		return false;
	/* XXX: check that cr_targ_princ fields match ? */
	if (cr1->cr_principal == cr2->cr_principal)
		return true;
	if (!cr1->cr_principal || !cr2->cr_principal)
		return false;
	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
}

static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;
	u32 service;

	if (!cr->cr_gss_mech)
		return false;
	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
	return service == RPC_GSS_SVC_INTEGRITY ||
	       service == RPC_GSS_SVC_PRIVACY;
}

bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;

	if (!cl->cl_mach_cred)
		return true;
	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
		return false;
	if (!svc_rqst_integrity_protected(rqstp))
		return false;
	if (cl->cl_cred.cr_raw_principal)
		return 0 == strcmp(cl->cl_cred.cr_raw_principal,
						cr->cr_raw_principal);
	if (!cr->cr_principal)
		return false;
	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
}

static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
{
	__be32 verf[2];

	/*
	 * This is opaque to the client, so no need to byte-swap.  Use
	 * __force to keep sparse happy.
	 */
	verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
	verf[1] = (__force __be32)nn->clverifier_counter++;
	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}

static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
	clp->cl_clientid.cl_boot = (u32)nn->boot_time;
	clp->cl_clientid.cl_id = nn->clientid_counter++;
	gen_confirm(clp, nn);
}

static struct nfs4_stid *
find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
{
	struct nfs4_stid *ret;

	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
	if (!ret || !ret->sc_type)
		return NULL;
	return ret;
}

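/*
 * Look up a stateid and take a reference, but only if its sc_type is
 * among the bits set in @typemask; e.g. a typemask of
 * (NFS4_OPEN_STID | NFS4_LOCK_STID) would match either kind of
 * open/lock stateid.
 */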
static struct nfs4_stid *
find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
{
	struct nfs4_stid *s;

	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, t);
	if (s != NULL) {
		if (typemask & s->sc_type)
			refcount_inc(&s->sc_count);
		else
			s = NULL;
	}
	spin_unlock(&cl->cl_lock);
	return s;
}

static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
{
	struct nfsdfs_client *nc;

	nc = get_nfsdfs_client(inode);
	if (!nc)
		return NULL;
	return container_of(nc, struct nfs4_client, cl_nfsdfs);
}

static void seq_quote_mem(struct seq_file *m, char *data, int len)
{
	seq_printf(m, "\"");
	seq_escape_mem(m, data, len, ESCAPE_HEX | ESCAPE_NAP | ESCAPE_APPEND, "\"\\");
	seq_printf(m, "\"");
}

static const char *cb_state2str(int state)
{
	switch (state) {
	case NFSD4_CB_UP:
		return "UP";
	case NFSD4_CB_UNKNOWN:
		return "UNKNOWN";
	case NFSD4_CB_DOWN:
		return "DOWN";
	case NFSD4_CB_FAULT:
		return "FAULT";
	}
	return "UNDEFINED";
}

static int client_info_show(struct seq_file *m, void *v)
{
	struct inode *inode = file_inode(m->file);
	struct nfs4_client *clp;
	u64 clid;

	clp = get_nfsdfs_clp(inode);
	if (!clp)
		return -ENXIO;
	memcpy(&clid, &clp->cl_clientid, sizeof(clid));
	seq_printf(m, "clientid: 0x%llx\n", clid);
	seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);

	if (clp->cl_state == NFSD4_COURTESY)
		seq_puts(m, "status: courtesy\n");
	else if (clp->cl_state == NFSD4_EXPIRABLE)
		seq_puts(m, "status: expirable\n");
	else if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
		seq_puts(m, "status: confirmed\n");
	else
		seq_puts(m, "status: unconfirmed\n");
	seq_printf(m, "seconds from last renew: %lld\n",
		ktime_get_boottime_seconds() - clp->cl_time);
	seq_printf(m, "name: ");
	seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
	seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
	if (clp->cl_nii_domain.data) {
		seq_printf(m, "Implementation domain: ");
		seq_quote_mem(m, clp->cl_nii_domain.data,
					clp->cl_nii_domain.len);
		seq_printf(m, "\nImplementation name: ");
		seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
		seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
			clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
	}
	seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state));
	seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr);
	drop_client(clp);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(client_info);

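/*
 * seq_file iterator for the per-client "states" file.  cl_lock is held
 * from ->start to ->stop, so the idr walk sees a consistent snapshot;
 * *pos is the stateid's idr index rather than a simple line counter.
 */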
static void *states_start(struct seq_file *s, loff_t *pos)
	__acquires(&clp->cl_lock)
{
	struct nfs4_client *clp = s->private;
	unsigned long id = *pos;
	void *ret;

	spin_lock(&clp->cl_lock);
	ret = idr_get_next_ul(&clp->cl_stateids, &id);
	*pos = id;
	return ret;
}

static void *states_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct nfs4_client *clp = s->private;
	unsigned long id = *pos;
	void *ret;

	id++;
	ret = idr_get_next_ul(&clp->cl_stateids, &id);
	*pos = id;
	return ret;
}

static void states_stop(struct seq_file *s, void *v)
	__releases(&clp->cl_lock)
{
	struct nfs4_client *clp = s->private;

	spin_unlock(&clp->cl_lock);
}

static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
{
	seq_printf(s, "filename: \"%pD2\"", f->nf_file);
}

static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
{
	struct inode *inode = file_inode(f->nf_file);

	seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
		   MAJOR(inode->i_sb->s_dev),
		   MINOR(inode->i_sb->s_dev),
		   inode->i_ino);
}

static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
{
	seq_printf(s, "owner: ");
	seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
}

static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
{
	seq_printf(s, "0x%.8x", stid->si_generation);
	seq_printf(s, "%12phN", &stid->si_opaque);
}

static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_ol_stateid *ols;
	struct nfs4_file *nf;
	struct nfsd_file *file;
	struct nfs4_stateowner *oo;
	unsigned int access, deny;

	if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
		return 0; /* XXX: or SEQ_SKIP? */
	ols = openlockstateid(st);
	oo = ols->st_stateowner;
	nf = st->sc_file;

	spin_lock(&nf->fi_lock);
	file = find_any_file_locked(nf);
	if (!file)
		goto out;

	seq_printf(s, "- ");
	nfs4_show_stateid(s, &st->sc_stateid);
	seq_printf(s, ": { type: open, ");

	access = bmap_to_share_mode(ols->st_access_bmap);
	deny = bmap_to_share_mode(ols->st_deny_bmap);

	seq_printf(s, "access: %s%s, ",
		access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
		access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
	seq_printf(s, "deny: %s%s, ",
		deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
		deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");

	nfs4_show_superblock(s, file);
	seq_printf(s, ", ");
	nfs4_show_fname(s, file);
	seq_printf(s, ", ");
	nfs4_show_owner(s, oo);
	seq_printf(s, " }\n");
out:
	spin_unlock(&nf->fi_lock);
	return 0;
}

static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_ol_stateid *ols;
	struct nfs4_file *nf;
	struct nfsd_file *file;
	struct nfs4_stateowner *oo;

	ols = openlockstateid(st);
	oo = ols->st_stateowner;
	nf = st->sc_file;
	spin_lock(&nf->fi_lock);
	file = find_any_file_locked(nf);
	if (!file)
		goto out;

	seq_printf(s, "- ");
	nfs4_show_stateid(s, &st->sc_stateid);
	seq_printf(s, ": { type: lock, ");

	/*
	 * Note: a lock stateid isn't really the same thing as a lock,
	 * it's the locking state held by one owner on a file, and there
	 * may be multiple (or no) lock ranges associated with it.
	 * (The same is true of open stateids.)
	 */

	nfs4_show_superblock(s, file);
	/* XXX: open stateid? */
	seq_printf(s, ", ");
	nfs4_show_fname(s, file);
	seq_printf(s, ", ");
	nfs4_show_owner(s, oo);
	seq_printf(s, " }\n");
out:
	spin_unlock(&nf->fi_lock);
	return 0;
}

static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_delegation *ds;
	struct nfs4_file *nf;
	struct nfsd_file *file;

	ds = delegstateid(st);
	nf = st->sc_file;
	spin_lock(&nf->fi_lock);
	file = find_deleg_file_locked(nf);
	if (!file)
		goto out;

	seq_printf(s, "- ");
	nfs4_show_stateid(s, &st->sc_stateid);
	seq_printf(s, ": { type: deleg, ");

	/* Kinda dead code as long as we only support read delegs: */
	seq_printf(s, "access: %s, ",
		ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");

	/* XXX: lease time, whether it's being recalled. */

	nfs4_show_superblock(s, file);
	seq_printf(s, ", ");
	nfs4_show_fname(s, file);
	seq_printf(s, " }\n");
out:
	spin_unlock(&nf->fi_lock);
	return 0;
}

static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
{
	struct nfs4_layout_stateid *ls;
	struct nfsd_file *file;

	ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
	file = ls->ls_file;

	seq_printf(s, "- ");
	nfs4_show_stateid(s, &st->sc_stateid);
	seq_printf(s, ": { type: layout, ");

	/* XXX: What else would be useful? */

	nfs4_show_superblock(s, file);
	seq_printf(s, ", ");
	nfs4_show_fname(s, file);
	seq_printf(s, " }\n");

	return 0;
}

static int states_show(struct seq_file *s, void *v)
{
	struct nfs4_stid *st = v;

	switch (st->sc_type) {
	case NFS4_OPEN_STID:
		return nfs4_show_open(s, st);
	case NFS4_LOCK_STID:
		return nfs4_show_lock(s, st);
	case NFS4_DELEG_STID:
		return nfs4_show_deleg(s, st);
	case NFS4_LAYOUT_STID:
		return nfs4_show_layout(s, st);
	default:
		return 0; /* XXX: or SEQ_SKIP? */
	}
	/* XXX: copy stateids? */
}

static struct seq_operations states_seq_ops = {
	.start = states_start,
	.next = states_next,
	.stop = states_stop,
	.show = states_show
};

static int client_states_open(struct inode *inode, struct file *file)
{
	struct seq_file *s;
	struct nfs4_client *clp;
	int ret;

	clp = get_nfsdfs_clp(inode);
	if (!clp)
		return -ENXIO;

	ret = seq_open(file, &states_seq_ops);
	if (ret)
		return ret;
	s = file->private_data;
	s->private = clp;
	return 0;
}

static int client_opens_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct nfs4_client *clp = m->private;

	/* XXX: alternatively, we could get/drop in seq start/stop */
	drop_client(clp);
	return 0;
}

static const struct file_operations client_states_fops = {
	.open = client_states_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = client_opens_release,
};

/*
 * Normally we refuse to destroy clients that are in use, but here the
 * administrator is telling us to just do it.  We also want to wait
 * so the caller has a guarantee that the client's locks are gone by
 * the time the write returns:
 */
static void force_expire_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	bool already_expired;

	trace_nfsd_clid_admin_expired(&clp->cl_clientid);

	spin_lock(&nn->client_lock);
	clp->cl_time = 0;
	spin_unlock(&nn->client_lock);

	wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
	spin_lock(&nn->client_lock);
	already_expired = list_empty(&clp->cl_lru);
	if (!already_expired)
		unhash_client_locked(clp);
	spin_unlock(&nn->client_lock);

	if (!already_expired)
		expire_client(clp);
	else
		wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
}

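/*
 * Handle a write to the per-client "ctl" file.  Only the literal
 * string "expire\n" is accepted; for example (assuming the usual
 * nfsdfs mount point and a client directory numbered 42):
 *
 *	# echo expire > /proc/fs/nfsd/clients/42/ctl
 */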
static ssize_t client_ctl_write(struct file *file, const char __user *buf,
		size_t size, loff_t *pos)
{
	char *data;
	struct nfs4_client *clp;

	data = simple_transaction_get(file, buf, size);
	if (IS_ERR(data))
		return PTR_ERR(data);
	if (size != 7 || 0 != memcmp(data, "expire\n", 7))
		return -EINVAL;
	clp = get_nfsdfs_clp(file_inode(file));
	if (!clp)
		return -ENXIO;
	force_expire_client(clp);
	drop_client(clp);
	return 7;
}

static const struct file_operations client_ctl_fops = {
	.write = client_ctl_write,
	.release = simple_transaction_release,
};

static const struct tree_descr client_files[] = {
	[0] = {"info", &client_info_fops, S_IRUSR},
	[1] = {"states", &client_states_fops, S_IRUSR},
	[2] = {"ctl", &client_ctl_fops, S_IWUSR},
	[3] = {""},
};

static int
nfsd4_cb_recall_any_done(struct nfsd4_callback *cb,
				struct rpc_task *task)
{
	trace_nfsd_cb_recall_any_done(cb, task);
	switch (task->tk_status) {
	case -NFS4ERR_DELAY:
		rpc_delay(task, 2 * HZ);
		return 0;
	default:
		return 1;
	}
}

static void
nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
{
	struct nfs4_client *clp = cb->cb_clp;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
	put_client_renew_locked(clp);
	spin_unlock(&nn->client_lock);
}

static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = {
	.done = nfsd4_cb_recall_any_done,
	.release = nfsd4_cb_recall_any_release,
};

static struct nfs4_client *create_client(struct xdr_netobj name,
		struct svc_rqst *rqstp, nfs4_verifier *verf)
{
	struct nfs4_client *clp;
	struct sockaddr *sa = svc_addr(rqstp);
	int ret;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct dentry *dentries[ARRAY_SIZE(client_files)];

	clp = alloc_client(name, nn);
	if (clp == NULL)
		return NULL;

	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
	if (ret) {
		free_client(clp);
		return NULL;
	}
	gen_clid(clp, nn);
	kref_init(&clp->cl_nfsdfs.cl_ref);
	nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
	clp->cl_time = ktime_get_boottime_seconds();
	clear_bit(0, &clp->cl_cb_slot_busy);
	copy_verf(clp, verf);
	memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
	clp->cl_cb_session = NULL;
	clp->net = net;
	clp->cl_nfsd_dentry = nfsd_client_mkdir(
		nn, &clp->cl_nfsdfs,
		clp->cl_clientid.cl_id - nn->clientid_base,
		client_files, dentries);
	clp->cl_nfsd_info_dentry = dentries[0];
	if (!clp->cl_nfsd_dentry) {
		free_client(clp);
		return NULL;
	}
	clp->cl_ra = kzalloc(sizeof(*clp->cl_ra), GFP_KERNEL);
	if (!clp->cl_ra) {
		free_client(clp);
		return NULL;
	}
	clp->cl_ra_time = 0;
	nfsd4_init_cb(&clp->cl_ra->ra_cb, clp, &nfsd4_cb_recall_any_ops,
			NFSPROC4_CLNT_CB_RECALL_ANY);
	return clp;
}

static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct nfs4_client *clp;

	while (*new) {
		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
		parent = *new;

		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&new_clp->cl_namenode, parent, new);
	rb_insert_color(&new_clp->cl_namenode, root);
}

static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{
	int cmp;
	struct rb_node *node = root->rb_node;
	struct nfs4_client *clp;

	while (node) {
		clp = rb_entry(node, struct nfs4_client, cl_namenode);
		cmp = compare_blob(&clp->cl_name, name);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			return clp;
	}
	return NULL;
}

static void
add_to_unconfirmed(struct nfs4_client *clp)
{
	unsigned int idhashval;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
	renew_client_locked(clp);
}

static void
move_to_confirmed(struct nfs4_client *clp)
{
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	add_clp_to_name_tree(clp, &nn->conf_name_tree);
	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	trace_nfsd_clid_confirmed(&clp->cl_clientid);
	renew_client_locked(clp);
}

static struct nfs4_client *
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid)) {
			if ((bool)clp->cl_minorversion != sessions)
				return NULL;
			renew_client_locked(clp);
			return clp;
		}
	}
	return NULL;
}

static struct nfs4_client *
find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->conf_id_hashtbl;

	lockdep_assert_held(&nn->client_lock);
	return find_client_in_id_table(tbl, clid, sessions);
}

static struct nfs4_client *
find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->unconf_id_hashtbl;

	lockdep_assert_held(&nn->client_lock);
	return find_client_in_id_table(tbl, clid, sessions);
}

static bool clp_used_exchangeid(struct nfs4_client *clp)
{
	return clp->cl_exchange_flags != 0;
}

static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->conf_name_tree);
}

static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	lockdep_assert_held(&nn->client_lock);
	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
}

static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
	struct sockaddr *sa = svc_addr(rqstp);
	u32 scopeid = rpc_get_scope_id(sa);
	unsigned short expected_family;

	/* Currently, we only support tcp and tcp6 for the callback channel */
	if (se->se_callback_netid_len == 3 &&
	    !memcmp(se->se_callback_netid_val, "tcp", 3))
		expected_family = AF_INET;
	else if (se->se_callback_netid_len == 4 &&
		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
		expected_family = AF_INET6;
	else
		goto out_err;

	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
					    se->se_callback_addr_len,
					    (struct sockaddr *)&conn->cb_addr,
					    sizeof(conn->cb_addr));

	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
		goto out_err;

	if (conn->cb_addr.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;

	conn->cb_prog = se->se_callback_prog;
	conn->cb_ident = se->se_callback_ident;
	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
	trace_nfsd_cb_args(clp, conn);
	return;
out_err:
	conn->cb_addr.ss_family = AF_UNSPEC;
	conn->cb_addrlen = 0;
	trace_nfsd_cb_nodelegs(clp);
	return;
}

/*
 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
 */
static void
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
	struct xdr_buf *buf = resp->xdr->buf;
	struct nfsd4_slot *slot = resp->cstate.slot;
	unsigned int base;

	dprintk("--> %s slot %p\n", __func__, slot);

	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
	slot->sl_opcnt = resp->opcnt;
	slot->sl_status = resp->cstate.status;
	free_svc_cred(&slot->sl_cred);
	copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);

	if (!nfsd4_cache_this(resp)) {
		slot->sl_flags &= ~NFSD4_SLOT_CACHED;
		return;
	}
	slot->sl_flags |= NFSD4_SLOT_CACHED;

	base = resp->cstate.data_offset;
	slot->sl_datalen = buf->len - base;
	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
		WARN(1, "%s: sessions DRC could not cache compound\n",
			__func__);
	return;
}

/*
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE, encode the uncached-reply error on the next
 * operation, which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	if (slot->sl_flags & NFSD4_SLOT_CACHED)
		return op->status;
	if (args->opcnt == 1) {
		/*
		 * The original operation wasn't a solo sequence--we
		 * always cache those--so this retry must not match the
		 * original:
		 */
		op->status = nfserr_seq_false_retry;
	} else {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	return op->status;
}

/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 */
static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	struct xdr_stream *xdr = resp->xdr;
	__be32 *p;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status)
		return status;

	p = xdr_reserve_space(xdr, slot->sl_datalen);
	if (!p) {
		WARN_ON_ONCE(1);
		return nfserr_serverfault;
	}
	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
	xdr_commit_encode(xdr);

	resp->opcnt = slot->sl_opcnt;
	return slot->sl_status;
}

/*
 * Set the exchange_id flags returned by the server.
 */
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
#ifdef CONFIG_NFSD_PNFS
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
#else
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
#endif

	/* Referrals are supported, Migration is not. */
	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;

	/* set the wire flags to return to client. */
	clid->flags = new->cl_exchange_flags;
}

static bool client_has_openowners(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;

	list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
		if (!list_empty(&oo->oo_owner.so_stateids))
			return true;
	}
	return false;
}

static bool client_has_state(struct nfs4_client *clp)
{
	return client_has_openowners(clp)
#ifdef CONFIG_NFSD_PNFS
		|| !list_empty(&clp->cl_lo_states)
#endif
		|| !list_empty(&clp->cl_delegations)
		|| !list_empty(&clp->cl_sessions)
		|| !list_empty(&clp->async_copies);
}

static __be32 copy_impl_id(struct nfs4_client *clp,
		struct nfsd4_exchange_id *exid)
{
	if (!exid->nii_domain.data)
		return 0;
	xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
	if (!clp->cl_nii_domain.data)
		return nfserr_jukebox;
	xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
	if (!clp->cl_nii_name.data)
		return nfserr_jukebox;
	clp->cl_nii_time = exid->nii_time;
	return 0;
}

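/*
 * EXCHANGE_ID processing.  The confirmed/unconfirmed client handling
 * below follows the case analysis in RFC 5661, section 18.35.4; the
 * numbered "case N" comments in the body refer to that table.
 */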
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_exchange_id *exid = &u->exchange_id;
	struct nfs4_client *conf, *new;
	struct nfs4_client *unconf = NULL;
	__be32 status;
	char addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier verf = exid->verifier;
	struct sockaddr *sa = svc_addr(rqstp);
	bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %u\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
		return nfserr_inval;

	new = create_client(exid->clname, rqstp, &verf);
	if (new == NULL)
		return nfserr_jukebox;
	status = copy_impl_id(new, exid);
	if (status)
		goto out_nolock;

	switch (exid->spa_how) {
	case SP4_MACH_CRED:
		exid->spo_must_enforce[0] = 0;
		exid->spo_must_enforce[1] = (
			1 << (OP_BIND_CONN_TO_SESSION - 32) |
			1 << (OP_EXCHANGE_ID - 32) |
			1 << (OP_CREATE_SESSION - 32) |
			1 << (OP_DESTROY_SESSION - 32) |
			1 << (OP_DESTROY_CLIENTID - 32));

		exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
					1 << (OP_OPEN_DOWNGRADE) |
					1 << (OP_LOCKU) |
					1 << (OP_DELEGRETURN));

		exid->spo_must_allow[1] &= (
					1 << (OP_TEST_STATEID - 32) |
					1 << (OP_FREE_STATEID - 32));
		if (!svc_rqst_integrity_protected(rqstp)) {
			status = nfserr_inval;
			goto out_nolock;
		}
		/*
		 * Sometimes userspace doesn't give us a principal.
		 * Which is a bug, really.  Anyway, we can't enforce
		 * MACH_CRED in that case, better to give up now:
		 */
		if (!new->cl_cred.cr_principal &&
					!new->cl_cred.cr_raw_principal) {
			status = nfserr_serverfault;
			goto out_nolock;
		}
		new->cl_mach_cred = true;
		break;
	case SP4_NONE:
		break;
	default:	/* checked by xdr code */
		WARN_ON_ONCE(1);
		fallthrough;
	case SP4_SSV:
		status = nfserr_encr_alg_unsupp;
		goto out_nolock;
	}

	/* Cases below refer to rfc 5661 section 18.35.4: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&exid->clname, nn);
	if (conf) {
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

		if (update) {
			if (!clp_used_exchangeid(conf)) { /* buggy client */
				status = nfserr_inval;
				goto out;
			}
			if (!nfsd4_mach_creds_match(conf, rqstp)) {
				status = nfserr_wrong_cred;
				goto out;
			}
			if (!creds_match) { /* case 9 */
				status = nfserr_perm;
				goto out;
			}
			if (!verfs_match) { /* case 8 */
				status = nfserr_not_same;
				goto out;
			}
			/* case 6 */
			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
			trace_nfsd_clid_confirmed_r(conf);
			goto out_copy;
		}
		if (!creds_match) { /* case 3 */
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
				trace_nfsd_clid_cred_mismatch(conf, rqstp);
				goto out;
			}
			goto out_new;
		}
		if (verfs_match) { /* case 2 */
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
			trace_nfsd_clid_confirmed_r(conf);
			goto out_copy;
		}
		/* case 5, client reboot */
		trace_nfsd_clid_verf_mismatch(conf, rqstp, &verf);
		conf = NULL;
		goto out_new;
	}

	if (update) { /* case 7 */
		status = nfserr_noent;
		goto out;
	}

	unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
	if (unconf) /* case 4, possible retry or client restart */
		unhash_client_locked(unconf);

	/* case 1, new owner ID */
	trace_nfsd_clid_fresh(new);

out_new:
	if (conf) {
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
		trace_nfsd_clid_replaced(&conf->cl_clientid);
	}
	new->cl_minorversion = cstate->minorversion;
	new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
	new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];

	add_to_unconfirmed(new);
	swap(new, conf);
out_copy:
	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
	exid->clientid.cl_id = conf->cl_clientid.cl_id;

	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
	nfsd4_set_ex_flags(conf, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
	status = nfs_ok;

out:
	spin_unlock(&nn->client_lock);
out_nolock:
	if (new)
		expire_client(new);
	if (unconf) {
		trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
		expire_client(unconf);
	}
	return status;
}

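/*
 * Slot seqid check used by CREATE_SESSION and SEQUENCE.  Because the
 * comparison is done in unsigned 32-bit arithmetic, wraparound works
 * out naturally: e.g. with slot_seqid == 0xffffffff, a new request
 * carrying seqid == 0 satisfies seqid == slot_seqid + 1.
 */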
static __be32
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
		slot_seqid);

	/* The slot is in use, and no response has been sent. */
	if (slot_inuse) {
		if (seqid == slot_seqid)
			return nfserr_jukebox;
		else
			return nfserr_seq_misordered;
	}
	/* Note unsigned 32-bit arithmetic handles wraparound: */
	if (likely(seqid == slot_seqid + 1))
		return nfs_ok;
	if (seqid == slot_seqid)
		return nfserr_replay_cache;
	return nfserr_seq_misordered;
}

3466 * Cache the create session result into the create session single DRC
3467 * slot cache by saving the xdr structure. sl_seqid has been set.
3468 * Do this for solo or embedded create session operations.
3469 */
3470static void
3471nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
3472 struct nfsd4_clid_slot *slot, __be32 nfserr)
3473{
3474 slot->sl_status = nfserr;
3475 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
3476}
3477
3478static __be32
3479nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
3480 struct nfsd4_clid_slot *slot)
3481{
3482 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
3483 return slot->sl_status;
3484}
3485
3486#define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
3487 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
3488 1 + /* MIN tag is length with zero, only length */ \
3489 3 + /* version, opcount, opcode */ \
3490 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3491 /* seqid, slotID, slotID, cache */ \
3492 4 ) * sizeof(__be32))
3493
3494#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
3495 2 + /* verifier: AUTH_NULL, length 0 */\
3496 1 + /* status */ \
3497 1 + /* MIN tag is length with zero, only length */ \
3498 3 + /* opcount, opcode, opstatus*/ \
3499 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3500 /* seqid, slotID, slotID, slotID, status */ \
3501 5 ) * sizeof(__be32))
3502
3503static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
3504{
3505 u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
3506
3507 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
3508 return nfserr_toosmall;
3509 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
3510 return nfserr_toosmall;
3511 ca->headerpadsz = 0;
3512 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
3513 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
3514 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
3515 ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
3516 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
3517 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
3518 /*
3519 * Note decreasing slot size below client's request may make it
3520 * difficult for client to function correctly, whereas
3521 * decreasing the number of slots will (just?) affect
3522 * performance. When short on memory we therefore prefer to
3523 * decrease number of slots instead of their size. Clients that
3524 * request larger slots than they need will get poor results:
3525 * Note that we always allow at least one slot, because our
3526 * accounting is soft and provides no guarantees either way.
3527 */
3528 ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
3529
3530 return nfs_ok;
3531}
3532
3533/*
3534 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
3535 * These are based on similar macros in linux/sunrpc/msg_prot.h .
3536 */
3537#define RPC_MAX_HEADER_WITH_AUTH_SYS \
3538 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
3539
3540#define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
3541 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
3542
3543#define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
3544 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
3545#define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
3546 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
3547 sizeof(__be32))
3548
3549static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
3550{
3551 ca->headerpadsz = 0;
3552
3553 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
3554 return nfserr_toosmall;
3555 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
3556 return nfserr_toosmall;
3557 ca->maxresp_cached = 0;
3558 if (ca->maxops < 2)
3559 return nfserr_toosmall;
3560
3561 return nfs_ok;
3562}
3563
3564static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
3565{
3566 switch (cbs->flavor) {
3567 case RPC_AUTH_NULL:
3568 case RPC_AUTH_UNIX:
3569 return nfs_ok;
3570 default:
3571 /*
3572 * GSS case: the spec doesn't allow us to return this
3573 * error. But it also doesn't allow us not to support
3574 * GSS.
3575 * I'd rather this fail hard than return some error the
3576 * client might think it can already handle:
3577 */
3578 return nfserr_encr_alg_unsupp;
3579 }
3580}
3581
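/*
 * CREATE_SESSION.  Roughly: validate the channel attributes, find the
 * (un)confirmed client for the given clientid, replay or reject based
 * on the single create-session slot (cl_cs_slot), confirm the client
 * if needed, then hash the new session and set up its backchannel.
 */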
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
	struct nfsd4_create_session *cr_ses = &u->create_session;
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	struct nfsd4_session *new;
	struct nfsd4_conn *conn;
	struct nfsd4_clid_slot *cs_slot = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;
	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
	if (status)
		return status;
	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
	if (status)
		return status;
	status = check_backchannel_attrs(&cr_ses->back_channel);
	if (status)
		goto out_release_drc_mem;
	status = nfserr_jukebox;
	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
	if (!new)
		goto out_release_drc_mem;
	conn = alloc_conn_from_crses(rqstp, cr_ses);
	if (!conn)
		goto out_free_session;

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		status = nfserr_wrong_cred;
		if (!nfsd4_mach_creds_match(conf, rqstp))
			goto out_free_conn;
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			if (status == nfserr_replay_cache)
				status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out_free_conn;
		}
	} else if (unconf) {
		status = nfserr_clid_inuse;
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			trace_nfsd_clid_cred_mismatch(unconf, rqstp);
			goto out_free_conn;
		}
		status = nfserr_wrong_cred;
		if (!nfsd4_mach_creds_match(unconf, rqstp))
			goto out_free_conn;
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out_free_conn;
			}
			trace_nfsd_clid_replaced(&old->cl_clientid);
		}
		move_to_confirmed(unconf);
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out_free_conn;
	}
	status = nfs_ok;
	/* Persistent sessions are not supported */
	cr_ses->flags &= ~SESSION4_PERSIST;
	/* Upshifting from TCP to RDMA is not supported */
	cr_ses->flags &= ~SESSION4_RDMA;

	init_session(rqstp, new, conf, cr_ses);
	nfsd4_get_session_locked(new);

	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the client_lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	spin_unlock(&nn->client_lock);
	if (conf == unconf)
		fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
	/* init connection and backchannel */
	nfsd4_init_conn(rqstp, conn, new);
	nfsd4_put_session(new);
	if (old)
		expire_client(old);
	return status;
out_free_conn:
	spin_unlock(&nn->client_lock);
	free_conn(conn);
	if (old)
		expire_client(old);
out_free_session:
	__free_session(new);
out_release_drc_mem:
	nfsd4_put_drc_mem(&cr_ses->fore_channel);
	return status;
}

static __be32 nfsd4_map_bcts_dir(u32 *dir)
{
	switch (*dir) {
	case NFS4_CDFC4_FORE:
	case NFS4_CDFC4_BACK:
		return nfs_ok;
	case NFS4_CDFC4_FORE_OR_BOTH:
	case NFS4_CDFC4_BACK_OR_BOTH:
		*dir = NFS4_CDFC4_BOTH;
		return nfs_ok;
	}
	return nfserr_inval;
}

__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
	struct nfsd4_session *session = cstate->session;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	__be32 status;

	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
	if (status)
		return status;
	spin_lock(&nn->client_lock);
	session->se_cb_prog = bc->bc_cb_program;
	session->se_cb_sec = bc->bc_cb_sec;
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback(session->se_client);

	return nfs_ok;
}

static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
{
	struct nfsd4_conn *c;

	list_for_each_entry(c, &s->se_conns, cn_persession) {
		if (c->cn_xprt == xpt) {
			return c;
		}
	}
	return NULL;
}

static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
		struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
{
	struct nfs4_client *clp = session->se_client;
	struct svc_xprt *xpt = rqst->rq_xprt;
	struct nfsd4_conn *c;
	__be32 status;

	/* Following the last paragraph of RFC 5661 Section 18.34.3: */
	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(xpt, session);
	if (!c)
		status = nfserr_noent;
	else if (req == c->cn_flags)
		status = nfs_ok;
	else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
				c->cn_flags != NFS4_CDFC4_BACK)
		status = nfs_ok;
	else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
				c->cn_flags != NFS4_CDFC4_FORE)
		status = nfs_ok;
	else
		status = nfserr_inval;
	spin_unlock(&clp->cl_lock);
	if (status == nfs_ok && conn)
		*conn = c;
	return status;
}

__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
	__be32 status;
	struct nfsd4_conn *conn;
	struct nfsd4_session *session;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
	spin_unlock(&nn->client_lock);
	if (!session)
		goto out_no_session;
	status = nfserr_wrong_cred;
	if (!nfsd4_mach_creds_match(session->se_client, rqstp))
		goto out;
	status = nfsd4_match_existing_connection(rqstp, session,
			bcts->dir, &conn);
	if (status == nfs_ok) {
		if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
				bcts->dir == NFS4_CDFC4_BACK)
			conn->cn_flags |= NFS4_CDFC4_BACK;
		nfsd4_probe_callback(session->se_client);
		goto out;
	}
	if (status == nfserr_inval)
		goto out;
	status = nfsd4_map_bcts_dir(&bcts->dir);
	if (status)
		goto out;
	conn = alloc_conn(rqstp, bcts->dir);
	status = nfserr_jukebox;
	if (!conn)
		goto out;
	nfsd4_init_conn(rqstp, conn, session);
	status = nfs_ok;
out:
	nfsd4_put_session(session);
out_no_session:
	return status;
}

static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
{
	if (!cstate->session)
		return false;
	return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
}

__be32
nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
	struct nfsd4_session *ses;
	__be32 status;
	int ref_held_by_me = 0;
	struct net *net = SVC_NET(r);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	status = nfserr_not_only_op;
	if (nfsd4_compound_in_session(cstate, sessionid)) {
		if (!nfsd4_last_compound_op(r))
			goto out;
		ref_held_by_me++;
	}
	dump_sessionid(__func__, sessionid);
	spin_lock(&nn->client_lock);
	ses = find_in_sessionid_hashtbl(sessionid, net, &status);
	if (!ses)
		goto out_client_lock;
	status = nfserr_wrong_cred;
	if (!nfsd4_mach_creds_match(ses->se_client, r))
		goto out_put_session;
	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
	if (status)
		goto out_put_session;
	unhash_session(ses);
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback_sync(ses->se_client);

	spin_lock(&nn->client_lock);
	status = nfs_ok;
out_put_session:
	nfsd4_put_session_locked(ses);
out_client_lock:
	spin_unlock(&nn->client_lock);
out:
	return status;
}

3874static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
3875{
3876 struct nfs4_client *clp = ses->se_client;
3877 struct nfsd4_conn *c;
3878 __be32 status = nfs_ok;
3879 int ret;
3880
3881 spin_lock(&clp->cl_lock);
3882 c = __nfsd4_find_conn(new->cn_xprt, ses);
3883 if (c)
3884 goto out_free;
3885 status = nfserr_conn_not_bound_to_session;
3886 if (clp->cl_mach_cred)
3887 goto out_free;
3888 __nfsd4_hash_conn(new, ses);
3889 spin_unlock(&clp->cl_lock);
3890 ret = nfsd4_register_conn(new);
3891 if (ret)
3892 /* oops; xprt is already down: */
3893 nfsd4_conn_lost(&new->cn_xpt_user);
3894 return nfs_ok;
3895out_free:
3896 spin_unlock(&clp->cl_lock);
3897 free_conn(new);
3898 return status;
3899}
3900
3901static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
3902{
3903 struct nfsd4_compoundargs *args = rqstp->rq_argp;
3904
3905 return args->opcnt > session->se_fchannel.maxops;
3906}
3907
3908static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
3909 struct nfsd4_session *session)
3910{
3911 struct xdr_buf *xb = &rqstp->rq_arg;
3912
3913 return xb->len > session->se_fchannel.maxreq_sz;
3914}
3915
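/*
 * The slot's sequence number matched, so this should be a replay.
 * Sanity-check that the request is consistent with the cached reply
 * before returning it; answering a false retry from the cache would
 * hand the client a reply to a different request.
 */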
static bool replay_matches_cache(struct svc_rqst *rqstp,
		 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
{
	struct nfsd4_compoundargs *argp = rqstp->rq_argp;

	if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
	    (bool)seq->cachethis)
		return false;
	/*
	 * If there's an error then the reply can have fewer ops than
	 * the call.
	 */
	if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
		return false;
	/*
	 * But if we cached a reply with *more* ops than the call you're
	 * sending us now, then this new call is clearly not really a
	 * replay of the old one:
	 */
	if (slot->sl_opcnt > argp->opcnt)
		return false;
	/* This is the only check explicitly called for by the spec: */
	if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
		return false;
	/*
	 * There may be more comparisons we could actually do, but the
	 * spec doesn't require us to catch every case where the calls
	 * don't match (that would require caching the call as well as
	 * the reply), so we don't bother.
	 */
	return true;
}

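/**
 * nfsd4_sequence - process an NFSv4.1 SEQUENCE operation
 * @rqstp: the RPC transaction being executed
 * @cstate: the COMPOUND's per-request state
 * @u: SEQUENCE arguments
 *
 * SEQUENCE must be the first operation of the COMPOUND. Validate the
 * session and slot, detect replays via the slot's sequence id and
 * answer them from the reply cache, bind the connection to the
 * session if needed, and reserve enough reply space to cache the
 * response when the client asks for that.
 */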
__be32
nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_sequence *seq = &u->sequence;
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct xdr_stream *xdr = resp->xdr;
	struct nfsd4_session *session;
	struct nfs4_client *clp;
	struct nfsd4_slot *slot;
	struct nfsd4_conn *conn;
	__be32 status;
	int buflen;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (resp->opcnt != 1)
		return nfserr_sequence_pos;

	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 * below.
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
	if (!conn)
		return nfserr_jukebox;

	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
	if (!session)
		goto out_no_session;
	clp = session->se_client;

	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))
		goto out_put_session;

	status = nfserr_req_too_big;
	if (nfsd4_request_too_big(rqstp, session))
		goto out_put_session;

	status = nfserr_badslot;
	if (seq->slotid >= session->se_fchannel.maxreqs)
		goto out_put_session;

	slot = session->se_slots[seq->slotid];
	dprintk("%s: slotid %d\n", __func__, seq->slotid);

	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs, which is used to encode
	 * sr_highest_slotid and sr_target_slotid to maxslots */
	seq->maxslots = session->se_fchannel.maxreqs;

	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
					slot->sl_flags & NFSD4_SLOT_INUSE);
	if (status == nfserr_replay_cache) {
		status = nfserr_seq_misordered;
		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
			goto out_put_session;
		status = nfserr_seq_false_retry;
		if (!replay_matches_cache(rqstp, seq, slot))
			goto out_put_session;
		cstate->slot = slot;
		cstate->session = session;
		cstate->clp = clp;
		/* Return the cached reply status and set cstate->status
		 * for nfsd4_proc_compound processing */
		status = nfsd4_replay_cache_entry(resp, seq);
		cstate->status = nfserr_replay_cache;
		goto out;
	}
	if (status)
		goto out_put_session;

	status = nfsd4_sequence_check_conn(conn, session);
	conn = NULL;
	if (status)
		goto out_put_session;

	buflen = (seq->cachethis) ?
			session->se_fchannel.maxresp_cached :
			session->se_fchannel.maxresp_sz;
	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
				    nfserr_rep_too_big;
	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
		goto out_put_session;
	svc_reserve(rqstp, buflen);

	status = nfs_ok;
	/* Success! bump slot seqid */
	slot->sl_seqid = seq->seqid;
	slot->sl_flags |= NFSD4_SLOT_INUSE;
	if (seq->cachethis)
		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
	else
		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;

	cstate->slot = slot;
	cstate->session = session;
	cstate->clp = clp;

out:
	switch (clp->cl_cb_state) {
	case NFSD4_CB_DOWN:
		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
		break;
	case NFSD4_CB_FAULT:
		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
		break;
	default:
		seq->status_flags = 0;
	}
	if (!list_empty(&clp->cl_revoked))
		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
out_no_session:
	if (conn)
		free_conn(conn);
	spin_unlock(&nn->client_lock);
	return status;
out_put_session:
	nfsd4_put_session_locked(session);
	goto out_no_session;
}

void
nfsd4_sequence_done(struct nfsd4_compoundres *resp)
{
	struct nfsd4_compound_state *cs = &resp->cstate;

	if (nfsd4_has_session(cs)) {
		if (cs->status != nfserr_replay_cache) {
			nfsd4_store_cache_entry(resp);
			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
		}
		/* Drop session reference that was taken in nfsd4_sequence() */
		nfsd4_put_session(cs->session);
	} else if (cs->clp)
		put_client_renew(cs->clp);
}

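/*
 * DESTROY_CLIENTID: expire an unconfirmed client, or a confirmed
 * client that no longer holds any state, after verifying the machine
 * credential.
 */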
__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *clp = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
	conf = find_confirmed_client(&dc->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		if (client_has_state(conf)) {
			status = nfserr_clientid_busy;
			goto out;
		}
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
		clp = conf;
	} else if (unconf)
		clp = unconf;
	else {
		status = nfserr_stale_clientid;
		goto out;
	}
	if (!nfsd4_mach_creds_match(clp, rqstp)) {
		clp = NULL;
		status = nfserr_wrong_cred;
		goto out;
	}
	trace_nfsd_clid_destroyed(&clp->cl_clientid);
	unhash_client_locked(clp);
out:
	spin_unlock(&nn->client_lock);
	if (clp)
		expire_client(clp);
	return status;
}

__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
	struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
	struct nfs4_client *clp = cstate->clp;
	__be32 status = 0;

	if (rc->rca_one_fs) {
		if (!cstate->current_fh.fh_dentry)
			return nfserr_nofilehandle;
		/*
		 * We don't take advantage of the rca_one_fs case.
		 * That's OK, it's optional, we can safely ignore it.
		 */
		return nfs_ok;
	}

	status = nfserr_complete_already;
	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
		goto out;

	status = nfserr_stale_clientid;
	if (is_client_expired(clp))
		/*
		 * The following error isn't really legal.
		 * But we only get here if the client just explicitly
		 * destroyed itself. Surely it no longer cares what
		 * error it gets back on an operation for the dead
		 * client.
		 */
		goto out;

	status = nfs_ok;
	trace_nfsd_clid_reclaim_complete(&clp->cl_clientid);
	nfsd4_client_record_create(clp);
	inc_reclaim_complete(clp);
out:
	return status;
}

__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  union nfsd4_op_u *u)
{
	struct nfsd4_setclientid *setclid = &u->setclientid;
	struct xdr_netobj clname = setclid->se_name;
	nfs4_verifier clverifier = setclid->se_verf;
	struct nfs4_client *conf, *new;
	struct nfs4_client *unconf = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new = create_client(clname, rqstp, &clverifier);
	if (new == NULL)
		return nfserr_jukebox;
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&clname, nn);
	if (conf && client_has_state(conf)) {
		status = nfserr_clid_inuse;
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			trace_nfsd_clid_cred_mismatch(conf, rqstp);
			goto out;
		}
	}
	unconf = find_unconfirmed_client_by_name(&clname, nn);
	if (unconf)
		unhash_client_locked(unconf);
	if (conf) {
		if (same_verf(&conf->cl_verifier, &clverifier)) {
			copy_clid(new, conf);
			gen_confirm(new, nn);
		} else
			trace_nfsd_clid_verf_mismatch(conf, rqstp,
						      &clverifier);
	} else
		trace_nfsd_clid_fresh(new);
	new->cl_minorversion = 0;
	gen_callback(new, setclid, rqstp);
	add_to_unconfirmed(new);
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
	new = NULL;
	status = nfs_ok;
out:
	spin_unlock(&nn->client_lock);
	if (new)
		free_client(new);
	if (unconf) {
		trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
		expire_client(unconf);
	}
	return status;
}

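/*
 * SETCLIENTID_CONFIRM: verify the confirm cookie handed out by
 * SETCLIENTID. On success the unconfirmed record becomes (or updates)
 * the confirmed client, any confirmed record it replaces is expired,
 * and the callback channel is probed.
 */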
__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			union nfsd4_op_u *u)
{
	struct nfsd4_setclientid_confirm *setclientid_confirm =
			&u->setclientid_confirm;
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
	clientid_t *clid = &setclientid_confirm->sc_clientid;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	spin_lock(&nn->client_lock);
	conf = find_confirmed_client(clid, false, nn);
	unconf = find_unconfirmed_client(clid, false, nn);
	/*
	 * We try hard to give out unique clientids, so if we get an
	 * attempt to confirm the same clientid with a different cred,
	 * the client may be buggy; this should never happen.
	 *
	 * Nevertheless, RFC 7530 recommends INUSE for this case:
	 */
	status = nfserr_clid_inuse;
	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
		trace_nfsd_clid_cred_mismatch(unconf, rqstp);
		goto out;
	}
	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
		trace_nfsd_clid_cred_mismatch(conf, rqstp);
		goto out;
	}
	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
		if (conf && same_verf(&confirm, &conf->cl_confirm)) {
			status = nfs_ok;
		} else
			status = nfserr_stale_clientid;
		goto out;
	}
	status = nfs_ok;
	if (conf) {
		old = unconf;
		unhash_client_locked(old);
		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
	} else {
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = nfserr_clid_inuse;
			if (client_has_state(old)
					&& !same_creds(&unconf->cl_cred,
							&old->cl_cred)) {
				old = NULL;
				goto out;
			}
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out;
			}
			trace_nfsd_clid_replaced(&old->cl_clientid);
		}
		move_to_confirmed(unconf);
		conf = unconf;
	}
	get_client_locked(conf);
	spin_unlock(&nn->client_lock);
	if (conf == unconf)
		fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
	nfsd4_probe_callback(conf);
	spin_lock(&nn->client_lock);
	put_client_renew_locked(conf);
out:
	spin_unlock(&nn->client_lock);
	if (old)
		expire_client(old);
	return status;
}

static struct nfs4_file *nfsd4_alloc_file(void)
{
	return kmem_cache_alloc(file_slab, GFP_KERNEL);
}

/* OPEN Share state helper functions */

static void nfsd4_file_init(const struct svc_fh *fh, struct nfs4_file *fp)
{
	refcount_set(&fp->fi_ref, 1);
	spin_lock_init(&fp->fi_lock);
	INIT_LIST_HEAD(&fp->fi_stateids);
	INIT_LIST_HEAD(&fp->fi_delegations);
	INIT_LIST_HEAD(&fp->fi_clnt_odstate);
	fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle);
	fp->fi_deleg_file = NULL;
	fp->fi_had_conflict = false;
	fp->fi_share_deny = 0;
	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
	memset(fp->fi_access, 0, sizeof(fp->fi_access));
	fp->fi_aliased = false;
	fp->fi_inode = d_inode(fh->fh_dentry);
#ifdef CONFIG_NFSD_PNFS
	INIT_LIST_HEAD(&fp->fi_lo_states);
	atomic_set(&fp->fi_lo_recalls, 0);
#endif
}

void
nfsd4_free_slabs(void)
{
	kmem_cache_destroy(client_slab);
	kmem_cache_destroy(openowner_slab);
	kmem_cache_destroy(lockowner_slab);
	kmem_cache_destroy(file_slab);
	kmem_cache_destroy(stateid_slab);
	kmem_cache_destroy(deleg_slab);
	kmem_cache_destroy(odstate_slab);
}

int
nfsd4_init_slabs(void)
{
	client_slab = kmem_cache_create("nfsd4_clients",
			sizeof(struct nfs4_client), 0, 0, NULL);
	if (client_slab == NULL)
		goto out;
	openowner_slab = kmem_cache_create("nfsd4_openowners",
			sizeof(struct nfs4_openowner), 0, 0, NULL);
	if (openowner_slab == NULL)
		goto out_free_client_slab;
	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
			sizeof(struct nfs4_lockowner), 0, 0, NULL);
	if (lockowner_slab == NULL)
		goto out_free_openowner_slab;
	file_slab = kmem_cache_create("nfsd4_files",
			sizeof(struct nfs4_file), 0, 0, NULL);
	if (file_slab == NULL)
		goto out_free_lockowner_slab;
	stateid_slab = kmem_cache_create("nfsd4_stateids",
			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
	if (stateid_slab == NULL)
		goto out_free_file_slab;
	deleg_slab = kmem_cache_create("nfsd4_delegations",
			sizeof(struct nfs4_delegation), 0, 0, NULL);
	if (deleg_slab == NULL)
		goto out_free_stateid_slab;
	odstate_slab = kmem_cache_create("nfsd4_odstate",
			sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
	if (odstate_slab == NULL)
		goto out_free_deleg_slab;
	return 0;

out_free_deleg_slab:
	kmem_cache_destroy(deleg_slab);
out_free_stateid_slab:
	kmem_cache_destroy(stateid_slab);
out_free_file_slab:
	kmem_cache_destroy(file_slab);
out_free_lockowner_slab:
	kmem_cache_destroy(lockowner_slab);
out_free_openowner_slab:
	kmem_cache_destroy(openowner_slab);
out_free_client_slab:
	kmem_cache_destroy(client_slab);
out:
	return -ENOMEM;
}

static unsigned long
nfsd4_state_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
{
	int count;
	struct nfsd_net *nn = container_of(shrink,
			struct nfsd_net, nfsd_client_shrinker);

	count = atomic_read(&nn->nfsd_courtesy_clients);
	if (!count)
		count = atomic_long_read(&num_delegations);
	if (count)
		queue_work(laundry_wq, &nn->nfsd_shrinker_work);
	return (unsigned long)count;
}

static unsigned long
nfsd4_state_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	return SHRINK_STOP;
}

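/*
 * Set per-net default lease and grace times and seed the clientid and
 * verifier counters. The client limit scales with available memory:
 * NFS4_CLIENTS_PER_GB per gigabyte of total RAM, with that value as
 * the floor.
 */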
void
nfsd4_init_leases_net(struct nfsd_net *nn)
{
	struct sysinfo si;
	u64 max_clients;

	nn->nfsd4_lease = 90;	/* default lease time */
	nn->nfsd4_grace = 90;
	nn->somebody_reclaimed = false;
	nn->track_reclaim_completes = false;
	nn->clverifier_counter = get_random_u32();
	nn->clientid_base = get_random_u32();
	nn->clientid_counter = nn->clientid_base + 1;
	nn->s2s_cp_cl_id = nn->clientid_counter++;

	atomic_set(&nn->nfs4_client_count, 0);
	si_meminfo(&si);
	max_clients = (u64)si.totalram * si.mem_unit / (1024 * 1024 * 1024);
	max_clients *= NFS4_CLIENTS_PER_GB;
	nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB);

	atomic_set(&nn->nfsd_courtesy_clients, 0);
}

static void init_nfs4_replay(struct nfs4_replay *rp)
{
	rp->rp_status = nfserr_serverfault;
	rp->rp_buflen = 0;
	rp->rp_buf = rp->rp_ibuf;
	mutex_init(&rp->rp_mutex);
}

static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
		struct nfs4_stateowner *so)
{
	if (!nfsd4_has_session(cstate)) {
		mutex_lock(&so->so_replay.rp_mutex);
		cstate->replay_owner = nfs4_get_stateowner(so);
	}
}

void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (so != NULL) {
		cstate->replay_owner = NULL;
		mutex_unlock(&so->so_replay.rp_mutex);
		nfs4_put_stateowner(so);
	}
}

static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
{
	struct nfs4_stateowner *sop;

	sop = kmem_cache_alloc(slab, GFP_KERNEL);
	if (!sop)
		return NULL;

	xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
	if (!sop->so_owner.data) {
		kmem_cache_free(slab, sop);
		return NULL;
	}

	INIT_LIST_HEAD(&sop->so_stateids);
	sop->so_client = clp;
	init_nfs4_replay(&sop->so_replay);
	atomic_set(&sop->so_count, 1);
	return sop;
}

static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
{
	lockdep_assert_held(&clp->cl_lock);

	list_add(&oo->oo_owner.so_strhash,
		 &clp->cl_ownerstr_hashtbl[strhashval]);
	list_add(&oo->oo_perclient, &clp->cl_openowners);
}

static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
{
	unhash_openowner_locked(openowner(so));
}

static void nfs4_free_openowner(struct nfs4_stateowner *so)
{
	struct nfs4_openowner *oo = openowner(so);

	kmem_cache_free(openowner_slab, oo);
}

static const struct nfs4_stateowner_operations openowner_ops = {
	.so_unhash = nfs4_unhash_openowner,
	.so_free = nfs4_free_openowner,
};

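/*
 * Search @fp's stateids for an OPEN stateid belonging to @open's
 * openowner. Returns the stateid with an extra reference, or NULL.
 * The caller must hold fp->fi_lock.
 */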
static struct nfs4_ol_stateid *
nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_ol_stateid *local, *ret = NULL;
	struct nfs4_openowner *oo = open->op_openowner;

	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
		/* ignore lock owners */
		if (local->st_stateowner->so_is_open_owner == 0)
			continue;
		if (local->st_stateowner != &oo->oo_owner)
			continue;
		if (local->st_stid.sc_type == NFS4_OPEN_STID) {
			ret = local;
			refcount_inc(&ret->st_stid.sc_count);
			break;
		}
	}
	return ret;
}

static __be32
nfsd4_verify_open_stid(struct nfs4_stid *s)
{
	__be32 ret = nfs_ok;

	switch (s->sc_type) {
	default:
		break;
	case 0:
	case NFS4_CLOSED_STID:
	case NFS4_CLOSED_DELEG_STID:
		ret = nfserr_bad_stateid;
		break;
	case NFS4_REVOKED_DELEG_STID:
		ret = nfserr_deleg_revoked;
	}
	return ret;
}

/* Lock the stateid st_mutex, and deal with races with CLOSE */
static __be32
nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
{
	__be32 ret;

	mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
	ret = nfsd4_verify_open_stid(&stp->st_stid);
	if (ret != nfs_ok)
		mutex_unlock(&stp->st_mutex);
	return ret;
}

static struct nfs4_ol_stateid *
nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_ol_stateid *stp;

	for (;;) {
		spin_lock(&fp->fi_lock);
		stp = nfsd4_find_existing_open(fp, open);
		spin_unlock(&fp->fi_lock);
		if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
			break;
		nfs4_put_stid(&stp->st_stid);
	}
	return stp;
}

static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
			   struct nfsd4_compound_state *cstate)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_openowner *oo, *ret;

	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
	if (!oo)
		return NULL;
	oo->oo_owner.so_ops = &openowner_ops;
	oo->oo_owner.so_is_open_owner = 1;
	oo->oo_owner.so_seqid = open->op_seqid;
	oo->oo_flags = 0;
	if (nfsd4_has_session(cstate))
		oo->oo_flags |= NFS4_OO_CONFIRMED;
	oo->oo_time = 0;
	oo->oo_last_closed_stid = NULL;
	INIT_LIST_HEAD(&oo->oo_close_lru);
	spin_lock(&clp->cl_lock);
	ret = find_openstateowner_str_locked(strhashval, open, clp);
	if (ret == NULL) {
		hash_openowner(oo, clp, strhashval);
		ret = oo;
	} else
		nfs4_free_stateowner(&oo->oo_owner);

	spin_unlock(&clp->cl_lock);
	return ret;
}

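/*
 * Initialize and hash the preallocated stateid in @open, unless a
 * racing OPEN has already installed one for this owner and file; in
 * that case lock and return the existing stateid instead.
 */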
static struct nfs4_ol_stateid *
init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_openowner *oo = open->op_openowner;
	struct nfs4_ol_stateid *retstp = NULL;
	struct nfs4_ol_stateid *stp;

	stp = open->op_stp;
	/* We are moving these outside of the spinlocks to avoid the warnings */
	mutex_init(&stp->st_mutex);
	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);

retry:
	spin_lock(&oo->oo_owner.so_client->cl_lock);
	spin_lock(&fp->fi_lock);

	retstp = nfsd4_find_existing_open(fp, open);
	if (retstp)
		goto out_unlock;

	open->op_stp = NULL;
	refcount_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_OPEN_STID;
	INIT_LIST_HEAD(&stp->st_locks);
	stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = 0;
	stp->st_openstp = NULL;
	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
	list_add(&stp->st_perfile, &fp->fi_stateids);

out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&oo->oo_owner.so_client->cl_lock);
	if (retstp) {
		/* Handle races with CLOSE */
		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
			nfs4_put_stid(&retstp->st_stid);
			goto retry;
		}
		/* To keep mutex tracking happy */
		mutex_unlock(&stp->st_mutex);
		stp = retstp;
	}
	return stp;
}

/*
 * In the 4.0 case we need to keep the owners around a little while to handle
 * CLOSE replay. We still do need to release any file access that is held by
 * them before returning however.
 */
static void
move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
{
	struct nfs4_ol_stateid *last;
	struct nfs4_openowner *oo = openowner(s->st_stateowner);
	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
						nfsd_net_id);

	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);

	/*
	 * We know that we hold one reference via nfsd4_close, and another
	 * "persistent" reference for the client. If the refcount is higher
	 * than 2, then there are still calls in progress that are using this
	 * stateid. We can't put the sc_file reference until they are finished.
	 * Wait for the refcount to drop to 2. Since it has been unhashed,
	 * there should be no danger of the refcount going back up again at
	 * this point.
	 */
	wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);

	release_all_access(s);
	if (s->st_stid.sc_file) {
		put_nfs4_file(s->st_stid.sc_file);
		s->st_stid.sc_file = NULL;
	}

	spin_lock(&nn->client_lock);
	last = oo->oo_last_closed_stid;
	oo->oo_last_closed_stid = s;
	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
	oo->oo_time = ktime_get_boottime_seconds();
	spin_unlock(&nn->client_lock);
	if (last)
		nfs4_put_stid(&last->st_stid);
}

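/*
 * Find the nfs4_file matching @fhp in the global rhltable. Entries
 * are hashed by inode, so also verify the filehandle before taking a
 * reference. Returns NULL if no live entry matches.
 */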
static noinline_for_stack struct nfs4_file *
nfsd4_file_hash_lookup(const struct svc_fh *fhp)
{
	struct inode *inode = d_inode(fhp->fh_dentry);
	struct rhlist_head *tmp, *list;
	struct nfs4_file *fi;

	rcu_read_lock();
	list = rhltable_lookup(&nfs4_file_rhltable, &inode,
			       nfs4_file_rhash_params);
	rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) {
		if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
			if (refcount_inc_not_zero(&fi->fi_ref)) {
				rcu_read_unlock();
				return fi;
			}
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * On hash insertion, identify entries with the same inode but
 * distinct filehandles. They will all be on the list returned
 * by rhltable_lookup().
 *
 * inode->i_lock prevents racing insertions from adding an entry
 * for the same inode/fhp pair twice.
 */
static noinline_for_stack struct nfs4_file *
nfsd4_file_hash_insert(struct nfs4_file *new, const struct svc_fh *fhp)
{
	struct inode *inode = d_inode(fhp->fh_dentry);
	struct rhlist_head *tmp, *list;
	struct nfs4_file *ret = NULL;
	bool alias_found = false;
	struct nfs4_file *fi;
	int err;

	rcu_read_lock();
	spin_lock(&inode->i_lock);

	list = rhltable_lookup(&nfs4_file_rhltable, &inode,
			       nfs4_file_rhash_params);
	rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) {
		if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
			if (refcount_inc_not_zero(&fi->fi_ref))
				ret = fi;
		} else
			fi->fi_aliased = alias_found = true;
	}
	if (ret)
		goto out_unlock;

	nfsd4_file_init(fhp, new);
	err = rhltable_insert(&nfs4_file_rhltable, &new->fi_rlist,
			      nfs4_file_rhash_params);
	if (err)
		goto out_unlock;

	new->fi_aliased = alias_found;
	ret = new;

out_unlock:
	spin_unlock(&inode->i_lock);
	rcu_read_unlock();
	return ret;
}

static noinline_for_stack void nfsd4_file_hash_remove(struct nfs4_file *fi)
{
	rhltable_remove(&nfs4_file_rhltable, &fi->fi_rlist,
			nfs4_file_rhash_params);
}

/*
 * Called to check deny when READ with all zero stateid or
 * WRITE with all zero or all one stateid
 */
static __be32
nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
{
	struct nfs4_file *fp;
	__be32 ret = nfs_ok;

	fp = nfsd4_file_hash_lookup(current_fh);
	if (!fp)
		return ret;

	/* Check for conflicting share reservations */
	spin_lock(&fp->fi_lock);
	if (fp->fi_share_deny & deny_type)
		ret = nfserr_locked;
	spin_unlock(&fp->fi_lock);
	put_nfs4_file(fp);
	return ret;
}

static bool nfsd4_deleg_present(const struct inode *inode)
{
	struct file_lock_context *ctx = locks_inode_context(inode);

	return ctx && !list_empty_careful(&ctx->flc_lease);
}

/**
 * nfsd_wait_for_delegreturn - wait for delegations to be returned
 * @rqstp: the RPC transaction being executed
 * @inode: in-core inode of the file being waited for
 *
 * The timeout prevents deadlock if all nfsd threads happen to be
 * tied up waiting for returning delegations.
 *
 * Return values:
 *   %true: delegation was returned
 *   %false: timed out waiting for delegreturn
 */
bool nfsd_wait_for_delegreturn(struct svc_rqst *rqstp, struct inode *inode)
{
	long __maybe_unused timeo;

	timeo = wait_var_event_timeout(inode, !nfsd4_deleg_present(inode),
				       NFSD_DELEGRETURN_TIMEOUT);
	trace_nfsd_delegret_wakeup(rqstp, inode, timeo);
	return timeo > 0;
}

static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);
	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
					  nfsd_net_id);

	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);

	/*
	 * We can't do this in nfsd_break_deleg_cb because it is
	 * already holding inode->i_lock.
	 *
	 * If the dl_time != 0, then we know that it has already been
	 * queued for a lease break. Don't queue it again.
	 */
	spin_lock(&state_lock);
	if (delegation_hashed(dp) && dp->dl_time == 0) {
		dp->dl_time = ktime_get_boottime_seconds();
		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
	}
	spin_unlock(&state_lock);
}

static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
		struct rpc_task *task)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);

	trace_nfsd_cb_recall_done(&dp->dl_stid.sc_stateid, task);

	if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID ||
	    dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID)
		return 1;

	switch (task->tk_status) {
	case 0:
		return 1;
	case -NFS4ERR_DELAY:
		rpc_delay(task, 2 * HZ);
		return 0;
	case -EBADHANDLE:
	case -NFS4ERR_BAD_STATEID:
		/*
		 * Race: client probably got cb_recall before open reply
		 * granting delegation.
		 */
		if (dp->dl_retries--) {
			rpc_delay(task, 2 * HZ);
			return 0;
		}
		fallthrough;
	default:
		return 1;
	}
}

static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);

	nfs4_put_stid(&dp->dl_stid);
}

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
	.prepare = nfsd4_cb_recall_prepare,
	.done = nfsd4_cb_recall_done,
	.release = nfsd4_cb_recall_release,
};

static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
	/*
	 * We're assuming the state code never drops its reference
	 * without first removing the lease. Since we're in this lease
	 * callback (and since the lease code is serialized by the
	 * flc_lock) we know the server hasn't removed the lease yet, and
	 * we know it's safe to take a reference.
	 */
	refcount_inc(&dp->dl_stid.sc_count);
	WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall));
}

/* Called from break_lease() with flc_lock held. */
static bool
nfsd_break_deleg_cb(struct file_lock *fl)
{
	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
	struct nfs4_file *fp = dp->dl_stid.sc_file;
	struct nfs4_client *clp = dp->dl_stid.sc_client;
	struct nfsd_net *nn;

	trace_nfsd_cb_recall(&dp->dl_stid);

	dp->dl_recalled = true;
	atomic_inc(&clp->cl_delegs_in_recall);
	if (try_to_expire_client(clp)) {
		nn = net_generic(clp->net, nfsd_net_id);
		mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
	}

	/*
	 * We don't want the locks code to timeout the lease for us;
	 * we'll remove it ourselves if a delegation isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;

	spin_lock(&fp->fi_lock);
	fp->fi_had_conflict = true;
	nfsd_break_one_deleg(dp);
	spin_unlock(&fp->fi_lock);
	return false;
}

/**
 * nfsd_breaker_owns_lease - Check if lease conflict was resolved
 * @fl: Lock state to check
 *
 * Return values:
 *   %true: Lease conflict was resolved
 *   %false: Lease conflict was not resolved.
 */
static bool nfsd_breaker_owns_lease(struct file_lock *fl)
{
	struct nfs4_delegation *dl = fl->fl_owner;
	struct svc_rqst *rqst;
	struct nfs4_client *clp;

	if (!i_am_nfsd())
		return false;
	rqst = kthread_data(current);
	/* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
	if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
		return false;
	clp = *(rqst->rq_lease_breaker);
	return dl->dl_stid.sc_client == clp;
}

static int
nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
		     struct list_head *dispose)
{
	struct nfs4_delegation *dp = (struct nfs4_delegation *)onlist->fl_owner;
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	if (arg & F_UNLCK) {
		if (dp->dl_recalled)
			atomic_dec(&clp->cl_delegs_in_recall);
		return lease_modify(onlist, arg, dispose);
	} else
		return -EAGAIN;
}

static const struct lock_manager_operations nfsd_lease_mng_ops = {
	.lm_breaker_owns_lease = nfsd_breaker_owns_lease,
	.lm_break = nfsd_break_deleg_cb,
	.lm_change = nfsd_change_deleg_cb,
};

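/*
 * NFSv4.0 seqid checking: a seqid exactly one less than the owner's
 * current value marks a retransmission to be answered from the replay
 * cache; sessions make this check unnecessary in v4.1+.
 */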
static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
{
	if (nfsd4_has_session(cstate))
		return nfs_ok;
	if (seqid == so->so_seqid - 1)
		return nfserr_replay_me;
	if (seqid == so->so_seqid)
		return nfs_ok;
	return nfserr_bad_seqid;
}

static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions,
						struct nfsd_net *nn)
{
	struct nfs4_client *found;

	spin_lock(&nn->client_lock);
	found = find_confirmed_client(clid, sessions, nn);
	if (found)
		atomic_inc(&found->cl_rpc_users);
	spin_unlock(&nn->client_lock);
	return found;
}

static __be32 set_client(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	if (cstate->clp) {
		if (!same_clid(&cstate->clp->cl_clientid, clid))
			return nfserr_stale_clientid;
		return nfs_ok;
	}
	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;
	/*
	 * We're in the 4.0 case (otherwise the SEQUENCE op would have
	 * set cstate->clp), so session = false:
	 */
	cstate->clp = lookup_clientid(clid, false, nn);
	if (!cstate->clp)
		return nfserr_expired;
	return nfs_ok;
}

__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
		    struct nfsd4_open *open, struct nfsd_net *nn)
{
	clientid_t *clientid = &open->op_clientid;
	struct nfs4_client *clp = NULL;
	unsigned int strhashval;
	struct nfs4_openowner *oo = NULL;
	__be32 status;

	/*
	 * In case we need it later, after we've already created the
	 * file and don't want to risk a further failure:
	 */
	open->op_file = nfsd4_alloc_file();
	if (open->op_file == NULL)
		return nfserr_jukebox;

	status = set_client(clientid, cstate, nn);
	if (status)
		return status;
	clp = cstate->clp;

	strhashval = ownerstr_hashval(&open->op_owner);
	oo = find_openstateowner_str(strhashval, open, clp);
	open->op_openowner = oo;
	if (!oo)
		goto new_owner;
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		/* Replace unconfirmed owners without checking for replay. */
		release_openowner(oo);
		open->op_openowner = NULL;
		goto new_owner;
	}
	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
	if (status)
		return status;
	goto alloc_stateid;
new_owner:
	oo = alloc_init_open_stateowner(strhashval, open, cstate);
	if (oo == NULL)
		return nfserr_jukebox;
	open->op_openowner = oo;
alloc_stateid:
	open->op_stp = nfs4_alloc_open_stateid(clp);
	if (!open->op_stp)
		return nfserr_jukebox;

	if (nfsd4_has_session(cstate) &&
	    (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
		open->op_odstate = alloc_clnt_odstate(clp);
		if (!open->op_odstate)
			return nfserr_jukebox;
	}

	return nfs_ok;
}

static inline __be32
nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
{
	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
		return nfserr_openmode;
	else
		return nfs_ok;
}

static int share_access_to_flags(u32 share_access)
{
	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
}

static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
{
	struct nfs4_stid *ret;

	ret = find_stateid_by_type(cl, s,
				NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
	if (!ret)
		return NULL;
	return delegstateid(ret);
}

static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
{
	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
}

static __be32
nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
		struct nfs4_delegation **dp)
{
	int flags;
	__be32 status = nfserr_bad_stateid;
	struct nfs4_delegation *deleg;

	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
	if (deleg == NULL)
		goto out;
	if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
		nfs4_put_stid(&deleg->dl_stid);
		if (cl->cl_minorversion)
			status = nfserr_deleg_revoked;
		goto out;
	}
	flags = share_access_to_flags(open->op_share_access);
	status = nfs4_check_delegmode(deleg, flags);
	if (status) {
		nfs4_put_stid(&deleg->dl_stid);
		goto out;
	}
	*dp = deleg;
out:
	if (!nfsd4_is_deleg_cur(open))
		return nfs_ok;
	if (status)
		return status;
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	return nfs_ok;
}

static inline int nfs4_access_to_access(u32 nfs4_access)
{
	int flags = 0;

	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
		flags |= NFSD_MAY_READ;
	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
		flags |= NFSD_MAY_WRITE;
	return flags;
}

static inline __be32
nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
		struct nfsd4_open *open)
{
	struct iattr iattr = {
		.ia_valid = ATTR_SIZE,
		.ia_size = 0,
	};
	struct nfsd_attrs attrs = {
		.na_iattr = &iattr,
	};
	if (!open->op_truncate)
		return 0;
	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
		return nfserr_inval;
	return nfsd_setattr(rqstp, fh, &attrs, 0, (time64_t)0);
}

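/*
 * Record the requested share access and deny modes in the stateid and
 * the nfs4_file, opening a struct nfsd_file for the access mode if one
 * isn't cached yet, then break any conflicting lease and perform a
 * requested truncation. On failure, roll the stateid's access and deny
 * bitmaps back to their previous values.
 */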
static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
		struct nfsd4_open *open, bool new_stp)
{
	struct nfsd_file *nf = NULL;
	__be32 status;
	int oflag = nfs4_access_to_omode(open->op_share_access);
	int access = nfs4_access_to_access(open->op_share_access);
	unsigned char old_access_bmap, old_deny_bmap;

	spin_lock(&fp->fi_lock);

	/*
	 * Are we trying to set a deny mode that would conflict with
	 * current access?
	 */
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status != nfs_ok) {
		if (status != nfserr_share_denied) {
			spin_unlock(&fp->fi_lock);
			goto out;
		}
		if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
				stp, open->op_share_deny, false))
			status = nfserr_jukebox;
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* set access to the file */
	status = nfs4_file_get_access(fp, open->op_share_access);
	if (status != nfs_ok) {
		if (status != nfserr_share_denied) {
			spin_unlock(&fp->fi_lock);
			goto out;
		}
		if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
				stp, open->op_share_access, true))
			status = nfserr_jukebox;
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* Set access bits in stateid */
	old_access_bmap = stp->st_access_bmap;
	set_access(open->op_share_access, stp);

	/* Set new deny mask */
	old_deny_bmap = stp->st_deny_bmap;
	set_deny(open->op_share_deny, stp);
	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);

	if (!fp->fi_fds[oflag]) {
		spin_unlock(&fp->fi_lock);

		status = nfsd_file_acquire_opened(rqstp, cur_fh, access,
						  open->op_filp, &nf);
		if (status != nfs_ok)
			goto out_put_access;

		spin_lock(&fp->fi_lock);
		if (!fp->fi_fds[oflag]) {
			fp->fi_fds[oflag] = nf;
			nf = NULL;
		}
	}
	spin_unlock(&fp->fi_lock);
	if (nf)
		nfsd_file_put(nf);

	status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
								access));
	if (status)
		goto out_put_access;

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status)
		goto out_put_access;
out:
	return status;
out_put_access:
	stp->st_access_bmap = old_access_bmap;
	nfs4_file_put_access(fp, open->op_share_access);
	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
	goto out;
}

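/*
 * OPEN upgrade: the owner already holds a stateid for this file, so
 * only new access or deny bits need to be acquired and merged in.
 */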
static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp,
		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
		struct nfsd4_open *open)
{
	__be32 status;
	unsigned char old_deny_bmap = stp->st_deny_bmap;

	if (!test_access(open->op_share_access, stp))
		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open, false);

	/* test and set deny mode */
	spin_lock(&fp->fi_lock);
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	switch (status) {
	case nfs_ok:
		set_deny(open->op_share_deny, stp);
		fp->fi_share_deny |=
			(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
		break;
	case nfserr_share_denied:
		if (nfs4_resolve_deny_conflicts_locked(fp, false,
				stp, open->op_share_deny, false))
			status = nfserr_jukebox;
		break;
	}
	spin_unlock(&fp->fi_lock);

	if (status != nfs_ok)
		return status;

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status != nfs_ok)
		reset_union_bmap_deny(old_deny_bmap, stp);
	return status;
}

/* Should we give out recallable state?: */
static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
{
	if (clp->cl_cb_state == NFSD4_CB_UP)
		return true;
	/*
	 * In the sessions case, since we don't have to establish a
	 * separate connection for callbacks, we assume it's OK
	 * until we hear otherwise:
	 */
	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
}

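/*
 * A delegation is handed out as a VFS lease (FL_DELEG) on the backing
 * file, with the delegation itself as lock owner so that the lease
 * callbacks above can map a break back to the delegation.
 */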
static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
						int flag)
{
	struct file_lock *fl;

	fl = locks_alloc_lock();
	if (!fl)
		return NULL;
	fl->fl_lmops = &nfsd_lease_mng_ops;
	fl->fl_flags = FL_DELEG;
	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
	fl->fl_end = OFFSET_MAX;
	fl->fl_owner = (fl_owner_t)dp;
	fl->fl_pid = current->tgid;
	fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
	return fl;
}

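/*
 * Refuse to delegate if the file has local writers, or NFSv4 opens
 * from other clients that permit writes, that could conflict with a
 * read delegation.
 */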
static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
					 struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *st;
	struct file *f = fp->fi_deleg_file->nf_file;
	struct inode *ino = locks_inode(f);
	int writes;

	writes = atomic_read(&ino->i_writecount);
	if (!writes)
		return 0;
	/*
	 * There could be multiple filehandles (hence multiple
	 * nfs4_files) referencing this file, but that's not too
	 * common; let's just give up in that case rather than
	 * trying to go look up all the clients using that other
	 * nfs4_file as well:
	 */
	if (fp->fi_aliased)
		return -EAGAIN;
	/*
	 * If there's a close in progress, make sure that we see it
	 * clear any fi_fds[] entries before we see it decrement
	 * i_writecount:
	 */
	smp_mb__after_atomic();

	if (fp->fi_fds[O_WRONLY])
		writes--;
	if (fp->fi_fds[O_RDWR])
		writes--;
	if (writes > 0)
		return -EAGAIN; /* There may be non-NFSv4 writers */
	/*
	 * It's possible there are non-NFSv4 write opens in progress,
	 * but if they haven't incremented i_writecount yet then they
	 * also haven't called break lease yet; so, they'll break this
	 * lease soon enough. So, all that's left to check for is NFSv4
	 * opens:
	 */
	spin_lock(&fp->fi_lock);
	list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
		if (st->st_openstp == NULL /* it's an open */ &&
		    access_permit_write(st) &&
		    st->st_stid.sc_client != clp) {
			spin_unlock(&fp->fi_lock);
			return -EAGAIN;
		}
	}
	spin_unlock(&fp->fi_lock);
	/*
	 * There's a small chance that we could be racing with another
	 * NFSv4 open. However, any open that hasn't added itself to
	 * the fi_stateids list also hasn't called break_lease yet; so,
	 * they'll break this lease soon enough.
	 */
	return 0;
}

/*
 * It's possible that between opening the dentry and setting the delegation,
 * that it has been renamed or unlinked. Redo the lookup to verify that this
 * hasn't happened.
 */
static int
nfsd4_verify_deleg_dentry(struct nfsd4_open *open, struct nfs4_file *fp,
			  struct svc_fh *parent)
{
	struct svc_export *exp;
	struct dentry *child;
	__be32 err;

	err = nfsd_lookup_dentry(open->op_rqstp, parent,
				 open->op_fname, open->op_fnamelen,
				 &exp, &child);

	if (err)
		return -EAGAIN;

	exp_put(exp);
	dput(child);
	if (child != file_dentry(fp->fi_deleg_file->nf_file))
		return -EAGAIN;

	return 0;
}

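/*
 * Set up a read delegation: pin the file's readable nfsd_file, install
 * the lease, re-verify the dentry for CLAIM_NULL opens, and hash the
 * delegation, rechecking for conflicting opens at each stage.
 */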
static struct nfs4_delegation *
nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
		    struct svc_fh *parent)
{
	int status = 0;
	struct nfs4_client *clp = stp->st_stid.sc_client;
	struct nfs4_file *fp = stp->st_stid.sc_file;
	struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate;
	struct nfs4_delegation *dp;
	struct nfsd_file *nf;
	struct file_lock *fl;

	/*
	 * The fi_had_conflict and nfs_get_existing_delegation checks
	 * here are just optimizations; we'll need to recheck them at
	 * the end:
	 */
	if (fp->fi_had_conflict)
		return ERR_PTR(-EAGAIN);

	nf = find_readable_file(fp);
	if (!nf) {
		/*
		 * We probably could attempt another open and get a read
		 * delegation, but for now, don't bother until the
		 * client actually sends us one.
		 */
		return ERR_PTR(-EAGAIN);
	}
	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	if (nfs4_delegation_exists(clp, fp))
		status = -EAGAIN;
	else if (!fp->fi_deleg_file) {
		fp->fi_deleg_file = nf;
		/* increment early to prevent fi_deleg_file from being
		 * cleared */
		fp->fi_delegees = 1;
		nf = NULL;
	} else
		fp->fi_delegees++;
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
	if (nf)
		nfsd_file_put(nf);
	if (status)
		return ERR_PTR(status);

	status = -ENOMEM;
	dp = alloc_init_deleg(clp, fp, odstate);
	if (!dp)
		goto out_delegees;

	fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
	if (!fl)
		goto out_clnt_odstate;

	status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
	if (fl)
		locks_free_lock(fl);
	if (status)
		goto out_clnt_odstate;

	if (parent) {
		status = nfsd4_verify_deleg_dentry(open, fp, parent);
		if (status)
			goto out_unlock;
	}

	status = nfsd4_check_conflicting_opens(clp, fp);
	if (status)
		goto out_unlock;

	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	if (fp->fi_had_conflict)
		status = -EAGAIN;
	else
		status = hash_delegation_locked(dp, fp);
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);

	if (status)
		goto out_unlock;

	return dp;
out_unlock:
	vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
out_clnt_odstate:
	put_clnt_odstate(dp->dl_clnt_odstate);
	nfs4_put_stid(&dp->dl_stid);
out_delegees:
	put_deleg_file(fp);
	return ERR_PTR(status);
}

static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
{
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
	if (status == -EAGAIN)
		open->op_why_no_deleg = WND4_CONTENTION;
	else {
		open->op_why_no_deleg = WND4_RESOURCE;
		switch (open->op_deleg_want) {
		case NFS4_SHARE_WANT_READ_DELEG:
		case NFS4_SHARE_WANT_WRITE_DELEG:
		case NFS4_SHARE_WANT_ANY_DELEG:
			break;
		case NFS4_SHARE_WANT_CANCEL:
			open->op_why_no_deleg = WND4_CANCELLED;
			break;
		case NFS4_SHARE_WANT_NO_DELEG:
			WARN_ON_ONCE(1);
		}
	}
}

/*
 * Attempt to hand out a delegation.
 *
 * Note we don't support write delegations, and won't until the vfs has
 * proper support for them.
 */
static void
nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
		     struct svc_fh *currentfh)
{
	struct nfs4_delegation *dp;
	struct nfs4_openowner *oo = openowner(stp->st_stateowner);
	struct nfs4_client *clp = stp->st_stid.sc_client;
	struct svc_fh *parent = NULL;
	int cb_up;
	int status = 0;

	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
	open->op_recall = 0;
	switch (open->op_claim_type) {
	case NFS4_OPEN_CLAIM_PREVIOUS:
		if (!cb_up)
			open->op_recall = 1;
		if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
			goto out_no_deleg;
		break;
	case NFS4_OPEN_CLAIM_NULL:
		parent = currentfh;
		fallthrough;
	case NFS4_OPEN_CLAIM_FH:
		/*
		 * Let's not give out any delegations till everyone's
		 * had the chance to reclaim theirs, *and* until
		 * NLM locks have all been reclaimed:
		 */
		if (locks_in_grace(clp->net))
			goto out_no_deleg;
		if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
			goto out_no_deleg;
		break;
	default:
		goto out_no_deleg;
	}
	dp = nfs4_set_delegation(open, stp, parent);
	if (IS_ERR(dp))
		goto out_no_deleg;

	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));

	trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
	nfs4_put_stid(&dp->dl_stid);
	return;
out_no_deleg:
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
		open->op_recall = 1;
	}

	/* 4.1 client asking for a delegation? */
	if (open->op_deleg_want)
		nfsd4_open_deleg_none_ext(open, status);
	return;
}

static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
					struct nfs4_delegation *dp)
{
	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
	}
	/* Otherwise the client must be confused wanting a delegation
	 * it already has, therefore we don't return
	 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
	 */
}

/**
 * nfsd4_process_open2 - finish open processing
 * @rqstp: the RPC transaction being executed
 * @current_fh: NFSv4 COMPOUND's current filehandle
 * @open: OPEN arguments
 *
 * If successful, (1) truncate the file if open->op_truncate was
 * set, (2) set open->op_stateid, (3) set open->op_delegation.
 *
 * Returns %nfs_ok on success; otherwise an nfs4stat value in
 * network byte order is returned.
 */
__be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
	struct nfs4_file *fp = NULL;
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_delegation *dp = NULL;
	__be32 status;
	bool new_stp = false;

	/*
	 * Lookup file; if found, lookup stateid and check open request,
	 * and check for delegations in the process of being recalled.
	 * If not found, create the nfs4_file struct
	 */
	fp = nfsd4_file_hash_insert(open->op_file, current_fh);
	if (unlikely(!fp))
		return nfserr_jukebox;
	if (fp != open->op_file) {
		status = nfs4_check_deleg(cl, open, &dp);
		if (status)
			goto out;
		stp = nfsd4_find_and_lock_existing_open(fp, open);
	} else {
		open->op_file = NULL;
		status = nfserr_bad_stateid;
		if (nfsd4_is_deleg_cur(open))
			goto out;
	}

	if (!stp) {
		stp = init_open_stateid(fp, open);
		if (!open->op_stp)
			new_stp = true;
	}

	/*
	 * OPEN the file, or upgrade an existing OPEN.
	 * If truncate fails, the OPEN fails.
	 *
	 * stp is already locked.
	 */
	if (!new_stp) {
		/* Stateid was found, this is an OPEN upgrade */
		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
		if (status) {
			mutex_unlock(&stp->st_mutex);
			goto out;
		}
	} else {
		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open, true);
		if (status) {
			stp->st_stid.sc_type = NFS4_CLOSED_STID;
			release_open_stateid(stp);
			mutex_unlock(&stp->st_mutex);
			goto out;
		}

		stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
							open->op_odstate);
		if (stp->st_clnt_odstate == open->op_odstate)
			open->op_odstate = NULL;
	}

	nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
	mutex_unlock(&stp->st_mutex);

	if (nfsd4_has_session(&resp->cstate)) {
		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
			open->op_why_no_deleg = WND4_NOT_WANTED;
			goto nodeleg;
		}
	}

	/*
	 * Attempt to hand out a delegation. No error return, because the
	 * OPEN succeeds even if we fail.
	 */
	nfs4_open_delegation(open, stp, &resp->cstate.current_fh);
nodeleg:
	status = nfs_ok;
	trace_nfsd_open(&stp->st_stid.sc_stateid);
out:
	/* 4.1 client trying to upgrade/downgrade delegation? */
	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
	    open->op_deleg_want)
		nfsd4_deleg_xgrade_none_ext(open, dp);

	if (fp)
		put_nfs4_file(fp);
	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	/*
	 * To finish the open response, we just need to set the rflags.
	 */
	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
	if (nfsd4_has_session(&resp->cstate))
		open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
	else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;

	if (dp)
		nfs4_put_stid(&dp->dl_stid);
	if (stp)
		nfs4_put_stid(&stp->st_stid);

	return status;
}

5766void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
5767 struct nfsd4_open *open)
5768{
5769 if (open->op_openowner) {
5770 struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
5771
5772 nfsd4_cstate_assign_replay(cstate, so);
5773 nfs4_put_stateowner(so);
5774 }
5775 if (open->op_file)
5776 kmem_cache_free(file_slab, open->op_file);
5777 if (open->op_stp)
5778 nfs4_put_stid(&open->op_stp->st_stid);
5779 if (open->op_odstate)
5780 kmem_cache_free(odstate_slab, open->op_odstate);
5781}
5782
5783__be32
5784nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5785 union nfsd4_op_u *u)
5786{
5787 clientid_t *clid = &u->renew;
5788 struct nfs4_client *clp;
5789 __be32 status;
5790 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5791
5792 trace_nfsd_clid_renew(clid);
5793 status = set_client(clid, cstate, nn);
5794 if (status)
5795 return status;
5796 clp = cstate->clp;
5797 if (!list_empty(&clp->cl_delegations)
5798 && clp->cl_cb_state != NFSD4_CB_UP)
5799 return nfserr_cb_path_down;
5800 return nfs_ok;
5801}
5802
5803void
5804nfsd4_end_grace(struct nfsd_net *nn)
5805{
5806 /* do nothing if grace period already ended */
5807 if (nn->grace_ended)
5808 return;
5809
5810 trace_nfsd_grace_complete(nn);
5811 nn->grace_ended = true;
	/*
	 * If the server goes down again right now, an NFSv4
	 * client will still be allowed to reclaim after it comes back up,
	 * even if it hasn't yet had a chance to reclaim state this time.
	 */
5818 nfsd4_record_grace_done(nn);
5819 /*
5820 * At this point, NFSv4 clients can still reclaim. But if the
5821 * server crashes, any that have not yet reclaimed will be out
5822 * of luck on the next boot.
5823 *
5824 * (NFSv4.1+ clients are considered to have reclaimed once they
5825 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
5826 * have reclaimed after their first OPEN.)
5827 */
5828 locks_end_grace(&nn->nfsd4_manager);
5829 /*
5830 * At this point, and once lockd and/or any other containers
5831 * exit their grace period, further reclaims will fail and
5832 * regular locking can resume.
5833 */
5834}
5835
5836/*
5837 * If we've waited a lease period but there are still clients trying to
5838 * reclaim, wait a little longer to give them a chance to finish.
5839 */
5840static bool clients_still_reclaiming(struct nfsd_net *nn)
5841{
5842 time64_t double_grace_period_end = nn->boot_time +
5843 2 * nn->nfsd4_lease;
5844
5845 if (nn->track_reclaim_completes &&
5846 atomic_read(&nn->nr_reclaim_complete) ==
5847 nn->reclaim_str_hashtbl_size)
5848 return false;
5849 if (!nn->somebody_reclaimed)
5850 return false;
5851 nn->somebody_reclaimed = false;
5852 /*
5853 * If we've given them *two* lease times to reclaim, and they're
5854 * still not done, give up:
5855 */
5856 if (ktime_get_boottime_seconds() > double_grace_period_end)
5857 return false;
5858 return true;
5859}
5860
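/*
 * Bookkeeping for one laundromat pass: @cutoff is the time before
 * which state counts as expired, and @new_timeo tracks the shortest
 * remaining lifetime seen, used to schedule the next pass.
 */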
5861struct laundry_time {
5862 time64_t cutoff;
5863 time64_t new_timeo;
5864};
5865
5866static bool state_expired(struct laundry_time *lt, time64_t last_refresh)
5867{
5868 time64_t time_remaining;
5869
5870 if (last_refresh < lt->cutoff)
5871 return true;
5872 time_remaining = last_refresh - lt->cutoff;
5873 lt->new_timeo = min(lt->new_timeo, time_remaining);
5874 return false;
5875}
5876
5877#ifdef CONFIG_NFSD_V4_2_INTER_SSC
5878void nfsd4_ssc_init_umount_work(struct nfsd_net *nn)
5879{
5880 spin_lock_init(&nn->nfsd_ssc_lock);
5881 INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list);
5882 init_waitqueue_head(&nn->nfsd_ssc_waitq);
5883}
5884EXPORT_SYMBOL_GPL(nfsd4_ssc_init_umount_work);
5885
5886/*
 * This is called when nfsd is being shut down, after all inter_ssc
 * cleanup is done, to destroy the ssc delayed unmount list.
5889 */
5890static void nfsd4_ssc_shutdown_umount(struct nfsd_net *nn)
5891{
5892 struct nfsd4_ssc_umount_item *ni = NULL;
5893 struct nfsd4_ssc_umount_item *tmp;
5894
5895 spin_lock(&nn->nfsd_ssc_lock);
5896 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
5897 list_del(&ni->nsui_list);
5898 spin_unlock(&nn->nfsd_ssc_lock);
5899 mntput(ni->nsui_vfsmount);
5900 kfree(ni);
5901 spin_lock(&nn->nfsd_ssc_lock);
5902 }
5903 spin_unlock(&nn->nfsd_ssc_lock);
5904}
5905
5906static void nfsd4_ssc_expire_umount(struct nfsd_net *nn)
5907{
5908 bool do_wakeup = false;
5909 struct nfsd4_ssc_umount_item *ni = NULL;
5910 struct nfsd4_ssc_umount_item *tmp;
5911
5912 spin_lock(&nn->nfsd_ssc_lock);
5913 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
5914 if (time_after(jiffies, ni->nsui_expire)) {
5915 if (refcount_read(&ni->nsui_refcnt) > 1)
5916 continue;
5917
			/* mark the entry as being unmounted */
5919 ni->nsui_busy = true;
5920 spin_unlock(&nn->nfsd_ssc_lock);
5921 mntput(ni->nsui_vfsmount);
5922 spin_lock(&nn->nfsd_ssc_lock);
5923
			/* waiters need to restart from the beginning of the list */
5925 list_del(&ni->nsui_list);
5926 kfree(ni);
5927
			/* wake up ssc_connect waiters */
5929 do_wakeup = true;
5930 continue;
5931 }
5932 break;
5933 }
5934 if (do_wakeup)
5935 wake_up_all(&nn->nfsd_ssc_waitq);
5936 spin_unlock(&nn->nfsd_ssc_lock);
5937}
5938#endif
5939
5940/* Check if any lock belonging to this lockowner has any blockers */
5941static bool
5942nfs4_lockowner_has_blockers(struct nfs4_lockowner *lo)
5943{
5944 struct file_lock_context *ctx;
5945 struct nfs4_ol_stateid *stp;
5946 struct nfs4_file *nf;
5947
5948 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
5949 nf = stp->st_stid.sc_file;
5950 ctx = locks_inode_context(nf->fi_inode);
5951 if (!ctx)
5952 continue;
5953 if (locks_owner_has_blockers(ctx, lo))
5954 return true;
5955 }
5956 return false;
5957}
5958
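/*
 * Return true if expiring this client would unblock others: it has
 * delegations being recalled, or it holds a lock that another lock
 * request is waiting on.
 */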
5959static bool
5960nfs4_anylock_blockers(struct nfs4_client *clp)
5961{
5962 int i;
5963 struct nfs4_stateowner *so;
5964 struct nfs4_lockowner *lo;
5965
5966 if (atomic_read(&clp->cl_delegs_in_recall))
5967 return true;
5968 spin_lock(&clp->cl_lock);
5969 for (i = 0; i < OWNER_HASH_SIZE; i++) {
5970 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[i],
5971 so_strhash) {
5972 if (so->so_is_open_owner)
5973 continue;
5974 lo = lockowner(so);
5975 if (nfs4_lockowner_has_blockers(lo)) {
5976 spin_unlock(&clp->cl_lock);
5977 return true;
5978 }
5979 }
5980 }
5981 spin_unlock(&clp->cl_lock);
5982 return false;
5983}
5984
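/*
 * Scan the client LRU for clients whose lease has expired and add
 * them to @reaplist. An expired client with no state is always
 * reaped; one with state is kept as a courtesy client unless its
 * locks block other clients, or the server is over
 * nn->nfs4_max_clients (in which case at most
 * NFSD_CLIENT_MAX_TRIM_PER_RUN such clients are reaped per pass).
 */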
5985static void
5986nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
5987 struct laundry_time *lt)
5988{
5989 unsigned int maxreap, reapcnt = 0;
5990 struct list_head *pos, *next;
5991 struct nfs4_client *clp;
5992
5993 maxreap = (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) ?
5994 NFSD_CLIENT_MAX_TRIM_PER_RUN : 0;
5995 INIT_LIST_HEAD(reaplist);
5996 spin_lock(&nn->client_lock);
5997 list_for_each_safe(pos, next, &nn->client_lru) {
5998 clp = list_entry(pos, struct nfs4_client, cl_lru);
5999 if (clp->cl_state == NFSD4_EXPIRABLE)
6000 goto exp_client;
6001 if (!state_expired(lt, clp->cl_time))
6002 break;
6003 if (!atomic_read(&clp->cl_rpc_users)) {
6004 if (clp->cl_state == NFSD4_ACTIVE)
6005 atomic_inc(&nn->nfsd_courtesy_clients);
6006 clp->cl_state = NFSD4_COURTESY;
6007 }
6008 if (!client_has_state(clp))
6009 goto exp_client;
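		/*
		 * A client whose locks block other clients is always
		 * expired; otherwise at most maxreap courtesy clients
		 * are expired per pass.
		 */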
6010 if (!nfs4_anylock_blockers(clp))
6011 if (reapcnt >= maxreap)
6012 continue;
6013exp_client:
6014 if (!mark_client_expired_locked(clp)) {
6015 list_add(&clp->cl_lru, reaplist);
6016 reapcnt++;
6017 }
6018 }
6019 spin_unlock(&nn->client_lock);
6020}
6021
6022static void
6023nfs4_get_courtesy_client_reaplist(struct nfsd_net *nn,
6024 struct list_head *reaplist)
6025{
6026 unsigned int maxreap = 0, reapcnt = 0;
6027 struct list_head *pos, *next;
6028 struct nfs4_client *clp;
6029
6030 maxreap = NFSD_CLIENT_MAX_TRIM_PER_RUN;
6031 INIT_LIST_HEAD(reaplist);
6032
6033 spin_lock(&nn->client_lock);
6034 list_for_each_safe(pos, next, &nn->client_lru) {
6035 clp = list_entry(pos, struct nfs4_client, cl_lru);
6036 if (clp->cl_state == NFSD4_ACTIVE)
6037 break;
6038 if (reapcnt >= maxreap)
6039 break;
6040 if (!mark_client_expired_locked(clp)) {
6041 list_add(&clp->cl_lru, reaplist);
6042 reapcnt++;
6043 }
6044 }
6045 spin_unlock(&nn->client_lock);
6046}
6047
6048static void
6049nfs4_process_client_reaplist(struct list_head *reaplist)
6050{
6051 struct list_head *pos, *next;
6052 struct nfs4_client *clp;
6053
6054 list_for_each_safe(pos, next, reaplist) {
6055 clp = list_entry(pos, struct nfs4_client, cl_lru);
6056 trace_nfsd_clid_purged(&clp->cl_clientid);
6057 list_del_init(&clp->cl_lru);
6058 expire_client(clp);
6059 }
6060}
6061
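/*
 * One laundromat pass: end the grace period once reclaims have
 * finished, then reap expired copy-notify stateids, clients,
 * delegations, last-closed open stateids, and timed-out blocked-lock
 * notifications. Returns the number of seconds until the next pass
 * is due.
 */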
6062static time64_t
6063nfs4_laundromat(struct nfsd_net *nn)
6064{
6065 struct nfs4_openowner *oo;
6066 struct nfs4_delegation *dp;
6067 struct nfs4_ol_stateid *stp;
6068 struct nfsd4_blocked_lock *nbl;
6069 struct list_head *pos, *next, reaplist;
6070 struct laundry_time lt = {
6071 .cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease,
6072 .new_timeo = nn->nfsd4_lease
6073 };
6074 struct nfs4_cpntf_state *cps;
6075 copy_stateid_t *cps_t;
6076 int i;
6077
6078 if (clients_still_reclaiming(nn)) {
6079 lt.new_timeo = 0;
6080 goto out;
6081 }
6082 nfsd4_end_grace(nn);
6083
6084 spin_lock(&nn->s2s_cp_lock);
6085 idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
6086 cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
6087 if (cps->cp_stateid.cs_type == NFS4_COPYNOTIFY_STID &&
6088 state_expired(<, cps->cpntf_time))
6089 _free_cpntf_state_locked(nn, cps);
6090 }
6091 spin_unlock(&nn->s2s_cp_lock);
6092 nfs4_get_client_reaplist(nn, &reaplist, <);
6093 nfs4_process_client_reaplist(&reaplist);
6094
6095 spin_lock(&state_lock);
6096 list_for_each_safe(pos, next, &nn->del_recall_lru) {
6097 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
6098 if (!state_expired(<, dp->dl_time))
6099 break;
6100 WARN_ON(!unhash_delegation_locked(dp));
6101 list_add(&dp->dl_recall_lru, &reaplist);
6102 }
6103 spin_unlock(&state_lock);
6104 while (!list_empty(&reaplist)) {
6105 dp = list_first_entry(&reaplist, struct nfs4_delegation,
6106 dl_recall_lru);
6107 list_del_init(&dp->dl_recall_lru);
6108 revoke_delegation(dp);
6109 }
6110
6111 spin_lock(&nn->client_lock);
6112 while (!list_empty(&nn->close_lru)) {
6113 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
6114 oo_close_lru);
6115 if (!state_expired(<, oo->oo_time))
6116 break;
6117 list_del_init(&oo->oo_close_lru);
6118 stp = oo->oo_last_closed_stid;
6119 oo->oo_last_closed_stid = NULL;
6120 spin_unlock(&nn->client_lock);
6121 nfs4_put_stid(&stp->st_stid);
6122 spin_lock(&nn->client_lock);
6123 }
6124 spin_unlock(&nn->client_lock);
6125
6126 /*
6127 * It's possible for a client to try and acquire an already held lock
6128 * that is being held for a long time, and then lose interest in it.
6129 * So, we clean out any un-revisited request after a lease period
6130 * under the assumption that the client is no longer interested.
6131 *
6132 * RFC5661, sec. 9.6 states that the client must not rely on getting
6133 * notifications and must continue to poll for locks, even when the
6134 * server supports them. Thus this shouldn't lead to clients blocking
6135 * indefinitely once the lock does become free.
6136 */
6137 BUG_ON(!list_empty(&reaplist));
6138 spin_lock(&nn->blocked_locks_lock);
6139 while (!list_empty(&nn->blocked_locks_lru)) {
6140 nbl = list_first_entry(&nn->blocked_locks_lru,
6141 struct nfsd4_blocked_lock, nbl_lru);
6142 if (!state_expired(<, nbl->nbl_time))
6143 break;
6144 list_move(&nbl->nbl_lru, &reaplist);
6145 list_del_init(&nbl->nbl_list);
6146 }
6147 spin_unlock(&nn->blocked_locks_lock);
6148
6149 while (!list_empty(&reaplist)) {
6150 nbl = list_first_entry(&reaplist,
6151 struct nfsd4_blocked_lock, nbl_lru);
6152 list_del_init(&nbl->nbl_lru);
6153 free_blocked_lock(nbl);
6154 }
6155#ifdef CONFIG_NFSD_V4_2_INTER_SSC
6156 /* service the server-to-server copy delayed unmount list */
6157 nfsd4_ssc_expire_umount(nn);
6158#endif
6159out:
6160 return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
6161}
6162
6165static void
6166laundromat_main(struct work_struct *laundry)
6167{
6168 time64_t t;
6169 struct delayed_work *dwork = to_delayed_work(laundry);
6170 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
6171 laundromat_work);
6172
6173 t = nfs4_laundromat(nn);
6174 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
6175}
6176
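/*
 * Called from the state shrinker: expire courtesy clients early to
 * release memory, up to NFSD_CLIENT_MAX_TRIM_PER_RUN per run.
 */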
6177static void
6178courtesy_client_reaper(struct nfsd_net *nn)
6179{
6180 struct list_head reaplist;
6181
6182 nfs4_get_courtesy_client_reaplist(nn, &reaplist);
6183 nfs4_process_client_reaplist(&reaplist);
6184}
6185
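/*
 * Ask each active client that holds delegations (none currently being
 * recalled, and not already asked within the last 5 seconds) to
 * voluntarily return its read delegations via a CB_RECALL_ANY
 * callback.
 */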
6186static void
6187deleg_reaper(struct nfsd_net *nn)
6188{
6189 struct list_head *pos, *next;
6190 struct nfs4_client *clp;
6191 struct list_head cblist;
6192
6193 INIT_LIST_HEAD(&cblist);
6194 spin_lock(&nn->client_lock);
6195 list_for_each_safe(pos, next, &nn->client_lru) {
6196 clp = list_entry(pos, struct nfs4_client, cl_lru);
6197 if (clp->cl_state != NFSD4_ACTIVE ||
6198 list_empty(&clp->cl_delegations) ||
6199 atomic_read(&clp->cl_delegs_in_recall) ||
6200 test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags) ||
6201 (ktime_get_boottime_seconds() -
6202 clp->cl_ra_time < 5)) {
6203 continue;
6204 }
6205 list_add(&clp->cl_ra_cblist, &cblist);
6206
6207 /* release in nfsd4_cb_recall_any_release */
6208 atomic_inc(&clp->cl_rpc_users);
6209 set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
6210 clp->cl_ra_time = ktime_get_boottime_seconds();
6211 }
6212 spin_unlock(&nn->client_lock);
6213
6214 while (!list_empty(&cblist)) {
6215 clp = list_first_entry(&cblist, struct nfs4_client,
6216 cl_ra_cblist);
6217 list_del_init(&clp->cl_ra_cblist);
6218 clp->cl_ra->ra_keep = 0;
6219 clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG);
6220 trace_nfsd_cb_recall_any(clp->cl_ra);
6221 nfsd4_run_cb(&clp->cl_ra->ra_cb);
6222 }
6223}
6224
6225static void
6226nfsd4_state_shrinker_worker(struct work_struct *work)
6227{
6228 struct nfsd_net *nn = container_of(work, struct nfsd_net,
6229 nfsd_shrinker_work);
6230
6231 courtesy_client_reaper(nn);
6232 deleg_reaper(nn);
6233}
6234
6235static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
6236{
6237 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
6238 return nfserr_bad_stateid;
6239 return nfs_ok;
6240}
6241
6242static
6243__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
6244{
6245 __be32 status = nfserr_openmode;
6246
	/* For lock stateids, we test the parent open, not the lock: */
6248 if (stp->st_openstp)
6249 stp = stp->st_openstp;
6250 if ((flags & WR_STATE) && !access_permit_write(stp))
6251 goto out;
6252 if ((flags & RD_STATE) && !access_permit_read(stp))
6253 goto out;
6254 status = nfs_ok;
6255out:
6256 return status;
6257}
6258
6259static inline __be32
6260check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
6261{
6262 if (ONE_STATEID(stateid) && (flags & RD_STATE))
6263 return nfs_ok;
6264 else if (opens_in_grace(net)) {
		/* The answer in the remaining cases depends on the existence
		 * of conflicting state, so we must wait out the grace period. */
6267 return nfserr_grace;
6268 } else if (flags & WR_STATE)
6269 return nfs4_share_conflict(current_fh,
6270 NFS4_SHARE_DENY_WRITE);
6271 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
6272 return nfs4_share_conflict(current_fh,
6273 NFS4_SHARE_DENY_READ);
6274}
6275
6276static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
6277{
6278 /*
6279 * When sessions are used the stateid generation number is ignored
6280 * when it is zero.
6281 */
6282 if (has_session && in->si_generation == 0)
6283 return nfs_ok;
6284
6285 if (in->si_generation == ref->si_generation)
6286 return nfs_ok;
6287
6288 /* If the client sends us a stateid from the future, it's buggy: */
6289 if (nfsd4_stateid_generation_after(in, ref))
6290 return nfserr_bad_stateid;
6291 /*
6292 * However, we could see a stateid from the past, even from a
6293 * non-buggy client. For example, if the client sends a lock
6294 * while some IO is outstanding, the lock may bump si_generation
6295 * while the IO is still in flight. The client could avoid that
6296 * situation by waiting for responses on all the IO requests,
6297 * but better performance may result in retrying IO that
6298 * receives an old_stateid error if requests are rarely
6299 * reordered in flight:
6300 */
6301 return nfserr_old_stateid;
6302}
6303
6304static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
6305{
6306 __be32 ret;
6307
6308 spin_lock(&s->sc_lock);
6309 ret = nfsd4_verify_open_stid(s);
6310 if (ret == nfs_ok)
6311 ret = check_stateid_generation(in, &s->sc_stateid, has_session);
6312 spin_unlock(&s->sc_lock);
6313 return ret;
6314}
6315
6316static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
6317{
6318 if (ols->st_stateowner->so_is_open_owner &&
6319 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
6320 return nfserr_bad_stateid;
6321 return nfs_ok;
6322}
6323
6324static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
6325{
6326 struct nfs4_stid *s;
6327 __be32 status = nfserr_bad_stateid;
6328
6329 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
6330 CLOSE_STATEID(stateid))
6331 return status;
6332 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid))
6333 return status;
6334 spin_lock(&cl->cl_lock);
6335 s = find_stateid_locked(cl, stateid);
6336 if (!s)
6337 goto out_unlock;
6338 status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
6339 if (status)
6340 goto out_unlock;
6341 switch (s->sc_type) {
6342 case NFS4_DELEG_STID:
6343 status = nfs_ok;
6344 break;
6345 case NFS4_REVOKED_DELEG_STID:
6346 status = nfserr_deleg_revoked;
6347 break;
6348 case NFS4_OPEN_STID:
6349 case NFS4_LOCK_STID:
6350 status = nfsd4_check_openowner_confirmed(openlockstateid(s));
6351 break;
6352 default:
6353 printk("unknown stateid type %x\n", s->sc_type);
6354 fallthrough;
6355 case NFS4_CLOSED_STID:
6356 case NFS4_CLOSED_DELEG_STID:
6357 status = nfserr_bad_stateid;
6358 }
6359out_unlock:
6360 spin_unlock(&cl->cl_lock);
6361 return status;
6362}
6363
6364__be32
6365nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
6366 stateid_t *stateid, unsigned char typemask,
6367 struct nfs4_stid **s, struct nfsd_net *nn)
6368{
6369 __be32 status;
6370 struct nfs4_stid *stid;
6371 bool return_revoked = false;
6372
6373 /*
	 * Only return revoked delegations if explicitly asked;
	 * otherwise we report revoked or bad_stateid status.
6376 */
6377 if (typemask & NFS4_REVOKED_DELEG_STID)
6378 return_revoked = true;
6379 else if (typemask & NFS4_DELEG_STID)
6380 typemask |= NFS4_REVOKED_DELEG_STID;
6381
6382 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
6383 CLOSE_STATEID(stateid))
6384 return nfserr_bad_stateid;
6385 status = set_client(&stateid->si_opaque.so_clid, cstate, nn);
6386 if (status == nfserr_stale_clientid) {
6387 if (cstate->session)
6388 return nfserr_bad_stateid;
6389 return nfserr_stale_stateid;
6390 }
6391 if (status)
6392 return status;
6393 stid = find_stateid_by_type(cstate->clp, stateid, typemask);
6394 if (!stid)
6395 return nfserr_bad_stateid;
6396 if ((stid->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
6397 nfs4_put_stid(stid);
6398 if (cstate->minorversion)
6399 return nfserr_deleg_revoked;
6400 return nfserr_bad_stateid;
6401 }
6402 *s = stid;
6403 return nfs_ok;
6404}
6405
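/*
 * Return a referenced nfsd_file for this stateid: the delegation file
 * for a delegation stateid, otherwise a readable or writeable file
 * depending on @flags; NULL if none is available.
 */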
6406static struct nfsd_file *
6407nfs4_find_file(struct nfs4_stid *s, int flags)
6408{
6409 if (!s)
6410 return NULL;
6411
6412 switch (s->sc_type) {
6413 case NFS4_DELEG_STID:
6414 if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
6415 return NULL;
6416 return nfsd_file_get(s->sc_file->fi_deleg_file);
6417 case NFS4_OPEN_STID:
6418 case NFS4_LOCK_STID:
6419 if (flags & RD_STATE)
6420 return find_readable_file(s->sc_file);
6421 else
6422 return find_writeable_file(s->sc_file);
6423 }
6424
6425 return NULL;
6426}
6427
6428static __be32
6429nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
6430{
6431 __be32 status;
6432
6433 status = nfsd4_check_openowner_confirmed(ols);
6434 if (status)
6435 return status;
6436 return nfs4_check_openmode(ols, flags);
6437}
6438
6439static __be32
6440nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
6441 struct nfsd_file **nfp, int flags)
6442{
6443 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
6444 struct nfsd_file *nf;
6445 __be32 status;
6446
6447 nf = nfs4_find_file(s, flags);
6448 if (nf) {
6449 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
6450 acc | NFSD_MAY_OWNER_OVERRIDE);
6451 if (status) {
6452 nfsd_file_put(nf);
6453 goto out;
6454 }
6455 } else {
6456 status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
6457 if (status)
6458 return status;
6459 }
6460 *nfp = nf;
6461out:
6462 return status;
}

6464static void
6465_free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
6466{
6467 WARN_ON_ONCE(cps->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID);
6468 if (!refcount_dec_and_test(&cps->cp_stateid.cs_count))
6469 return;
6470 list_del(&cps->cp_list);
6471 idr_remove(&nn->s2s_cp_stateids,
6472 cps->cp_stateid.cs_stid.si_opaque.so_id);
6473 kfree(cps);
}

/*
 * A READ from an inter-server-to-server COPY will have a
6477 * copy stateid. Look up the copy notify stateid from the
6478 * idr structure and take a reference on it.
6479 */
6480__be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
6481 struct nfs4_client *clp,
6482 struct nfs4_cpntf_state **cps)
6483{
6484 copy_stateid_t *cps_t;
6485 struct nfs4_cpntf_state *state = NULL;
6486
6487 if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
6488 return nfserr_bad_stateid;
6489 spin_lock(&nn->s2s_cp_lock);
6490 cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
6491 if (cps_t) {
6492 state = container_of(cps_t, struct nfs4_cpntf_state,
6493 cp_stateid);
6494 if (state->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID) {
6495 state = NULL;
6496 goto unlock;
6497 }
6498 if (!clp)
6499 refcount_inc(&state->cp_stateid.cs_count);
6500 else
6501 _free_cpntf_state_locked(nn, state);
6502 }
6503unlock:
6504 spin_unlock(&nn->s2s_cp_lock);
6505 if (!state)
6506 return nfserr_bad_stateid;
6507 if (!clp && state)
6508 *cps = state;
6509 return 0;
6510}
6511
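/*
 * Resolve a copy-notify stateid to the parent stateid it was derived
 * from, refreshing its timestamp so the laundromat doesn't reap it
 * while the copy is in progress.
 */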
6512static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
6513 struct nfs4_stid **stid)
6514{
6515 __be32 status;
6516 struct nfs4_cpntf_state *cps = NULL;
6517 struct nfs4_client *found;
6518
6519 status = manage_cpntf_state(nn, st, NULL, &cps);
6520 if (status)
6521 return status;
6522
6523 cps->cpntf_time = ktime_get_boottime_seconds();
6524
6525 status = nfserr_expired;
6526 found = lookup_clientid(&cps->cp_p_clid, true, nn);
6527 if (!found)
6528 goto out;
6529
6530 *stid = find_stateid_by_type(found, &cps->cp_p_stateid,
6531 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID);
6532 if (*stid)
6533 status = nfs_ok;
6534 else
6535 status = nfserr_bad_stateid;
6536
6537 put_client_renew(found);
6538out:
6539 nfs4_put_cpntf_state(nn, cps);
6540 return status;
6541}
6542
6543void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
6544{
6545 spin_lock(&nn->s2s_cp_lock);
6546 _free_cpntf_state_locked(nn, cps);
6547 spin_unlock(&nn->s2s_cp_lock);
6548}
6549
6550/*
6551 * Checks for stateid operations
6552 */
6553__be32
6554nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
6555 struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
6556 stateid_t *stateid, int flags, struct nfsd_file **nfp,
6557 struct nfs4_stid **cstid)
6558{
6559 struct net *net = SVC_NET(rqstp);
6560 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6561 struct nfs4_stid *s = NULL;
6562 __be32 status;
6563
6564 if (nfp)
6565 *nfp = NULL;
6566
6567 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
6568 if (cstid)
6569 status = nfserr_bad_stateid;
6570 else
6571 status = check_special_stateids(net, fhp, stateid,
6572 flags);
6573 goto done;
6574 }
6575
6576 status = nfsd4_lookup_stateid(cstate, stateid,
6577 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
6578 &s, nn);
6579 if (status == nfserr_bad_stateid)
6580 status = find_cpntf_state(nn, stateid, &s);
6581 if (status)
6582 return status;
6583 status = nfsd4_stid_check_stateid_generation(stateid, s,
6584 nfsd4_has_session(cstate));
6585 if (status)
6586 goto out;
6587
6588 switch (s->sc_type) {
6589 case NFS4_DELEG_STID:
6590 status = nfs4_check_delegmode(delegstateid(s), flags);
6591 break;
6592 case NFS4_OPEN_STID:
6593 case NFS4_LOCK_STID:
6594 status = nfs4_check_olstateid(openlockstateid(s), flags);
6595 break;
6596 default:
6597 status = nfserr_bad_stateid;
6598 break;
6599 }
6600 if (status)
6601 goto out;
6602 status = nfs4_check_fh(fhp, s);
6603
6604done:
6605 if (status == nfs_ok && nfp)
6606 status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
6607out:
6608 if (s) {
6609 if (!status && cstid)
6610 *cstid = s;
6611 else
6612 nfs4_put_stid(s);
6613 }
6614 return status;
6615}
6616
6617/*
6618 * Test if the stateid is valid
6619 */
6620__be32
6621nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6622 union nfsd4_op_u *u)
6623{
6624 struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
6625 struct nfsd4_test_stateid_id *stateid;
6626 struct nfs4_client *cl = cstate->clp;
6627
6628 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
6629 stateid->ts_id_status =
6630 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
6631
6632 return nfs_ok;
6633}
6634
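/*
 * FREE_STATEID may release a lock stateid only when its lockowner no
 * longer holds any locks on the file; otherwise nfserr_locks_held is
 * returned.
 */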
6635static __be32
6636nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
6637{
6638 struct nfs4_ol_stateid *stp = openlockstateid(s);
6639 __be32 ret;
6640
6641 ret = nfsd4_lock_ol_stateid(stp);
6642 if (ret)
6643 goto out_put_stid;
6644
6645 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
6646 if (ret)
6647 goto out;
6648
6649 ret = nfserr_locks_held;
6650 if (check_for_locks(stp->st_stid.sc_file,
6651 lockowner(stp->st_stateowner)))
6652 goto out;
6653
6654 release_lock_stateid(stp);
6655 ret = nfs_ok;
6656
6657out:
6658 mutex_unlock(&stp->st_mutex);
6659out_put_stid:
6660 nfs4_put_stid(s);
6661 return ret;
6662}
6663
6664__be32
6665nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6666 union nfsd4_op_u *u)
6667{
6668 struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
6669 stateid_t *stateid = &free_stateid->fr_stateid;
6670 struct nfs4_stid *s;
6671 struct nfs4_delegation *dp;
6672 struct nfs4_client *cl = cstate->clp;
6673 __be32 ret = nfserr_bad_stateid;
6674
6675 spin_lock(&cl->cl_lock);
6676 s = find_stateid_locked(cl, stateid);
6677 if (!s)
6678 goto out_unlock;
6679 spin_lock(&s->sc_lock);
6680 switch (s->sc_type) {
6681 case NFS4_DELEG_STID:
6682 ret = nfserr_locks_held;
6683 break;
6684 case NFS4_OPEN_STID:
6685 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
6686 if (ret)
6687 break;
6688 ret = nfserr_locks_held;
6689 break;
6690 case NFS4_LOCK_STID:
6691 spin_unlock(&s->sc_lock);
6692 refcount_inc(&s->sc_count);
6693 spin_unlock(&cl->cl_lock);
6694 ret = nfsd4_free_lock_stateid(stateid, s);
6695 goto out;
6696 case NFS4_REVOKED_DELEG_STID:
6697 spin_unlock(&s->sc_lock);
6698 dp = delegstateid(s);
6699 list_del_init(&dp->dl_recall_lru);
6700 spin_unlock(&cl->cl_lock);
6701 nfs4_put_stid(s);
6702 ret = nfs_ok;
6703 goto out;
6704 /* Default falls through and returns nfserr_bad_stateid */
6705 }
6706 spin_unlock(&s->sc_lock);
6707out_unlock:
6708 spin_unlock(&cl->cl_lock);
6709out:
6710 return ret;
6711}
6712
6713static inline int
setlkflg(int type)
6715{
6716 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
6717 RD_STATE : WR_STATE;
6718}
6719
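/*
 * Common checks for seqid-mutating operations: verify the seqid,
 * take the stateid's mutex, then check the generation number and the
 * filehandle. On success the stateid is returned with st_mutex held.
 */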
6720static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
6721{
6722 struct svc_fh *current_fh = &cstate->current_fh;
6723 struct nfs4_stateowner *sop = stp->st_stateowner;
6724 __be32 status;
6725
6726 status = nfsd4_check_seqid(cstate, sop, seqid);
6727 if (status)
6728 return status;
6729 status = nfsd4_lock_ol_stateid(stp);
6730 if (status != nfs_ok)
6731 return status;
6732 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
6733 if (status == nfs_ok)
6734 status = nfs4_check_fh(current_fh, &stp->st_stid);
6735 if (status != nfs_ok)
6736 mutex_unlock(&stp->st_mutex);
6737 return status;
6738}
6739
6740/*
6741 * Checks for sequence id mutating operations.
6742 */
6743static __be32
6744nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6745 stateid_t *stateid, char typemask,
6746 struct nfs4_ol_stateid **stpp,
6747 struct nfsd_net *nn)
6748{
6749 __be32 status;
6750 struct nfs4_stid *s;
6751 struct nfs4_ol_stateid *stp = NULL;
6752
6753 trace_nfsd_preprocess(seqid, stateid);
6754
6755 *stpp = NULL;
6756 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
6757 if (status)
6758 return status;
6759 stp = openlockstateid(s);
6760 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
6761
6762 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
6763 if (!status)
6764 *stpp = stp;
6765 else
6766 nfs4_put_stid(&stp->st_stid);
6767 return status;
6768}
6769
6770static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6771 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
6772{
6773 __be32 status;
6774 struct nfs4_openowner *oo;
6775 struct nfs4_ol_stateid *stp;
6776
6777 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
6778 NFS4_OPEN_STID, &stp, nn);
6779 if (status)
6780 return status;
6781 oo = openowner(stp->st_stateowner);
6782 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
6783 mutex_unlock(&stp->st_mutex);
6784 nfs4_put_stid(&stp->st_stid);
6785 return nfserr_bad_stateid;
6786 }
6787 *stpp = stp;
6788 return nfs_ok;
6789}
6790
6791__be32
6792nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6793 union nfsd4_op_u *u)
6794{
6795 struct nfsd4_open_confirm *oc = &u->open_confirm;
6796 __be32 status;
6797 struct nfs4_openowner *oo;
6798 struct nfs4_ol_stateid *stp;
6799 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6800
6801 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
6802 cstate->current_fh.fh_dentry);
6803
6804 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
6805 if (status)
6806 return status;
6807
6808 status = nfs4_preprocess_seqid_op(cstate,
6809 oc->oc_seqid, &oc->oc_req_stateid,
6810 NFS4_OPEN_STID, &stp, nn);
6811 if (status)
6812 goto out;
6813 oo = openowner(stp->st_stateowner);
6814 status = nfserr_bad_stateid;
6815 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
6816 mutex_unlock(&stp->st_mutex);
6817 goto put_stateid;
6818 }
6819 oo->oo_flags |= NFS4_OO_CONFIRMED;
6820 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
6821 mutex_unlock(&stp->st_mutex);
6822 trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
6823 nfsd4_client_record_create(oo->oo_owner.so_client);
6824 status = nfs_ok;
6825put_stateid:
6826 nfs4_put_stid(&stp->st_stid);
6827out:
6828 nfsd4_bump_seqid(cstate, status);
6829 return status;
6830}
6831
6832static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
6833{
6834 if (!test_access(access, stp))
6835 return;
6836 nfs4_file_put_access(stp->st_stid.sc_file, access);
6837 clear_access(access, stp);
6838}
6839
6840static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
6841{
6842 switch (to_access) {
6843 case NFS4_SHARE_ACCESS_READ:
6844 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
6845 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6846 break;
6847 case NFS4_SHARE_ACCESS_WRITE:
6848 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
6849 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6850 break;
6851 case NFS4_SHARE_ACCESS_BOTH:
6852 break;
6853 default:
6854 WARN_ON_ONCE(1);
6855 }
6856}
6857
6858__be32
6859nfsd4_open_downgrade(struct svc_rqst *rqstp,
6860 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
6861{
6862 struct nfsd4_open_downgrade *od = &u->open_downgrade;
6863 __be32 status;
6864 struct nfs4_ol_stateid *stp;
6865 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6866
6867 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
6868 cstate->current_fh.fh_dentry);
6869
6870 /* We don't yet support WANT bits: */
6871 if (od->od_deleg_want)
6872 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
6873 od->od_deleg_want);
6874
6875 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
6876 &od->od_stateid, &stp, nn);
6877 if (status)
6878 goto out;
6879 status = nfserr_inval;
6880 if (!test_access(od->od_share_access, stp)) {
6881 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
6882 stp->st_access_bmap, od->od_share_access);
6883 goto put_stateid;
6884 }
6885 if (!test_deny(od->od_share_deny, stp)) {
6886 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
6887 stp->st_deny_bmap, od->od_share_deny);
6888 goto put_stateid;
6889 }
6890 nfs4_stateid_downgrade(stp, od->od_share_access);
6891 reset_union_bmap_deny(od->od_share_deny, stp);
6892 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
6893 status = nfs_ok;
6894put_stateid:
6895 mutex_unlock(&stp->st_mutex);
6896 nfs4_put_stid(&stp->st_stid);
6897out:
6898 nfsd4_bump_seqid(cstate, status);
6899 return status;
6900}
6901
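/*
 * Unhash the open stateid and any lock stateids hanging off it. For
 * v4.1+ the state can be freed immediately; for v4.0 it is moved to
 * the close_lru instead, so a CLOSE replay can still find it.
 */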
6902static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
6903{
6904 struct nfs4_client *clp = s->st_stid.sc_client;
6905 bool unhashed;
6906 LIST_HEAD(reaplist);
6907 struct nfs4_ol_stateid *stp;
6908
6909 spin_lock(&clp->cl_lock);
6910 unhashed = unhash_open_stateid(s, &reaplist);
6911
6912 if (clp->cl_minorversion) {
6913 if (unhashed)
6914 put_ol_stateid_locked(s, &reaplist);
6915 spin_unlock(&clp->cl_lock);
6916 list_for_each_entry(stp, &reaplist, st_locks)
6917 nfs4_free_cpntf_statelist(clp->net, &stp->st_stid);
6918 free_ol_stateid_reaplist(&reaplist);
6919 } else {
6920 spin_unlock(&clp->cl_lock);
6921 free_ol_stateid_reaplist(&reaplist);
6922 if (unhashed)
6923 move_to_close_lru(s, clp->net);
6924 }
6925}
6926
6927/*
6928 * nfs4_unlock_state() called after encode
6929 */
6930__be32
6931nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6932 union nfsd4_op_u *u)
6933{
6934 struct nfsd4_close *close = &u->close;
6935 __be32 status;
6936 struct nfs4_ol_stateid *stp;
6937 struct net *net = SVC_NET(rqstp);
6938 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6939
6940 dprintk("NFSD: nfsd4_close on file %pd\n",
6941 cstate->current_fh.fh_dentry);
6942
6943 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
6944 &close->cl_stateid,
6945 NFS4_OPEN_STID|NFS4_CLOSED_STID,
6946 &stp, nn);
6947 nfsd4_bump_seqid(cstate, status);
6948 if (status)
6949 goto out;
6950
6951 stp->st_stid.sc_type = NFS4_CLOSED_STID;
6952
6953 /*
6954 * Technically we don't _really_ have to increment or copy it, since
6955 * it should just be gone after this operation and we clobber the
6956 * copied value below, but we continue to do so here just to ensure
6957 * that racing ops see that there was a state change.
6958 */
6959 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
6960
6961 nfsd4_close_open_stateid(stp);
6962 mutex_unlock(&stp->st_mutex);
6963
6964 /* v4.1+ suggests that we send a special stateid in here, since the
6965 * clients should just ignore this anyway. Since this is not useful
6966 * for v4.0 clients either, we set it to the special close_stateid
6967 * universally.
6968 *
6969 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
6970 */
6971 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));
6972
6973 /* put reference from nfs4_preprocess_seqid_op */
6974 nfs4_put_stid(&stp->st_stid);
6975out:
6976 return status;
6977}
6978
6979__be32
6980nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6981 union nfsd4_op_u *u)
6982{
6983 struct nfsd4_delegreturn *dr = &u->delegreturn;
6984 struct nfs4_delegation *dp;
6985 stateid_t *stateid = &dr->dr_stateid;
6986 struct nfs4_stid *s;
6987 __be32 status;
6988 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6989
6990 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
6991 return status;
6992
6993 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
6994 if (status)
6995 goto out;
6996 dp = delegstateid(s);
6997 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
6998 if (status)
6999 goto put_stateid;
7000
7001 trace_nfsd_deleg_return(stateid);
7002 wake_up_var(d_inode(cstate->current_fh.fh_dentry));
7003 destroy_delegation(dp);
7004put_stateid:
7005 nfs4_put_stid(&dp->dl_stid);
7006out:
7007 return status;
7008}
7009
7010/* last octet in a range */
7011static inline u64
7012last_byte_offset(u64 start, u64 len)
7013{
7014 u64 end;
7015
7016 WARN_ON_ONCE(!len);
7017 end = start + len;
	return end > start ? end - 1 : NFS4_MAX_UINT64;
7019}
7020
7021/*
7022 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
7023 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
7024 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
7025 * locking, this prevents us from being completely protocol-compliant. The
7026 * real solution to this problem is to start using unsigned file offsets in
7027 * the VFS, but this is a very deep change!
7028 */
7029static inline void
7030nfs4_transform_lock_offset(struct file_lock *lock)
7031{
7032 if (lock->fl_start < 0)
7033 lock->fl_start = OFFSET_MAX;
7034 if (lock->fl_end < 0)
7035 lock->fl_end = OFFSET_MAX;
7036}
7037
7038static fl_owner_t
7039nfsd4_lm_get_owner(fl_owner_t owner)
7040{
7041 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
7042
7043 nfs4_get_stateowner(&lo->lo_owner);
7044 return owner;
7045}
7046
7047static void
7048nfsd4_lm_put_owner(fl_owner_t owner)
7049{
7050 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
7051
7052 if (lo)
7053 nfs4_put_stateowner(&lo->lo_owner);
7054}
7055
/* return true if the client corresponding to this lock is expirable */
7057static bool
7058nfsd4_lm_lock_expirable(struct file_lock *cfl)
7059{
7060 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)cfl->fl_owner;
7061 struct nfs4_client *clp = lo->lo_owner.so_client;
7062 struct nfsd_net *nn;
7063
7064 if (try_to_expire_client(clp)) {
7065 nn = net_generic(clp->net, nfsd_net_id);
7066 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
7067 return true;
7068 }
7069 return false;
7070}
7071
/* wait for the immediate laundromat run scheduled by ->lm_lock_expirable to complete */
7073static void
7074nfsd4_lm_expire_lock(void)
7075{
7076 flush_workqueue(laundry_wq);
7077}
7078
7079static void
7080nfsd4_lm_notify(struct file_lock *fl)
7081{
7082 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
7083 struct net *net = lo->lo_owner.so_client->net;
7084 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7085 struct nfsd4_blocked_lock *nbl = container_of(fl,
7086 struct nfsd4_blocked_lock, nbl_lock);
7087 bool queue = false;
7088
7089 /* An empty list means that something else is going to be using it */
7090 spin_lock(&nn->blocked_locks_lock);
7091 if (!list_empty(&nbl->nbl_list)) {
7092 list_del_init(&nbl->nbl_list);
7093 list_del_init(&nbl->nbl_lru);
7094 queue = true;
7095 }
7096 spin_unlock(&nn->blocked_locks_lock);
7097
7098 if (queue) {
7099 trace_nfsd_cb_notify_lock(lo, nbl);
7100 nfsd4_run_cb(&nbl->nbl_cb);
7101 }
7102}
7103
7104static const struct lock_manager_operations nfsd_posix_mng_ops = {
7105 .lm_mod_owner = THIS_MODULE,
7106 .lm_notify = nfsd4_lm_notify,
7107 .lm_get_owner = nfsd4_lm_get_owner,
7108 .lm_put_owner = nfsd4_lm_put_owner,
7109 .lm_lock_expirable = nfsd4_lm_lock_expirable,
7110 .lm_expire_lock = nfsd4_lm_expire_lock,
7111};
7112
7113static inline void
7114nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
7115{
7116 struct nfs4_lockowner *lo;
7117
7118 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
7119 lo = (struct nfs4_lockowner *) fl->fl_owner;
7120 xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
7121 GFP_KERNEL);
7122 if (!deny->ld_owner.data)
7123 /* We just don't care that much */
7124 goto nevermind;
7125 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
7126 } else {
7127nevermind:
7128 deny->ld_owner.len = 0;
7129 deny->ld_owner.data = NULL;
7130 deny->ld_clientid.cl_boot = 0;
7131 deny->ld_clientid.cl_id = 0;
7132 }
7133 deny->ld_start = fl->fl_start;
7134 deny->ld_length = NFS4_MAX_UINT64;
7135 if (fl->fl_end != NFS4_MAX_UINT64)
7136 deny->ld_length = fl->fl_end - fl->fl_start + 1;
7137 deny->ld_type = NFS4_READ_LT;
7138 if (fl->fl_type != F_RDLCK)
7139 deny->ld_type = NFS4_WRITE_LT;
7140}
7141
7142static struct nfs4_lockowner *
7143find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
7144{
7145 unsigned int strhashval = ownerstr_hashval(owner);
7146 struct nfs4_stateowner *so;
7147
7148 lockdep_assert_held(&clp->cl_lock);
7149
7150 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
7151 so_strhash) {
7152 if (so->so_is_open_owner)
7153 continue;
7154 if (same_owner_str(so, owner))
7155 return lockowner(nfs4_get_stateowner(so));
7156 }
7157 return NULL;
7158}
7159
7160static struct nfs4_lockowner *
7161find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
7162{
7163 struct nfs4_lockowner *lo;
7164
7165 spin_lock(&clp->cl_lock);
7166 lo = find_lockowner_str_locked(clp, owner);
7167 spin_unlock(&clp->cl_lock);
7168 return lo;
7169}
7170
7171static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
7172{
7173 unhash_lockowner_locked(lockowner(sop));
7174}
7175
7176static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
7177{
7178 struct nfs4_lockowner *lo = lockowner(sop);
7179
7180 kmem_cache_free(lockowner_slab, lo);
7181}
7182
7183static const struct nfs4_stateowner_operations lockowner_ops = {
7184 .so_unhash = nfs4_unhash_lockowner,
7185 .so_free = nfs4_free_lockowner,
7186};
7187
7188/*
 * Allocate a lock owner structure.
 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
 * occurred.
7192 *
7193 * strhashval = ownerstr_hashval
7194 */
7195static struct nfs4_lockowner *
7196alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
7197 struct nfs4_ol_stateid *open_stp,
7198 struct nfsd4_lock *lock)
7199{
7200 struct nfs4_lockowner *lo, *ret;
7201
7202 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
7203 if (!lo)
7204 return NULL;
7205 INIT_LIST_HEAD(&lo->lo_blocked);
7206 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
7207 lo->lo_owner.so_is_open_owner = 0;
7208 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
7209 lo->lo_owner.so_ops = &lockowner_ops;
7210 spin_lock(&clp->cl_lock);
7211 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
7212 if (ret == NULL) {
7213 list_add(&lo->lo_owner.so_strhash,
7214 &clp->cl_ownerstr_hashtbl[strhashval]);
7215 ret = lo;
7216 } else
7217 nfs4_free_stateowner(&lo->lo_owner);
7218
7219 spin_unlock(&clp->cl_lock);
7220 return ret;
7221}
7222
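/*
 * Search the open stateid's list of lock stateids for one belonging
 * to @lo; return it with an extra reference, or NULL if none exists.
 */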
7223static struct nfs4_ol_stateid *
7224find_lock_stateid(const struct nfs4_lockowner *lo,
7225 const struct nfs4_ol_stateid *ost)
7226{
7227 struct nfs4_ol_stateid *lst;
7228
7229 lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
7230
7231 /* If ost is not hashed, ost->st_locks will not be valid */
7232 if (!nfs4_ol_stateid_unhashed(ost))
7233 list_for_each_entry(lst, &ost->st_locks, st_locks) {
7234 if (lst->st_stateowner == &lo->lo_owner) {
7235 refcount_inc(&lst->st_stid.sc_count);
7236 return lst;
7237 }
7238 }
7239 return NULL;
7240}
7241
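/*
 * Initialize and hash a new lock stateid under @open_stp. If another
 * thread races us and hashes one for the same lockowner first, the
 * existing stateid is returned instead. Either way the result is
 * returned with st_mutex held.
 */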
7242static struct nfs4_ol_stateid *
7243init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
7244 struct nfs4_file *fp, struct inode *inode,
7245 struct nfs4_ol_stateid *open_stp)
7246{
7247 struct nfs4_client *clp = lo->lo_owner.so_client;
7248 struct nfs4_ol_stateid *retstp;
7249
7250 mutex_init(&stp->st_mutex);
7251 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
7252retry:
7253 spin_lock(&clp->cl_lock);
7254 if (nfs4_ol_stateid_unhashed(open_stp))
7255 goto out_close;
7256 retstp = find_lock_stateid(lo, open_stp);
7257 if (retstp)
7258 goto out_found;
7259 refcount_inc(&stp->st_stid.sc_count);
7260 stp->st_stid.sc_type = NFS4_LOCK_STID;
7261 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
7262 get_nfs4_file(fp);
7263 stp->st_stid.sc_file = fp;
7264 stp->st_access_bmap = 0;
7265 stp->st_deny_bmap = open_stp->st_deny_bmap;
7266 stp->st_openstp = open_stp;
7267 spin_lock(&fp->fi_lock);
7268 list_add(&stp->st_locks, &open_stp->st_locks);
7269 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
7270 list_add(&stp->st_perfile, &fp->fi_stateids);
7271 spin_unlock(&fp->fi_lock);
7272 spin_unlock(&clp->cl_lock);
7273 return stp;
7274out_found:
7275 spin_unlock(&clp->cl_lock);
7276 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
7277 nfs4_put_stid(&retstp->st_stid);
7278 goto retry;
7279 }
7280 /* To keep mutex tracking happy */
7281 mutex_unlock(&stp->st_mutex);
7282 return retstp;
7283out_close:
7284 spin_unlock(&clp->cl_lock);
7285 mutex_unlock(&stp->st_mutex);
7286 return NULL;
7287}
7288
7289static struct nfs4_ol_stateid *
7290find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
7291 struct inode *inode, struct nfs4_ol_stateid *ost,
7292 bool *new)
7293{
7294 struct nfs4_stid *ns = NULL;
7295 struct nfs4_ol_stateid *lst;
7296 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
7297 struct nfs4_client *clp = oo->oo_owner.so_client;
7298
7299 *new = false;
7300 spin_lock(&clp->cl_lock);
7301 lst = find_lock_stateid(lo, ost);
7302 spin_unlock(&clp->cl_lock);
7303 if (lst != NULL) {
7304 if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
7305 goto out;
7306 nfs4_put_stid(&lst->st_stid);
7307 }
7308 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
7309 if (ns == NULL)
7310 return NULL;
7311
7312 lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
7313 if (lst == openlockstateid(ns))
7314 *new = true;
7315 else
7316 nfs4_put_stid(ns);
7317out:
7318 return lst;
7319}
7320
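/*
 * A lock length of zero is invalid, as is any length (other than the
 * special value NFS4_MAX_UINT64) whose range would extend past the
 * last possible byte of the file.
 */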
7321static int
7322check_lock_length(u64 offset, u64 length)
7323{
7324 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
7325 (length > ~offset)));
7326}
7327
7328static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
7329{
7330 struct nfs4_file *fp = lock_stp->st_stid.sc_file;
7331
7332 lockdep_assert_held(&fp->fi_lock);
7333
7334 if (test_access(access, lock_stp))
7335 return;
7336 __nfs4_file_get_access(fp, access);
7337 set_access(access, lock_stp);
7338}
7339
7340static __be32
7341lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
7342 struct nfs4_ol_stateid *ost,
7343 struct nfsd4_lock *lock,
7344 struct nfs4_ol_stateid **plst, bool *new)
7345{
7346 __be32 status;
7347 struct nfs4_file *fi = ost->st_stid.sc_file;
7348 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
7349 struct nfs4_client *cl = oo->oo_owner.so_client;
7350 struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
7351 struct nfs4_lockowner *lo;
7352 struct nfs4_ol_stateid *lst;
7353 unsigned int strhashval;
7354
7355 lo = find_lockowner_str(cl, &lock->lk_new_owner);
7356 if (!lo) {
7357 strhashval = ownerstr_hashval(&lock->lk_new_owner);
7358 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
7359 if (lo == NULL)
7360 return nfserr_jukebox;
7361 } else {
7362 /* with an existing lockowner, seqids must be the same */
7363 status = nfserr_bad_seqid;
7364 if (!cstate->minorversion &&
7365 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
7366 goto out;
7367 }
7368
7369 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
7370 if (lst == NULL) {
7371 status = nfserr_jukebox;
7372 goto out;
7373 }
7374
7375 status = nfs_ok;
7376 *plst = lst;
7377out:
7378 nfs4_put_stateowner(&lo->lo_owner);
7379 return status;
7380}
7381
7382/*
7383 * LOCK operation
7384 */
7385__be32
7386nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7387 union nfsd4_op_u *u)
7388{
7389 struct nfsd4_lock *lock = &u->lock;
7390 struct nfs4_openowner *open_sop = NULL;
7391 struct nfs4_lockowner *lock_sop = NULL;
7392 struct nfs4_ol_stateid *lock_stp = NULL;
7393 struct nfs4_ol_stateid *open_stp = NULL;
7394 struct nfs4_file *fp;
7395 struct nfsd_file *nf = NULL;
7396 struct nfsd4_blocked_lock *nbl = NULL;
7397 struct file_lock *file_lock = NULL;
7398 struct file_lock *conflock = NULL;
7399 __be32 status = 0;
7400 int lkflg;
7401 int err;
7402 bool new = false;
7403 unsigned char fl_type;
7404 unsigned int fl_flags = FL_POSIX;
7405 struct net *net = SVC_NET(rqstp);
7406 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7407
7408 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
7409 (long long) lock->lk_offset,
7410 (long long) lock->lk_length);
7411
7412 if (check_lock_length(lock->lk_offset, lock->lk_length))
7413 return nfserr_inval;
7414
7415 if ((status = fh_verify(rqstp, &cstate->current_fh,
7416 S_IFREG, NFSD_MAY_LOCK))) {
7417 dprintk("NFSD: nfsd4_lock: permission denied!\n");
7418 return status;
7419 }
7420
7421 if (lock->lk_is_new) {
7422 if (nfsd4_has_session(cstate))
			/* See RFC 5661 section 18.10.3: the given clientid is ignored: */
7424 memcpy(&lock->lk_new_clientid,
7425 &cstate->clp->cl_clientid,
7426 sizeof(clientid_t));
7427
7428 /* validate and update open stateid and open seqid */
7429 status = nfs4_preprocess_confirmed_seqid_op(cstate,
7430 lock->lk_new_open_seqid,
7431 &lock->lk_new_open_stateid,
7432 &open_stp, nn);
7433 if (status)
7434 goto out;
7435 mutex_unlock(&open_stp->st_mutex);
7436 open_sop = openowner(open_stp->st_stateowner);
7437 status = nfserr_bad_stateid;
7438 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
7439 &lock->lk_new_clientid))
7440 goto out;
7441 status = lookup_or_create_lock_state(cstate, open_stp, lock,
7442 &lock_stp, &new);
7443 } else {
7444 status = nfs4_preprocess_seqid_op(cstate,
7445 lock->lk_old_lock_seqid,
7446 &lock->lk_old_lock_stateid,
7447 NFS4_LOCK_STID, &lock_stp, nn);
7448 }
7449 if (status)
7450 goto out;
7451 lock_sop = lockowner(lock_stp->st_stateowner);
7452
7453 lkflg = setlkflg(lock->lk_type);
7454 status = nfs4_check_openmode(lock_stp, lkflg);
7455 if (status)
7456 goto out;
7457
7458 status = nfserr_grace;
7459 if (locks_in_grace(net) && !lock->lk_reclaim)
7460 goto out;
7461 status = nfserr_no_grace;
7462 if (!locks_in_grace(net) && lock->lk_reclaim)
7463 goto out;
7464
7465 if (lock->lk_reclaim)
7466 fl_flags |= FL_RECLAIM;
7467
7468 fp = lock_stp->st_stid.sc_file;
7469 switch (lock->lk_type) {
7470 case NFS4_READW_LT:
7471 if (nfsd4_has_session(cstate))
7472 fl_flags |= FL_SLEEP;
7473 fallthrough;
7474 case NFS4_READ_LT:
7475 spin_lock(&fp->fi_lock);
7476 nf = find_readable_file_locked(fp);
7477 if (nf)
7478 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
7479 spin_unlock(&fp->fi_lock);
7480 fl_type = F_RDLCK;
7481 break;
7482 case NFS4_WRITEW_LT:
7483 if (nfsd4_has_session(cstate))
7484 fl_flags |= FL_SLEEP;
7485 fallthrough;
7486 case NFS4_WRITE_LT:
7487 spin_lock(&fp->fi_lock);
7488 nf = find_writeable_file_locked(fp);
7489 if (nf)
7490 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
7491 spin_unlock(&fp->fi_lock);
7492 fl_type = F_WRLCK;
7493 break;
7494 default:
7495 status = nfserr_inval;
7496 goto out;
7497 }
7498
7499 if (!nf) {
7500 status = nfserr_openmode;
7501 goto out;
7502 }
7503
7504 /*
7505 * Most filesystems with their own ->lock operations will block
7506 * the nfsd thread waiting to acquire the lock. That leads to
7507 * deadlocks (we don't want every nfsd thread tied up waiting
7508 * for file locks), so don't attempt blocking lock notifications
7509 * on those filesystems:
7510 */
7511 if (nf->nf_file->f_op->lock)
7512 fl_flags &= ~FL_SLEEP;
7513
7514 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
7515 if (!nbl) {
7516 dprintk("NFSD: %s: unable to allocate block!\n", __func__);
7517 status = nfserr_jukebox;
7518 goto out;
7519 }
7520
7521 file_lock = &nbl->nbl_lock;
7522 file_lock->fl_type = fl_type;
7523 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
7524 file_lock->fl_pid = current->tgid;
7525 file_lock->fl_file = nf->nf_file;
7526 file_lock->fl_flags = fl_flags;
7527 file_lock->fl_lmops = &nfsd_posix_mng_ops;
7528 file_lock->fl_start = lock->lk_offset;
7529 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
7530 nfs4_transform_lock_offset(file_lock);
7531
7532 conflock = locks_alloc_lock();
7533 if (!conflock) {
7534 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7535 status = nfserr_jukebox;
7536 goto out;
7537 }
7538
7539 if (fl_flags & FL_SLEEP) {
7540 nbl->nbl_time = ktime_get_boottime_seconds();
7541 spin_lock(&nn->blocked_locks_lock);
7542 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
7543 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
7544 kref_get(&nbl->nbl_kref);
7545 spin_unlock(&nn->blocked_locks_lock);
7546 }
7547
7548 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
7549 switch (err) {
7550 case 0: /* success! */
7551 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
7552 status = 0;
7553 if (lock->lk_reclaim)
7554 nn->somebody_reclaimed = true;
7555 break;
7556 case FILE_LOCK_DEFERRED:
7557 kref_put(&nbl->nbl_kref, free_nbl);
7558 nbl = NULL;
7559 fallthrough;
7560 case -EAGAIN: /* conflock holds conflicting lock */
7561 status = nfserr_denied;
7562 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
7563 nfs4_set_lock_denied(conflock, &lock->lk_denied);
7564 break;
7565 case -EDEADLK:
7566 status = nfserr_deadlock;
7567 break;
7568 default:
7569 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
7570 status = nfserrno(err);
7571 break;
7572 }
7573out:
7574 if (nbl) {
7575 /* dequeue it if we queued it before */
7576 if (fl_flags & FL_SLEEP) {
7577 spin_lock(&nn->blocked_locks_lock);
7578 if (!list_empty(&nbl->nbl_list) &&
7579 !list_empty(&nbl->nbl_lru)) {
7580 list_del_init(&nbl->nbl_list);
7581 list_del_init(&nbl->nbl_lru);
7582 kref_put(&nbl->nbl_kref, free_nbl);
7583 }
			/* nbl can use one of the lists to be linked to a reaplist */
7585 spin_unlock(&nn->blocked_locks_lock);
7586 }
7587 free_blocked_lock(nbl);
7588 }
7589 if (nf)
7590 nfsd_file_put(nf);
7591 if (lock_stp) {
7592 /* Bump seqid manually if the 4.0 replay owner is openowner */
7593 if (cstate->replay_owner &&
7594 cstate->replay_owner != &lock_sop->lo_owner &&
7595 seqid_mutating_err(ntohl(status)))
7596 lock_sop->lo_owner.so_seqid++;
7597
7598 /*
7599 * If this is a new, never-before-used stateid, and we are
7600 * returning an error, then just go ahead and release it.
7601 */
7602 if (status && new)
7603 release_lock_stateid(lock_stp);
7604
7605 mutex_unlock(&lock_stp->st_mutex);
7606
7607 nfs4_put_stid(&lock_stp->st_stid);
7608 }
7609 if (open_stp)
7610 nfs4_put_stid(&open_stp->st_stid);
7611 nfsd4_bump_seqid(cstate, status);
7612 if (conflock)
7613 locks_free_lock(conflock);
7614 return status;
7615}
7616
7617/*
7618 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
7619 * so we do a temporary open here just to get an open file to pass to
7620 * vfs_test_lock.
7621 */
7622static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
7623{
7624 struct nfsd_file *nf;
7625 struct inode *inode;
7626 __be32 err;
7627
7628 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
7629 if (err)
7630 return err;
7631 inode = fhp->fh_dentry->d_inode;
	inode_lock(inode); /* to block new leases until after test_lock: */
7633 err = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
7634 if (err)
7635 goto out;
7636 lock->fl_file = nf->nf_file;
7637 err = nfserrno(vfs_test_lock(nf->nf_file, lock));
7638 lock->fl_file = NULL;
7639out:
7640 inode_unlock(inode);
7641 nfsd_file_put(nf);
7642 return err;
7643}

/*
 * LOCKT operation
 */
__be32
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{
	struct nfsd4_lockt *lockt = &u->lockt;
	struct file_lock *file_lock = NULL;
	struct nfs4_lockowner *lo = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (locks_in_grace(SVC_NET(rqstp)))
		return nfserr_grace;

	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
		return nfserr_inval;

	if (!nfsd4_has_session(cstate)) {
		status = set_client(&lockt->lt_clientid, cstate, nn);
		if (status)
			goto out;
	}

	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
	if (status)
		goto out;

	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	switch (lockt->lt_type) {
	case NFS4_READ_LT:
	case NFS4_READW_LT:
		file_lock->fl_type = F_RDLCK;
		break;
	case NFS4_WRITE_LT:
	case NFS4_WRITEW_LT:
		file_lock->fl_type = F_WRLCK;
		break;
	default:
		dprintk("NFSD: nfs4_lockt: bad lock type!\n");
		status = nfserr_inval;
		goto out;
	}

	lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
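	/*
	 * If the client has never used this lockowner, fl_owner stays
	 * NULL and the test below treats every existing lock as held
	 * by a different owner.
	 */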
	if (lo)
		file_lock->fl_owner = (fl_owner_t)lo;
	file_lock->fl_pid = current->tgid;
	file_lock->fl_flags = FL_POSIX;

	file_lock->fl_start = lockt->lt_offset;
	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);

	nfs4_transform_lock_offset(file_lock);

	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
	if (status)
		goto out;

	if (file_lock->fl_type != F_UNLCK) {
		status = nfserr_denied;
		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
	}
out:
	if (lo)
		nfs4_put_stateowner(&lo->lo_owner);
	if (file_lock)
		locks_free_lock(file_lock);
	return status;
}

__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{
	struct nfsd4_locku *locku = &u->locku;
	struct nfs4_ol_stateid *stp;
	struct nfsd_file *nf = NULL;
	struct file_lock *file_lock = NULL;
	__be32 status;
	int err;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
		(long long) locku->lu_offset,
		(long long) locku->lu_length);

	if (check_lock_length(locku->lu_offset, locku->lu_length))
		return nfserr_inval;

	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
					  &locku->lu_stateid, NFS4_LOCK_STID,
					  &stp, nn);
	if (status)
		goto out;
	nf = find_any_file(stp->st_stid.sc_file);
	if (!nf) {
		status = nfserr_lock_range;
		goto put_stateid;
	}
	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto put_file;
	}

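	/* Build an F_UNLCK request covering the range being released. */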
7757
7758 file_lock->fl_type = F_UNLCK;
7759 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
7760 file_lock->fl_pid = current->tgid;
7761 file_lock->fl_file = nf->nf_file;
7762 file_lock->fl_flags = FL_POSIX;
7763 file_lock->fl_lmops = &nfsd_posix_mng_ops;
7764 file_lock->fl_start = locku->lu_offset;
7765
7766 file_lock->fl_end = last_byte_offset(locku->lu_offset,
7767 locku->lu_length);
7768 nfs4_transform_lock_offset(file_lock);
7769
7770 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
7771 if (err) {
7772 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
7773 goto out_nfserr;
7774 }
7775 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
7776put_file:
7777 nfsd_file_put(nf);
7778put_stateid:
7779 mutex_unlock(&stp->st_mutex);
7780 nfs4_put_stid(&stp->st_stid);
7781out:
7782 nfsd4_bump_seqid(cstate, status);
7783 if (file_lock)
7784 locks_free_lock(file_lock);
7785 return status;
7786
7787out_nfserr:
7788 status = nfserrno(err);
7789 goto put_file;
7790}

/*
 * Returns
 *	true:  locks held by lockowner
 *	false: no locks held by lockowner
 */
static bool
check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{
	struct file_lock *fl;
	bool status = false;
	struct nfsd_file *nf = find_any_file(fp);
	struct inode *inode;
	struct file_lock_context *flctx;

	if (!nf) {
		/* Any valid lock stateid should have some sort of access */
		WARN_ON_ONCE(1);
		return status;
	}

	inode = locks_inode(nf->nf_file);
	flctx = locks_inode_context(inode);

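	/* Walk the inode's POSIX lock list looking for one held by this owner. */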
	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
		spin_lock(&flctx->flc_lock);
		list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
			if (fl->fl_owner == (fl_owner_t)lowner) {
				status = true;
				break;
			}
		}
		spin_unlock(&flctx->flc_lock);
	}
	nfsd_file_put(nf);
	return status;
}

/**
 * nfsd4_release_lockowner - process NFSv4.0 RELEASE_LOCKOWNER operations
 * @rqstp: RPC transaction
 * @cstate: NFSv4 COMPOUND state
 * @u: RELEASE_LOCKOWNER arguments
 *
 * The lockowner's so_count is bumped when a lock record is added
 * or when copying a conflicting lock. The latter case is brief,
 * but can lead to fleeting false positives when looking for
 * locks-in-use.
 *
 * Return values:
 *   %nfs_ok: lockowner released or not found
 *   %nfserr_locks_held: lockowner still in use
 *   %nfserr_stale_clientid: clientid no longer active
 *   %nfserr_expired: clientid not recognized
 */
__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			union nfsd4_op_u *u)
{
	struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	clientid_t *clid = &rlockowner->rl_clientid;
	struct nfs4_ol_stateid *stp;
	struct nfs4_lockowner *lo;
	struct nfs4_client *clp;
	LIST_HEAD(reaplist);
	__be32 status;

	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
		clid->cl_boot, clid->cl_id);

	status = set_client(clid, cstate, nn);
	if (status)
		return status;
	clp = cstate->clp;

	spin_lock(&clp->cl_lock);
	lo = find_lockowner_str_locked(clp, &rlockowner->rl_owner);
	if (!lo) {
		spin_unlock(&clp->cl_lock);
		return nfs_ok;
	}
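	/*
	 * A count of exactly 2 means the lockowner is idle: one
	 * reference from the client's owner-string hash plus the one
	 * taken by the lookup above. Anything higher means lock
	 * stateids (or a transient conflicting-lock copy, as noted in
	 * the kernel-doc above) still reference it.
	 */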
	if (atomic_read(&lo->lo_owner.so_count) != 2) {
		spin_unlock(&clp->cl_lock);
		nfs4_put_stateowner(&lo->lo_owner);
		return nfserr_locks_held;
	}
	unhash_lockowner_locked(lo);
	while (!list_empty(&lo->lo_owner.so_stateids)) {
		stp = list_first_entry(&lo->lo_owner.so_stateids,
				       struct nfs4_ol_stateid,
				       st_perstateowner);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);

	free_ol_stateid_reaplist(&reaplist);
	remove_blocked_locks(lo);
	nfs4_put_stateowner(&lo->lo_owner);
	return nfs_ok;
}

static inline struct nfs4_client_reclaim *
alloc_reclaim(void)
{
	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
}

bool
nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp;

	crp = nfsd4_find_reclaim_client(name, nn);
	return (crp && crp->cr_clp);
}

/*
 * On failure all reclaim bets are off: the client will be told
 * nfserr_no_grace.
 *
 * The caller is responsible for freeing name.data if NULL is returned (it
 * will be freed in nfs4_remove_reclaim_record in the normal case).
 */
struct nfs4_client_reclaim *
nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
		       struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp;

	crp = alloc_reclaim();
	if (crp) {
		strhashval = clientstr_hashval(name);
		INIT_LIST_HEAD(&crp->cr_strhash);
		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
		crp->cr_name.data = name.data;
		crp->cr_name.len = name.len;
		crp->cr_princhash.data = princhash.data;
		crp->cr_princhash.len = princhash.len;
		crp->cr_clp = NULL;
		nn->reclaim_str_hashtbl_size++;
	}
	return crp;
}

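/*
 * Illustrative sketch (an assumed caller, not part of this file): a
 * client-tracking backend hands ownership of a kmemdup()'d name to the
 * reclaim record, and must free the buffer itself only on failure:
 *
 *	struct xdr_netobj name = {
 *		.len	= len,
 *		.data	= kmemdup(buf, len, GFP_KERNEL),
 *	};
 *	if (!nfs4_client_to_reclaim(name, princhash, nn))
 *		kfree(name.data);
 */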
void
nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
{
	list_del(&crp->cr_strhash);
	kfree(crp->cr_name.data);
	kfree(crp->cr_princhash.data);
	kfree(crp);
	nn->reclaim_str_hashtbl_size--;
}

void
nfs4_release_reclaim(struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp = NULL;
	int i;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
					 struct nfs4_client_reclaim, cr_strhash);
			nfs4_remove_reclaim_record(crp, nn);
		}
	}
	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
}

/*
 * Called from OPEN, CLAIM_PREVIOUS with a new clientid.
 */
struct nfs4_client_reclaim *
nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp = NULL;

	strhashval = clientstr_hashval(name);
	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
		if (compare_blob(&crp->cr_name, &name) == 0)
			return crp;
	}
	return NULL;
}

__be32
nfs4_check_open_reclaim(struct nfs4_client *clp)
{
	if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
		return nfserr_no_grace;

	if (nfsd4_client_record_check(clp))
		return nfserr_reclaim_bad;

	return nfs_ok;
}

/*
 * Since the lifetime of a delegation isn't limited to that of an open, a
 * client may quite reasonably hang on to a delegation as long as it has
 * the inode cached. This becomes an obvious problem the first time a
 * client's inode cache approaches the size of the server's total memory.
 *
 * For now we avoid this problem by imposing a hard limit on the number
 * of delegations, which varies according to the server's memory size.
 */
static void
set_max_delegations(void)
{
	/*
	 * Allow at most 4 delegations per megabyte of RAM. Quick
	 * estimates suggest that in the worst case (where every delegation
	 * is for a different inode), a delegation could take about 1.5K,
	 * giving a worst case usage of about 6% of memory.
	 */
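	/*
	 * Worked example: with 4KB pages (PAGE_SHIFT == 12) the shift
	 * below is 20 - 2 - 12 = 6, i.e. one delegation per 64 free
	 * buffer pages (256KB), which is the 4 per megabyte above.
	 */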
	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}

static int nfs4_state_create_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int i;

	nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					    sizeof(struct list_head),
					    GFP_KERNEL);
	if (!nn->conf_id_hashtbl)
		goto err;
	nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
	if (!nn->unconf_id_hashtbl)
		goto err_unconf_id;
	nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
	if (!nn->sessionid_hashtbl)
		goto err_sessionid;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
	}
	for (i = 0; i < SESSION_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
	nn->conf_name_tree = RB_ROOT;
	nn->unconf_name_tree = RB_ROOT;
	nn->boot_time = ktime_get_real_seconds();
	nn->grace_ended = false;
	nn->nfsd4_manager.block_opens = true;
	INIT_LIST_HEAD(&nn->nfsd4_manager.list);
	INIT_LIST_HEAD(&nn->client_lru);
	INIT_LIST_HEAD(&nn->close_lru);
	INIT_LIST_HEAD(&nn->del_recall_lru);
	spin_lock_init(&nn->client_lock);
	spin_lock_init(&nn->s2s_cp_lock);
	idr_init(&nn->s2s_cp_stateids);

	spin_lock_init(&nn->blocked_locks_lock);
	INIT_LIST_HEAD(&nn->blocked_locks_lru);

	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
	INIT_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker);
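	/* Pin the netns; dropped again in nfs4_state_destroy_net() or on error. */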
	get_net(net);

	nn->nfsd_client_shrinker.scan_objects = nfsd4_state_shrinker_scan;
	nn->nfsd_client_shrinker.count_objects = nfsd4_state_shrinker_count;
	nn->nfsd_client_shrinker.seeks = DEFAULT_SEEKS;

	if (register_shrinker(&nn->nfsd_client_shrinker, "nfsd-client"))
		goto err_shrinker;
	return 0;

err_shrinker:
	put_net(net);
	kfree(nn->sessionid_hashtbl);
err_sessionid:
	kfree(nn->unconf_id_hashtbl);
err_unconf_id:
	kfree(nn->conf_id_hashtbl);
err:
	return -ENOMEM;
}

static void
nfs4_state_destroy_net(struct net *net)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->conf_id_hashtbl[i])) {
			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	WARN_ON(!list_empty(&nn->blocked_locks_lru));

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	kfree(nn->sessionid_hashtbl);
	kfree(nn->unconf_id_hashtbl);
	kfree(nn->conf_id_hashtbl);
	put_net(net);
}

int
nfs4_state_start_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	ret = nfs4_state_create_net(net);
	if (ret)
		return ret;
	locks_start_grace(net, &nn->nfsd4_manager);
	nfsd4_client_tracking_init(net);
	if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
		goto skip_grace;
	printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
	       nn->nfsd4_grace, net->ns.inum);
	trace_nfsd_grace_start(nn);
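	/* The first laundromat run is scheduled for when the grace period ends. */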
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
	return 0;

skip_grace:
	printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
	       net->ns.inum);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
	nfsd4_end_grace(nn);
	return 0;
}

/* initialization to perform when the nfsd service is started: */

int
nfs4_state_start(void)
{
	int ret;

	ret = rhltable_init(&nfs4_file_rhltable, &nfs4_file_rhash_params);
	if (ret)
		return ret;

	ret = nfsd4_create_callback_queue();
	if (ret) {
		rhltable_destroy(&nfs4_file_rhltable);
		return ret;
	}

	set_max_delegations();
	return 0;
}

void
nfs4_state_shutdown_net(struct net *net)
{
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	unregister_shrinker(&nn->nfsd_client_shrinker);
	cancel_work(&nn->nfsd_shrinker_work);
	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);

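	/* Unhash all delegations under state_lock, then destroy them outside it. */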
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		destroy_unhashed_deleg(dp);
	}

	nfsd4_client_tracking_exit(net);
	nfs4_state_destroy_net(net);
#ifdef CONFIG_NFSD_V4_2_INTER_SSC
	nfsd4_ssc_shutdown_umount(nn);
#endif
}

void
nfs4_state_shutdown(void)
{
	nfsd4_destroy_callback_queue();
	rhltable_destroy(&nfs4_file_rhltable);
}

static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
	    CURRENT_STATEID(stateid))
		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}

static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (cstate->minorversion) {
		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
		SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
	}
}

void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
}

/*
 * functions to set current state id
 */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
			       union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
		      union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open.op_stateid);
}

void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
		       union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
		      union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->lock.lk_resp_stateid);
}

/*
 * functions to consume current state id
 */

void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
			       union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
			     union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->delegreturn.dr_stateid);
}

void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
		      union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->free_stateid.fr_stateid);
}

void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
			 union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->setattr.sa_stateid);
}

void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
		       union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
		       union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->locku.lu_stateid);
}

void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
		      union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->read.rd_stateid);
}

void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
		       union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->write.wr_stateid);
}
1479 WARN_ON(!unhash_lock_stateid(stp));
1480 put_ol_stateid_locked(stp, reaplist);
1481 }
1482}
1483
1484static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
1485 struct list_head *reaplist)
1486{
1487 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1488
1489 if (!unhash_ol_stateid(stp))
1490 return false;
1491 release_open_stateid_locks(stp, reaplist);
1492 return true;
1493}
1494
1495static void release_open_stateid(struct nfs4_ol_stateid *stp)
1496{
1497 LIST_HEAD(reaplist);
1498
1499 spin_lock(&stp->st_stid.sc_client->cl_lock);
1500 if (unhash_open_stateid(stp, &reaplist))
1501 put_ol_stateid_locked(stp, &reaplist);
1502 spin_unlock(&stp->st_stid.sc_client->cl_lock);
1503 free_ol_stateid_reaplist(&reaplist);
1504}
1505
1506static void unhash_openowner_locked(struct nfs4_openowner *oo)
1507{
1508 struct nfs4_client *clp = oo->oo_owner.so_client;
1509
1510 lockdep_assert_held(&clp->cl_lock);
1511
1512 list_del_init(&oo->oo_owner.so_strhash);
1513 list_del_init(&oo->oo_perclient);
1514}
1515
1516static void release_last_closed_stateid(struct nfs4_openowner *oo)
1517{
1518 struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
1519 nfsd_net_id);
1520 struct nfs4_ol_stateid *s;
1521
1522 spin_lock(&nn->client_lock);
1523 s = oo->oo_last_closed_stid;
1524 if (s) {
1525 list_del_init(&oo->oo_close_lru);
1526 oo->oo_last_closed_stid = NULL;
1527 }
1528 spin_unlock(&nn->client_lock);
1529 if (s)
1530 nfs4_put_stid(&s->st_stid);
1531}
1532
1533static void release_openowner(struct nfs4_openowner *oo)
1534{
1535 struct nfs4_ol_stateid *stp;
1536 struct nfs4_client *clp = oo->oo_owner.so_client;
1537 struct list_head reaplist;
1538
1539 INIT_LIST_HEAD(&reaplist);
1540
1541 spin_lock(&clp->cl_lock);
1542 unhash_openowner_locked(oo);
1543 while (!list_empty(&oo->oo_owner.so_stateids)) {
1544 stp = list_first_entry(&oo->oo_owner.so_stateids,
1545 struct nfs4_ol_stateid, st_perstateowner);
1546 if (unhash_open_stateid(stp, &reaplist))
1547 put_ol_stateid_locked(stp, &reaplist);
1548 }
1549 spin_unlock(&clp->cl_lock);
1550 free_ol_stateid_reaplist(&reaplist);
1551 release_last_closed_stateid(oo);
1552 nfs4_put_stateowner(&oo->oo_owner);
1553}
1554
1555static inline int
1556hash_sessionid(struct nfs4_sessionid *sessionid)
1557{
1558 struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
1559
1560 return sid->sequence % SESSION_HASH_SIZE;
1561}
1562
1563#ifdef CONFIG_SUNRPC_DEBUG
1564static inline void
1565dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1566{
1567 u32 *ptr = (u32 *)(&sessionid->data[0]);
1568 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
1569}
1570#else
1571static inline void
1572dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1573{
1574}
1575#endif
1576
1577/*
1578 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1579 * won't be used for replay.
1580 */
1581void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
1582{
1583 struct nfs4_stateowner *so = cstate->replay_owner;
1584
1585 if (nfserr == nfserr_replay_me)
1586 return;
1587
1588 if (!seqid_mutating_err(ntohl(nfserr))) {
1589 nfsd4_cstate_clear_replay(cstate);
1590 return;
1591 }
1592 if (!so)
1593 return;
1594 if (so->so_is_open_owner)
1595 release_last_closed_stateid(openowner(so));
1596 so->so_seqid++;
1597 return;
1598}
1599
1600static void
1601gen_sessionid(struct nfsd4_session *ses)
1602{
1603 struct nfs4_client *clp = ses->se_client;
1604 struct nfsd4_sessionid *sid;
1605
1606 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
1607 sid->clientid = clp->cl_clientid;
1608 sid->sequence = current_sessionid++;
1609 sid->reserved = 0;
1610}
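
/*
 * Assuming the struct nfsd4_sessionid layout in state.h, the 16-byte
 * session id generated above looks roughly like:
 *
 *	bytes  0- 7: clientid (server boot time + per-net counter)
 *	bytes  8-11: sequence (monotonic current_sessionid)
 *	bytes 12-15: reserved, always zero
 *
 * hash_sessionid() above buckets on the sequence field alone, which
 * increases monotonically and so spreads sessions evenly.
 */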
1611
1612/*
1613 * The protocol defines ca_maxresponsesize_cached to include the size of
1614 * the rpc header, but all we need to cache is the data starting after
1615 * the end of the initial SEQUENCE operation--the rest we regenerate
1616 * each time. Therefore we can advertise a ca_maxresponsesize_cached
1617 * value that is the number of bytes in our cache plus a few additional
1618 * bytes. In order to stay on the safe side, and not promise more than
1619 * we can cache, those additional bytes must be the minimum possible: 24
1620 * bytes of rpc header (xid through accept state, with AUTH_NULL
1621 * verifier), 12 for the compound header (with zero-length tag), and 44
1622 * for the SEQUENCE op response:
1623 */
1624#define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
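
/*
 * Worked out, that floor is 24 + 12 + 44 == 80 bytes, so the largest
 * ca_maxresponsesize_cached we will grant a client is
 *
 *	NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ
 *
 * (see the clamping in check_forechannel_attrs() below).
 */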
1625
1626static void
1627free_session_slots(struct nfsd4_session *ses)
1628{
1629 int i;
1630
1631 for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
1632 free_svc_cred(&ses->se_slots[i]->sl_cred);
1633 kfree(ses->se_slots[i]);
1634 }
1635}
1636
1637/*
1638 * We don't actually need to cache the rpc and session headers, so we
1639 * can allocate a little less for each slot:
1640 */
1641static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1642{
1643 u32 size;
1644
1645 if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1646 size = 0;
1647 else
1648 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1649 return size + sizeof(struct nfsd4_slot);
1650}
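
/*
 * For example, given the ceiling applied in check_forechannel_attrs(),
 * a slot costs at most
 *
 *	NFSD_SLOT_CACHE_SIZE + sizeof(struct nfsd4_slot)
 *
 * bytes, and a client that asks to cache less than NFSD_MIN_HDR_SEQ_SZ
 * pays only the fixed sizeof(struct nfsd4_slot).
 */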
1651
1652/*
1653 * XXX: If we run out of reserved DRC memory we could (up to a point)
1654 * re-negotiate active sessions and reduce their slot usage to make
1655 * room for new connections. For now we just fail the create session.
1656 */
1657static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
1658{
1659 u32 slotsize = slot_bytes(ca);
1660 u32 num = ca->maxreqs;
1661 unsigned long avail, total_avail;
1662 unsigned int scale_factor;
1663
1664 spin_lock(&nfsd_drc_lock);
1665 if (nfsd_drc_max_mem > nfsd_drc_mem_used)
1666 total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
1667 else
1668 /* We have handed out more space than we chose in
1669 * set_max_drc() to allow. That isn't really a
1670 * problem as long as that doesn't make us think we
1671 * have lots more due to integer overflow.
1672 */
1673 total_avail = 0;
1674 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
1675 /*
1676 * Never use more than a fraction of the remaining memory,
1677 * unless it's the only way to give this client a slot.
1678 * The chosen fraction is either 1/8 or 1/number of threads,
1679 * whichever is smaller. This ensures there are adequate
1680 * slots to support multiple clients per thread.
1681 * Give the client one slot even if that would require
1682 * over-allocation--it is better than failure.
1683 */
1684 scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);
1685
1686 avail = clamp_t(unsigned long, avail, slotsize,
1687 total_avail/scale_factor);
1688 num = min_t(int, num, avail / slotsize);
1689 num = max_t(int, num, 1);
1690 nfsd_drc_mem_used += num * slotsize;
1691 spin_unlock(&nfsd_drc_lock);
1692
1693 return num;
1694}
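
/*
 * A worked example with made-up numbers (and ignoring the
 * NFSD_MAX_MEM_PER_SESSION ceiling): with total_avail == 8MB, 16 nfsd
 * threads and slotsize == 2KB, scale_factor == max(8, 16) == 16, avail
 * is clamped to 8MB/16 == 512KB, and the client gets
 * min(ca->maxreqs, 512KB / 2KB) slots -- but never fewer than one.
 */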
1695
1696static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1697{
1698 int slotsize = slot_bytes(ca);
1699
1700 spin_lock(&nfsd_drc_lock);
1701 nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1702 spin_unlock(&nfsd_drc_lock);
1703}
1704
1705static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1706 struct nfsd4_channel_attrs *battrs)
1707{
1708 int numslots = fattrs->maxreqs;
1709 int slotsize = slot_bytes(fattrs);
1710 struct nfsd4_session *new;
1711 int mem, i;
1712
1713 BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
1714 + sizeof(struct nfsd4_session) > PAGE_SIZE);
1715 mem = numslots * sizeof(struct nfsd4_slot *);
1716
1717 new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
1718 if (!new)
1719 return NULL;
1720 /* allocate each struct nfsd4_slot and data cache in one piece */
1721 for (i = 0; i < numslots; i++) {
1722 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1723 if (!new->se_slots[i])
1724 goto out_free;
1725 }
1726
1727 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1728 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1729
1730 return new;
1731out_free:
1732 while (i--)
1733 kfree(new->se_slots[i]);
1734 kfree(new);
1735 return NULL;
1736}
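
/*
 * Resulting layout, roughly: the session header and its slot-pointer
 * array share one allocation (guaranteed above to fit within a page),
 * while each slot and its reply cache share another:
 *
 *	new ---------> [ struct nfsd4_session | se_slots[0..maxreqs-1] ]
 *	se_slots[i] -> [ struct nfsd4_slot | sl_data reply cache ]
 */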
1737
1738static void free_conn(struct nfsd4_conn *c)
1739{
1740 svc_xprt_put(c->cn_xprt);
1741 kfree(c);
1742}
1743
1744static void nfsd4_conn_lost(struct svc_xpt_user *u)
1745{
1746 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1747 struct nfs4_client *clp = c->cn_session->se_client;
1748
1749 trace_nfsd_cb_lost(clp);
1750
1751 spin_lock(&clp->cl_lock);
1752 if (!list_empty(&c->cn_persession)) {
1753 list_del(&c->cn_persession);
1754 free_conn(c);
1755 }
1756 nfsd4_probe_callback(clp);
1757 spin_unlock(&clp->cl_lock);
1758}
1759
1760static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1761{
1762 struct nfsd4_conn *conn;
1763
1764 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1765 if (!conn)
1766 return NULL;
1767 svc_xprt_get(rqstp->rq_xprt);
1768 conn->cn_xprt = rqstp->rq_xprt;
1769 conn->cn_flags = flags;
1770 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1771 return conn;
1772}
1773
1774static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1775{
1776 conn->cn_session = ses;
1777 list_add(&conn->cn_persession, &ses->se_conns);
1778}
1779
1780static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1781{
1782 struct nfs4_client *clp = ses->se_client;
1783
1784 spin_lock(&clp->cl_lock);
1785 __nfsd4_hash_conn(conn, ses);
1786 spin_unlock(&clp->cl_lock);
1787}
1788
1789static int nfsd4_register_conn(struct nfsd4_conn *conn)
1790{
1791 conn->cn_xpt_user.callback = nfsd4_conn_lost;
1792 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1793}
1794
1795static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
1796{
1797 int ret;
1798
1799 nfsd4_hash_conn(conn, ses);
1800 ret = nfsd4_register_conn(conn);
1801 if (ret)
1802 /* oops; xprt is already down: */
1803 nfsd4_conn_lost(&conn->cn_xpt_user);
1804 /* We may have gained or lost a callback channel: */
1805 nfsd4_probe_callback_sync(ses->se_client);
1806}
1807
1808static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1809{
1810 u32 dir = NFS4_CDFC4_FORE;
1811
1812 if (cses->flags & SESSION4_BACK_CHAN)
1813 dir |= NFS4_CDFC4_BACK;
1814 return alloc_conn(rqstp, dir);
1815}
1816
1817/* must be called under client_lock */
1818static void nfsd4_del_conns(struct nfsd4_session *s)
1819{
1820 struct nfs4_client *clp = s->se_client;
1821 struct nfsd4_conn *c;
1822
1823 spin_lock(&clp->cl_lock);
1824 while (!list_empty(&s->se_conns)) {
1825 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1826 list_del_init(&c->cn_persession);
1827 spin_unlock(&clp->cl_lock);
1828
1829 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1830 free_conn(c);
1831
1832 spin_lock(&clp->cl_lock);
1833 }
1834 spin_unlock(&clp->cl_lock);
1835}
1836
1837static void __free_session(struct nfsd4_session *ses)
1838{
1839 free_session_slots(ses);
1840 kfree(ses);
1841}
1842
1843static void free_session(struct nfsd4_session *ses)
1844{
1845 nfsd4_del_conns(ses);
1846 nfsd4_put_drc_mem(&ses->se_fchannel);
1847 __free_session(ses);
1848}
1849
1850static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1851{
1852 int idx;
1853 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1854
1855 new->se_client = clp;
1856 gen_sessionid(new);
1857
1858 INIT_LIST_HEAD(&new->se_conns);
1859
1860 new->se_cb_seq_nr = 1;
1861 new->se_flags = cses->flags;
1862 new->se_cb_prog = cses->callback_prog;
1863 new->se_cb_sec = cses->cb_sec;
1864 atomic_set(&new->se_ref, 0);
1865 idx = hash_sessionid(&new->se_sessionid);
1866 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1867 spin_lock(&clp->cl_lock);
1868 list_add(&new->se_perclnt, &clp->cl_sessions);
1869 spin_unlock(&clp->cl_lock);
1870
1871 {
1872 struct sockaddr *sa = svc_addr(rqstp);
1873 /*
1874 * This is a little silly; with sessions there's no real
1875 * use for the callback address. Use the peer address
1876 * as a reasonable default for now, but consider fixing
1877 * the rpc client not to require an address in the
1878 * future:
1879 */
1880 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
1881 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
1882 }
1883}
1884
1885/* caller must hold client_lock */
1886static struct nfsd4_session *
1887__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1888{
1889 struct nfsd4_session *elem;
1890 int idx;
1891 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1892
1893 lockdep_assert_held(&nn->client_lock);
1894
1895 dump_sessionid(__func__, sessionid);
1896 idx = hash_sessionid(sessionid);
1897 /* Search in the appropriate list */
1898 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
1899 if (!memcmp(elem->se_sessionid.data, sessionid->data,
1900 NFS4_MAX_SESSIONID_LEN)) {
1901 return elem;
1902 }
1903 }
1904
1905 dprintk("%s: session not found\n", __func__);
1906 return NULL;
1907}
1908
1909static struct nfsd4_session *
1910find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1911 __be32 *ret)
1912{
1913 struct nfsd4_session *session;
1914 __be32 status = nfserr_badsession;
1915
1916 session = __find_in_sessionid_hashtbl(sessionid, net);
1917 if (!session)
1918 goto out;
1919 status = nfsd4_get_session_locked(session);
1920 if (status)
1921 session = NULL;
1922out:
1923 *ret = status;
1924 return session;
1925}
1926
1927/* caller must hold client_lock */
1928static void
1929unhash_session(struct nfsd4_session *ses)
1930{
1931 struct nfs4_client *clp = ses->se_client;
1932 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1933
1934 lockdep_assert_held(&nn->client_lock);
1935
1936 list_del(&ses->se_hash);
1937 spin_lock(&ses->se_client->cl_lock);
1938 list_del(&ses->se_perclnt);
1939 spin_unlock(&ses->se_client->cl_lock);
1940}
1941
1942/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1943static int
1944STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1945{
1946 /*
1947 * We're assuming the clid was not given out from a boot
1948 * precisely 2^32 (about 136 years) before this one. That seems
1949 * a safe assumption:
1950 */
1951 if (clid->cl_boot == (u32)nn->boot_time)
1952 return 0;
1953 trace_nfsd_clid_stale(clid);
1954 return 1;
1955}
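
/*
 * For example, if this server instance recorded boot_time == 0x5f000000,
 * a clid stamped cl_boot == 0x5e000000 was handed out by an earlier
 * boot and is reported stale; only clids stamped 0x5f000000 pass.
 */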
1956
1957/*
1958 * XXX Should we use a slab cache?
1959 * This type of memory management is somewhat inefficient, but we use it
1960 * anyway since SETCLIENTID is not a common operation.
1961 */
1962static struct nfs4_client *alloc_client(struct xdr_netobj name)
1963{
1964 struct nfs4_client *clp;
1965 int i;
1966
1967 clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
1968 if (clp == NULL)
1969 return NULL;
1970 xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
1971 if (clp->cl_name.data == NULL)
1972 goto err_no_name;
1973 clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
1974 sizeof(struct list_head),
1975 GFP_KERNEL);
1976 if (!clp->cl_ownerstr_hashtbl)
1977 goto err_no_hashtbl;
1978 for (i = 0; i < OWNER_HASH_SIZE; i++)
1979 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
1980 INIT_LIST_HEAD(&clp->cl_sessions);
1981 idr_init(&clp->cl_stateids);
1982 atomic_set(&clp->cl_rpc_users, 0);
1983 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1984 INIT_LIST_HEAD(&clp->cl_idhash);
1985 INIT_LIST_HEAD(&clp->cl_openowners);
1986 INIT_LIST_HEAD(&clp->cl_delegations);
1987 INIT_LIST_HEAD(&clp->cl_lru);
1988 INIT_LIST_HEAD(&clp->cl_revoked);
1989#ifdef CONFIG_NFSD_PNFS
1990 INIT_LIST_HEAD(&clp->cl_lo_states);
1991#endif
1992 INIT_LIST_HEAD(&clp->async_copies);
1993 spin_lock_init(&clp->async_lock);
1994 spin_lock_init(&clp->cl_lock);
1995 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1996 return clp;
1997err_no_hashtbl:
1998 kfree(clp->cl_name.data);
1999err_no_name:
2000 kmem_cache_free(client_slab, clp);
2001 return NULL;
2002}
2003
2004static void __free_client(struct kref *k)
2005{
2006 struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
2007 struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
2008
2009 free_svc_cred(&clp->cl_cred);
2010 kfree(clp->cl_ownerstr_hashtbl);
2011 kfree(clp->cl_name.data);
2012 kfree(clp->cl_nii_domain.data);
2013 kfree(clp->cl_nii_name.data);
2014 idr_destroy(&clp->cl_stateids);
2015 kmem_cache_free(client_slab, clp);
2016}
2017
2018static void drop_client(struct nfs4_client *clp)
2019{
2020 kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
2021}
2022
2023static void
2024free_client(struct nfs4_client *clp)
2025{
2026 while (!list_empty(&clp->cl_sessions)) {
2027 struct nfsd4_session *ses;
2028 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
2029 se_perclnt);
2030 list_del(&ses->se_perclnt);
2031 WARN_ON_ONCE(atomic_read(&ses->se_ref));
2032 free_session(ses);
2033 }
2034 rpc_destroy_wait_queue(&clp->cl_cb_waitq);
2035 if (clp->cl_nfsd_dentry) {
2036 nfsd_client_rmdir(clp->cl_nfsd_dentry);
2037 clp->cl_nfsd_dentry = NULL;
2038 wake_up_all(&expiry_wq);
2039 }
2040 drop_client(clp);
2041}
2042
2043/* must be called under the client_lock */
2044static void
2045unhash_client_locked(struct nfs4_client *clp)
2046{
2047 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2048 struct nfsd4_session *ses;
2049
2050 lockdep_assert_held(&nn->client_lock);
2051
2052 /* Mark the client as expired! */
2053 clp->cl_time = 0;
2054 /* Make it invisible */
2055 if (!list_empty(&clp->cl_idhash)) {
2056 list_del_init(&clp->cl_idhash);
2057 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2058 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
2059 else
2060 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2061 }
2062 list_del_init(&clp->cl_lru);
2063 spin_lock(&clp->cl_lock);
2064 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
2065 list_del_init(&ses->se_hash);
2066 spin_unlock(&clp->cl_lock);
2067}
2068
2069static void
2070unhash_client(struct nfs4_client *clp)
2071{
2072 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2073
2074 spin_lock(&nn->client_lock);
2075 unhash_client_locked(clp);
2076 spin_unlock(&nn->client_lock);
2077}
2078
2079static __be32 mark_client_expired_locked(struct nfs4_client *clp)
2080{
2081 if (atomic_read(&clp->cl_rpc_users))
2082 return nfserr_jukebox;
2083 unhash_client_locked(clp);
2084 return nfs_ok;
2085}
2086
2087static void
2088__destroy_client(struct nfs4_client *clp)
2089{
2090 int i;
2091 struct nfs4_openowner *oo;
2092 struct nfs4_delegation *dp;
2093 struct list_head reaplist;
2094
2095 INIT_LIST_HEAD(&reaplist);
2096 spin_lock(&state_lock);
2097 while (!list_empty(&clp->cl_delegations)) {
2098 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
2099 WARN_ON(!unhash_delegation_locked(dp));
2100 list_add(&dp->dl_recall_lru, &reaplist);
2101 }
2102 spin_unlock(&state_lock);
2103 while (!list_empty(&reaplist)) {
2104 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
2105 list_del_init(&dp->dl_recall_lru);
2106 destroy_unhashed_deleg(dp);
2107 }
2108 while (!list_empty(&clp->cl_revoked)) {
2109 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
2110 list_del_init(&dp->dl_recall_lru);
2111 nfs4_put_stid(&dp->dl_stid);
2112 }
2113 while (!list_empty(&clp->cl_openowners)) {
2114 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
2115 nfs4_get_stateowner(&oo->oo_owner);
2116 release_openowner(oo);
2117 }
2118 for (i = 0; i < OWNER_HASH_SIZE; i++) {
2119 struct nfs4_stateowner *so, *tmp;
2120
2121 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
2122 so_strhash) {
2123 /* Should be no openowners at this point */
2124 WARN_ON_ONCE(so->so_is_open_owner);
2125 remove_blocked_locks(lockowner(so));
2126 }
2127 }
2128 nfsd4_return_all_client_layouts(clp);
2129 nfsd4_shutdown_copy(clp);
2130 nfsd4_shutdown_callback(clp);
2131 if (clp->cl_cb_conn.cb_xprt)
2132 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
2133 free_client(clp);
2134 wake_up_all(&expiry_wq);
2135}
2136
2137static void
2138destroy_client(struct nfs4_client *clp)
2139{
2140 unhash_client(clp);
2141 __destroy_client(clp);
2142}
2143
2144static void inc_reclaim_complete(struct nfs4_client *clp)
2145{
2146 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2147
2148 if (!nn->track_reclaim_completes)
2149 return;
2150 if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
2151 return;
2152 if (atomic_inc_return(&nn->nr_reclaim_complete) ==
2153 nn->reclaim_str_hashtbl_size) {
2154 printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
2155 clp->net->ns.inum);
2156 nfsd4_end_grace(nn);
2157 }
2158}
2159
2160static void expire_client(struct nfs4_client *clp)
2161{
2162 unhash_client(clp);
2163 nfsd4_client_record_remove(clp);
2164 __destroy_client(clp);
2165}
2166
2167static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
2168{
2169 memcpy(target->cl_verifier.data, source->data,
2170 sizeof(target->cl_verifier.data));
2171}
2172
2173static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
2174{
2175 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
2176 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
2177}
2178
2179static int copy_cred(struct svc_cred *target, struct svc_cred *source)
2180{
2181 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
2182 target->cr_raw_principal = kstrdup(source->cr_raw_principal,
2183 GFP_KERNEL);
2184 target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
2185 if ((source->cr_principal && !target->cr_principal) ||
2186 (source->cr_raw_principal && !target->cr_raw_principal) ||
2187 (source->cr_targ_princ && !target->cr_targ_princ))
2188 return -ENOMEM;
2189
2190 target->cr_flavor = source->cr_flavor;
2191 target->cr_uid = source->cr_uid;
2192 target->cr_gid = source->cr_gid;
2193 target->cr_group_info = source->cr_group_info;
2194 get_group_info(target->cr_group_info);
2195 target->cr_gss_mech = source->cr_gss_mech;
2196 if (source->cr_gss_mech)
2197 gss_mech_get(source->cr_gss_mech);
2198 return 0;
2199}
2200
2201static int
2202compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
2203{
2204 if (o1->len < o2->len)
2205 return -1;
2206 if (o1->len > o2->len)
2207 return 1;
2208 return memcmp(o1->data, o2->data, o1->len);
2209}
2210
2211static int
2212same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
2213{
2214 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
2215}
2216
2217static int
2218same_clid(clientid_t *cl1, clientid_t *cl2)
2219{
2220 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
2221}
2222
2223static bool groups_equal(struct group_info *g1, struct group_info *g2)
2224{
2225 int i;
2226
2227 if (g1->ngroups != g2->ngroups)
2228 return false;
2229 for (i=0; i<g1->ngroups; i++)
2230 if (!gid_eq(g1->gid[i], g2->gid[i]))
2231 return false;
2232 return true;
2233}
2234
2235/*
2236 * RFC 3530 language requires clid_inuse be returned when the
2237 * "principal" associated with a request differs from that previously
2238 * used. We use the uid, gids, and gss principal string as our best
2239 * approximation. We also don't want to allow non-gss use of a client
2240 * established using gss: in theory cr_principal should catch that
2241 * change, but in practice cr_principal can be null even in the gss case
2242 * since gssd doesn't always pass down a principal string.
2243 */
2244static bool is_gss_cred(struct svc_cred *cr)
2245{
2246 /* Is cr_flavor one of the gss "pseudoflavors"?: */
2247 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2248}
2249
2250
2251static bool
2252same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2253{
2254 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2255 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2256 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2257 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2258 return false;
2259 /* XXX: check that cr_targ_princ fields match? */
2260 if (cr1->cr_principal == cr2->cr_principal)
2261 return true;
2262 if (!cr1->cr_principal || !cr2->cr_principal)
2263 return false;
2264 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2265}
2266
2267static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2268{
2269 struct svc_cred *cr = &rqstp->rq_cred;
2270 u32 service;
2271
2272 if (!cr->cr_gss_mech)
2273 return false;
2274 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2275 return service == RPC_GSS_SVC_INTEGRITY ||
2276 service == RPC_GSS_SVC_PRIVACY;
2277}
2278
2279bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2280{
2281 struct svc_cred *cr = &rqstp->rq_cred;
2282
2283 if (!cl->cl_mach_cred)
2284 return true;
2285 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2286 return false;
2287 if (!svc_rqst_integrity_protected(rqstp))
2288 return false;
2289 if (cl->cl_cred.cr_raw_principal)
2290 return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2291 cr->cr_raw_principal);
2292 if (!cr->cr_principal)
2293 return false;
2294 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2295}
2296
2297static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2298{
2299 __be32 verf[2];
2300
2301 /*
2302 * This is opaque to the client, so there's no need to byte-swap. Use
2303 * __force to keep sparse happy
2304 */
2305 verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
2306 verf[1] = (__force __be32)nn->clverifier_counter++;
2307 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2308}
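
/*
 * So the confirm verifier is just 8 opaque bytes: 4 of wall-clock
 * seconds and 4 of a per-net counter.  For instance, a client
 * confirmed during second 1600000000 might get the pair
 * (1600000000, 5); the counter keeps verifiers generated within the
 * same second apart.
 */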
2309
2310static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2311{
2312 clp->cl_clientid.cl_boot = (u32)nn->boot_time;
2313 clp->cl_clientid.cl_id = nn->clientid_counter++;
2314 gen_confirm(clp, nn);
2315}
2316
2317static struct nfs4_stid *
2318find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2319{
2320 struct nfs4_stid *ret;
2321
2322 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2323 if (!ret || !ret->sc_type)
2324 return NULL;
2325 return ret;
2326}
2327
2328static struct nfs4_stid *
2329find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
2330{
2331 struct nfs4_stid *s;
2332
2333 spin_lock(&cl->cl_lock);
2334 s = find_stateid_locked(cl, t);
2335 if (s != NULL) {
2336 if (typemask & s->sc_type)
2337 refcount_inc(&s->sc_count);
2338 else
2339 s = NULL;
2340 }
2341 spin_unlock(&cl->cl_lock);
2342 return s;
2343}
2344
2345static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
2346{
2347 struct nfsdfs_client *nc;
2348 nc = get_nfsdfs_client(inode);
2349 if (!nc)
2350 return NULL;
2351 return container_of(nc, struct nfs4_client, cl_nfsdfs);
2352}
2353
2354static void seq_quote_mem(struct seq_file *m, char *data, int len)
2355{
2356 seq_printf(m, "\"");
2357 seq_escape_mem(m, data, len, ESCAPE_HEX | ESCAPE_NAP | ESCAPE_APPEND, "\"\\");
2358 seq_printf(m, "\"");
2359}
2360
2361static const char *cb_state2str(int state)
2362{
2363 switch (state) {
2364 case NFSD4_CB_UP:
2365 return "UP";
2366 case NFSD4_CB_UNKNOWN:
2367 return "UNKNOWN";
2368 case NFSD4_CB_DOWN:
2369 return "DOWN";
2370 case NFSD4_CB_FAULT:
2371 return "FAULT";
2372 }
2373 return "UNDEFINED";
2374}
2375
2376static int client_info_show(struct seq_file *m, void *v)
2377{
2378 struct inode *inode = m->private;
2379 struct nfs4_client *clp;
2380 u64 clid;
2381
2382 clp = get_nfsdfs_clp(inode);
2383 if (!clp)
2384 return -ENXIO;
2385 memcpy(&clid, &clp->cl_clientid, sizeof(clid));
2386 seq_printf(m, "clientid: 0x%llx\n", clid);
2387 seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
2388 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2389 seq_puts(m, "status: confirmed\n");
2390 else
2391 seq_puts(m, "status: unconfirmed\n");
2392 seq_printf(m, "name: ");
2393 seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
2394 seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
2395 if (clp->cl_nii_domain.data) {
2396 seq_printf(m, "Implementation domain: ");
2397 seq_quote_mem(m, clp->cl_nii_domain.data,
2398 clp->cl_nii_domain.len);
2399 seq_printf(m, "\nImplementation name: ");
2400 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
2401 seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
2402 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
2403 }
2404 seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state));
2405 seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr);
2406 drop_client(clp);
2407
2408 return 0;
2409}
2410
2411static int client_info_open(struct inode *inode, struct file *file)
2412{
2413 return single_open(file, client_info_show, inode);
2414}
2415
2416static const struct file_operations client_info_fops = {
2417 .open = client_info_open,
2418 .read = seq_read,
2419 .llseek = seq_lseek,
2420 .release = single_release,
2421};
2422
2423static void *states_start(struct seq_file *s, loff_t *pos)
2424 __acquires(&clp->cl_lock)
2425{
2426 struct nfs4_client *clp = s->private;
2427 unsigned long id = *pos;
2428 void *ret;
2429
2430 spin_lock(&clp->cl_lock);
2431 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2432 *pos = id;
2433 return ret;
2434}
2435
2436static void *states_next(struct seq_file *s, void *v, loff_t *pos)
2437{
2438 struct nfs4_client *clp = s->private;
2439 unsigned long id = *pos;
2440 void *ret;
2441
2443 id++;
2444 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2445 *pos = id;
2446 return ret;
2447}
2448
2449static void states_stop(struct seq_file *s, void *v)
2450 __releases(&clp->cl_lock)
2451{
2452 struct nfs4_client *clp = s->private;
2453
2454 spin_unlock(&clp->cl_lock);
2455}
2456
2457static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
2458{
2459 seq_printf(s, "filename: \"%pD2\"", f->nf_file);
2460}
2461
2462static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
2463{
2464 struct inode *inode = f->nf_inode;
2465
2466 seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
2467 MAJOR(inode->i_sb->s_dev),
2468 MINOR(inode->i_sb->s_dev),
2469 inode->i_ino);
2470}
2471
2472static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
2473{
2474 seq_printf(s, "owner: ");
2475 seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
2476}
2477
2478static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
2479{
2480 seq_printf(s, "0x%.8x", stid->si_generation);
2481 seq_printf(s, "%12phN", &stid->si_opaque);
2482}
2483
2484static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
2485{
2486 struct nfs4_ol_stateid *ols;
2487 struct nfs4_file *nf;
2488 struct nfsd_file *file;
2489 struct nfs4_stateowner *oo;
2490 unsigned int access, deny;
2491
2492 if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
2493 return 0; /* XXX: or SEQ_SKIP? */
2494 ols = openlockstateid(st);
2495 oo = ols->st_stateowner;
2496 nf = st->sc_file;
2497 file = find_any_file(nf);
2498 if (!file)
2499 return 0;
2500
2501 seq_printf(s, "- ");
2502 nfs4_show_stateid(s, &st->sc_stateid);
2503 seq_printf(s, ": { type: open, ");
2504
2505 access = bmap_to_share_mode(ols->st_access_bmap);
2506 deny = bmap_to_share_mode(ols->st_deny_bmap);
2507
2508 seq_printf(s, "access: %s%s, ",
2509 access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2510 access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2511 seq_printf(s, "deny: %s%s, ",
2512 deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2513 deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2514
2515 nfs4_show_superblock(s, file);
2516 seq_printf(s, ", ");
2517 nfs4_show_fname(s, file);
2518 seq_printf(s, ", ");
2519 nfs4_show_owner(s, oo);
2520 seq_printf(s, " }\n");
2521 nfsd_file_put(file);
2522
2523 return 0;
2524}
2525
2526static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
2527{
2528 struct nfs4_ol_stateid *ols;
2529 struct nfs4_file *nf;
2530 struct nfsd_file *file;
2531 struct nfs4_stateowner *oo;
2532
2533 ols = openlockstateid(st);
2534 oo = ols->st_stateowner;
2535 nf = st->sc_file;
2536 file = find_any_file(nf);
2537 if (!file)
2538 return 0;
2539
2540 seq_printf(s, "- ");
2541 nfs4_show_stateid(s, &st->sc_stateid);
2542 seq_printf(s, ": { type: lock, ");
2543
2544 /*
2545 * Note: a lock stateid isn't really the same thing as a lock,
2546 * it's the locking state held by one owner on a file, and there
2547 * may be multiple (or no) lock ranges associated with it.
2548 * (The same is true of open stateids.)
2549 */
2550
2551 nfs4_show_superblock(s, file);
2552 /* XXX: open stateid? */
2553 seq_printf(s, ", ");
2554 nfs4_show_fname(s, file);
2555 seq_printf(s, ", ");
2556 nfs4_show_owner(s, oo);
2557 seq_printf(s, " }\n");
2558 nfsd_file_put(file);
2559
2560 return 0;
2561}
2562
2563static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
2564{
2565 struct nfs4_delegation *ds;
2566 struct nfs4_file *nf;
2567 struct nfsd_file *file;
2568
2569 ds = delegstateid(st);
2570 nf = st->sc_file;
2571 file = find_deleg_file(nf);
2572 if (!file)
2573 return 0;
2574
2575 seq_printf(s, "- ");
2576 nfs4_show_stateid(s, &st->sc_stateid);
2577 seq_printf(s, ": { type: deleg, ");
2578
2579 /* Kinda dead code as long as we only support read delegs: */
2580 seq_printf(s, "access: %s, ",
2581 ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
2582
2583 /* XXX: lease time, whether it's being recalled. */
2584
2585 nfs4_show_superblock(s, file);
2586 seq_printf(s, ", ");
2587 nfs4_show_fname(s, file);
2588 seq_printf(s, " }\n");
2589 nfsd_file_put(file);
2590
2591 return 0;
2592}
2593
2594static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
2595{
2596 struct nfs4_layout_stateid *ls;
2597 struct nfsd_file *file;
2598
2599 ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
2600 file = ls->ls_file;
2601
2602 seq_printf(s, "- ");
2603 nfs4_show_stateid(s, &st->sc_stateid);
2604 seq_printf(s, ": { type: layout, ");
2605
2606 /* XXX: What else would be useful? */
2607
2608 nfs4_show_superblock(s, file);
2609 seq_printf(s, ", ");
2610 nfs4_show_fname(s, file);
2611 seq_printf(s, " }\n");
2612
2613 return 0;
2614}
2615
2616static int states_show(struct seq_file *s, void *v)
2617{
2618 struct nfs4_stid *st = v;
2619
2620 switch (st->sc_type) {
2621 case NFS4_OPEN_STID:
2622 return nfs4_show_open(s, st);
2623 case NFS4_LOCK_STID:
2624 return nfs4_show_lock(s, st);
2625 case NFS4_DELEG_STID:
2626 return nfs4_show_deleg(s, st);
2627 case NFS4_LAYOUT_STID:
2628 return nfs4_show_layout(s, st);
2629 default:
2630 return 0; /* XXX: or SEQ_SKIP? */
2631 }
2632 /* XXX: copy stateids? */
2633}
2634
2635static struct seq_operations states_seq_ops = {
2636 .start = states_start,
2637 .next = states_next,
2638 .stop = states_stop,
2639 .show = states_show
2640};
2641
2642static int client_states_open(struct inode *inode, struct file *file)
2643{
2644 struct seq_file *s;
2645 struct nfs4_client *clp;
2646 int ret;
2647
2648 clp = get_nfsdfs_clp(inode);
2649 if (!clp)
2650 return -ENXIO;
2651
2652 ret = seq_open(file, &states_seq_ops);
2653 if (ret)
2654 return ret;
2655 s = file->private_data;
2656 s->private = clp;
2657 return 0;
2658}
2659
2660static int client_opens_release(struct inode *inode, struct file *file)
2661{
2662 struct seq_file *m = file->private_data;
2663 struct nfs4_client *clp = m->private;
2664
2665 /* XXX: alternatively, we could get/drop in seq start/stop */
2666 drop_client(clp);
2667 return 0;
2668}
2669
2670static const struct file_operations client_states_fops = {
2671 .open = client_states_open,
2672 .read = seq_read,
2673 .llseek = seq_lseek,
2674 .release = client_opens_release,
2675};
2676
2677/*
2678 * Normally we refuse to destroy clients that are in use, but here the
2679 * administrator is telling us to just do it. We also want to wait
2680 * so the caller has a guarantee that the client's locks are gone by
2681 * the time the write returns:
2682 */
2683static void force_expire_client(struct nfs4_client *clp)
2684{
2685 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2686 bool already_expired;
2687
2688 trace_nfsd_clid_admin_expired(&clp->cl_clientid);
2689
2690 spin_lock(&nn->client_lock);
2691 clp->cl_time = 0;
2692 spin_unlock(&nn->client_lock);
2693
2694 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
2695 spin_lock(&nn->client_lock);
2696 already_expired = list_empty(&clp->cl_lru);
2697 if (!already_expired)
2698 unhash_client_locked(clp);
2699 spin_unlock(&nn->client_lock);
2700
2701 if (!already_expired)
2702 expire_client(clp);
2703 else
2704 wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
2705}
2706
2707static ssize_t client_ctl_write(struct file *file, const char __user *buf,
2708 size_t size, loff_t *pos)
2709{
2710 char *data;
2711 struct nfs4_client *clp;
2712
2713 data = simple_transaction_get(file, buf, size);
2714 if (IS_ERR(data))
2715 return PTR_ERR(data);
2716 if (size != 7 || 0 != memcmp(data, "expire\n", 7))
2717 return -EINVAL;
2718 clp = get_nfsdfs_clp(file_inode(file));
2719 if (!clp)
2720 return -ENXIO;
2721 force_expire_client(clp);
2722 drop_client(clp);
2723 return 7;
2724}
2725
2726static const struct file_operations client_ctl_fops = {
2727 .write = client_ctl_write,
2728 .release = simple_transaction_release,
2729};
2730
2731static const struct tree_descr client_files[] = {
2732 [0] = {"info", &client_info_fops, S_IRUSR},
2733 [1] = {"states", &client_states_fops, S_IRUSR},
2734 [2] = {"ctl", &client_ctl_fops, S_IWUSR},
2735 [3] = {""},
2736};
2737
2738static struct nfs4_client *create_client(struct xdr_netobj name,
2739 struct svc_rqst *rqstp, nfs4_verifier *verf)
2740{
2741 struct nfs4_client *clp;
2742 struct sockaddr *sa = svc_addr(rqstp);
2743 int ret;
2744 struct net *net = SVC_NET(rqstp);
2745 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2746 struct dentry *dentries[ARRAY_SIZE(client_files)];
2747
2748 clp = alloc_client(name);
2749 if (clp == NULL)
2750 return NULL;
2751
2752 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2753 if (ret) {
2754 free_client(clp);
2755 return NULL;
2756 }
2757 gen_clid(clp, nn);
2758 kref_init(&clp->cl_nfsdfs.cl_ref);
2759 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
2760 clp->cl_time = ktime_get_boottime_seconds();
2761 clear_bit(0, &clp->cl_cb_slot_busy);
2762 copy_verf(clp, verf);
2763 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
2764 clp->cl_cb_session = NULL;
2765 clp->net = net;
2766 clp->cl_nfsd_dentry = nfsd_client_mkdir(
2767 nn, &clp->cl_nfsdfs,
2768 clp->cl_clientid.cl_id - nn->clientid_base,
2769 client_files, dentries);
2770 clp->cl_nfsd_info_dentry = dentries[0];
2771 if (!clp->cl_nfsd_dentry) {
2772 free_client(clp);
2773 return NULL;
2774 }
2775 return clp;
2776}
2777
2778static void
2779add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
2780{
2781 struct rb_node **new = &(root->rb_node), *parent = NULL;
2782 struct nfs4_client *clp;
2783
2784 while (*new) {
2785 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2786 parent = *new;
2787
2788 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2789 new = &((*new)->rb_left);
2790 else
2791 new = &((*new)->rb_right);
2792 }
2793
2794 rb_link_node(&new_clp->cl_namenode, parent, new);
2795 rb_insert_color(&new_clp->cl_namenode, root);
2796}
2797
2798static struct nfs4_client *
2799find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2800{
2801 int cmp;
2802 struct rb_node *node = root->rb_node;
2803 struct nfs4_client *clp;
2804
2805 while (node) {
2806 clp = rb_entry(node, struct nfs4_client, cl_namenode);
2807 cmp = compare_blob(&clp->cl_name, name);
2808 if (cmp > 0)
2809 node = node->rb_left;
2810 else if (cmp < 0)
2811 node = node->rb_right;
2812 else
2813 return clp;
2814 }
2815 return NULL;
2816}
2817
2818static void
2819add_to_unconfirmed(struct nfs4_client *clp)
2820{
2821 unsigned int idhashval;
2822 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2823
2824 lockdep_assert_held(&nn->client_lock);
2825
2826 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2827 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
2828 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2829 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
2830 renew_client_locked(clp);
2831}
2832
2833static void
2834move_to_confirmed(struct nfs4_client *clp)
2835{
2836 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2837 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2838
2839 lockdep_assert_held(&nn->client_lock);
2840
2841 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
2842 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2843 add_clp_to_name_tree(clp, &nn->conf_name_tree);
2844 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2845 trace_nfsd_clid_confirmed(&clp->cl_clientid);
2846 renew_client_locked(clp);
2847}
2848
2849static struct nfs4_client *
2850find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
2851{
2852 struct nfs4_client *clp;
2853 unsigned int idhashval = clientid_hashval(clid->cl_id);
2854
2855 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
2856 if (same_clid(&clp->cl_clientid, clid)) {
2857 if ((bool)clp->cl_minorversion != sessions)
2858 return NULL;
2859 renew_client_locked(clp);
2860 return clp;
2861 }
2862 }
2863 return NULL;
2864}
2865
2866static struct nfs4_client *
2867find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2868{
2869 struct list_head *tbl = nn->conf_id_hashtbl;
2870
2871 lockdep_assert_held(&nn->client_lock);
2872 return find_client_in_id_table(tbl, clid, sessions);
2873}
2874
2875static struct nfs4_client *
2876find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2877{
2878 struct list_head *tbl = nn->unconf_id_hashtbl;
2879
2880 lockdep_assert_held(&nn->client_lock);
2881 return find_client_in_id_table(tbl, clid, sessions);
2882}
2883
2884static bool clp_used_exchangeid(struct nfs4_client *clp)
2885{
2886 return clp->cl_exchange_flags != 0;
2887}
2888
2889static struct nfs4_client *
2890find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2891{
2892 lockdep_assert_held(&nn->client_lock);
2893 return find_clp_in_name_tree(name, &nn->conf_name_tree);
2894}
2895
2896static struct nfs4_client *
2897find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2898{
2899 lockdep_assert_held(&nn->client_lock);
2900 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
2901}
2902
2903static void
2904gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
2905{
2906 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
2907 struct sockaddr *sa = svc_addr(rqstp);
2908 u32 scopeid = rpc_get_scope_id(sa);
2909 unsigned short expected_family;
2910
2911 /* Currently, we only support tcp and tcp6 for the callback channel */
2912 if (se->se_callback_netid_len == 3 &&
2913 !memcmp(se->se_callback_netid_val, "tcp", 3))
2914 expected_family = AF_INET;
2915 else if (se->se_callback_netid_len == 4 &&
2916 !memcmp(se->se_callback_netid_val, "tcp6", 4))
2917 expected_family = AF_INET6;
2918 else
2919 goto out_err;
2920
2921 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
2922 se->se_callback_addr_len,
2923 (struct sockaddr *)&conn->cb_addr,
2924 sizeof(conn->cb_addr));
2925
2926 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
2927 goto out_err;
2928
2929 if (conn->cb_addr.ss_family == AF_INET6)
2930 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
2931
2932 conn->cb_prog = se->se_callback_prog;
2933 conn->cb_ident = se->se_callback_ident;
2934 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
2935 trace_nfsd_cb_args(clp, conn);
2936 return;
2937out_err:
2938 conn->cb_addr.ss_family = AF_UNSPEC;
2939 conn->cb_addrlen = 0;
2940 trace_nfsd_cb_nodelegs(clp);
2941 return;
2942}
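
/*
 * A sketch of the parsing above with hypothetical values: a client
 * sending
 *
 *	se_callback_netid_val = "tcp"
 *	se_callback_addr_val  = "192.0.2.10.3.255"
 *
 * yields AF_INET with port 3 * 256 + 255 == 1023, since an RFC 5665
 * universal address carries the port in the final two dotted octets.
 */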
2943
2944/*
2945 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
2946 */
2947static void
2948nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
2949{
2950 struct xdr_buf *buf = resp->xdr->buf;
2951 struct nfsd4_slot *slot = resp->cstate.slot;
2952 unsigned int base;
2953
2954 dprintk("--> %s slot %p\n", __func__, slot);
2955
2956 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
2957 slot->sl_opcnt = resp->opcnt;
2958 slot->sl_status = resp->cstate.status;
2959 free_svc_cred(&slot->sl_cred);
2960 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
2961
2962 if (!nfsd4_cache_this(resp)) {
2963 slot->sl_flags &= ~NFSD4_SLOT_CACHED;
2964 return;
2965 }
2966 slot->sl_flags |= NFSD4_SLOT_CACHED;
2967
2968 base = resp->cstate.data_offset;
2969 slot->sl_datalen = buf->len - base;
2970 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
2971 WARN(1, "%s: sessions DRC could not cache compound\n",
2972 __func__);
2973 return;
2974}
2975
2976/*
2977 * Encode the replay sequence operation from the slot values.
2978 * If cachethis is FALSE, encode the uncached-reply error on the next
2979 * operation; this sets resp->p and increments resp->opcnt for
2980 * nfs4svc_encode_compoundres.
2981 *
2982 */
2983static __be32
2984nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
2985 struct nfsd4_compoundres *resp)
2986{
2987 struct nfsd4_op *op;
2988 struct nfsd4_slot *slot = resp->cstate.slot;
2989
2990 /* Encode the replayed sequence operation */
2991 op = &args->ops[resp->opcnt - 1];
2992 nfsd4_encode_operation(resp, op);
2993
2994 if (slot->sl_flags & NFSD4_SLOT_CACHED)
2995 return op->status;
2996 if (args->opcnt == 1) {
2997 /*
2998 * The original operation wasn't a solo sequence--we
2999 * always cache those--so this retry must not match the
3000 * original:
3001 */
3002 op->status = nfserr_seq_false_retry;
3003 } else {
3004 op = &args->ops[resp->opcnt++];
3005 op->status = nfserr_retry_uncached_rep;
3006 nfsd4_encode_operation(resp, op);
3007 }
3008 return op->status;
3009}
3010
3011/*
3012 * The sequence operation is not cached because we can use the slot and
3013 * session values.
3014 */
3015static __be32
3016nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
3017 struct nfsd4_sequence *seq)
3018{
3019 struct nfsd4_slot *slot = resp->cstate.slot;
3020 struct xdr_stream *xdr = resp->xdr;
3021 __be32 *p;
3022 __be32 status;
3023
3024 dprintk("--> %s slot %p\n", __func__, slot);
3025
3026 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
3027 if (status)
3028 return status;
3029
3030 p = xdr_reserve_space(xdr, slot->sl_datalen);
3031 if (!p) {
3032 WARN_ON_ONCE(1);
3033 return nfserr_serverfault;
3034 }
3035 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
3036 xdr_commit_encode(xdr);
3037
3038 resp->opcnt = slot->sl_opcnt;
3039 return slot->sl_status;
3040}
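
/*
 * End to end, then, a retransmitted compound that hits the replay
 * cache is served as follows: check_slot_seqid() returns
 * nfserr_replay_cache for seqid == slot_seqid, the SEQUENCE op is
 * re-encoded from slot and session state, and (for a cached compound)
 * the remaining sl_datalen bytes are replayed verbatim from sl_data.
 */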
3041
3042/*
3043 * Set the exchange_id flags returned by the server.
3044 */
3045static void
3046nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
3047{
3048#ifdef CONFIG_NFSD_PNFS
3049 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
3050#else
3051 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
3052#endif
3053
3054 /* Referrals are supported, Migration is not. */
3055 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
3056
3057 /* set the wire flags to return to client. */
3058 clid->flags = new->cl_exchange_flags;
3059}
3060
3061static bool client_has_openowners(struct nfs4_client *clp)
3062{
3063 struct nfs4_openowner *oo;
3064
3065 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
3066 if (!list_empty(&oo->oo_owner.so_stateids))
3067 return true;
3068 }
3069 return false;
3070}
3071
3072static bool client_has_state(struct nfs4_client *clp)
3073{
3074 return client_has_openowners(clp)
3075#ifdef CONFIG_NFSD_PNFS
3076 || !list_empty(&clp->cl_lo_states)
3077#endif
3078 || !list_empty(&clp->cl_delegations)
3079 || !list_empty(&clp->cl_sessions)
3080 || !list_empty(&clp->async_copies);
3081}
3082
3083static __be32 copy_impl_id(struct nfs4_client *clp,
3084 struct nfsd4_exchange_id *exid)
3085{
3086 if (!exid->nii_domain.data)
3087 return 0;
3088 xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
3089 if (!clp->cl_nii_domain.data)
3090 return nfserr_jukebox;
3091 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
3092 if (!clp->cl_nii_name.data)
3093 return nfserr_jukebox;
3094 clp->cl_nii_time = exid->nii_time;
3095 return 0;
3096}
3097
3098__be32
3099nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3100 union nfsd4_op_u *u)
3101{
3102 struct nfsd4_exchange_id *exid = &u->exchange_id;
3103 struct nfs4_client *conf, *new;
3104 struct nfs4_client *unconf = NULL;
3105 __be32 status;
3106 char addr_str[INET6_ADDRSTRLEN];
3107 nfs4_verifier verf = exid->verifier;
3108 struct sockaddr *sa = svc_addr(rqstp);
3109 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
3110 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3111
3112 rpc_ntop(sa, addr_str, sizeof(addr_str));
3113 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
3114 "ip_addr=%s flags %x, spa_how %u\n",
3115 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
3116 addr_str, exid->flags, exid->spa_how);
3117
3118 if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
3119 return nfserr_inval;
3120
3121 new = create_client(exid->clname, rqstp, &verf);
3122 if (new == NULL)
3123 return nfserr_jukebox;
3124 status = copy_impl_id(new, exid);
3125 if (status)
3126 goto out_nolock;
3127
3128 switch (exid->spa_how) {
3129 case SP4_MACH_CRED:
3130 exid->spo_must_enforce[0] = 0;
3131 exid->spo_must_enforce[1] = (
3132 1 << (OP_BIND_CONN_TO_SESSION - 32) |
3133 1 << (OP_EXCHANGE_ID - 32) |
3134 1 << (OP_CREATE_SESSION - 32) |
3135 1 << (OP_DESTROY_SESSION - 32) |
3136 1 << (OP_DESTROY_CLIENTID - 32));
3137
3138 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
3139 1 << (OP_OPEN_DOWNGRADE) |
3140 1 << (OP_LOCKU) |
3141 1 << (OP_DELEGRETURN));
3142
3143 exid->spo_must_allow[1] &= (
3144 1 << (OP_TEST_STATEID - 32) |
3145 1 << (OP_FREE_STATEID - 32));
3146 if (!svc_rqst_integrity_protected(rqstp)) {
3147 status = nfserr_inval;
3148 goto out_nolock;
3149 }
3150 /*
3151 * Sometimes userspace doesn't give us a principal.
3152 * That's really a userspace bug. Anyway, since we can't
3153 * enforce MACH_CRED in that case, it's better to give up now:
3154 */
3155 if (!new->cl_cred.cr_principal &&
3156 !new->cl_cred.cr_raw_principal) {
3157 status = nfserr_serverfault;
3158 goto out_nolock;
3159 }
3160 new->cl_mach_cred = true;
3161 break;
3162 case SP4_NONE:
3163 break;
3164 default: /* checked by xdr code */
3165 WARN_ON_ONCE(1);
3166 fallthrough;
3167 case SP4_SSV:
3168 status = nfserr_encr_alg_unsupp;
3169 goto out_nolock;
3170 }
3171
	/* Cases below refer to rfc 5661 section 18.35.4: */
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&exid->clname, nn);
	if (conf) {
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

		if (update) {
			if (!clp_used_exchangeid(conf)) { /* buggy client */
				status = nfserr_inval;
				goto out;
			}
			if (!nfsd4_mach_creds_match(conf, rqstp)) {
				status = nfserr_wrong_cred;
				goto out;
			}
			if (!creds_match) { /* case 9 */
				status = nfserr_perm;
				goto out;
			}
			if (!verfs_match) { /* case 8 */
				status = nfserr_not_same;
				goto out;
			}
			/* case 6 */
			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
			trace_nfsd_clid_confirmed_r(conf);
			goto out_copy;
		}
		if (!creds_match) { /* case 3 */
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
				trace_nfsd_clid_cred_mismatch(conf, rqstp);
				goto out;
			}
			goto out_new;
		}
		if (verfs_match) { /* case 2 */
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
			trace_nfsd_clid_confirmed_r(conf);
			goto out_copy;
		}
		/* case 5, client reboot */
		trace_nfsd_clid_verf_mismatch(conf, rqstp, &verf);
		conf = NULL;
		goto out_new;
	}

	if (update) { /* case 7 */
		status = nfserr_noent;
		goto out;
	}

	unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
	if (unconf) /* case 4, possible retry or client restart */
		unhash_client_locked(unconf);

	/* case 1, new owner ID */
	trace_nfsd_clid_fresh(new);

out_new:
	if (conf) {
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
		trace_nfsd_clid_replaced(&conf->cl_clientid);
	}
	new->cl_minorversion = cstate->minorversion;
	new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
	new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];

	add_to_unconfirmed(new);
	swap(new, conf);
out_copy:
	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
	exid->clientid.cl_id = conf->cl_clientid.cl_id;

	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
	nfsd4_set_ex_flags(conf, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
	status = nfs_ok;

out:
	spin_unlock(&nn->client_lock);
out_nolock:
	if (new)
		expire_client(new);
	if (unconf) {
		trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
		expire_client(unconf);
	}
	return status;
}

static __be32
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
		slot_seqid);

	/* The slot is in use, and no response has been sent. */
	if (slot_inuse) {
		if (seqid == slot_seqid)
			return nfserr_jukebox;
		else
			return nfserr_seq_misordered;
	}
	/* Note unsigned 32-bit arithmetic handles wraparound: */
	if (likely(seqid == slot_seqid + 1))
		return nfs_ok;
	if (seqid == slot_seqid)
		return nfserr_replay_cache;
	return nfserr_seq_misordered;
}

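/*
 * For illustration (derived purely from the checks above, added for
 * clarity): with slot_seqid == 7, a request carrying seqid 8 is the
 * next in sequence (nfs_ok), seqid 7 is a retransmission
 * (nfserr_replay_cache), and anything else is nfserr_seq_misordered.
 * While the slot is still in use, a retransmission of seqid 7 instead
 * gets nfserr_jukebox ("try again later").  Because the arithmetic is
 * unsigned 32-bit, slot_seqid == 0xffffffff followed by seqid 0 also
 * counts as in sequence.
 */
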
/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure. sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, __be32 nfserr)
{
	slot->sl_status = nfserr;
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}

static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}

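/*
 * Added note: for a confirmed client, a retransmitted CREATE_SESSION
 * hits check_slot_seqid() with an unchanged sl_seqid, which returns
 * nfserr_replay_cache; nfsd4_create_session() then answers from this
 * one-entry cache via nfsd4_replay_create_session() instead of
 * building a second session.
 */
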
#define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* version, opcount, opcode */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
			/* seqid, slotID, slotID, cache */ \
			4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
			2 +	/* verifier: AUTH_NULL, length 0 */ \
			1 +	/* status */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* opcount, opcode, opstatus */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
			/* seqid, slotID, slotID, slotID, status */ \
			5 ) * sizeof(__be32))

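/*
 * Added worked example: a v4.1 sessionid is NFS4_MAX_SESSIONID_LEN
 * (16) bytes, so XDR_QUADLEN() contributes 4 words, and both minima
 * come to (4 + 1 + 3 + 4 + 4) * 4 == 64 bytes for the request header
 * and (2 + 1 + 1 + 3 + 4 + 5) * 4 == 64 bytes for the reply header.
 * A channel smaller than this cannot carry even a bare SEQUENCE,
 * hence the nfserr_toosmall checks below.
 */
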
static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;

	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
		return nfserr_toosmall;
	ca->headerpadsz = 0;
	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
	/*
	 * Note decreasing slot size below client's request may make it
	 * difficult for client to function correctly, whereas
	 * decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease number of slots instead of their size.  Clients that
	 * request larger slots than they need will get poor results:
	 * Note that we always allow at least one slot, because our
	 * accounting is soft and provides no guarantees either way.
	 */
	ca->maxreqs = nfsd4_get_drc_mem(ca, nn);

	return nfs_ok;
}

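/*
 * Added note: the negotiation above is strictly downward.  Every
 * attribute is only ever clamped (min_t) against the server's limits,
 * never raised, so the values returned in the CREATE_SESSION reply
 * are always less than or equal to what the client offered.
 */
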
/*
 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
 * These are based on similar macros in linux/sunrpc/msg_prot.h .
 */
#define RPC_MAX_HEADER_WITH_AUTH_SYS \
	(RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))

#define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
	(RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))

#define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
				 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
#define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
				 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
				 sizeof(__be32))

static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
{
	ca->headerpadsz = 0;

	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
		return nfserr_toosmall;
	ca->maxresp_cached = 0;
	if (ca->maxops < 2)
		return nfserr_toosmall;

	return nfs_ok;
}

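/*
 * Added note: the "maxops < 2" check above reflects that every
 * callback compound the server sends is a CB_SEQUENCE plus at least
 * one further operation (e.g. CB_RECALL), so a backchannel limited to
 * a single operation per compound would be unusable.
 */
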
static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
{
	switch (cbs->flavor) {
	case RPC_AUTH_NULL:
	case RPC_AUTH_UNIX:
		return nfs_ok;
	default:
		/*
		 * GSS case: the spec doesn't allow us to return this
		 * error.  But it also doesn't allow us not to support
		 * GSS.
		 * I'd rather this fail hard than return some error the
		 * client might think it can already handle:
		 */
		return nfserr_encr_alg_unsupp;
	}
}

__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
	struct nfsd4_create_session *cr_ses = &u->create_session;
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	struct nfsd4_session *new;
	struct nfsd4_conn *conn;
	struct nfsd4_clid_slot *cs_slot = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;
	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
	if (status)
		return status;
	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
	if (status)
		return status;
	status = check_backchannel_attrs(&cr_ses->back_channel);
	if (status)
		goto out_release_drc_mem;
	status = nfserr_jukebox;
	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
	if (!new)
		goto out_release_drc_mem;
	conn = alloc_conn_from_crses(rqstp, cr_ses);
	if (!conn)
		goto out_free_session;

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		status = nfserr_wrong_cred;
		if (!nfsd4_mach_creds_match(conf, rqstp))
			goto out_free_conn;
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			if (status == nfserr_replay_cache)
				status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out_free_conn;
		}
	} else if (unconf) {
		status = nfserr_clid_inuse;
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			trace_nfsd_clid_cred_mismatch(unconf, rqstp);
			goto out_free_conn;
		}
		status = nfserr_wrong_cred;
		if (!nfsd4_mach_creds_match(unconf, rqstp))
			goto out_free_conn;
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out_free_conn;
			}
			trace_nfsd_clid_replaced(&old->cl_clientid);
		}
		move_to_confirmed(unconf);
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out_free_conn;
	}
	status = nfs_ok;
	/* Persistent sessions are not supported */
	cr_ses->flags &= ~SESSION4_PERSIST;
	/* Upshifting from TCP to RDMA is not supported */
	cr_ses->flags &= ~SESSION4_RDMA;

	init_session(rqstp, new, conf, cr_ses);
	nfsd4_get_session_locked(new);

	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the client_lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	spin_unlock(&nn->client_lock);
	if (conf == unconf)
		fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
	/* init connection and backchannel */
	nfsd4_init_conn(rqstp, conn, new);
	nfsd4_put_session(new);
	if (old)
		expire_client(old);
	return status;
out_free_conn:
	spin_unlock(&nn->client_lock);
	free_conn(conn);
	if (old)
		expire_client(old);
out_free_session:
	__free_session(new);
out_release_drc_mem:
	nfsd4_put_drc_mem(&cr_ses->fore_channel);
	return status;
}

static __be32 nfsd4_map_bcts_dir(u32 *dir)
{
	switch (*dir) {
	case NFS4_CDFC4_FORE:
	case NFS4_CDFC4_BACK:
		return nfs_ok;
	case NFS4_CDFC4_FORE_OR_BOTH:
	case NFS4_CDFC4_BACK_OR_BOTH:
		*dir = NFS4_CDFC4_BOTH;
		return nfs_ok;
	}
	return nfserr_inval;
}

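/*
 * Added summary of the mapping above (the *_OR_BOTH values leave the
 * final choice to the server, and we always choose both directions):
 *
 *	NFS4_CDFC4_FORE          -> unchanged
 *	NFS4_CDFC4_BACK          -> unchanged
 *	NFS4_CDFC4_FORE_OR_BOTH  -> NFS4_CDFC4_BOTH
 *	NFS4_CDFC4_BACK_OR_BOTH  -> NFS4_CDFC4_BOTH
 *	anything else            -> nfserr_inval
 */
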
__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
	struct nfsd4_session *session = cstate->session;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	__be32 status;

	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
	if (status)
		return status;
	spin_lock(&nn->client_lock);
	session->se_cb_prog = bc->bc_cb_program;
	session->se_cb_sec = bc->bc_cb_sec;
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback(session->se_client);

	return nfs_ok;
}

static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
{
	struct nfsd4_conn *c;

	list_for_each_entry(c, &s->se_conns, cn_persession) {
		if (c->cn_xprt == xpt) {
			return c;
		}
	}
	return NULL;
}

static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
		struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
{
	struct nfs4_client *clp = session->se_client;
	struct svc_xprt *xpt = rqst->rq_xprt;
	struct nfsd4_conn *c;
	__be32 status;

	/* Following the last paragraph of RFC 5661 Section 18.34.3: */
	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(xpt, session);
	if (!c)
		status = nfserr_noent;
	else if (req == c->cn_flags)
		status = nfs_ok;
	else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
				c->cn_flags != NFS4_CDFC4_BACK)
		status = nfs_ok;
	else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
				c->cn_flags != NFS4_CDFC4_FORE)
		status = nfs_ok;
	else
		status = nfserr_inval;
	spin_unlock(&clp->cl_lock);
	if (status == nfs_ok && conn)
		*conn = c;
	return status;
}

__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
	__be32 status;
	struct nfsd4_conn *conn;
	struct nfsd4_session *session;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
	spin_unlock(&nn->client_lock);
	if (!session)
		goto out_no_session;
	status = nfserr_wrong_cred;
	if (!nfsd4_mach_creds_match(session->se_client, rqstp))
		goto out;
	status = nfsd4_match_existing_connection(rqstp, session,
			bcts->dir, &conn);
	if (status == nfs_ok) {
		if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
				bcts->dir == NFS4_CDFC4_BACK)
			conn->cn_flags |= NFS4_CDFC4_BACK;
		nfsd4_probe_callback(session->se_client);
		goto out;
	}
	if (status == nfserr_inval)
		goto out;
	status = nfsd4_map_bcts_dir(&bcts->dir);
	if (status)
		goto out;
	conn = alloc_conn(rqstp, bcts->dir);
	status = nfserr_jukebox;
	if (!conn)
		goto out;
	nfsd4_init_conn(rqstp, conn, session);
	status = nfs_ok;
out:
	nfsd4_put_session(session);
out_no_session:
	return status;
}

static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
{
	if (!cstate->session)
		return false;
	return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
}

__be32
nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
	struct nfsd4_session *ses;
	__be32 status;
	int ref_held_by_me = 0;
	struct net *net = SVC_NET(r);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	status = nfserr_not_only_op;
	if (nfsd4_compound_in_session(cstate, sessionid)) {
		if (!nfsd4_last_compound_op(r))
			goto out;
		ref_held_by_me++;
	}
	dump_sessionid(__func__, sessionid);
	spin_lock(&nn->client_lock);
	ses = find_in_sessionid_hashtbl(sessionid, net, &status);
	if (!ses)
		goto out_client_lock;
	status = nfserr_wrong_cred;
	if (!nfsd4_mach_creds_match(ses->se_client, r))
		goto out_put_session;
	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
	if (status)
		goto out_put_session;
	unhash_session(ses);
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback_sync(ses->se_client);

	spin_lock(&nn->client_lock);
	status = nfs_ok;
out_put_session:
	nfsd4_put_session_locked(ses);
out_client_lock:
	spin_unlock(&nn->client_lock);
out:
	return status;
}

static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_conn *c;
	__be32 status = nfs_ok;
	int ret;

	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(new->cn_xprt, ses);
	if (c)
		goto out_free;
	status = nfserr_conn_not_bound_to_session;
	if (clp->cl_mach_cred)
		goto out_free;
	__nfsd4_hash_conn(new, ses);
	spin_unlock(&clp->cl_lock);
	ret = nfsd4_register_conn(new);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&new->cn_xpt_user);
	return nfs_ok;
out_free:
	spin_unlock(&clp->cl_lock);
	free_conn(new);
	return status;
}

static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
{
	struct nfsd4_compoundargs *args = rqstp->rq_argp;

	return args->opcnt > session->se_fchannel.maxops;
}

static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
				  struct nfsd4_session *session)
{
	struct xdr_buf *xb = &rqstp->rq_arg;

	return xb->len > session->se_fchannel.maxreq_sz;
}

static bool replay_matches_cache(struct svc_rqst *rqstp,
		struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
{
	struct nfsd4_compoundargs *argp = rqstp->rq_argp;

	if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
	    (bool)seq->cachethis)
		return false;
	/*
	 * If there's an error then the reply can have fewer ops than
	 * the call.
	 */
	if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
		return false;
	/*
	 * But if we cached a reply with *more* ops than the call you're
	 * sending us now, then this new call is clearly not really a
	 * replay of the old one:
	 */
	if (slot->sl_opcnt > argp->opcnt)
		return false;
	/* This is the only check explicitly called by spec: */
	if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
		return false;
	/*
	 * There may be more comparisons we could actually do, but the
	 * spec doesn't require us to catch every case where the calls
	 * don't match (that would require caching the call as well as
	 * the reply), so we don't bother.
	 */
	return true;
}

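/*
 * Added summary: a retransmitted SEQUENCE is only honoured as a
 * replay if it was cached with the same cachethis setting, does not
 * ask for more operations than were cached (unless the cached reply
 * ended early with an error), and arrives under the same credential;
 * anything else is reported to the client as a false retry.
 */
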
__be32
nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	       union nfsd4_op_u *u)
{
	struct nfsd4_sequence *seq = &u->sequence;
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct xdr_stream *xdr = resp->xdr;
	struct nfsd4_session *session;
	struct nfs4_client *clp;
	struct nfsd4_slot *slot;
	struct nfsd4_conn *conn;
	__be32 status;
	int buflen;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (resp->opcnt != 1)
		return nfserr_sequence_pos;

	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 * below.
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
	if (!conn)
		return nfserr_jukebox;

	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
	if (!session)
		goto out_no_session;
	clp = session->se_client;

	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))
		goto out_put_session;

	status = nfserr_req_too_big;
	if (nfsd4_request_too_big(rqstp, session))
		goto out_put_session;

	status = nfserr_badslot;
	if (seq->slotid >= session->se_fchannel.maxreqs)
		goto out_put_session;

	slot = session->se_slots[seq->slotid];
	dprintk("%s: slotid %d\n", __func__, seq->slotid);

	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs, which is used to encode
	 * sr_highest_slotid and the sr_target_slotid to maxslots */
	seq->maxslots = session->se_fchannel.maxreqs;

	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
					slot->sl_flags & NFSD4_SLOT_INUSE);
	if (status == nfserr_replay_cache) {
		status = nfserr_seq_misordered;
		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
			goto out_put_session;
		status = nfserr_seq_false_retry;
		if (!replay_matches_cache(rqstp, seq, slot))
			goto out_put_session;
		cstate->slot = slot;
		cstate->session = session;
		cstate->clp = clp;
		/* Return the cached reply status and set cstate->status
		 * for nfsd4_proc_compound processing */
		status = nfsd4_replay_cache_entry(resp, seq);
		cstate->status = nfserr_replay_cache;
		goto out;
	}
	if (status)
		goto out_put_session;

	status = nfsd4_sequence_check_conn(conn, session);
	conn = NULL;
	if (status)
		goto out_put_session;

	buflen = (seq->cachethis) ?
			session->se_fchannel.maxresp_cached :
			session->se_fchannel.maxresp_sz;
	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
				    nfserr_rep_too_big;
	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
		goto out_put_session;
	svc_reserve(rqstp, buflen);

	status = nfs_ok;
	/* Success! bump slot seqid */
	slot->sl_seqid = seq->seqid;
	slot->sl_flags |= NFSD4_SLOT_INUSE;
	if (seq->cachethis)
		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
	else
		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;

	cstate->slot = slot;
	cstate->session = session;
	cstate->clp = clp;

out:
	switch (clp->cl_cb_state) {
	case NFSD4_CB_DOWN:
		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
		break;
	case NFSD4_CB_FAULT:
		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
		break;
	default:
		seq->status_flags = 0;
	}
	if (!list_empty(&clp->cl_revoked))
		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
out_no_session:
	if (conn)
		free_conn(conn);
	spin_unlock(&nn->client_lock);
	return status;
out_put_session:
	nfsd4_put_session_locked(session);
	goto out_no_session;
}

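/*
 * Added note on connection ownership in nfsd4_sequence(): the
 * nfsd4_conn allocated at the top is handed to
 * nfsd4_sequence_check_conn(), which either hashes it into the
 * session or frees it, and the local pointer is cleared immediately
 * afterward.  The free_conn() on the out_no_session path therefore
 * only fires on the early exits that never reached that handoff.
 */
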
void
nfsd4_sequence_done(struct nfsd4_compoundres *resp)
{
	struct nfsd4_compound_state *cs = &resp->cstate;

	if (nfsd4_has_session(cs)) {
		if (cs->status != nfserr_replay_cache) {
			nfsd4_store_cache_entry(resp);
			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
		}
		/* Drop session reference that was taken in nfsd4_sequence() */
		nfsd4_put_session(cs->session);
	} else if (cs->clp)
		put_client_renew(cs->clp);
}

__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *clp = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	spin_lock(&nn->client_lock);
	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
	conf = find_confirmed_client(&dc->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		if (client_has_state(conf)) {
			status = nfserr_clientid_busy;
			goto out;
		}
		status = mark_client_expired_locked(conf);
		if (status)
			goto out;
		clp = conf;
	} else if (unconf)
		clp = unconf;
	else {
		status = nfserr_stale_clientid;
		goto out;
	}
	if (!nfsd4_mach_creds_match(clp, rqstp)) {
		clp = NULL;
		status = nfserr_wrong_cred;
		goto out;
	}
	trace_nfsd_clid_destroyed(&clp->cl_clientid);
	unhash_client_locked(clp);
out:
	spin_unlock(&nn->client_lock);
	if (clp)
		expire_client(clp);
	return status;
}

__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp,
		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
	struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
	struct nfs4_client *clp = cstate->clp;
	__be32 status = 0;

	if (rc->rca_one_fs) {
		if (!cstate->current_fh.fh_dentry)
			return nfserr_nofilehandle;
		/*
		 * We don't take advantage of the rca_one_fs case.
		 * That's OK, it's optional, we can safely ignore it.
		 */
		return nfs_ok;
	}

	status = nfserr_complete_already;
	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
		goto out;

	status = nfserr_stale_clientid;
	if (is_client_expired(clp))
		/*
		 * The following error isn't really legal.
		 * But we only get here if the client just explicitly
		 * destroyed the clientid.  Surely it no longer cares
		 * what error it gets back on an operation for the dead
		 * client.
		 */
		goto out;

	status = nfs_ok;
	trace_nfsd_clid_reclaim_complete(&clp->cl_clientid);
	nfsd4_client_record_create(clp);
	inc_reclaim_complete(clp);
out:
	return status;
}

__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  union nfsd4_op_u *u)
{
	struct nfsd4_setclientid *setclid = &u->setclientid;
	struct xdr_netobj clname = setclid->se_name;
	nfs4_verifier clverifier = setclid->se_verf;
	struct nfs4_client *conf, *new;
	struct nfs4_client *unconf = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new = create_client(clname, rqstp, &clverifier);
	if (new == NULL)
		return nfserr_jukebox;
	spin_lock(&nn->client_lock);
	conf = find_confirmed_client_by_name(&clname, nn);
	if (conf && client_has_state(conf)) {
		status = nfserr_clid_inuse;
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			trace_nfsd_clid_cred_mismatch(conf, rqstp);
			goto out;
		}
	}
	unconf = find_unconfirmed_client_by_name(&clname, nn);
	if (unconf)
		unhash_client_locked(unconf);
	if (conf) {
		if (same_verf(&conf->cl_verifier, &clverifier)) {
			copy_clid(new, conf);
			gen_confirm(new, nn);
		} else
			trace_nfsd_clid_verf_mismatch(conf, rqstp,
						      &clverifier);
	} else
		trace_nfsd_clid_fresh(new);
	new->cl_minorversion = 0;
	gen_callback(new, setclid, rqstp);
	add_to_unconfirmed(new);
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
	new = NULL;
	status = nfs_ok;
out:
	spin_unlock(&nn->client_lock);
	if (new)
		free_client(new);
	if (unconf) {
		trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
		expire_client(unconf);
	}
	return status;
}

__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			union nfsd4_op_u *u)
{
	struct nfsd4_setclientid_confirm *setclientid_confirm =
			&u->setclientid_confirm;
	struct nfs4_client *conf, *unconf;
	struct nfs4_client *old = NULL;
	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
	clientid_t *clid = &setclientid_confirm->sc_clientid;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	spin_lock(&nn->client_lock);
	conf = find_confirmed_client(clid, false, nn);
	unconf = find_unconfirmed_client(clid, false, nn);
	/*
	 * We try hard to give out unique clientids, so if we get an
	 * attempt to confirm the same clientid with a different cred,
	 * the client may be buggy; this should never happen.
	 *
	 * Nevertheless, RFC 7530 recommends INUSE for this case:
	 */
	status = nfserr_clid_inuse;
	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
		trace_nfsd_clid_cred_mismatch(unconf, rqstp);
		goto out;
	}
	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
		trace_nfsd_clid_cred_mismatch(conf, rqstp);
		goto out;
	}
	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
		if (conf && same_verf(&confirm, &conf->cl_confirm)) {
			status = nfs_ok;
		} else
			status = nfserr_stale_clientid;
		goto out;
	}
	status = nfs_ok;
	if (conf) {
		old = unconf;
		unhash_client_locked(old);
		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
	} else {
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = nfserr_clid_inuse;
			if (client_has_state(old)
					&& !same_creds(&unconf->cl_cred,
							&old->cl_cred))
				goto out;
			status = mark_client_expired_locked(old);
			if (status) {
				old = NULL;
				goto out;
			}
			trace_nfsd_clid_replaced(&old->cl_clientid);
		}
		move_to_confirmed(unconf);
		conf = unconf;
	}
	get_client_locked(conf);
	spin_unlock(&nn->client_lock);
	if (conf == unconf)
		fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
	nfsd4_probe_callback(conf);
	spin_lock(&nn->client_lock);
	put_client_renew_locked(conf);
out:
	spin_unlock(&nn->client_lock);
	if (old)
		expire_client(old);
	return status;
}

static struct nfs4_file *nfsd4_alloc_file(void)
{
	return kmem_cache_alloc(file_slab, GFP_KERNEL);
}

/* OPEN Share state helper functions */
static void nfsd4_init_file(struct svc_fh *fh, unsigned int hashval,
			    struct nfs4_file *fp)
{
	lockdep_assert_held(&state_lock);

	refcount_set(&fp->fi_ref, 1);
	spin_lock_init(&fp->fi_lock);
	INIT_LIST_HEAD(&fp->fi_stateids);
	INIT_LIST_HEAD(&fp->fi_delegations);
	INIT_LIST_HEAD(&fp->fi_clnt_odstate);
	fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle);
	fp->fi_deleg_file = NULL;
	fp->fi_had_conflict = false;
	fp->fi_share_deny = 0;
	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
	memset(fp->fi_access, 0, sizeof(fp->fi_access));
	fp->fi_aliased = false;
	fp->fi_inode = d_inode(fh->fh_dentry);
#ifdef CONFIG_NFSD_PNFS
	INIT_LIST_HEAD(&fp->fi_lo_states);
	atomic_set(&fp->fi_lo_recalls, 0);
#endif
	hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
}

void
nfsd4_free_slabs(void)
{
	kmem_cache_destroy(client_slab);
	kmem_cache_destroy(openowner_slab);
	kmem_cache_destroy(lockowner_slab);
	kmem_cache_destroy(file_slab);
	kmem_cache_destroy(stateid_slab);
	kmem_cache_destroy(deleg_slab);
	kmem_cache_destroy(odstate_slab);
}

int
nfsd4_init_slabs(void)
{
	client_slab = kmem_cache_create("nfsd4_clients",
			sizeof(struct nfs4_client), 0, 0, NULL);
	if (client_slab == NULL)
		goto out;
	openowner_slab = kmem_cache_create("nfsd4_openowners",
			sizeof(struct nfs4_openowner), 0, 0, NULL);
	if (openowner_slab == NULL)
		goto out_free_client_slab;
	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
			sizeof(struct nfs4_lockowner), 0, 0, NULL);
	if (lockowner_slab == NULL)
		goto out_free_openowner_slab;
	file_slab = kmem_cache_create("nfsd4_files",
			sizeof(struct nfs4_file), 0, 0, NULL);
	if (file_slab == NULL)
		goto out_free_lockowner_slab;
	stateid_slab = kmem_cache_create("nfsd4_stateids",
			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
	if (stateid_slab == NULL)
		goto out_free_file_slab;
	deleg_slab = kmem_cache_create("nfsd4_delegations",
			sizeof(struct nfs4_delegation), 0, 0, NULL);
	if (deleg_slab == NULL)
		goto out_free_stateid_slab;
	odstate_slab = kmem_cache_create("nfsd4_odstate",
			sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
	if (odstate_slab == NULL)
		goto out_free_deleg_slab;
	return 0;

out_free_deleg_slab:
	kmem_cache_destroy(deleg_slab);
out_free_stateid_slab:
	kmem_cache_destroy(stateid_slab);
out_free_file_slab:
	kmem_cache_destroy(file_slab);
out_free_lockowner_slab:
	kmem_cache_destroy(lockowner_slab);
out_free_openowner_slab:
	kmem_cache_destroy(openowner_slab);
out_free_client_slab:
	kmem_cache_destroy(client_slab);
out:
	return -ENOMEM;
}

static void init_nfs4_replay(struct nfs4_replay *rp)
{
	rp->rp_status = nfserr_serverfault;
	rp->rp_buflen = 0;
	rp->rp_buf = rp->rp_ibuf;
	mutex_init(&rp->rp_mutex);
}

static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
		struct nfs4_stateowner *so)
{
	if (!nfsd4_has_session(cstate)) {
		mutex_lock(&so->so_replay.rp_mutex);
		cstate->replay_owner = nfs4_get_stateowner(so);
	}
}

void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (so != NULL) {
		cstate->replay_owner = NULL;
		mutex_unlock(&so->so_replay.rp_mutex);
		nfs4_put_stateowner(so);
	}
}

static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
{
	struct nfs4_stateowner *sop;

	sop = kmem_cache_alloc(slab, GFP_KERNEL);
	if (!sop)
		return NULL;

	xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
	if (!sop->so_owner.data) {
		kmem_cache_free(slab, sop);
		return NULL;
	}

	INIT_LIST_HEAD(&sop->so_stateids);
	sop->so_client = clp;
	init_nfs4_replay(&sop->so_replay);
	atomic_set(&sop->so_count, 1);
	return sop;
}

static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
{
	lockdep_assert_held(&clp->cl_lock);

	list_add(&oo->oo_owner.so_strhash,
		 &clp->cl_ownerstr_hashtbl[strhashval]);
	list_add(&oo->oo_perclient, &clp->cl_openowners);
}

static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
{
	unhash_openowner_locked(openowner(so));
}

static void nfs4_free_openowner(struct nfs4_stateowner *so)
{
	struct nfs4_openowner *oo = openowner(so);

	kmem_cache_free(openowner_slab, oo);
}

static const struct nfs4_stateowner_operations openowner_ops = {
	.so_unhash = nfs4_unhash_openowner,
	.so_free = nfs4_free_openowner,
};

static struct nfs4_ol_stateid *
nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_ol_stateid *local, *ret = NULL;
	struct nfs4_openowner *oo = open->op_openowner;

	lockdep_assert_held(&fp->fi_lock);

	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
		/* ignore lock owners */
		if (local->st_stateowner->so_is_open_owner == 0)
			continue;
		if (local->st_stateowner != &oo->oo_owner)
			continue;
		if (local->st_stid.sc_type == NFS4_OPEN_STID) {
			ret = local;
			refcount_inc(&ret->st_stid.sc_count);
			break;
		}
	}
	return ret;
}

static __be32
nfsd4_verify_open_stid(struct nfs4_stid *s)
{
	__be32 ret = nfs_ok;

	switch (s->sc_type) {
	default:
		break;
	case 0:
	case NFS4_CLOSED_STID:
	case NFS4_CLOSED_DELEG_STID:
		ret = nfserr_bad_stateid;
		break;
	case NFS4_REVOKED_DELEG_STID:
		ret = nfserr_deleg_revoked;
	}
	return ret;
}

/* Lock the stateid st_mutex, and deal with races with CLOSE */
static __be32
nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
{
	__be32 ret;

	mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
	ret = nfsd4_verify_open_stid(&stp->st_stid);
	if (ret != nfs_ok)
		mutex_unlock(&stp->st_mutex);
	return ret;
}

static struct nfs4_ol_stateid *
nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_ol_stateid *stp;

	for (;;) {
		spin_lock(&fp->fi_lock);
		stp = nfsd4_find_existing_open(fp, open);
		spin_unlock(&fp->fi_lock);
		if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
			break;
		nfs4_put_stid(&stp->st_stid);
	}
	return stp;
}

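/*
 * Added note: the retry loop above is what makes the lookup safe
 * against a concurrent CLOSE.  If the stateid we found is being torn
 * down, nfsd4_lock_ol_stateid() fails verification, we drop our
 * reference and search again until we either find a stable open
 * stateid or none at all.
 */
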
static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
			   struct nfsd4_compound_state *cstate)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_openowner *oo, *ret;

	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
	if (!oo)
		return NULL;
	oo->oo_owner.so_ops = &openowner_ops;
	oo->oo_owner.so_is_open_owner = 1;
	oo->oo_owner.so_seqid = open->op_seqid;
	oo->oo_flags = 0;
	if (nfsd4_has_session(cstate))
		oo->oo_flags |= NFS4_OO_CONFIRMED;
	oo->oo_time = 0;
	oo->oo_last_closed_stid = NULL;
	INIT_LIST_HEAD(&oo->oo_close_lru);
	spin_lock(&clp->cl_lock);
	ret = find_openstateowner_str_locked(strhashval, open, clp);
	if (ret == NULL) {
		hash_openowner(oo, clp, strhashval);
		ret = oo;
	} else
		nfs4_free_stateowner(&oo->oo_owner);

	spin_unlock(&clp->cl_lock);
	return ret;
}

static struct nfs4_ol_stateid *
init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_openowner *oo = open->op_openowner;
	struct nfs4_ol_stateid *retstp = NULL;
	struct nfs4_ol_stateid *stp;

	stp = open->op_stp;
	/* We are moving these outside of the spinlocks to avoid the warnings */
	mutex_init(&stp->st_mutex);
	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);

retry:
	spin_lock(&oo->oo_owner.so_client->cl_lock);
	spin_lock(&fp->fi_lock);

	retstp = nfsd4_find_existing_open(fp, open);
	if (retstp)
		goto out_unlock;

	open->op_stp = NULL;
	refcount_inc(&stp->st_stid.sc_count);
	stp->st_stid.sc_type = NFS4_OPEN_STID;
	INIT_LIST_HEAD(&stp->st_locks);
	stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
	get_nfs4_file(fp);
	stp->st_stid.sc_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = 0;
	stp->st_openstp = NULL;
	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
	list_add(&stp->st_perfile, &fp->fi_stateids);

out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&oo->oo_owner.so_client->cl_lock);
	if (retstp) {
		/* Handle races with CLOSE */
		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
			nfs4_put_stid(&retstp->st_stid);
			goto retry;
		}
		/* To keep mutex tracking happy */
		mutex_unlock(&stp->st_mutex);
		stp = retstp;
	}
	return stp;
}

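/*
 * Added note: nfsd4_find_existing_open() is deliberately repeated in
 * init_open_stateid() under both cl_lock and fi_lock.  Another thread
 * may have hashed an identical open stateid between the caller's
 * earlier lookup and this point; if so, we adopt that stateid and
 * leave open->op_stp in place for the caller to free.
 */
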
/*
 * In the 4.0 case we need to keep the owners around a little while to handle
 * CLOSE replay. We still do need to release any file access that is held by
 * them before returning however.
 */
static void
move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
{
	struct nfs4_ol_stateid *last;
	struct nfs4_openowner *oo = openowner(s->st_stateowner);
	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
						nfsd_net_id);

	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);

	/*
	 * We know that we hold one reference via nfsd4_close, and another
	 * "persistent" reference for the client. If the refcount is higher
	 * than 2, then there are still calls in progress that are using this
	 * stateid. We can't put the sc_file reference until they are finished.
	 * Wait for the refcount to drop to 2. Since it has been unhashed,
	 * there should be no danger of the refcount going back up again at
	 * this point.
	 */
	wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);

	release_all_access(s);
	if (s->st_stid.sc_file) {
		put_nfs4_file(s->st_stid.sc_file);
		s->st_stid.sc_file = NULL;
	}

	spin_lock(&nn->client_lock);
	last = oo->oo_last_closed_stid;
	oo->oo_last_closed_stid = s;
	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
	oo->oo_time = ktime_get_boottime_seconds();
	spin_unlock(&nn->client_lock);
	if (last)
		nfs4_put_stid(&last->st_stid);
}

/* search file_hashtbl[] for file */
static struct nfs4_file *
find_file_locked(struct svc_fh *fh, unsigned int hashval)
{
	struct nfs4_file *fp;

	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
				 lockdep_is_held(&state_lock)) {
		if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) {
			if (refcount_inc_not_zero(&fp->fi_ref))
				return fp;
		}
	}
	return NULL;
}

static struct nfs4_file *insert_file(struct nfs4_file *new, struct svc_fh *fh,
				     unsigned int hashval)
{
	struct nfs4_file *fp;
	struct nfs4_file *ret = NULL;
	bool alias_found = false;

	spin_lock(&state_lock);
	hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash,
				 lockdep_is_held(&state_lock)) {
		if (fh_match(&fp->fi_fhandle, &fh->fh_handle)) {
			if (refcount_inc_not_zero(&fp->fi_ref))
				ret = fp;
		} else if (d_inode(fh->fh_dentry) == fp->fi_inode)
			fp->fi_aliased = alias_found = true;
	}
	if (likely(ret == NULL)) {
		nfsd4_init_file(fh, hashval, new);
		new->fi_aliased = alias_found;
		ret = new;
	}
	spin_unlock(&state_lock);
	return ret;
}

static struct nfs4_file *find_file(struct svc_fh *fh)
{
	struct nfs4_file *fp;
	unsigned int hashval = file_hashval(fh);

	rcu_read_lock();
	fp = find_file_locked(fh, hashval);
	rcu_read_unlock();
	return fp;
}

static struct nfs4_file *
find_or_add_file(struct nfs4_file *new, struct svc_fh *fh)
{
	struct nfs4_file *fp;
	unsigned int hashval = file_hashval(fh);

	rcu_read_lock();
	fp = find_file_locked(fh, hashval);
	rcu_read_unlock();
	if (fp)
		return fp;

	return insert_file(new, fh, hashval);
}

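/*
 * Added note: find_or_add_file() is the usual RCU find-or-insert
 * pattern.  A lockless lookup handles the common hit case cheaply;
 * only on a miss do we take state_lock in insert_file() and repeat
 * the search before hashing the preallocated entry, so concurrent
 * inserts of the same filehandle cannot race.
 */
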
/*
 * Called to check deny when READ with all zero stateid or
 * WRITE with all zero or all one stateid
 */
static __be32
nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
{
	struct nfs4_file *fp;
	__be32 ret = nfs_ok;

	fp = find_file(current_fh);
	if (!fp)
		return ret;
	/* Check for conflicting share reservations */
	spin_lock(&fp->fi_lock);
	if (fp->fi_share_deny & deny_type)
		ret = nfserr_locked;
	spin_unlock(&fp->fi_lock);
	put_nfs4_file(fp);
	return ret;
}

static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);
	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
					  nfsd_net_id);

	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);

	/*
	 * We can't do this in nfsd_break_deleg_cb because it is
	 * already holding inode->i_lock.
	 *
	 * If the dl_time != 0, then we know that it has already been
	 * queued for a lease break. Don't queue it again.
	 */
	spin_lock(&state_lock);
	if (dp->dl_time == 0) {
		dp->dl_time = ktime_get_boottime_seconds();
		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
	}
	spin_unlock(&state_lock);
}

static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
		struct rpc_task *task)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);

	if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID ||
	    dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID)
		return 1;

	switch (task->tk_status) {
	case 0:
		return 1;
	case -NFS4ERR_DELAY:
		rpc_delay(task, 2 * HZ);
		return 0;
	case -EBADHANDLE:
	case -NFS4ERR_BAD_STATEID:
		/*
		 * Race: client probably got cb_recall before open reply
		 * granting delegation.
		 */
		if (dp->dl_retries--) {
			rpc_delay(task, 2 * HZ);
			return 0;
		}
		fallthrough;
	default:
		return 1;
	}
}

static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
{
	struct nfs4_delegation *dp = cb_to_delegation(cb);

	nfs4_put_stid(&dp->dl_stid);
}

static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
	.prepare = nfsd4_cb_recall_prepare,
	.done = nfsd4_cb_recall_done,
	.release = nfsd4_cb_recall_release,
};

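/*
 * Added summary of the recall retry policy above: NFS4ERR_DELAY backs
 * off for two seconds and retries indefinitely; EBADHANDLE and
 * NFS4ERR_BAD_STATEID (a client that likely hasn't processed the open
 * reply granting the delegation yet) retry on the same schedule but
 * only dl_retries times; every other result ends the callback.
 */
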
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
	/*
	 * We're assuming the state code never drops its reference
	 * without first removing the lease. Since we're in this lease
	 * callback (and since the lease code is serialized by the
	 * i_lock) we know the server hasn't removed the lease yet, and
	 * we know it's safe to take a reference.
	 */
	refcount_inc(&dp->dl_stid.sc_count);
	nfsd4_run_cb(&dp->dl_recall);
}

/* Called from break_lease() with i_lock held. */
static bool
nfsd_break_deleg_cb(struct file_lock *fl)
{
	bool ret = false;
	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
	struct nfs4_file *fp = dp->dl_stid.sc_file;

	trace_nfsd_cb_recall(&dp->dl_stid);

	/*
	 * We don't want the locks code to timeout the lease for us;
	 * we'll remove it ourselves if a delegation isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;

	spin_lock(&fp->fi_lock);
	fp->fi_had_conflict = true;
	nfsd_break_one_deleg(dp);
	spin_unlock(&fp->fi_lock);
	return ret;
}

static bool nfsd_breaker_owns_lease(struct file_lock *fl)
{
	struct nfs4_delegation *dl = fl->fl_owner;
	struct svc_rqst *rqst;
	struct nfs4_client *clp;

	if (!i_am_nfsd())
		return false;
	rqst = kthread_data(current);
	/* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
	if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
		return false;
	clp = *(rqst->rq_lease_breaker);
	return dl->dl_stid.sc_client == clp;
}

static int
nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
		     struct list_head *dispose)
{
	if (arg & F_UNLCK)
		return lease_modify(onlist, arg, dispose);
	else
		return -EAGAIN;
}

static const struct lock_manager_operations nfsd_lease_mng_ops = {
	.lm_breaker_owns_lease = nfsd_breaker_owns_lease,
	.lm_break = nfsd_break_deleg_cb,
	.lm_change = nfsd_change_deleg_cb,
};

static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
{
	if (nfsd4_has_session(cstate))
		return nfs_ok;
	if (seqid == so->so_seqid - 1)
		return nfserr_replay_me;
	if (seqid == so->so_seqid)
		return nfs_ok;
	return nfserr_bad_seqid;
}

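/*
 * Added example for the v4.0 (sessionless) rules above: an open-owner
 * whose so_seqid is 5 must send seqid 5 on its next request; seqid 4
 * means the client never saw our previous reply, so we replay it
 * (nfserr_replay_me); any other value is nfserr_bad_seqid.  With a
 * v4.1 session the slot machinery replaces this check entirely.
 */
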
static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions,
					   struct nfsd_net *nn)
{
	struct nfs4_client *found;

	spin_lock(&nn->client_lock);
	found = find_confirmed_client(clid, sessions, nn);
	if (found)
		atomic_inc(&found->cl_rpc_users);
	spin_unlock(&nn->client_lock);
	return found;
}

static __be32 set_client(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	if (cstate->clp) {
		if (!same_clid(&cstate->clp->cl_clientid, clid))
			return nfserr_stale_clientid;
		return nfs_ok;
	}
	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;
	/*
	 * We're in the 4.0 case (otherwise the SEQUENCE op would have
	 * set cstate->clp), so session = false:
	 */
	cstate->clp = lookup_clientid(clid, false, nn);
	if (!cstate->clp)
		return nfserr_expired;
	return nfs_ok;
}

__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
		    struct nfsd4_open *open, struct nfsd_net *nn)
{
	clientid_t *clientid = &open->op_clientid;
	struct nfs4_client *clp = NULL;
	unsigned int strhashval;
	struct nfs4_openowner *oo = NULL;
	__be32 status;

	/*
	 * In case we need it later, after we've already created the
	 * file and don't want to risk a further failure:
	 */
	open->op_file = nfsd4_alloc_file();
	if (open->op_file == NULL)
		return nfserr_jukebox;

	status = set_client(clientid, cstate, nn);
	if (status)
		return status;
	clp = cstate->clp;

	strhashval = ownerstr_hashval(&open->op_owner);
	oo = find_openstateowner_str(strhashval, open, clp);
	open->op_openowner = oo;
	if (!oo)
		goto new_owner;
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		/* Replace unconfirmed owners without checking for replay. */
		release_openowner(oo);
		open->op_openowner = NULL;
		goto new_owner;
	}
	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
	if (status)
		return status;
	goto alloc_stateid;
new_owner:
	oo = alloc_init_open_stateowner(strhashval, open, cstate);
	if (oo == NULL)
		return nfserr_jukebox;
	open->op_openowner = oo;
alloc_stateid:
	open->op_stp = nfs4_alloc_open_stateid(clp);
	if (!open->op_stp)
		return nfserr_jukebox;

	if (nfsd4_has_session(cstate) &&
	    (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
		open->op_odstate = alloc_clnt_odstate(clp);
		if (!open->op_odstate)
			return nfserr_jukebox;
	}

	return nfs_ok;
}

static inline __be32
nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
{
	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
		return nfserr_openmode;
	else
		return nfs_ok;
}

static int share_access_to_flags(u32 share_access)
{
	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
}

static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
{
	struct nfs4_stid *ret;

	ret = find_stateid_by_type(cl, s,
				NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
	if (!ret)
		return NULL;
	return delegstateid(ret);
}

static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
{
	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
}

static __be32
nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
		 struct nfs4_delegation **dp)
{
	int flags;
	__be32 status = nfserr_bad_stateid;
	struct nfs4_delegation *deleg;

	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
	if (deleg == NULL)
		goto out;
	if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
		nfs4_put_stid(&deleg->dl_stid);
		if (cl->cl_minorversion)
			status = nfserr_deleg_revoked;
		goto out;
	}
	flags = share_access_to_flags(open->op_share_access);
	status = nfs4_check_delegmode(deleg, flags);
	if (status) {
		nfs4_put_stid(&deleg->dl_stid);
		goto out;
	}
	*dp = deleg;
out:
	if (!nfsd4_is_deleg_cur(open))
		return nfs_ok;
	if (status)
		return status;
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	return nfs_ok;
}

static inline int nfs4_access_to_access(u32 nfs4_access)
{
	int flags = 0;

	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
		flags |= NFSD_MAY_READ;
	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
		flags |= NFSD_MAY_WRITE;
	return flags;
}

static inline __be32
nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
		struct nfsd4_open *open)
{
	struct iattr iattr = {
		.ia_valid = ATTR_SIZE,
		.ia_size = 0,
	};

	if (!open->op_truncate)
		return 0;
	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
		return nfserr_inval;
	return nfsd_setattr(rqstp, fh, &iattr, 0, (time64_t)0);
}

static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
		struct nfsd4_open *open)
{
	struct nfsd_file *nf = NULL;
	__be32 status;
	int oflag = nfs4_access_to_omode(open->op_share_access);
	int access = nfs4_access_to_access(open->op_share_access);
	unsigned char old_access_bmap, old_deny_bmap;

	spin_lock(&fp->fi_lock);

	/*
	 * Are we trying to set a deny mode that would conflict with
	 * current access?
	 */
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* set access to the file */
	status = nfs4_file_get_access(fp, open->op_share_access);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* Set access bits in stateid */
	old_access_bmap = stp->st_access_bmap;
	set_access(open->op_share_access, stp);

	/* Set new deny mask */
	old_deny_bmap = stp->st_deny_bmap;
	set_deny(open->op_share_deny, stp);
	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);

	if (!fp->fi_fds[oflag]) {
		spin_unlock(&fp->fi_lock);
		status = nfsd_file_acquire(rqstp, cur_fh, access, &nf);
		if (status)
			goto out_put_access;
		spin_lock(&fp->fi_lock);
		if (!fp->fi_fds[oflag]) {
			fp->fi_fds[oflag] = nf;
			nf = NULL;
		}
	}
	spin_unlock(&fp->fi_lock);
	if (nf)
		nfsd_file_put(nf);

	status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
						access));
	if (status)
		goto out_put_access;

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status)
		goto out_put_access;
out:
	return status;
out_put_access:
	stp->st_access_bmap = old_access_bmap;
	nfs4_file_put_access(fp, open->op_share_access);
	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
	goto out;
}

static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
	__be32 status;
	unsigned char old_deny_bmap = stp->st_deny_bmap;

	if (!test_access(open->op_share_access, stp))
		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);

	/* test and set deny mode */
	spin_lock(&fp->fi_lock);
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status == nfs_ok) {
		set_deny(open->op_share_deny, stp);
		fp->fi_share_deny |=
			(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
	}
	spin_unlock(&fp->fi_lock);

	if (status != nfs_ok)
		return status;

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status != nfs_ok)
		reset_union_bmap_deny(old_deny_bmap, stp);
	return status;
}

/* Should we give out recallable state?: */
static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
{
	if (clp->cl_cb_state == NFSD4_CB_UP)
		return true;
	/*
	 * In the sessions case, since we don't have to establish a
	 * separate connection for callbacks, we assume it's OK
	 * until we hear otherwise:
	 */
	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
}

static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
						int flag)
{
	struct file_lock *fl;

	fl = locks_alloc_lock();
	if (!fl)
		return NULL;
	fl->fl_lmops = &nfsd_lease_mng_ops;
	fl->fl_flags = FL_DELEG;
	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
	fl->fl_end = OFFSET_MAX;
	fl->fl_owner = (fl_owner_t)dp;
	fl->fl_pid = current->tgid;
	fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
	return fl;
}

static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
					 struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *st;
	struct file *f = fp->fi_deleg_file->nf_file;
	struct inode *ino = locks_inode(f);
	int writes;

	writes = atomic_read(&ino->i_writecount);
	if (!writes)
		return 0;
	/*
	 * There could be multiple filehandles (hence multiple
	 * nfs4_files) referencing this file, but that's not too
	 * common; let's just give up in that case rather than
	 * trying to go look up all the clients using that other
	 * nfs4_file as well:
	 */
	if (fp->fi_aliased)
		return -EAGAIN;
	/*
	 * If there's a close in progress, make sure that we see it
	 * clear any fi_fds[] entries before we see it decrement
	 * i_writecount:
	 */
	smp_mb__after_atomic();

	if (fp->fi_fds[O_WRONLY])
		writes--;
	if (fp->fi_fds[O_RDWR])
		writes--;
	if (writes > 0)
		return -EAGAIN; /* There may be non-NFSv4 writers */
	/*
	 * It's possible there are non-NFSv4 write opens in progress,
	 * but if they haven't incremented i_writecount yet then they
	 * also haven't called break lease yet; so, they'll break this
	 * lease soon enough.  So, all that's left to check for is NFSv4
	 * opens:
	 */
	spin_lock(&fp->fi_lock);
	list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
		if (st->st_openstp == NULL /* it's an open */ &&
		    access_permit_write(st) &&
		    st->st_stid.sc_client != clp) {
			spin_unlock(&fp->fi_lock);
			return -EAGAIN;
		}
	}
	spin_unlock(&fp->fi_lock);
	/*
	 * There's a small chance that we could be racing with another
	 * NFSv4 open. However, any open that hasn't added itself to
	 * the fi_stateids list also hasn't called break_lease yet; so,
	 * they'll break this lease soon enough.
	 */
	return 0;
}

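/*
 * Added worked example of the accounting above: if i_writecount is 2
 * and this nfs4_file holds both an O_WRONLY and an O_RDWR descriptor,
 * the counts cancel and no unknown writer can exist, so we go on to
 * scan the NFSv4 opens.  Any surplus must be assumed to be a
 * conflicting (non-NFSv4 or aliased) writer, and we return -EAGAIN.
 */
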
static struct nfs4_delegation *
nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
		    struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
{
	int status = 0;
	struct nfs4_delegation *dp;
	struct nfsd_file *nf;
	struct file_lock *fl;

	/*
	 * The fi_had_conflict and nfs4_delegation_exists() checks
	 * here are just optimizations; we'll need to recheck them at
	 * the end:
	 */
5117 if (fp->fi_had_conflict)
5118 return ERR_PTR(-EAGAIN);
5119
5120 nf = find_readable_file(fp);
5121 if (!nf) {
5122 /*
5123 * We probably could attempt another open and get a read
5124 * delegation, but for now, don't bother until the
5125 * client actually sends us one.
5126 */
5127 return ERR_PTR(-EAGAIN);
5128 }
5129 spin_lock(&state_lock);
5130 spin_lock(&fp->fi_lock);
5131 if (nfs4_delegation_exists(clp, fp))
5132 status = -EAGAIN;
5133 else if (!fp->fi_deleg_file) {
5134 fp->fi_deleg_file = nf;
5135 /* increment early to prevent fi_deleg_file from being
5136 * cleared */
5137 fp->fi_delegees = 1;
5138 nf = NULL;
5139 } else
5140 fp->fi_delegees++;
5141 spin_unlock(&fp->fi_lock);
5142 spin_unlock(&state_lock);
5143 if (nf)
5144 nfsd_file_put(nf);
5145 if (status)
5146 return ERR_PTR(status);
5147
5148 status = -ENOMEM;
5149 dp = alloc_init_deleg(clp, fp, fh, odstate);
5150 if (!dp)
5151 goto out_delegees;
5152
5153 fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
5154 if (!fl)
5155 goto out_clnt_odstate;
5156
5157 status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
5158 if (fl)
5159 locks_free_lock(fl);
5160 if (status)
5161 goto out_clnt_odstate;
5162 status = nfsd4_check_conflicting_opens(clp, fp);
5163 if (status)
5164 goto out_unlock;
5165
5166 spin_lock(&state_lock);
5167 spin_lock(&fp->fi_lock);
5168 if (fp->fi_had_conflict)
5169 status = -EAGAIN;
5170 else
5171 status = hash_delegation_locked(dp, fp);
5172 spin_unlock(&fp->fi_lock);
5173 spin_unlock(&state_lock);
5174
5175 if (status)
5176 goto out_unlock;
5177
5178 return dp;
5179out_unlock:
5180 vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
5181out_clnt_odstate:
5182 put_clnt_odstate(dp->dl_clnt_odstate);
5183 nfs4_put_stid(&dp->dl_stid);
5184out_delegees:
5185 put_deleg_file(fp);
5186 return ERR_PTR(status);
5187}
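/*
 * Annotation (not from the original source): note the check/act/recheck
 * shape above: fi_had_conflict and nfs4_delegation_exists() are tested
 * optimistically first, the lease is set, and then the conflict state is
 * re-tested under state_lock/fi_lock before the delegation is hashed,
 * because a conflicting open may race with vfs_setlease().
 */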
5188
5189static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
5190{
5191 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5192 if (status == -EAGAIN)
5193 open->op_why_no_deleg = WND4_CONTENTION;
5194 else {
5195 open->op_why_no_deleg = WND4_RESOURCE;
5196 switch (open->op_deleg_want) {
5197 case NFS4_SHARE_WANT_READ_DELEG:
5198 case NFS4_SHARE_WANT_WRITE_DELEG:
5199 case NFS4_SHARE_WANT_ANY_DELEG:
5200 break;
5201 case NFS4_SHARE_WANT_CANCEL:
5202 open->op_why_no_deleg = WND4_CANCELLED;
5203 break;
5204 case NFS4_SHARE_WANT_NO_DELEG:
5205 WARN_ON_ONCE(1);
5206 }
5207 }
5208}
5209
5210/*
5211 * Attempt to hand out a delegation.
5212 *
5213 * Note we don't support write delegations, and won't until the vfs has
5214 * proper support for them.
5215 */
5216static void
5217nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
5218 struct nfs4_ol_stateid *stp)
5219{
5220 struct nfs4_delegation *dp;
5221 struct nfs4_openowner *oo = openowner(stp->st_stateowner);
5222 struct nfs4_client *clp = stp->st_stid.sc_client;
5223 int cb_up;
5224 int status = 0;
5225
5226 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
5227 open->op_recall = 0;
5228 switch (open->op_claim_type) {
5229 case NFS4_OPEN_CLAIM_PREVIOUS:
5230 if (!cb_up)
5231 open->op_recall = 1;
5232 if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
5233 goto out_no_deleg;
5234 break;
5235 case NFS4_OPEN_CLAIM_NULL:
5236 case NFS4_OPEN_CLAIM_FH:
5237 /*
5238 * Let's not give out any delegations till everyone's
5239 * had the chance to reclaim theirs, *and* until
5240 * NLM locks have all been reclaimed:
5241 */
5242 if (locks_in_grace(clp->net))
5243 goto out_no_deleg;
5244 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
5245 goto out_no_deleg;
5246 break;
5247 default:
5248 goto out_no_deleg;
5249 }
5250 dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
5251 if (IS_ERR(dp))
5252 goto out_no_deleg;
5253
5254 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
5255
5256 trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
5257 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
5258 nfs4_put_stid(&dp->dl_stid);
5259 return;
5260out_no_deleg:
5261 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
5262     open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
5263  dprintk("NFSD: WARNING: refusing delegation reclaim\n");
5264  open->op_recall = 1;
5265 }
5266 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
5267
5268 /* 4.1 client asking for a delegation? */
5269 if (open->op_deleg_want)
5270 nfsd4_open_deleg_none_ext(open, status);
5271 return;
5272}
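/*
 * Claim-type summary for the function above (annotation, not from the
 * original source):
 *
 *	CLAIM_PREVIOUS	reclaim of a delegation held before restart; only
 *			NFS4_OPEN_DELEGATE_READ is honoured, and op_recall
 *			is set when the backchannel is not known good or
 *			the reclaim is refused (note the refusal check at
 *			out_no_deleg must read op_delegate_type before it
 *			is reset to NONE).
 *	CLAIM_NULL/FH	a fresh open; no delegation while any grace period
 *			is in force, and only for a confirmed openowner
 *			with a good callback channel.
 *	anything else	no delegation.
 */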
5273
5274static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
5275 struct nfs4_delegation *dp)
5276{
5277 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
5278 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5279 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5280 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
5281 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
5282 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5283 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5284 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
5285 }
5286 /* Otherwise the client must be confused wanting a delegation
5287 * it already has, therefore we don't return
5288 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
5289 */
5290}
5291
5292__be32
5293nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
5294{
5295 struct nfsd4_compoundres *resp = rqstp->rq_resp;
5296 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
5297 struct nfs4_file *fp = NULL;
5298 struct nfs4_ol_stateid *stp = NULL;
5299 struct nfs4_delegation *dp = NULL;
5300 __be32 status;
5301 bool new_stp = false;
5302
5303 /*
5304 * Look up the file; if found, look up the stateid, check the open
5305 * request, and check for delegations in the process of being
5306 * recalled. If not found, create the nfs4_file struct.
5307 */
5308 fp = find_or_add_file(open->op_file, current_fh);
5309 if (fp != open->op_file) {
5310 status = nfs4_check_deleg(cl, open, &dp);
5311 if (status)
5312 goto out;
5313 stp = nfsd4_find_and_lock_existing_open(fp, open);
5314 } else {
5315 open->op_file = NULL;
5316 status = nfserr_bad_stateid;
5317 if (nfsd4_is_deleg_cur(open))
5318 goto out;
5319 }
5320
5321 if (!stp) {
5322 stp = init_open_stateid(fp, open);
5323 if (!open->op_stp)
5324 new_stp = true;
5325 }
5326
5327 /*
5328 * OPEN the file, or upgrade an existing OPEN.
5329 * If truncate fails, the OPEN fails.
5330 *
5331 * stp is already locked.
5332 */
5333 if (!new_stp) {
5334 /* Stateid was found, this is an OPEN upgrade */
5335 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
5336 if (status) {
5337 mutex_unlock(&stp->st_mutex);
5338 goto out;
5339 }
5340 } else {
5341 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
5342 if (status) {
5343 stp->st_stid.sc_type = NFS4_CLOSED_STID;
5344 release_open_stateid(stp);
5345 mutex_unlock(&stp->st_mutex);
5346 goto out;
5347 }
5348
5349 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
5350 open->op_odstate);
5351 if (stp->st_clnt_odstate == open->op_odstate)
5352 open->op_odstate = NULL;
5353 }
5354
5355 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
5356 mutex_unlock(&stp->st_mutex);
5357
5358 if (nfsd4_has_session(&resp->cstate)) {
5359 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
5360 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5361 open->op_why_no_deleg = WND4_NOT_WANTED;
5362 goto nodeleg;
5363 }
5364 }
5365
5366 /*
5367 * Attempt to hand out a delegation. No error return, because the
5368 * OPEN succeeds even if we fail.
5369 */
5370 nfs4_open_delegation(current_fh, open, stp);
5371nodeleg:
5372 status = nfs_ok;
5373 trace_nfsd_open(&stp->st_stid.sc_stateid);
5374out:
5375 /* 4.1 client trying to upgrade/downgrade delegation? */
5376 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
5377 open->op_deleg_want)
5378 nfsd4_deleg_xgrade_none_ext(open, dp);
5379
5380 if (fp)
5381 put_nfs4_file(fp);
5382 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
5383 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
5384 /*
5385 * To finish the open response, we just need to set the rflags.
5386 */
5387 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
5388 if (nfsd4_has_session(&resp->cstate))
5389 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
5390 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
5391 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
5392
5393 if (dp)
5394 nfs4_put_stid(&dp->dl_stid);
5395 if (stp)
5396 nfs4_put_stid(&stp->st_stid);
5397
5398 return status;
5399}
5400
5401void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
5402 struct nfsd4_open *open)
5403{
5404 if (open->op_openowner) {
5405 struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
5406
5407 nfsd4_cstate_assign_replay(cstate, so);
5408 nfs4_put_stateowner(so);
5409 }
5410 if (open->op_file)
5411 kmem_cache_free(file_slab, open->op_file);
5412 if (open->op_stp)
5413 nfs4_put_stid(&open->op_stp->st_stid);
5414 if (open->op_odstate)
5415 kmem_cache_free(odstate_slab, open->op_odstate);
5416}
5417
5418__be32
5419nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5420 union nfsd4_op_u *u)
5421{
5422 clientid_t *clid = &u->renew;
5423 struct nfs4_client *clp;
5424 __be32 status;
5425 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5426
5427 trace_nfsd_clid_renew(clid);
5428 status = set_client(clid, cstate, nn);
5429 if (status)
5430 return status;
5431 clp = cstate->clp;
5432 if (!list_empty(&clp->cl_delegations) &&
5433     clp->cl_cb_state != NFSD4_CB_UP)
5434 return nfserr_cb_path_down;
5435 return nfs_ok;
5436}
5437
5438void
5439nfsd4_end_grace(struct nfsd_net *nn)
5440{
5441 /* do nothing if grace period already ended */
5442 if (nn->grace_ended)
5443 return;
5444
5445 trace_nfsd_grace_complete(nn);
5446 nn->grace_ended = true;
5447 /*
5448  * If the server goes down again right now, an NFSv4
5449  * client will still be allowed to reclaim after it comes
5450  * back up, even if it hasn't yet had a chance to reclaim
5451  * state this time.
5452  */
5453 nfsd4_record_grace_done(nn);
5454 /*
5455 * At this point, NFSv4 clients can still reclaim. But if the
5456 * server crashes, any that have not yet reclaimed will be out
5457 * of luck on the next boot.
5458 *
5459 * (NFSv4.1+ clients are considered to have reclaimed once they
5460 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
5461 * have reclaimed after their first OPEN.)
5462 */
5463 locks_end_grace(&nn->nfsd4_manager);
5464 /*
5465 * At this point, and once lockd and/or any other containers
5466 * exit their grace period, further reclaims will fail and
5467 * regular locking can resume.
5468 */
5469}
5470
5471/*
5472 * If we've waited a lease period but there are still clients trying to
5473 * reclaim, wait a little longer to give them a chance to finish.
5474 */
5475static bool clients_still_reclaiming(struct nfsd_net *nn)
5476{
5477 time64_t double_grace_period_end = nn->boot_time +
5478 2 * nn->nfsd4_lease;
5479
5480 if (nn->track_reclaim_completes &&
5481 atomic_read(&nn->nr_reclaim_complete) ==
5482 nn->reclaim_str_hashtbl_size)
5483 return false;
5484 if (!nn->somebody_reclaimed)
5485 return false;
5486 nn->somebody_reclaimed = false;
5487 /*
5488 * If we've given them *two* lease times to reclaim, and they're
5489 * still not done, give up:
5490 */
5491 if (ktime_get_boottime_seconds() > double_grace_period_end)
5492 return false;
5493 return true;
5494}
5495
5496struct laundry_time {
5497 time64_t cutoff;
5498 time64_t new_timeo;
5499};
5500
5501static bool state_expired(struct laundry_time *lt, time64_t last_refresh)
5502{
5503 time64_t time_remaining;
5504
5505 if (last_refresh < lt->cutoff)
5506 return true;
5507 time_remaining = last_refresh - lt->cutoff;
5508 lt->new_timeo = min(lt->new_timeo, time_remaining);
5509 return false;
5510}
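/*
 * Worked example (annotation, not from the original source): with a 90s
 * lease, cutoff = now - 90.  A client that renewed 60 seconds ago has
 * last_refresh > cutoff, so it is not expired, and time_remaining is
 * 30s; new_timeo shrinks toward that, so the laundromat re-runs at about
 * the moment that client would actually expire.
 */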
5511
5512#ifdef CONFIG_NFSD_V4_2_INTER_SSC
5513void nfsd4_ssc_init_umount_work(struct nfsd_net *nn)
5514{
5515 spin_lock_init(&nn->nfsd_ssc_lock);
5516 INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list);
5517 init_waitqueue_head(&nn->nfsd_ssc_waitq);
5518}
5519EXPORT_SYMBOL_GPL(nfsd4_ssc_init_umount_work);
5520
5521/*
5522 * This is called when nfsd is being shut down, after all inter_ssc
5523 * cleanup is done, to destroy the ssc delayed unmount list.
5524 */
5525static void nfsd4_ssc_shutdown_umount(struct nfsd_net *nn)
5526{
5527 struct nfsd4_ssc_umount_item *ni = NULL;
5528 struct nfsd4_ssc_umount_item *tmp;
5529
5530 spin_lock(&nn->nfsd_ssc_lock);
5531 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
5532 list_del(&ni->nsui_list);
5533 spin_unlock(&nn->nfsd_ssc_lock);
5534 mntput(ni->nsui_vfsmount);
5535 kfree(ni);
5536 spin_lock(&nn->nfsd_ssc_lock);
5537 }
5538 spin_unlock(&nn->nfsd_ssc_lock);
5539}
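/*
 * Annotation (not from the original source): nfsd_ssc_lock is dropped
 * around mntput() above because unmounting can sleep, and sleeping is
 * not allowed under a spinlock.  The entry is unlinked before the lock
 * is released, so no other walker can observe a half-freed item.
 */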
5540
5541static void nfsd4_ssc_expire_umount(struct nfsd_net *nn)
5542{
5543 bool do_wakeup = false;
5544 struct nfsd4_ssc_umount_item *ni = NULL;
5545 struct nfsd4_ssc_umount_item *tmp;
5546
5547 spin_lock(&nn->nfsd_ssc_lock);
5548 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
5549 if (time_after(jiffies, ni->nsui_expire)) {
5550 if (refcount_read(&ni->nsui_refcnt) > 1)
5551 continue;
5552
5553   /* mark it as being unmounted */
5554 ni->nsui_busy = true;
5555 spin_unlock(&nn->nfsd_ssc_lock);
5556 mntput(ni->nsui_vfsmount);
5557 spin_lock(&nn->nfsd_ssc_lock);
5558
5559   /* waiters need to restart from the beginning of the list */
5560 list_del(&ni->nsui_list);
5561 kfree(ni);
5562
5563   /* wake up ssc_connect waiters */
5564 do_wakeup = true;
5565 continue;
5566 }
5567 break;
5568 }
5569 if (do_wakeup)
5570 wake_up_all(&nn->nfsd_ssc_waitq);
5571 spin_unlock(&nn->nfsd_ssc_lock);
5572}
5573#endif
5574
5575static time64_t
5576nfs4_laundromat(struct nfsd_net *nn)
5577{
5578 struct nfs4_client *clp;
5579 struct nfs4_openowner *oo;
5580 struct nfs4_delegation *dp;
5581 struct nfs4_ol_stateid *stp;
5582 struct nfsd4_blocked_lock *nbl;
5583 struct list_head *pos, *next, reaplist;
5584 struct laundry_time lt = {
5585 .cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease,
5586 .new_timeo = nn->nfsd4_lease
5587 };
5588 struct nfs4_cpntf_state *cps;
5589 copy_stateid_t *cps_t;
5590 int i;
5591
5592 if (clients_still_reclaiming(nn)) {
5593 lt.new_timeo = 0;
5594 goto out;
5595 }
5596 nfsd4_end_grace(nn);
5597 INIT_LIST_HEAD(&reaplist);
5598
5599 spin_lock(&nn->s2s_cp_lock);
5600 idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
5601 cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
5602 if (cps->cp_stateid.sc_type == NFS4_COPYNOTIFY_STID &&
5603 state_expired(<, cps->cpntf_time))
5604 _free_cpntf_state_locked(nn, cps);
5605 }
5606 spin_unlock(&nn->s2s_cp_lock);
5607
5608 spin_lock(&nn->client_lock);
5609 list_for_each_safe(pos, next, &nn->client_lru) {
5610 clp = list_entry(pos, struct nfs4_client, cl_lru);
5611 if (!state_expired(<, clp->cl_time))
5612 break;
5613 if (mark_client_expired_locked(clp))
5614 continue;
5615 list_add(&clp->cl_lru, &reaplist);
5616 }
5617 spin_unlock(&nn->client_lock);
5618 list_for_each_safe(pos, next, &reaplist) {
5619 clp = list_entry(pos, struct nfs4_client, cl_lru);
5620 trace_nfsd_clid_purged(&clp->cl_clientid);
5621 list_del_init(&clp->cl_lru);
5622 expire_client(clp);
5623 }
5624 spin_lock(&state_lock);
5625 list_for_each_safe(pos, next, &nn->del_recall_lru) {
5626  dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
5627 if (!state_expired(<, dp->dl_time))
5628 break;
5629 WARN_ON(!unhash_delegation_locked(dp));
5630 list_add(&dp->dl_recall_lru, &reaplist);
5631 }
5632 spin_unlock(&state_lock);
5633 while (!list_empty(&reaplist)) {
5634 dp = list_first_entry(&reaplist, struct nfs4_delegation,
5635 dl_recall_lru);
5636 list_del_init(&dp->dl_recall_lru);
5637 revoke_delegation(dp);
5638 }
5639
5640 spin_lock(&nn->client_lock);
5641 while (!list_empty(&nn->close_lru)) {
5642 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
5643 oo_close_lru);
5644 if (!state_expired(<, oo->oo_time))
5645 break;
5646 list_del_init(&oo->oo_close_lru);
5647 stp = oo->oo_last_closed_stid;
5648 oo->oo_last_closed_stid = NULL;
5649 spin_unlock(&nn->client_lock);
5650 nfs4_put_stid(&stp->st_stid);
5651 spin_lock(&nn->client_lock);
5652 }
5653 spin_unlock(&nn->client_lock);
5654
5655 /*
5656 * It's possible for a client to try and acquire an already held lock
5657 * that is being held for a long time, and then lose interest in it.
5658 * So, we clean out any un-revisited request after a lease period
5659 * under the assumption that the client is no longer interested.
5660 *
5661 * RFC5661, sec. 9.6 states that the client must not rely on getting
5662 * notifications and must continue to poll for locks, even when the
5663 * server supports them. Thus this shouldn't lead to clients blocking
5664 * indefinitely once the lock does become free.
5665 */
5666 BUG_ON(!list_empty(&reaplist));
5667 spin_lock(&nn->blocked_locks_lock);
5668 while (!list_empty(&nn->blocked_locks_lru)) {
5669 nbl = list_first_entry(&nn->blocked_locks_lru,
5670 struct nfsd4_blocked_lock, nbl_lru);
5671 if (!state_expired(<, nbl->nbl_time))
5672 break;
5673 list_move(&nbl->nbl_lru, &reaplist);
5674 list_del_init(&nbl->nbl_list);
5675 }
5676 spin_unlock(&nn->blocked_locks_lock);
5677
5678 while (!list_empty(&reaplist)) {
5679 nbl = list_first_entry(&reaplist,
5680 struct nfsd4_blocked_lock, nbl_lru);
5681 list_del_init(&nbl->nbl_lru);
5682 free_blocked_lock(nbl);
5683 }
5684#ifdef CONFIG_NFSD_V4_2_INTER_SSC
5685 /* service the server-to-server copy delayed unmount list */
5686 nfsd4_ssc_expire_umount(nn);
5687#endif
5688out:
5689 return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
5690}
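/*
 * Annotation (not from the original source): the laundromat sweeps, in
 * order: copy-notify stateids, expired clients, recallable delegations,
 * openowners parked on close_lru, and timed-out blocked locks.  Its
 * return value is the next sleep in seconds, clamped from below by
 * NFSD_LAUNDROMAT_MINTIMEOUT so a busy server cannot spin.
 */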
5691
5692static struct workqueue_struct *laundry_wq;
5693static void laundromat_main(struct work_struct *);
5694
5695static void
5696laundromat_main(struct work_struct *laundry)
5697{
5698 time64_t t;
5699 struct delayed_work *dwork = to_delayed_work(laundry);
5700 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
5701 laundromat_work);
5702
5703 t = nfs4_laundromat(nn);
5704 queue_delayed_work(laundry_wq, &nn->laundromat_work, t * HZ);
5705}
5706
5707static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
5708{
5709 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
5710 return nfserr_bad_stateid;
5711 return nfs_ok;
5712}
5713
5714static __be32
5715nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
5716{
5717 __be32 status = nfserr_openmode;
5718
5719 /* For lock stateid's, we test the parent open, not the lock: */
5720 if (stp->st_openstp)
5721 stp = stp->st_openstp;
5722 if ((flags & WR_STATE) && !access_permit_write(stp))
5723 goto out;
5724 if ((flags & RD_STATE) && !access_permit_read(stp))
5725 goto out;
5726 status = nfs_ok;
5727out:
5728 return status;
5729}
5730
5731static inline __be32
5732check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
5733{
5734 if (ONE_STATEID(stateid) && (flags & RD_STATE))
5735 return nfs_ok;
5736 else if (opens_in_grace(net)) {
5737 /* Answer in remaining cases depends on existence of
5738 * conflicting state; so we must wait out the grace period. */
5739 return nfserr_grace;
5740 } else if (flags & WR_STATE)
5741 return nfs4_share_conflict(current_fh,
5742 NFS4_SHARE_DENY_WRITE);
5743 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
5744 return nfs4_share_conflict(current_fh,
5745 NFS4_SHARE_DENY_READ);
5746}
5747
5748/*
5749 * Allow READ/WRITE during grace period on recovered state only for files
5750 * that are not able to provide mandatory locking.
5751 */
5752static inline int
5753grace_disallows_io(struct net *net, struct inode *inode)
5754{
5755 return opens_in_grace(net) && mandatory_lock(inode);
5756}
5757
5758static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
5759{
5760 /*
5761 * When sessions are used the stateid generation number is ignored
5762 * when it is zero.
5763 */
5764 if (has_session && in->si_generation == 0)
5765 return nfs_ok;
5766
5767 if (in->si_generation == ref->si_generation)
5768 return nfs_ok;
5769
5770 /* If the client sends us a stateid from the future, it's buggy: */
5771 if (nfsd4_stateid_generation_after(in, ref))
5772 return nfserr_bad_stateid;
5773 /*
5774 * However, we could see a stateid from the past, even from a
5775 * non-buggy client. For example, if the client sends a lock
5776 * while some IO is outstanding, the lock may bump si_generation
5777 * while the IO is still in flight. The client could avoid that
5778 * situation by waiting for responses on all the IO requests,
5779 * but better performance may result in retrying IO that
5780 * receives an old_stateid error if requests are rarely
5781 * reordered in flight:
5782 */
5783 return nfserr_old_stateid;
5784}
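/*
 * Example (annotation, not from the original source): if the server's
 * copy of the stateid has generation 3 and the client sends 2 (say a
 * READ issued just before a LOCK bumped the generation), the client gets
 * nfserr_old_stateid and can simply retry; a generation of 4 against a
 * reference of 3 can only come from "the future" and is nfserr_bad_stateid.
 */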
5785
5786static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
5787{
5788 __be32 ret;
5789
5790 spin_lock(&s->sc_lock);
5791 ret = nfsd4_verify_open_stid(s);
5792 if (ret == nfs_ok)
5793 ret = check_stateid_generation(in, &s->sc_stateid, has_session);
5794 spin_unlock(&s->sc_lock);
5795 return ret;
5796}
5797
5798static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
5799{
5800 if (ols->st_stateowner->so_is_open_owner &&
5801 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
5802 return nfserr_bad_stateid;
5803 return nfs_ok;
5804}
5805
5806static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
5807{
5808 struct nfs4_stid *s;
5809 __be32 status = nfserr_bad_stateid;
5810
5811 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5812 CLOSE_STATEID(stateid))
5813 return status;
5814 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid))
5815 return status;
5816 spin_lock(&cl->cl_lock);
5817 s = find_stateid_locked(cl, stateid);
5818 if (!s)
5819 goto out_unlock;
5820 status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
5821 if (status)
5822 goto out_unlock;
5823 switch (s->sc_type) {
5824 case NFS4_DELEG_STID:
5825 status = nfs_ok;
5826 break;
5827 case NFS4_REVOKED_DELEG_STID:
5828 status = nfserr_deleg_revoked;
5829 break;
5830 case NFS4_OPEN_STID:
5831 case NFS4_LOCK_STID:
5832 status = nfsd4_check_openowner_confirmed(openlockstateid(s));
5833 break;
5834 default:
5835  printk(KERN_WARNING "unknown stateid type %x\n", s->sc_type);
5836 fallthrough;
5837 case NFS4_CLOSED_STID:
5838 case NFS4_CLOSED_DELEG_STID:
5839 status = nfserr_bad_stateid;
5840 }
5841out_unlock:
5842 spin_unlock(&cl->cl_lock);
5843 return status;
5844}
5845
5846__be32
5847nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
5848 stateid_t *stateid, unsigned char typemask,
5849 struct nfs4_stid **s, struct nfsd_net *nn)
5850{
5851 __be32 status;
5852 bool return_revoked = false;
5853
5854 /*
5855 * Only return revoked delegations if explicitly asked for;
5856 * otherwise report a revoked or bad_stateid status.
5857 */
5858 if (typemask & NFS4_REVOKED_DELEG_STID)
5859 return_revoked = true;
5860 else if (typemask & NFS4_DELEG_STID)
5861 typemask |= NFS4_REVOKED_DELEG_STID;
5862
5863 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5864 CLOSE_STATEID(stateid))
5865 return nfserr_bad_stateid;
5866 status = set_client(&stateid->si_opaque.so_clid, cstate, nn);
5867 if (status == nfserr_stale_clientid) {
5868 if (cstate->session)
5869 return nfserr_bad_stateid;
5870 return nfserr_stale_stateid;
5871 }
5872 if (status)
5873 return status;
5874 *s = find_stateid_by_type(cstate->clp, stateid, typemask);
5875 if (!*s)
5876 return nfserr_bad_stateid;
5877 if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
5878 nfs4_put_stid(*s);
5879 if (cstate->minorversion)
5880 return nfserr_deleg_revoked;
5881 return nfserr_bad_stateid;
5882 }
5883 return nfs_ok;
5884}
5885
5886static struct nfsd_file *
5887nfs4_find_file(struct nfs4_stid *s, int flags)
5888{
5889 if (!s)
5890 return NULL;
5891
5892 switch (s->sc_type) {
5893 case NFS4_DELEG_STID:
5894 if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
5895 return NULL;
5896 return nfsd_file_get(s->sc_file->fi_deleg_file);
5897 case NFS4_OPEN_STID:
5898 case NFS4_LOCK_STID:
5899 if (flags & RD_STATE)
5900 return find_readable_file(s->sc_file);
5901 else
5902 return find_writeable_file(s->sc_file);
5903 }
5904
5905 return NULL;
5906}
5907
5908static __be32
5909nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
5910{
5911 __be32 status;
5912
5913 status = nfsd4_check_openowner_confirmed(ols);
5914 if (status)
5915 return status;
5916 return nfs4_check_openmode(ols, flags);
5917}
5918
5919static __be32
5920nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
5921 struct nfsd_file **nfp, int flags)
5922{
5923 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
5924 struct nfsd_file *nf;
5925 __be32 status;
5926
5927 nf = nfs4_find_file(s, flags);
5928 if (nf) {
5929 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
5930 acc | NFSD_MAY_OWNER_OVERRIDE);
5931 if (status) {
5932 nfsd_file_put(nf);
5933 goto out;
5934 }
5935 } else {
5936 status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
5937 if (status)
5938 return status;
5939 }
5940 *nfp = nf;
5941out:
5942 return status;
5943}

5944static void
5945_free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
5946{
5947 WARN_ON_ONCE(cps->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID);
5948 if (!refcount_dec_and_test(&cps->cp_stateid.sc_count))
5949 return;
5950 list_del(&cps->cp_list);
5951 idr_remove(&nn->s2s_cp_stateids,
5952 cps->cp_stateid.stid.si_opaque.so_id);
5953 kfree(cps);
5954}

5955/*
5956 * A READ from an inter-server-to-server COPY will have a
5957 * copy stateid. Look up the copy notify stateid from the
5958 * idr structure and take a reference on it.
5959 */
5960__be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
5961 struct nfs4_client *clp,
5962 struct nfs4_cpntf_state **cps)
5963{
5964 copy_stateid_t *cps_t;
5965 struct nfs4_cpntf_state *state = NULL;
5966
5967 if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
5968 return nfserr_bad_stateid;
5969 spin_lock(&nn->s2s_cp_lock);
5970 cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
5971 if (cps_t) {
5972 state = container_of(cps_t, struct nfs4_cpntf_state,
5973 cp_stateid);
5974 if (state->cp_stateid.sc_type != NFS4_COPYNOTIFY_STID) {
5975 state = NULL;
5976 goto unlock;
5977 }
5978 if (!clp)
5979 refcount_inc(&state->cp_stateid.sc_count);
5980 else
5981 _free_cpntf_state_locked(nn, state);
5982 }
5983unlock:
5984 spin_unlock(&nn->s2s_cp_lock);
5985 if (!state)
5986 return nfserr_bad_stateid;
5987 if (!clp && state)
5988 *cps = state;
5989 return 0;
5990}
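/*
 * Annotation (not from the original source): this helper is dual-purpose,
 * keyed on @clp.  A hedged usage sketch:
 *
 *	manage_cpntf_state(nn, st, NULL, &cps);	(look up and take a ref)
 *	manage_cpntf_state(nn, st, clp, NULL);	(drop/free the state)
 *
 * A reference taken by the lookup form must later be put via
 * nfs4_put_cpntf_state().
 */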
5991
5992static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
5993 struct nfs4_stid **stid)
5994{
5995 __be32 status;
5996 struct nfs4_cpntf_state *cps = NULL;
5997 struct nfs4_client *found;
5998
5999 status = manage_cpntf_state(nn, st, NULL, &cps);
6000 if (status)
6001 return status;
6002
6003 cps->cpntf_time = ktime_get_boottime_seconds();
6004
6005 status = nfserr_expired;
6006 found = lookup_clientid(&cps->cp_p_clid, true, nn);
6007 if (!found)
6008 goto out;
6009
6010 *stid = find_stateid_by_type(found, &cps->cp_p_stateid,
6011 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID);
6012 if (*stid)
6013 status = nfs_ok;
6014 else
6015 status = nfserr_bad_stateid;
6016
6017 put_client_renew(found);
6018out:
6019 nfs4_put_cpntf_state(nn, cps);
6020 return status;
6021}
6022
6023void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
6024{
6025 spin_lock(&nn->s2s_cp_lock);
6026 _free_cpntf_state_locked(nn, cps);
6027 spin_unlock(&nn->s2s_cp_lock);
6028}
6029
6030/*
6031 * Checks for stateid operations
6032 */
6033__be32
6034nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
6035 struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
6036 stateid_t *stateid, int flags, struct nfsd_file **nfp,
6037 struct nfs4_stid **cstid)
6038{
6039 struct inode *ino = d_inode(fhp->fh_dentry);
6040 struct net *net = SVC_NET(rqstp);
6041 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6042 struct nfs4_stid *s = NULL;
6043 __be32 status;
6044
6045 if (nfp)
6046 *nfp = NULL;
6047
6048 if (grace_disallows_io(net, ino))
6049 return nfserr_grace;
6050
6051 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
6052 status = check_special_stateids(net, fhp, stateid, flags);
6053 goto done;
6054 }
6055
6056 status = nfsd4_lookup_stateid(cstate, stateid,
6057 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
6058 &s, nn);
6059 if (status == nfserr_bad_stateid)
6060 status = find_cpntf_state(nn, stateid, &s);
6061 if (status)
6062 return status;
6063 status = nfsd4_stid_check_stateid_generation(stateid, s,
6064 nfsd4_has_session(cstate));
6065 if (status)
6066 goto out;
6067
6068 switch (s->sc_type) {
6069 case NFS4_DELEG_STID:
6070 status = nfs4_check_delegmode(delegstateid(s), flags);
6071 break;
6072 case NFS4_OPEN_STID:
6073 case NFS4_LOCK_STID:
6074 status = nfs4_check_olstateid(openlockstateid(s), flags);
6075 break;
6076 default:
6077 status = nfserr_bad_stateid;
6078 break;
6079 }
6080 if (status)
6081 goto out;
6082 status = nfs4_check_fh(fhp, s);
6083
6084done:
6085 if (status == nfs_ok && nfp)
6086 status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
6087out:
6088 if (s) {
6089 if (!status && cstid)
6090 *cstid = s;
6091 else
6092 nfs4_put_stid(s);
6093 }
6094 return status;
6095}
6096
6097/*
6098 * Test if the stateid is valid
6099 */
6100__be32
6101nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6102 union nfsd4_op_u *u)
6103{
6104 struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
6105 struct nfsd4_test_stateid_id *stateid;
6106 struct nfs4_client *cl = cstate->clp;
6107
6108 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
6109 stateid->ts_id_status =
6110 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
6111
6112 return nfs_ok;
6113}
6114
6115static __be32
6116nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
6117{
6118 struct nfs4_ol_stateid *stp = openlockstateid(s);
6119 __be32 ret;
6120
6121 ret = nfsd4_lock_ol_stateid(stp);
6122 if (ret)
6123 goto out_put_stid;
6124
6125 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
6126 if (ret)
6127 goto out;
6128
6129 ret = nfserr_locks_held;
6130 if (check_for_locks(stp->st_stid.sc_file,
6131 lockowner(stp->st_stateowner)))
6132 goto out;
6133
6134 release_lock_stateid(stp);
6135 ret = nfs_ok;
6136
6137out:
6138 mutex_unlock(&stp->st_mutex);
6139out_put_stid:
6140 nfs4_put_stid(s);
6141 return ret;
6142}
6143
6144__be32
6145nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6146 union nfsd4_op_u *u)
6147{
6148 struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
6149 stateid_t *stateid = &free_stateid->fr_stateid;
6150 struct nfs4_stid *s;
6151 struct nfs4_delegation *dp;
6152 struct nfs4_client *cl = cstate->clp;
6153 __be32 ret = nfserr_bad_stateid;
6154
6155 spin_lock(&cl->cl_lock);
6156 s = find_stateid_locked(cl, stateid);
6157 if (!s)
6158 goto out_unlock;
6159 spin_lock(&s->sc_lock);
6160 switch (s->sc_type) {
6161 case NFS4_DELEG_STID:
6162 ret = nfserr_locks_held;
6163 break;
6164 case NFS4_OPEN_STID:
6165 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
6166 if (ret)
6167 break;
6168 ret = nfserr_locks_held;
6169 break;
6170 case NFS4_LOCK_STID:
6171 spin_unlock(&s->sc_lock);
6172 refcount_inc(&s->sc_count);
6173 spin_unlock(&cl->cl_lock);
6174 ret = nfsd4_free_lock_stateid(stateid, s);
6175 goto out;
6176 case NFS4_REVOKED_DELEG_STID:
6177 spin_unlock(&s->sc_lock);
6178 dp = delegstateid(s);
6179 list_del_init(&dp->dl_recall_lru);
6180 spin_unlock(&cl->cl_lock);
6181 nfs4_put_stid(s);
6182 ret = nfs_ok;
6183 goto out;
6184 /* Default falls through and returns nfserr_bad_stateid */
6185 }
6186 spin_unlock(&s->sc_lock);
6187out_unlock:
6188 spin_unlock(&cl->cl_lock);
6189out:
6190 return ret;
6191}
6192
6193static inline int
6194setlkflg(int type)
6195{
6196 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
6197 RD_STATE : WR_STATE;
6198}
6199
6200static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
6201{
6202 struct svc_fh *current_fh = &cstate->current_fh;
6203 struct nfs4_stateowner *sop = stp->st_stateowner;
6204 __be32 status;
6205
6206 status = nfsd4_check_seqid(cstate, sop, seqid);
6207 if (status)
6208 return status;
6209 status = nfsd4_lock_ol_stateid(stp);
6210 if (status != nfs_ok)
6211 return status;
6212 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
6213 if (status == nfs_ok)
6214 status = nfs4_check_fh(current_fh, &stp->st_stid);
6215 if (status != nfs_ok)
6216 mutex_unlock(&stp->st_mutex);
6217 return status;
6218}
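/*
 * Annotation (not from the original source): on nfs_ok the stateid's
 * st_mutex is left held for the caller; every failure path above either
 * never took it or has already dropped it, so callers unlock only on
 * success.
 */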
6219
6220/*
6221 * Checks for sequence id mutating operations.
6222 */
6223static __be32
6224nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6225 stateid_t *stateid, char typemask,
6226 struct nfs4_ol_stateid **stpp,
6227 struct nfsd_net *nn)
6228{
6229 __be32 status;
6230 struct nfs4_stid *s;
6231 struct nfs4_ol_stateid *stp = NULL;
6232
6233 trace_nfsd_preprocess(seqid, stateid);
6234
6235 *stpp = NULL;
6236 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
6237 if (status)
6238 return status;
6239 stp = openlockstateid(s);
6240 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
6241
6242 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
6243 if (!status)
6244 *stpp = stp;
6245 else
6246 nfs4_put_stid(&stp->st_stid);
6247 return status;
6248}
6249
6250static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6251 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
6252{
6253 __be32 status;
6254 struct nfs4_openowner *oo;
6255 struct nfs4_ol_stateid *stp;
6256
6257 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
6258 NFS4_OPEN_STID, &stp, nn);
6259 if (status)
6260 return status;
6261 oo = openowner(stp->st_stateowner);
6262 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
6263 mutex_unlock(&stp->st_mutex);
6264 nfs4_put_stid(&stp->st_stid);
6265 return nfserr_bad_stateid;
6266 }
6267 *stpp = stp;
6268 return nfs_ok;
6269}
6270
6271__be32
6272nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6273 union nfsd4_op_u *u)
6274{
6275 struct nfsd4_open_confirm *oc = &u->open_confirm;
6276 __be32 status;
6277 struct nfs4_openowner *oo;
6278 struct nfs4_ol_stateid *stp;
6279 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6280
6281 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
6282 cstate->current_fh.fh_dentry);
6283
6284 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
6285 if (status)
6286 return status;
6287
6288 status = nfs4_preprocess_seqid_op(cstate,
6289 oc->oc_seqid, &oc->oc_req_stateid,
6290 NFS4_OPEN_STID, &stp, nn);
6291 if (status)
6292 goto out;
6293 oo = openowner(stp->st_stateowner);
6294 status = nfserr_bad_stateid;
6295 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
6296 mutex_unlock(&stp->st_mutex);
6297 goto put_stateid;
6298 }
6299 oo->oo_flags |= NFS4_OO_CONFIRMED;
6300 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
6301 mutex_unlock(&stp->st_mutex);
6302 trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
6303 nfsd4_client_record_create(oo->oo_owner.so_client);
6304 status = nfs_ok;
6305put_stateid:
6306 nfs4_put_stid(&stp->st_stid);
6307out:
6308 nfsd4_bump_seqid(cstate, status);
6309 return status;
6310}
6311
6312static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
6313{
6314 if (!test_access(access, stp))
6315 return;
6316 nfs4_file_put_access(stp->st_stid.sc_file, access);
6317 clear_access(access, stp);
6318}
6319
6320static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
6321{
6322 switch (to_access) {
6323 case NFS4_SHARE_ACCESS_READ:
6324 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
6325 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6326 break;
6327 case NFS4_SHARE_ACCESS_WRITE:
6328 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
6329 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6330 break;
6331 case NFS4_SHARE_ACCESS_BOTH:
6332 break;
6333 default:
6334 WARN_ON_ONCE(1);
6335 }
6336}
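/*
 * Example (annotation, not from the original source): an OPEN_DOWNGRADE
 * from BOTH to READ calls nfs4_stateid_downgrade(stp,
 * NFS4_SHARE_ACCESS_READ), which drops the WRITE and BOTH access bits
 * (and the nfs4_file references behind them) while leaving READ intact.
 */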
6337
6338__be32
6339nfsd4_open_downgrade(struct svc_rqst *rqstp,
6340 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
6341{
6342 struct nfsd4_open_downgrade *od = &u->open_downgrade;
6343 __be32 status;
6344 struct nfs4_ol_stateid *stp;
6345 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6346
6347 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
6348 cstate->current_fh.fh_dentry);
6349
6350 /* We don't yet support WANT bits: */
6351 if (od->od_deleg_want)
6352 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
6353 od->od_deleg_want);
6354
6355 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
6356 &od->od_stateid, &stp, nn);
6357 if (status)
6358 goto out;
6359 status = nfserr_inval;
6360 if (!test_access(od->od_share_access, stp)) {
6361 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
6362 stp->st_access_bmap, od->od_share_access);
6363 goto put_stateid;
6364 }
6365 if (!test_deny(od->od_share_deny, stp)) {
6366 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
6367 stp->st_deny_bmap, od->od_share_deny);
6368 goto put_stateid;
6369 }
6370 nfs4_stateid_downgrade(stp, od->od_share_access);
6371 reset_union_bmap_deny(od->od_share_deny, stp);
6372 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
6373 status = nfs_ok;
6374put_stateid:
6375 mutex_unlock(&stp->st_mutex);
6376 nfs4_put_stid(&stp->st_stid);
6377out:
6378 nfsd4_bump_seqid(cstate, status);
6379 return status;
6380}
6381
6382static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
6383{
6384 struct nfs4_client *clp = s->st_stid.sc_client;
6385 bool unhashed;
6386 LIST_HEAD(reaplist);
6387
6388 spin_lock(&clp->cl_lock);
6389 unhashed = unhash_open_stateid(s, &reaplist);
6390
6391 if (clp->cl_minorversion) {
6392 if (unhashed)
6393 put_ol_stateid_locked(s, &reaplist);
6394 spin_unlock(&clp->cl_lock);
6395 free_ol_stateid_reaplist(&reaplist);
6396 } else {
6397 spin_unlock(&clp->cl_lock);
6398 free_ol_stateid_reaplist(&reaplist);
6399 if (unhashed)
6400 move_to_close_lru(s, clp->net);
6401 }
6402}
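/*
 * Annotation (not from the original source): the minorversion split above
 * reflects replay handling.  NFSv4.0 has no sessions, so the just-closed
 * stateid is parked on close_lru for a while to satisfy seqid replays of
 * the CLOSE; v4.1+ replays are absorbed by the session's reply cache, so
 * the stateid can be freed immediately.
 */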
6403
6404/*
6405 * nfs4_unlock_state() called after encode
6406 */
6407__be32
6408nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6409 union nfsd4_op_u *u)
6410{
6411 struct nfsd4_close *close = &u->close;
6412 __be32 status;
6413 struct nfs4_ol_stateid *stp;
6414 struct net *net = SVC_NET(rqstp);
6415 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6416
6417 dprintk("NFSD: nfsd4_close on file %pd\n",
6418 cstate->current_fh.fh_dentry);
6419
6420 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
6421 &close->cl_stateid,
6422 NFS4_OPEN_STID|NFS4_CLOSED_STID,
6423 &stp, nn);
6424 nfsd4_bump_seqid(cstate, status);
6425 if (status)
6426 goto out;
6427
6428 stp->st_stid.sc_type = NFS4_CLOSED_STID;
6429
6430 /*
6431 * Technically we don't _really_ have to increment or copy it, since
6432 * it should just be gone after this operation and we clobber the
6433 * copied value below, but we continue to do so here just to ensure
6434 * that racing ops see that there was a state change.
6435 */
6436 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
6437
6438 nfsd4_close_open_stateid(stp);
6439 mutex_unlock(&stp->st_mutex);
6440
6441 /* v4.1+ suggests that we send a special stateid in here, since the
6442 * clients should just ignore this anyway. Since this is not useful
6443 * for v4.0 clients either, we set it to the special close_stateid
6444 * universally.
6445 *
6446 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
6447 */
6448 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));
6449
6450 /* put reference from nfs4_preprocess_seqid_op */
6451 nfs4_put_stid(&stp->st_stid);
6452out:
6453 return status;
6454}
6455
6456__be32
6457nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6458 union nfsd4_op_u *u)
6459{
6460 struct nfsd4_delegreturn *dr = &u->delegreturn;
6461 struct nfs4_delegation *dp;
6462 stateid_t *stateid = &dr->dr_stateid;
6463 struct nfs4_stid *s;
6464 __be32 status;
6465 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6466
6467 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
6468 return status;
6469
6470 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
6471 if (status)
6472 goto out;
6473 dp = delegstateid(s);
6474 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
6475 if (status)
6476 goto put_stateid;
6477
6478 destroy_delegation(dp);
6479put_stateid:
6480 nfs4_put_stid(&dp->dl_stid);
6481out:
6482 return status;
6483}
6484
6485/* last octet in a range */
6486static inline u64
6487last_byte_offset(u64 start, u64 len)
6488{
6489 u64 end;
6490
6491 WARN_ON_ONCE(!len);
6492 end = start + len;
6493 return end > start ? end - 1 : NFS4_MAX_UINT64;
6494}
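/*
 * Examples (annotation, not from the original source):
 *
 *	last_byte_offset(5, 3) == 7		(covers bytes 5, 6, 7)
 *	last_byte_offset(5, NFS4_MAX_UINT64)	wraps, so the lock-to-EOF
 *						sentinel NFS4_MAX_UINT64 is
 *						returned instead.
 */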
6495
6496/*
6497 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
6498 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
6499 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
6500 * locking, this prevents us from being completely protocol-compliant. The
6501 * real solution to this problem is to start using unsigned file offsets in
6502 * the VFS, but this is a very deep change!
6503 */
6504static inline void
6505nfs4_transform_lock_offset(struct file_lock *lock)
6506{
6507 if (lock->fl_start < 0)
6508 lock->fl_start = OFFSET_MAX;
6509 if (lock->fl_end < 0)
6510 lock->fl_end = OFFSET_MAX;
6511}
6512
6513static fl_owner_t
6514nfsd4_fl_get_owner(fl_owner_t owner)
6515{
6516 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
6517
6518 nfs4_get_stateowner(&lo->lo_owner);
6519 return owner;
6520}
6521
6522static void
6523nfsd4_fl_put_owner(fl_owner_t owner)
6524{
6525 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
6526
6527 if (lo)
6528 nfs4_put_stateowner(&lo->lo_owner);
6529}
6530
6531static void
6532nfsd4_lm_notify(struct file_lock *fl)
6533{
6534 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
6535 struct net *net = lo->lo_owner.so_client->net;
6536 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6537 struct nfsd4_blocked_lock *nbl = container_of(fl,
6538 struct nfsd4_blocked_lock, nbl_lock);
6539 bool queue = false;
6540
6541 /* An empty list means that something else is going to be using it */
6542 spin_lock(&nn->blocked_locks_lock);
6543 if (!list_empty(&nbl->nbl_list)) {
6544 list_del_init(&nbl->nbl_list);
6545 list_del_init(&nbl->nbl_lru);
6546 queue = true;
6547 }
6548 spin_unlock(&nn->blocked_locks_lock);
6549
6550 if (queue) {
6551 trace_nfsd_cb_notify_lock(lo, nbl);
6552 nfsd4_run_cb(&nbl->nbl_cb);
6553 }
6554}
6555
6556static const struct lock_manager_operations nfsd_posix_mng_ops = {
6557 .lm_notify = nfsd4_lm_notify,
6558 .lm_get_owner = nfsd4_fl_get_owner,
6559 .lm_put_owner = nfsd4_fl_put_owner,
6560};
6561
6562static inline void
6563nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
6564{
6565 struct nfs4_lockowner *lo;
6566
6567 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
6568 lo = (struct nfs4_lockowner *) fl->fl_owner;
6569 xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
6570 GFP_KERNEL);
6571 if (!deny->ld_owner.data)
6572 /* We just don't care that much */
6573 goto nevermind;
6574 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
6575 } else {
6576nevermind:
6577 deny->ld_owner.len = 0;
6578 deny->ld_owner.data = NULL;
6579 deny->ld_clientid.cl_boot = 0;
6580 deny->ld_clientid.cl_id = 0;
6581 }
6582 deny->ld_start = fl->fl_start;
6583 deny->ld_length = NFS4_MAX_UINT64;
6584 if (fl->fl_end != NFS4_MAX_UINT64)
6585 deny->ld_length = fl->fl_end - fl->fl_start + 1;
6586 deny->ld_type = NFS4_READ_LT;
6587 if (fl->fl_type != F_RDLCK)
6588 deny->ld_type = NFS4_WRITE_LT;
6589}
6590
6591static struct nfs4_lockowner *
6592find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
6593{
6594 unsigned int strhashval = ownerstr_hashval(owner);
6595 struct nfs4_stateowner *so;
6596
6597 lockdep_assert_held(&clp->cl_lock);
6598
6599 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
6600 so_strhash) {
6601 if (so->so_is_open_owner)
6602 continue;
6603 if (same_owner_str(so, owner))
6604 return lockowner(nfs4_get_stateowner(so));
6605 }
6606 return NULL;
6607}
6608
6609static struct nfs4_lockowner *
6610find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
6611{
6612 struct nfs4_lockowner *lo;
6613
6614 spin_lock(&clp->cl_lock);
6615 lo = find_lockowner_str_locked(clp, owner);
6616 spin_unlock(&clp->cl_lock);
6617 return lo;
6618}
6619
6620static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
6621{
6622 unhash_lockowner_locked(lockowner(sop));
6623}
6624
6625static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
6626{
6627 struct nfs4_lockowner *lo = lockowner(sop);
6628
6629 kmem_cache_free(lockowner_slab, lo);
6630}
6631
6632static const struct nfs4_stateowner_operations lockowner_ops = {
6633 .so_unhash = nfs4_unhash_lockowner,
6634 .so_free = nfs4_free_lockowner,
6635};
6636
6637/*
6638 * Alloc a lock owner structure.
6639 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
6640 * occurred.
6641 *
6642 * strhashval = ownerstr_hashval
6643 */
6644static struct nfs4_lockowner *
6645alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
6646 struct nfs4_ol_stateid *open_stp,
6647 struct nfsd4_lock *lock)
6648{
6649 struct nfs4_lockowner *lo, *ret;
6650
6651 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
6652 if (!lo)
6653 return NULL;
6654 INIT_LIST_HEAD(&lo->lo_blocked);
6655 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
6656 lo->lo_owner.so_is_open_owner = 0;
6657 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
6658 lo->lo_owner.so_ops = &lockowner_ops;
6659 spin_lock(&clp->cl_lock);
6660 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
6661 if (ret == NULL) {
6662 list_add(&lo->lo_owner.so_strhash,
6663 &clp->cl_ownerstr_hashtbl[strhashval]);
6664 ret = lo;
6665 } else
6666 nfs4_free_stateowner(&lo->lo_owner);
6667
6668 spin_unlock(&clp->cl_lock);
6669 return ret;
6670}
6671
6672static struct nfs4_ol_stateid *
6673find_lock_stateid(const struct nfs4_lockowner *lo,
6674 const struct nfs4_ol_stateid *ost)
6675{
6676 struct nfs4_ol_stateid *lst;
6677
6678 lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
6679
6680 /* If ost is not hashed, ost->st_locks will not be valid */
6681 if (!nfs4_ol_stateid_unhashed(ost))
6682 list_for_each_entry(lst, &ost->st_locks, st_locks) {
6683 if (lst->st_stateowner == &lo->lo_owner) {
6684 refcount_inc(&lst->st_stid.sc_count);
6685 return lst;
6686 }
6687 }
6688 return NULL;
6689}
6690
6691static struct nfs4_ol_stateid *
6692init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
6693 struct nfs4_file *fp, struct inode *inode,
6694 struct nfs4_ol_stateid *open_stp)
6695{
6696 struct nfs4_client *clp = lo->lo_owner.so_client;
6697 struct nfs4_ol_stateid *retstp;
6698
6699 mutex_init(&stp->st_mutex);
6700 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
6701retry:
6702 spin_lock(&clp->cl_lock);
6703 if (nfs4_ol_stateid_unhashed(open_stp))
6704 goto out_close;
6705 retstp = find_lock_stateid(lo, open_stp);
6706 if (retstp)
6707 goto out_found;
6708 refcount_inc(&stp->st_stid.sc_count);
6709 stp->st_stid.sc_type = NFS4_LOCK_STID;
6710 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
6711 get_nfs4_file(fp);
6712 stp->st_stid.sc_file = fp;
6713 stp->st_access_bmap = 0;
6714 stp->st_deny_bmap = open_stp->st_deny_bmap;
6715 stp->st_openstp = open_stp;
6716 spin_lock(&fp->fi_lock);
6717 list_add(&stp->st_locks, &open_stp->st_locks);
6718 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
6719 list_add(&stp->st_perfile, &fp->fi_stateids);
6720 spin_unlock(&fp->fi_lock);
6721 spin_unlock(&clp->cl_lock);
6722 return stp;
6723out_found:
6724 spin_unlock(&clp->cl_lock);
6725 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
6726 nfs4_put_stid(&retstp->st_stid);
6727 goto retry;
6728 }
6729 /* To keep mutex tracking happy */
6730 mutex_unlock(&stp->st_mutex);
6731 return retstp;
6732out_close:
6733 spin_unlock(&clp->cl_lock);
6734 mutex_unlock(&stp->st_mutex);
6735 return NULL;
6736}
6737
6738static struct nfs4_ol_stateid *
6739find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
6740 struct inode *inode, struct nfs4_ol_stateid *ost,
6741 bool *new)
6742{
6743 struct nfs4_stid *ns = NULL;
6744 struct nfs4_ol_stateid *lst;
6745 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
6746 struct nfs4_client *clp = oo->oo_owner.so_client;
6747
6748 *new = false;
6749 spin_lock(&clp->cl_lock);
6750 lst = find_lock_stateid(lo, ost);
6751 spin_unlock(&clp->cl_lock);
6752 if (lst != NULL) {
6753 if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
6754 goto out;
6755 nfs4_put_stid(&lst->st_stid);
6756 }
6757 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
6758 if (ns == NULL)
6759 return NULL;
6760
6761 lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
6762 if (lst == openlockstateid(ns))
6763 *new = true;
6764 else
6765 nfs4_put_stid(ns);
6766out:
6767 return lst;
6768}
6769
6770static int
6771check_lock_length(u64 offset, u64 length)
6772{
6773 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
6774 (length > ~offset)));
6775}
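/*
 * Annotation (not from the original source): "length > ~offset" is an
 * overflow test, since ~offset == NFS4_MAX_UINT64 - offset.  A range
 * whose last byte would wrap a u64 (e.g. offset 2^63, length 2^63 + 1)
 * is rejected, while length == NFS4_MAX_UINT64 always means "to EOF"
 * and is accepted.
 */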
6776
6777static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
6778{
6779 struct nfs4_file *fp = lock_stp->st_stid.sc_file;
6780
6781 lockdep_assert_held(&fp->fi_lock);
6782
6783 if (test_access(access, lock_stp))
6784 return;
6785 __nfs4_file_get_access(fp, access);
6786 set_access(access, lock_stp);
6787}
6788
6789static __be32
6790lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
6791 struct nfs4_ol_stateid *ost,
6792 struct nfsd4_lock *lock,
6793 struct nfs4_ol_stateid **plst, bool *new)
6794{
6795 __be32 status;
6796 struct nfs4_file *fi = ost->st_stid.sc_file;
6797 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
6798 struct nfs4_client *cl = oo->oo_owner.so_client;
6799 struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
6800 struct nfs4_lockowner *lo;
6801 struct nfs4_ol_stateid *lst;
6802 unsigned int strhashval;
6803
6804 lo = find_lockowner_str(cl, &lock->lk_new_owner);
6805 if (!lo) {
6806 strhashval = ownerstr_hashval(&lock->lk_new_owner);
6807 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
6808 if (lo == NULL)
6809 return nfserr_jukebox;
6810 } else {
6811 /* with an existing lockowner, seqids must be the same */
6812 status = nfserr_bad_seqid;
6813 if (!cstate->minorversion &&
6814 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
6815 goto out;
6816 }
6817
6818 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
6819 if (lst == NULL) {
6820 status = nfserr_jukebox;
6821 goto out;
6822 }
6823
6824 status = nfs_ok;
6825 *plst = lst;
6826out:
6827 nfs4_put_stateowner(&lo->lo_owner);
6828 return status;
6829}
6830
6831/*
6832 * LOCK operation
6833 */
6834__be32
6835nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6836 union nfsd4_op_u *u)
6837{
6838 struct nfsd4_lock *lock = &u->lock;
6839 struct nfs4_openowner *open_sop = NULL;
6840 struct nfs4_lockowner *lock_sop = NULL;
6841 struct nfs4_ol_stateid *lock_stp = NULL;
6842 struct nfs4_ol_stateid *open_stp = NULL;
6843 struct nfs4_file *fp;
6844 struct nfsd_file *nf = NULL;
6845 struct nfsd4_blocked_lock *nbl = NULL;
6846 struct file_lock *file_lock = NULL;
6847 struct file_lock *conflock = NULL;
6848 struct super_block *sb;
6849 __be32 status = 0;
6850 int lkflg;
6851 int err;
6852 bool new = false;
6853 unsigned char fl_type;
6854 unsigned int fl_flags = FL_POSIX;
6855 struct net *net = SVC_NET(rqstp);
6856 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6857
6858 dprintk("NFSD: nfsd4_lock: start=%lld length=%lld\n",
6859 (long long) lock->lk_offset,
6860 (long long) lock->lk_length);
6861
6862 if (check_lock_length(lock->lk_offset, lock->lk_length))
6863 return nfserr_inval;
6864
6865 if ((status = fh_verify(rqstp, &cstate->current_fh,
6866 S_IFREG, NFSD_MAY_LOCK))) {
6867 dprintk("NFSD: nfsd4_lock: permission denied!\n");
6868 return status;
6869 }
6870 sb = cstate->current_fh.fh_dentry->d_sb;
6871
6872 if (lock->lk_is_new) {
6873 if (nfsd4_has_session(cstate))
6874 /* See rfc 5661 18.10.3: given clientid is ignored: */
6875 memcpy(&lock->lk_new_clientid,
6876 &cstate->clp->cl_clientid,
6877 sizeof(clientid_t));
6878
6879 /* validate and update open stateid and open seqid */
6880 status = nfs4_preprocess_confirmed_seqid_op(cstate,
6881 lock->lk_new_open_seqid,
6882 &lock->lk_new_open_stateid,
6883 &open_stp, nn);
6884 if (status)
6885 goto out;
6886 mutex_unlock(&open_stp->st_mutex);
6887 open_sop = openowner(open_stp->st_stateowner);
6888 status = nfserr_bad_stateid;
6889 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
6890 &lock->lk_new_clientid))
6891 goto out;
6892 status = lookup_or_create_lock_state(cstate, open_stp, lock,
6893 &lock_stp, &new);
6894 } else {
6895 status = nfs4_preprocess_seqid_op(cstate,
6896 lock->lk_old_lock_seqid,
6897 &lock->lk_old_lock_stateid,
6898 NFS4_LOCK_STID, &lock_stp, nn);
6899 }
6900 if (status)
6901 goto out;
6902 lock_sop = lockowner(lock_stp->st_stateowner);
6903
6904 lkflg = setlkflg(lock->lk_type);
6905 status = nfs4_check_openmode(lock_stp, lkflg);
6906 if (status)
6907 goto out;
6908
6909 status = nfserr_grace;
6910 if (locks_in_grace(net) && !lock->lk_reclaim)
6911 goto out;
6912 status = nfserr_no_grace;
6913 if (!locks_in_grace(net) && lock->lk_reclaim)
6914 goto out;
6915
6916 fp = lock_stp->st_stid.sc_file;
6917 switch (lock->lk_type) {
6918 case NFS4_READW_LT:
6919 if (nfsd4_has_session(cstate) &&
6920 !(sb->s_export_op->flags & EXPORT_OP_SYNC_LOCKS))
6921 fl_flags |= FL_SLEEP;
6922 fallthrough;
6923 case NFS4_READ_LT:
6924 spin_lock(&fp->fi_lock);
6925 nf = find_readable_file_locked(fp);
6926 if (nf)
6927 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
6928 spin_unlock(&fp->fi_lock);
6929 fl_type = F_RDLCK;
6930 break;
6931 case NFS4_WRITEW_LT:
6932 if (nfsd4_has_session(cstate) &&
6933 !(sb->s_export_op->flags & EXPORT_OP_SYNC_LOCKS))
6934 fl_flags |= FL_SLEEP;
6935 fallthrough;
6936 case NFS4_WRITE_LT:
6937 spin_lock(&fp->fi_lock);
6938 nf = find_writeable_file_locked(fp);
6939 if (nf)
6940 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
6941 spin_unlock(&fp->fi_lock);
6942 fl_type = F_WRLCK;
6943 break;
6944 default:
6945 status = nfserr_inval;
6946 goto out;
6947 }
6948
6949 if (!nf) {
6950 status = nfserr_openmode;
6951 goto out;
6952 }
6953
6954 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
6955 if (!nbl) {
6956 dprintk("NFSD: %s: unable to allocate block!\n", __func__);
6957 status = nfserr_jukebox;
6958 goto out;
6959 }
6960
6961 file_lock = &nbl->nbl_lock;
6962 file_lock->fl_type = fl_type;
6963 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
6964 file_lock->fl_pid = current->tgid;
6965 file_lock->fl_file = nf->nf_file;
6966 file_lock->fl_flags = fl_flags;
6967 file_lock->fl_lmops = &nfsd_posix_mng_ops;
6968 file_lock->fl_start = lock->lk_offset;
6969 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
6970 nfs4_transform_lock_offset(file_lock);
6971
6972 conflock = locks_alloc_lock();
6973 if (!conflock) {
6974 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6975 status = nfserr_jukebox;
6976 goto out;
6977 }
6978
6979 if (fl_flags & FL_SLEEP) {
6980 nbl->nbl_time = ktime_get_boottime_seconds();
6981 spin_lock(&nn->blocked_locks_lock);
6982 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
6983 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
6984 spin_unlock(&nn->blocked_locks_lock);
6985 }
6986
6987 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
6988 switch (err) {
6989 case 0: /* success! */
6990 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
6991 status = 0;
6992 if (lock->lk_reclaim)
6993 nn->somebody_reclaimed = true;
6994 break;
6995 case FILE_LOCK_DEFERRED:
6996 nbl = NULL;
6997 fallthrough;
6998 case -EAGAIN: /* conflock holds conflicting lock */
6999 status = nfserr_denied;
7000 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
7001 nfs4_set_lock_denied(conflock, &lock->lk_denied);
7002 break;
7003 case -EDEADLK:
7004 status = nfserr_deadlock;
7005 break;
7006 default:
7007 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
7008 status = nfserrno(err);
7009 break;
7010 }
out:
	if (nbl) {
		/* dequeue it if we queued it before */
		if (fl_flags & FL_SLEEP) {
			spin_lock(&nn->blocked_locks_lock);
			list_del_init(&nbl->nbl_list);
			list_del_init(&nbl->nbl_lru);
			spin_unlock(&nn->blocked_locks_lock);
		}
		free_blocked_lock(nbl);
	}
	if (nf)
		nfsd_file_put(nf);
	if (lock_stp) {
		/* Bump seqid manually if the 4.0 replay owner is openowner */
		if (cstate->replay_owner &&
		    cstate->replay_owner != &lock_sop->lo_owner &&
		    seqid_mutating_err(ntohl(status)))
			lock_sop->lo_owner.so_seqid++;

		/*
		 * If this is a new, never-before-used stateid, and we are
		 * returning an error, then just go ahead and release it.
		 */
		if (status && new)
			release_lock_stateid(lock_stp);

		mutex_unlock(&lock_stp->st_mutex);

		nfs4_put_stid(&lock_stp->st_stid);
	}
	if (open_stp)
		nfs4_put_stid(&open_stp->st_stid);
	nfsd4_bump_seqid(cstate, status);
	if (conflock)
		locks_free_lock(conflock);
	return status;
}

/*
 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
 * so we do a temporary open here just to get an open file to pass to
 * vfs_test_lock.
 */
static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
{
	struct nfsd_file *nf;
	__be32 err;

	err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
	if (err)
		return err;
	fh_lock(fhp); /* to block new leases until after test_lock: */
	err = nfserrno(nfsd_open_break_lease(fhp->fh_dentry->d_inode,
					     NFSD_MAY_READ));
	if (err)
		goto out;
	lock->fl_file = nf->nf_file;
	err = nfserrno(vfs_test_lock(nf->nf_file, lock));
	lock->fl_file = NULL;
out:
	fh_unlock(fhp);
	nfsd_file_put(nf);
	return err;
}

/*
 * LOCKT operation
 */
__be32
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{
	struct nfsd4_lockt *lockt = &u->lockt;
	struct file_lock *file_lock = NULL;
	struct nfs4_lockowner *lo = NULL;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (locks_in_grace(SVC_NET(rqstp)))
		return nfserr_grace;

	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
		return nfserr_inval;

	if (!nfsd4_has_session(cstate)) {
		status = set_client(&lockt->lt_clientid, cstate, nn);
		if (status)
			goto out;
	}
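	/*
	 * With a v4.1+ session, the clientid carried in LOCKT is ignored;
	 * cstate->clp was already established from the session itself.
	 */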

	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
	if (status)
		goto out;

	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	switch (lockt->lt_type) {
	case NFS4_READ_LT:
	case NFS4_READW_LT:
		file_lock->fl_type = F_RDLCK;
		break;
	case NFS4_WRITE_LT:
	case NFS4_WRITEW_LT:
		file_lock->fl_type = F_WRLCK;
		break;
	default:
		dprintk("NFSD: nfs4_lockt: bad lock type!\n");
		status = nfserr_inval;
		goto out;
	}

	lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
	if (lo)
		file_lock->fl_owner = (fl_owner_t)lo;
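	/*
	 * If this owner has never taken a lock here, fl_owner stays NULL
	 * (locks_alloc_lock() returns a zeroed lock); a NULL owner matches
	 * no existing lock owner, so any overlapping lock is reported as
	 * a conflict.
	 */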
	file_lock->fl_pid = current->tgid;
	file_lock->fl_flags = FL_POSIX;

	file_lock->fl_start = lockt->lt_offset;
	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);

	nfs4_transform_lock_offset(file_lock);

	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
	if (status)
		goto out;

	if (file_lock->fl_type != F_UNLCK) {
		status = nfserr_denied;
		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
	}
out:
	if (lo)
		nfs4_put_stateowner(&lo->lo_owner);
	if (file_lock)
		locks_free_lock(file_lock);
	return status;
}

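/*
 * LOCKU operation: release a byte range by applying an F_UNLCK lock
 * through vfs_lock_file().  Unlike LOCK, an unlock request cannot be
 * denied by a conflicting lock.
 */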
__be32
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    union nfsd4_op_u *u)
{
	struct nfsd4_locku *locku = &u->locku;
	struct nfs4_ol_stateid *stp;
	struct nfsd_file *nf = NULL;
	struct file_lock *file_lock = NULL;
	__be32 status;
	int err;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_locku: start=%lld length=%lld\n",
		(long long)locku->lu_offset,
		(long long)locku->lu_length);

	if (check_lock_length(locku->lu_offset, locku->lu_length))
		return nfserr_inval;

	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
					  &locku->lu_stateid, NFS4_LOCK_STID,
					  &stp, nn);
	if (status)
		goto out;
	nf = find_any_file(stp->st_stid.sc_file);
	if (!nf) {
		status = nfserr_lock_range;
		goto put_stateid;
	}
	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto put_file;
	}

	file_lock->fl_type = F_UNLCK;
	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = nf->nf_file;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = locku->lu_offset;

	file_lock->fl_end = last_byte_offset(locku->lu_offset,
					     locku->lu_length);
	nfs4_transform_lock_offset(file_lock);

	err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
	if (err) {
		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
		goto out_nfserr;
	}
	nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
put_file:
	nfsd_file_put(nf);
put_stateid:
	mutex_unlock(&stp->st_mutex);
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	if (file_lock)
		locks_free_lock(file_lock);
	return status;

out_nfserr:
	status = nfserrno(err);
	goto put_file;
}

/*
 * returns
 *	true:  locks held by lockowner
 *	false: no locks held by lockowner
 */
static bool
check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
{
	struct file_lock *fl;
	bool status = false;
	struct nfsd_file *nf = find_any_file(fp);
	struct inode *inode;
	struct file_lock_context *flctx;

	if (!nf) {
		/* Any valid lock stateid should have some sort of access */
		WARN_ON_ONCE(1);
		return status;
	}

	inode = locks_inode(nf->nf_file);
	flctx = inode->i_flctx;

	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
		spin_lock(&flctx->flc_lock);
		list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
			if (fl->fl_owner == (fl_owner_t)lowner) {
				status = true;
				break;
			}
		}
		spin_unlock(&flctx->flc_lock);
	}
	nfsd_file_put(nf);
	return status;
}

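/*
 * RELEASE_LOCKOWNER may only succeed when the lockowner holds no locks;
 * otherwise NFS4ERR_LOCKS_HELD must be returned.  On success, all of the
 * owner's lock stateids are released along with the owner itself.
 */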
__be32
nfsd4_release_lockowner(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			union nfsd4_op_u *u)
{
	struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
	clientid_t *clid = &rlockowner->rl_clientid;
	struct nfs4_stateowner *sop;
	struct nfs4_lockowner *lo = NULL;
	struct nfs4_ol_stateid *stp;
	struct xdr_netobj *owner = &rlockowner->rl_owner;
	unsigned int hashval = ownerstr_hashval(owner);
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	struct nfs4_client *clp;
	LIST_HEAD(reaplist);

	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
		clid->cl_boot, clid->cl_id);

	status = set_client(clid, cstate, nn);
	if (status)
		return status;

	clp = cstate->clp;
	/* Find the matching lock stateowner */
	spin_lock(&clp->cl_lock);
	list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
			    so_strhash) {

		if (sop->so_is_open_owner || !same_owner_str(sop, owner))
			continue;

		/* see if there are still any locks associated with it */
		lo = lockowner(sop);
		list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
			if (check_for_locks(stp->st_stid.sc_file, lo)) {
				status = nfserr_locks_held;
				spin_unlock(&clp->cl_lock);
				return status;
			}
		}

		nfs4_get_stateowner(sop);
		break;
	}
	if (!lo) {
		spin_unlock(&clp->cl_lock);
		return status;
	}

	unhash_lockowner_locked(lo);
	while (!list_empty(&lo->lo_owner.so_stateids)) {
		stp = list_first_entry(&lo->lo_owner.so_stateids,
				       struct nfs4_ol_stateid,
				       st_perstateowner);
		WARN_ON(!unhash_lock_stateid(stp));
		put_ol_stateid_locked(stp, &reaplist);
	}
	spin_unlock(&clp->cl_lock);
	free_ol_stateid_reaplist(&reaplist);
	remove_blocked_locks(lo);
	nfs4_put_stateowner(&lo->lo_owner);

	return status;
}
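
/*
 * Note the reaplist pattern above: stateids are unhashed and collected
 * while cl_lock is held, but the actual frees happen in
 * free_ol_stateid_reaplist() only after the spinlock is dropped, since
 * freeing may block.
 */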

static inline struct nfs4_client_reclaim *
alloc_reclaim(void)
{
	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
}

bool
nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp;

	crp = nfsd4_find_reclaim_client(name, nn);
	return (crp && crp->cr_clp);
}

/*
 * failure => all reset bets are off, nfserr_no_grace...
 *
 * The caller is responsible for freeing name.data if NULL is returned (it
 * will be freed in nfs4_remove_reclaim_record in the normal case).
 */
struct nfs4_client_reclaim *
nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
		struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp;

	crp = alloc_reclaim();
	if (crp) {
		strhashval = clientstr_hashval(name);
		INIT_LIST_HEAD(&crp->cr_strhash);
		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
		crp->cr_name.data = name.data;
		crp->cr_name.len = name.len;
		crp->cr_princhash.data = princhash.data;
		crp->cr_princhash.len = princhash.len;
		crp->cr_clp = NULL;
		nn->reclaim_str_hashtbl_size++;
	}
	return crp;
}

void
nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
{
	list_del(&crp->cr_strhash);
	kfree(crp->cr_name.data);
	kfree(crp->cr_princhash.data);
	kfree(crp);
	nn->reclaim_str_hashtbl_size--;
}

void
nfs4_release_reclaim(struct nfsd_net *nn)
{
	struct nfs4_client_reclaim *crp = NULL;
	int i;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
					 struct nfs4_client_reclaim, cr_strhash);
			nfs4_remove_reclaim_record(crp, nn);
		}
	}
	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
}

/*
 * Called from OPEN, CLAIM_PREVIOUS with a new clientid.
 */
struct nfs4_client_reclaim *
nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp = NULL;

	strhashval = clientstr_hashval(name);
	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
		if (compare_blob(&crp->cr_name, &name) == 0)
			return crp;
	}
	return NULL;
}

__be32
nfs4_check_open_reclaim(struct nfs4_client *clp)
{
	if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
		return nfserr_no_grace;

	if (nfsd4_client_record_check(clp))
		return nfserr_reclaim_bad;

	return nfs_ok;
}

/*
 * Since the lifetime of a delegation isn't limited to that of an open, a
 * client may quite reasonably hang on to a delegation as long as it has
 * the inode cached. This becomes an obvious problem the first time a
 * client's inode cache approaches the size of the server's total memory.
 *
 * For now we avoid this problem by imposing a hard limit on the number
 * of delegations, which varies according to the server's memory size.
 */
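/*
 * To make the limit concrete: with 4 KiB pages (PAGE_SHIFT == 12) the
 * shift below is 20 - 2 - 12 = 6, i.e. one delegation per 64 free
 * pages.  A megabyte is 256 such pages, which works out to the "4
 * delegations per megabyte" mentioned in the comment below.
 */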
static void
set_max_delegations(void)
{
	/*
	 * Allow at most 4 delegations per megabyte of RAM.  Quick
	 * estimates suggest that in the worst case (where every delegation
	 * is for a different inode), a delegation could take about 1.5K,
	 * giving a worst case usage of about 6% of memory.
	 */
	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}

static int nfs4_state_create_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int i;

	nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					    sizeof(struct list_head),
					    GFP_KERNEL);
	if (!nn->conf_id_hashtbl)
		goto err;
	nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
	if (!nn->unconf_id_hashtbl)
		goto err_unconf_id;
	nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
					      sizeof(struct list_head),
					      GFP_KERNEL);
	if (!nn->sessionid_hashtbl)
		goto err_sessionid;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
	}
	for (i = 0; i < SESSION_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
	nn->conf_name_tree = RB_ROOT;
	nn->unconf_name_tree = RB_ROOT;
	nn->boot_time = ktime_get_real_seconds();
	nn->grace_ended = false;
	nn->nfsd4_manager.block_opens = true;
	INIT_LIST_HEAD(&nn->nfsd4_manager.list);
	INIT_LIST_HEAD(&nn->client_lru);
	INIT_LIST_HEAD(&nn->close_lru);
	INIT_LIST_HEAD(&nn->del_recall_lru);
	spin_lock_init(&nn->client_lock);
	spin_lock_init(&nn->s2s_cp_lock);
	idr_init(&nn->s2s_cp_stateids);

	spin_lock_init(&nn->blocked_locks_lock);
	INIT_LIST_HEAD(&nn->blocked_locks_lru);

	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
	get_net(net);

	return 0;

err_sessionid:
	kfree(nn->unconf_id_hashtbl);
err_unconf_id:
	kfree(nn->conf_id_hashtbl);
err:
	return -ENOMEM;
}

static void
nfs4_state_destroy_net(struct net *net)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->conf_id_hashtbl[i])) {
			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	WARN_ON(!list_empty(&nn->blocked_locks_lru));

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	kfree(nn->sessionid_hashtbl);
	kfree(nn->unconf_id_hashtbl);
	kfree(nn->conf_id_hashtbl);
	put_net(net);
}

int
nfs4_state_start_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	ret = nfs4_state_create_net(net);
	if (ret)
		return ret;
	locks_start_grace(net, &nn->nfsd4_manager);
	nfsd4_client_tracking_init(net);
	if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
		goto skip_grace;
	printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
	       nn->nfsd4_grace, net->ns.inum);
	trace_nfsd_grace_start(nn);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
	return 0;

skip_grace:
	printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
	       net->ns.inum);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
	nfsd4_end_grace(nn);
	return 0;
}

/* initialization to perform when the nfsd service is started: */

int
nfs4_state_start(void)
{
	int ret;

	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
	if (laundry_wq == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = nfsd4_create_callback_queue();
	if (ret)
		goto out_free_laundry;

	set_max_delegations();
	return 0;

out_free_laundry:
	destroy_workqueue(laundry_wq);
out:
	return ret;
}

void
nfs4_state_shutdown_net(struct net *net)
{
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);

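	/*
	 * Unhash all remaining delegations under state_lock, collect them
	 * on a private reaplist, and only destroy them once the spinlock
	 * has been dropped, since destroy_unhashed_deleg() may block.
	 */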
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		WARN_ON(!unhash_delegation_locked(dp));
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		destroy_unhashed_deleg(dp);
	}

	nfsd4_client_tracking_exit(net);
	nfs4_state_destroy_net(net);
#ifdef CONFIG_NFSD_V4_2_INTER_SSC
	nfsd4_ssc_shutdown_umount(nn);
#endif
}

void
nfs4_state_shutdown(void)
{
	destroy_workqueue(laundry_wq);
	nfsd4_destroy_callback_queue();
}

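/*
 * Current-stateid bookkeeping (NFSv4.1): put_stateid() records the
 * stateid that an op just produced in the compound state; get_stateid()
 * substitutes that saved value when a later op in the same compound
 * carries the special "current stateid" value.
 */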
static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
	    CURRENT_STATEID(stateid))
		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}

static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (cstate->minorversion) {
		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
		SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
	}
}

void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
}

/*
 * functions to set current state id
 */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->open.op_stateid);
}

void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	put_stateid(cstate, &u->lock.lk_resp_stateid);
}

/*
 * functions to consume current state id
 */

void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->open_downgrade.od_stateid);
}

void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->delegreturn.dr_stateid);
}

void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->free_stateid.fr_stateid);
}

void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->setattr.sa_stateid);
}

void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->close.cl_stateid);
}

void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->locku.lu_stateid);
}

void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->read.rd_stateid);
}

void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
		union nfsd4_op_u *u)
{
	get_stateid(cstate, &u->write.wr_stateid);
}