1/*
2* Copyright (c) 2001 The Regents of the University of Michigan.
3* All rights reserved.
4*
5* Kendrick Smith <kmsmith@umich.edu>
6* Andy Adamson <kandros@umich.edu>
7*
8* Redistribution and use in source and binary forms, with or without
9* modification, are permitted provided that the following conditions
10* are met:
11*
12* 1. Redistributions of source code must retain the above copyright
13* notice, this list of conditions and the following disclaimer.
14* 2. Redistributions in binary form must reproduce the above copyright
15* notice, this list of conditions and the following disclaimer in the
16* documentation and/or other materials provided with the distribution.
17* 3. Neither the name of the University nor the names of its
18* contributors may be used to endorse or promote products derived
19* from this software without specific prior written permission.
20*
21* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32*
33*/
34
35#include <linux/file.h>
36#include <linux/fs.h>
37#include <linux/slab.h>
38#include <linux/namei.h>
39#include <linux/swap.h>
40#include <linux/pagemap.h>
41#include <linux/ratelimit.h>
42#include <linux/sunrpc/svcauth_gss.h>
43#include <linux/sunrpc/addr.h>
44#include <linux/jhash.h>
45#include <linux/string_helpers.h>
46#include <linux/fsnotify.h>
47#include <linux/rhashtable.h>
48#include <linux/nfs_ssc.h>
49
50#include "xdr4.h"
51#include "xdr4cb.h"
52#include "vfs.h"
53#include "current_stateid.h"
54
55#include "netns.h"
56#include "pnfs.h"
57#include "filecache.h"
58#include "trace.h"
59
60#define NFSDDBG_FACILITY NFSDDBG_PROC
61
62#define all_ones {{ ~0, ~0}, ~0}
63static const stateid_t one_stateid = {
64 .si_generation = ~0,
65 .si_opaque = all_ones,
66};
67static const stateid_t zero_stateid = {
68 /* all fields zero */
69};
70static const stateid_t currentstateid = {
71 .si_generation = 1,
72};
73static const stateid_t close_stateid = {
74 .si_generation = 0xffffffffU,
75};
76
77static u64 current_sessionid = 1;
78
79#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
80#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
81#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
82#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
83
84/* forward declarations */
85static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
86static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
87void nfsd4_end_grace(struct nfsd_net *nn);
88static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
89static void nfsd4_file_hash_remove(struct nfs4_file *fi);
90static void deleg_reaper(struct nfsd_net *nn);
91
92/* Locking: */
93
94/*
95 * Currently used for the del_recall_lru and file hash table. In an
96 * effort to decrease the scope of the client_mutex, this spinlock may
97 * eventually cover more:
98 */
99static DEFINE_SPINLOCK(state_lock);
100
101enum nfsd4_st_mutex_lock_subclass {
102 OPEN_STATEID_MUTEX = 0,
103 LOCK_STATEID_MUTEX = 1,
104};
105
106/*
107 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
108 * the refcount on the open stateid to drop.
109 */
110static DECLARE_WAIT_QUEUE_HEAD(close_wq);
111
112/*
113 * A waitqueue where a writer to clients/#/ctl destroying a client can
114 * wait for cl_rpc_users to drop to 0 and then for the client to be
115 * unhashed.
116 */
117static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);
118
119static struct kmem_cache *client_slab;
120static struct kmem_cache *openowner_slab;
121static struct kmem_cache *lockowner_slab;
122static struct kmem_cache *file_slab;
123static struct kmem_cache *stateid_slab;
124static struct kmem_cache *deleg_slab;
125static struct kmem_cache *odstate_slab;
126
127static void free_session(struct nfsd4_session *);
128
129static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
130static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
131static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops;
132
133static struct workqueue_struct *laundry_wq;
134
135int nfsd4_create_laundry_wq(void)
136{
137 int rc = 0;
138
139 laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
140 if (laundry_wq == NULL)
141 rc = -ENOMEM;
142 return rc;
143}
144
145void nfsd4_destroy_laundry_wq(void)
146{
147 destroy_workqueue(laundry_wq);
148}
149
150static bool is_session_dead(struct nfsd4_session *ses)
151{
152 return ses->se_flags & NFS4_SESSION_DEAD;
153}
154
155static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
156{
157 if (atomic_read(&ses->se_ref) > ref_held_by_me)
158 return nfserr_jukebox;
159 ses->se_flags |= NFS4_SESSION_DEAD;
160 return nfs_ok;
161}
162
163static bool is_client_expired(struct nfs4_client *clp)
164{
165 return clp->cl_time == 0;
166}
167
168static void nfsd4_dec_courtesy_client_count(struct nfsd_net *nn,
169 struct nfs4_client *clp)
170{
171 if (clp->cl_state != NFSD4_ACTIVE)
172 atomic_add_unless(&nn->nfsd_courtesy_clients, -1, 0);
173}
174
175static __be32 get_client_locked(struct nfs4_client *clp)
176{
177 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
178
179 lockdep_assert_held(&nn->client_lock);
180
181 if (is_client_expired(clp))
182 return nfserr_expired;
183 atomic_inc(&clp->cl_rpc_users);
184 nfsd4_dec_courtesy_client_count(nn, clp);
185 clp->cl_state = NFSD4_ACTIVE;
186 return nfs_ok;
187}
188
189/* must be called under the client_lock */
190static inline void
191renew_client_locked(struct nfs4_client *clp)
192{
193 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
194
195 if (is_client_expired(clp)) {
196 WARN_ON(1);
197 printk("%s: client (clientid %08x/%08x) already expired\n",
198 __func__,
199 clp->cl_clientid.cl_boot,
200 clp->cl_clientid.cl_id);
201 return;
202 }
203
204 list_move_tail(&clp->cl_lru, &nn->client_lru);
205 clp->cl_time = ktime_get_boottime_seconds();
206 nfsd4_dec_courtesy_client_count(nn, clp);
207 clp->cl_state = NFSD4_ACTIVE;
208}
209
210static void put_client_renew_locked(struct nfs4_client *clp)
211{
212 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
213
214 lockdep_assert_held(&nn->client_lock);
215
216 if (!atomic_dec_and_test(&clp->cl_rpc_users))
217 return;
218 if (!is_client_expired(clp))
219 renew_client_locked(clp);
220 else
221 wake_up_all(&expiry_wq);
222}
223
224static void put_client_renew(struct nfs4_client *clp)
225{
226 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
227
228 if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
229 return;
230 if (!is_client_expired(clp))
231 renew_client_locked(clp);
232 else
233 wake_up_all(&expiry_wq);
234 spin_unlock(&nn->client_lock);
235}
236
237static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
238{
239 __be32 status;
240
241 if (is_session_dead(ses))
242 return nfserr_badsession;
243 status = get_client_locked(ses->se_client);
244 if (status)
245 return status;
246 atomic_inc(&ses->se_ref);
247 return nfs_ok;
248}
249
250static void nfsd4_put_session_locked(struct nfsd4_session *ses)
251{
252 struct nfs4_client *clp = ses->se_client;
253 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
254
255 lockdep_assert_held(&nn->client_lock);
256
257 if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
258 free_session(ses);
259 put_client_renew_locked(clp);
260}
261
262static void nfsd4_put_session(struct nfsd4_session *ses)
263{
264 struct nfs4_client *clp = ses->se_client;
265 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
266
267 spin_lock(&nn->client_lock);
268 nfsd4_put_session_locked(ses);
269 spin_unlock(&nn->client_lock);
270}
271
272static struct nfsd4_blocked_lock *
273find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
274 struct nfsd_net *nn)
275{
276 struct nfsd4_blocked_lock *cur, *found = NULL;
277
278 spin_lock(&nn->blocked_locks_lock);
279 list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
280 if (fh_match(fh, &cur->nbl_fh)) {
281 list_del_init(&cur->nbl_list);
282 WARN_ON(list_empty(&cur->nbl_lru));
283 list_del_init(&cur->nbl_lru);
284 found = cur;
285 break;
286 }
287 }
288 spin_unlock(&nn->blocked_locks_lock);
289 if (found)
290 locks_delete_block(&found->nbl_lock);
291 return found;
292}
293
294static struct nfsd4_blocked_lock *
295find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
296 struct nfsd_net *nn)
297{
298 struct nfsd4_blocked_lock *nbl;
299
300 nbl = find_blocked_lock(lo, fh, nn);
301 if (!nbl) {
302 nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
303 if (nbl) {
304 INIT_LIST_HEAD(&nbl->nbl_list);
305 INIT_LIST_HEAD(&nbl->nbl_lru);
306 fh_copy_shallow(&nbl->nbl_fh, fh);
307 locks_init_lock(&nbl->nbl_lock);
308 kref_init(&nbl->nbl_kref);
309 nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
310 &nfsd4_cb_notify_lock_ops,
311 NFSPROC4_CLNT_CB_NOTIFY_LOCK);
312 }
313 }
314 return nbl;
315}
316
317static void
318free_nbl(struct kref *kref)
319{
320 struct nfsd4_blocked_lock *nbl;
321
322 nbl = container_of(kref, struct nfsd4_blocked_lock, nbl_kref);
323 locks_release_private(&nbl->nbl_lock);
324 kfree(nbl);
325}
326
327static void
328free_blocked_lock(struct nfsd4_blocked_lock *nbl)
329{
330 locks_delete_block(&nbl->nbl_lock);
331 kref_put(&nbl->nbl_kref, free_nbl);
332}
333
334static void
335remove_blocked_locks(struct nfs4_lockowner *lo)
336{
337 struct nfs4_client *clp = lo->lo_owner.so_client;
338 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
339 struct nfsd4_blocked_lock *nbl;
340 LIST_HEAD(reaplist);
341
342 /* Dequeue all blocked locks */
343 spin_lock(&nn->blocked_locks_lock);
344 while (!list_empty(&lo->lo_blocked)) {
345 nbl = list_first_entry(&lo->lo_blocked,
346 struct nfsd4_blocked_lock,
347 nbl_list);
348 list_del_init(&nbl->nbl_list);
349 WARN_ON(list_empty(&nbl->nbl_lru));
350 list_move(&nbl->nbl_lru, &reaplist);
351 }
352 spin_unlock(&nn->blocked_locks_lock);
353
354 /* Now free them */
355 while (!list_empty(&reaplist)) {
356 nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
357 nbl_lru);
358 list_del_init(&nbl->nbl_lru);
359 free_blocked_lock(nbl);
360 }
361}
362
363static void
364nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
365{
366 struct nfsd4_blocked_lock *nbl = container_of(cb,
367 struct nfsd4_blocked_lock, nbl_cb);
368 locks_delete_block(&nbl->nbl_lock);
369}
370
371static int
372nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
373{
374 trace_nfsd_cb_notify_lock_done(&zero_stateid, task);
375
376 /*
377 * Since this is just an optimization, we don't try very hard if it
378 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
379 * just quit trying on anything else.
380 */
381 switch (task->tk_status) {
382 case -NFS4ERR_DELAY:
383 rpc_delay(task, 1 * HZ);
384 return 0;
385 default:
386 return 1;
387 }
388}
389
390static void
391nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
392{
393 struct nfsd4_blocked_lock *nbl = container_of(cb,
394 struct nfsd4_blocked_lock, nbl_cb);
395
396 free_blocked_lock(nbl);
397}
398
399static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
400 .prepare = nfsd4_cb_notify_lock_prepare,
401 .done = nfsd4_cb_notify_lock_done,
402 .release = nfsd4_cb_notify_lock_release,
403};
404
405/*
406 * We store the NONE, READ, WRITE, and BOTH bits separately in the
407 * st_{access,deny}_bmap field of the stateid, in order to track not
408 * only what share bits are currently in force, but also what
409 * combinations of share bits previous opens have used. This allows us
410 * to enforce the recommendation in
411 * https://datatracker.ietf.org/doc/html/rfc7530#section-16.19.4 that
412* the server return an error if the client attempts to downgrade to a
413 * combination of share bits not explicable by closing some of its
414 * previous opens.
415 *
416 * This enforcement is arguably incomplete, since we don't keep
417 * track of access/deny bit combinations; so, e.g., we allow:
418 *
419 * OPEN allow read, deny write
420 * OPEN allow both, deny none
421 * DOWNGRADE allow read, deny none
422 *
423 * which we should reject.
424 *
425 * But you could also argue that our current code is already overkill,
426 * since it only exists to return NFS4ERR_INVAL on incorrect client
427 * behavior.
428 */
429static unsigned int
430bmap_to_share_mode(unsigned long bmap)
431{
432 int i;
433 unsigned int access = 0;
434
435 for (i = 1; i < 4; i++) {
436 if (test_bit(i, &bmap))
437 access |= i;
438 }
439 return access;
440}
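/*
 * Editorial worked example (the open sequence is hypothetical, the
 * constants are the protocol values used above): if one open used
 * NFS4_SHARE_ACCESS_READ (bit 1) and a later open used
 * NFS4_SHARE_ACCESS_WRITE (bit 2), the bitmap is 0b0110 and
 *
 *	bmap_to_share_mode(0b0110) == (1 | 2) == NFS4_SHARE_ACCESS_BOTH
 *
 * i.e. the union of everything previous opens asked for, even though
 * no single open requested BOTH.
 */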
441
442/* set share access for a given stateid */
443static inline void
444set_access(u32 access, struct nfs4_ol_stateid *stp)
445{
446 unsigned char mask = 1 << access;
447
448 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
449 stp->st_access_bmap |= mask;
450}
451
452/* clear share access for a given stateid */
453static inline void
454clear_access(u32 access, struct nfs4_ol_stateid *stp)
455{
456 unsigned char mask = 1 << access;
457
458 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
459 stp->st_access_bmap &= ~mask;
460}
461
462/* test whether a given stateid has access */
463static inline bool
464test_access(u32 access, struct nfs4_ol_stateid *stp)
465{
466 unsigned char mask = 1 << access;
467
468 return (bool)(stp->st_access_bmap & mask);
469}
470
471/* set share deny for a given stateid */
472static inline void
473set_deny(u32 deny, struct nfs4_ol_stateid *stp)
474{
475 unsigned char mask = 1 << deny;
476
477 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
478 stp->st_deny_bmap |= mask;
479}
480
481/* clear share deny for a given stateid */
482static inline void
483clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
484{
485 unsigned char mask = 1 << deny;
486
487 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
488 stp->st_deny_bmap &= ~mask;
489}
490
491/* test whether a given stateid is denying specific access */
492static inline bool
493test_deny(u32 deny, struct nfs4_ol_stateid *stp)
494{
495 unsigned char mask = 1 << deny;
496
497 return (bool)(stp->st_deny_bmap & mask);
498}
499
500static int nfs4_access_to_omode(u32 access)
501{
502 switch (access & NFS4_SHARE_ACCESS_BOTH) {
503 case NFS4_SHARE_ACCESS_READ:
504 return O_RDONLY;
505 case NFS4_SHARE_ACCESS_WRITE:
506 return O_WRONLY;
507 case NFS4_SHARE_ACCESS_BOTH:
508 return O_RDWR;
509 }
510 WARN_ON_ONCE(1);
511 return O_RDONLY;
512}
513
514static inline int
515access_permit_read(struct nfs4_ol_stateid *stp)
516{
517 return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
518 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
519 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
520}
521
522static inline int
523access_permit_write(struct nfs4_ol_stateid *stp)
524{
525 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
526 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
527}
528
529static inline struct nfs4_stateowner *
530nfs4_get_stateowner(struct nfs4_stateowner *sop)
531{
532 atomic_inc(&sop->so_count);
533 return sop;
534}
535
536static int
537same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
538{
539 return (sop->so_owner.len == owner->len) &&
540 0 == memcmp(sop->so_owner.data, owner->data, owner->len);
541}
542
543static struct nfs4_openowner *
544find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
545 struct nfs4_client *clp)
546{
547 struct nfs4_stateowner *so;
548
549 lockdep_assert_held(&clp->cl_lock);
550
551 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
552 so_strhash) {
553 if (!so->so_is_open_owner)
554 continue;
555 if (same_owner_str(so, &open->op_owner))
556 return openowner(nfs4_get_stateowner(so));
557 }
558 return NULL;
559}
560
561static struct nfs4_openowner *
562find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
563 struct nfs4_client *clp)
564{
565 struct nfs4_openowner *oo;
566
567 spin_lock(&clp->cl_lock);
568 oo = find_openstateowner_str_locked(hashval, open, clp);
569 spin_unlock(&clp->cl_lock);
570 return oo;
571}
572
573static inline u32
574opaque_hashval(const void *ptr, int nbytes)
575{
576 unsigned char *cptr = (unsigned char *) ptr;
577
578 u32 x = 0;
579 while (nbytes--) {
580 x *= 37;
581 x += *cptr++;
582 }
583 return x;
584}
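/*
 * Editorial note: opaque_hashval() is a simple multiply-by-37 rolling
 * hash. For example (hypothetical input), over the two bytes
 * 'a' (0x61) and 'b' (0x62):
 *
 *	x = 0 * 37 + 0x61 =   97
 *	x = 97 * 37 + 0x62 = 3687
 *
 * Callers such as ownerstr_hashval() and clientstr_hashval() then mask
 * the result down to the size of their hash table.
 */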
585
586static void nfsd4_free_file_rcu(struct rcu_head *rcu)
587{
588 struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
589
590 kmem_cache_free(file_slab, fp);
591}
592
593void
594put_nfs4_file(struct nfs4_file *fi)
595{
596 if (refcount_dec_and_test(&fi->fi_ref)) {
597 nfsd4_file_hash_remove(fi);
598 WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
599 WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
600 call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
601 }
602}
603
604static struct nfsd_file *
605find_writeable_file_locked(struct nfs4_file *f)
606{
607 struct nfsd_file *ret;
608
609 lockdep_assert_held(&f->fi_lock);
610
611 ret = nfsd_file_get(f->fi_fds[O_WRONLY]);
612 if (!ret)
613 ret = nfsd_file_get(f->fi_fds[O_RDWR]);
614 return ret;
615}
616
617static struct nfsd_file *
618find_writeable_file(struct nfs4_file *f)
619{
620 struct nfsd_file *ret;
621
622 spin_lock(&f->fi_lock);
623 ret = find_writeable_file_locked(f);
624 spin_unlock(&f->fi_lock);
625
626 return ret;
627}
628
629static struct nfsd_file *
630find_readable_file_locked(struct nfs4_file *f)
631{
632 struct nfsd_file *ret;
633
634 lockdep_assert_held(&f->fi_lock);
635
636 ret = nfsd_file_get(f->fi_fds[O_RDONLY]);
637 if (!ret)
638 ret = nfsd_file_get(f->fi_fds[O_RDWR]);
639 return ret;
640}
641
642static struct nfsd_file *
643find_readable_file(struct nfs4_file *f)
644{
645 struct nfsd_file *ret;
646
647 spin_lock(&f->fi_lock);
648 ret = find_readable_file_locked(f);
649 spin_unlock(&f->fi_lock);
650
651 return ret;
652}
653
654static struct nfsd_file *
655find_rw_file(struct nfs4_file *f)
656{
657 struct nfsd_file *ret;
658
659 spin_lock(&f->fi_lock);
660 ret = nfsd_file_get(f->fi_fds[O_RDWR]);
661 spin_unlock(&f->fi_lock);
662
663 return ret;
664}
665
666struct nfsd_file *
667find_any_file(struct nfs4_file *f)
668{
669 struct nfsd_file *ret;
670
671 if (!f)
672 return NULL;
673 spin_lock(&f->fi_lock);
674 ret = nfsd_file_get(f->fi_fds[O_RDWR]);
675 if (!ret) {
676 ret = nfsd_file_get(f->fi_fds[O_WRONLY]);
677 if (!ret)
678 ret = nfsd_file_get(f->fi_fds[O_RDONLY]);
679 }
680 spin_unlock(&f->fi_lock);
681 return ret;
682}
683
684static struct nfsd_file *find_any_file_locked(struct nfs4_file *f)
685{
686 lockdep_assert_held(&f->fi_lock);
687
688 if (f->fi_fds[O_RDWR])
689 return f->fi_fds[O_RDWR];
690 if (f->fi_fds[O_WRONLY])
691 return f->fi_fds[O_WRONLY];
692 if (f->fi_fds[O_RDONLY])
693 return f->fi_fds[O_RDONLY];
694 return NULL;
695}
696
697static atomic_long_t num_delegations;
698unsigned long max_delegations;
699
700/*
701 * Open owner state (share locks)
702 */
703
704/* hash tables for lock and open owners */
705#define OWNER_HASH_BITS 8
706#define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
707#define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
708
709static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
710{
711 unsigned int ret;
712
713 ret = opaque_hashval(ownername->data, ownername->len);
714 return ret & OWNER_HASH_MASK;
715}
716
717static struct rhltable nfs4_file_rhltable ____cacheline_aligned_in_smp;
718
719static const struct rhashtable_params nfs4_file_rhash_params = {
720 .key_len = sizeof_field(struct nfs4_file, fi_inode),
721 .key_offset = offsetof(struct nfs4_file, fi_inode),
722 .head_offset = offsetof(struct nfs4_file, fi_rlist),
723
724 /*
725 * Start with a single page hash table to reduce resizing churn
726 * on light workloads.
727 */
728 .min_size = 256,
729 .automatic_shrinking = true,
730};
731
732/*
733 * Check if courtesy clients have conflicting access and resolve it if possible
734 *
735 * access: is op_share_access if share_access is true.
736 * Check if access mode, op_share_access, would conflict with
737 * the current deny mode of the file 'fp'.
738 * access: is op_share_deny if share_access is false.
739 * Check if the deny mode, op_share_deny, would conflict with
740 * current access of the file 'fp'.
741 * stp: skip checking this entry.
742 * new_stp: normal open, not open upgrade.
743 *
744 * Function returns:
745 * false - access/deny mode conflict with normal client.
746 * true - no conflict or conflict with courtesy client(s) is resolved.
747 */
748static bool
749nfs4_resolve_deny_conflicts_locked(struct nfs4_file *fp, bool new_stp,
750 struct nfs4_ol_stateid *stp, u32 access, bool share_access)
751{
752 struct nfs4_ol_stateid *st;
753 bool resolvable = true;
754 unsigned char bmap;
755 struct nfsd_net *nn;
756 struct nfs4_client *clp;
757
758 lockdep_assert_held(&fp->fi_lock);
759 list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
760 /* ignore lock stateid */
761 if (st->st_openstp)
762 continue;
763 if (st == stp && new_stp)
764 continue;
765 /* check file access against deny mode or vice versa */
766 bmap = share_access ? st->st_deny_bmap : st->st_access_bmap;
767 if (!(access & bmap_to_share_mode(bmap)))
768 continue;
769 clp = st->st_stid.sc_client;
770 if (try_to_expire_client(clp))
771 continue;
772 resolvable = false;
773 break;
774 }
775 if (resolvable) {
776 clp = stp->st_stid.sc_client;
777 nn = net_generic(clp->net, nfsd_net_id);
778 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
779 }
780 return resolvable;
781}
782
783static void
784__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
785{
786 lockdep_assert_held(&fp->fi_lock);
787
788 if (access & NFS4_SHARE_ACCESS_WRITE)
789 atomic_inc(&fp->fi_access[O_WRONLY]);
790 if (access & NFS4_SHARE_ACCESS_READ)
791 atomic_inc(&fp->fi_access[O_RDONLY]);
792}
793
794static __be32
795nfs4_file_get_access(struct nfs4_file *fp, u32 access)
796{
797 lockdep_assert_held(&fp->fi_lock);
798
799 /* Does this access mode make sense? */
800 if (access & ~NFS4_SHARE_ACCESS_BOTH)
801 return nfserr_inval;
802
803 /* Does it conflict with a deny mode already set? */
804 if ((access & fp->fi_share_deny) != 0)
805 return nfserr_share_denied;
806
807 __nfs4_file_get_access(fp, access);
808 return nfs_ok;
809}
810
811static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
812{
813 /* Common case is that there is no deny mode. */
814 if (deny) {
815 /* Does this deny mode make sense? */
816 if (deny & ~NFS4_SHARE_DENY_BOTH)
817 return nfserr_inval;
818
819 if ((deny & NFS4_SHARE_DENY_READ) &&
820 atomic_read(&fp->fi_access[O_RDONLY]))
821 return nfserr_share_denied;
822
823 if ((deny & NFS4_SHARE_DENY_WRITE) &&
824 atomic_read(&fp->fi_access[O_WRONLY]))
825 return nfserr_share_denied;
826 }
827 return nfs_ok;
828}
829
830static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
831{
832 might_lock(&fp->fi_lock);
833
834 if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
835 struct nfsd_file *f1 = NULL;
836 struct nfsd_file *f2 = NULL;
837
838 swap(f1, fp->fi_fds[oflag]);
839 if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
840 swap(f2, fp->fi_fds[O_RDWR]);
841 spin_unlock(&fp->fi_lock);
842 if (f1)
843 nfsd_file_put(f1);
844 if (f2)
845 nfsd_file_put(f2);
846 }
847}
848
849static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
850{
851 WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
852
853 if (access & NFS4_SHARE_ACCESS_WRITE)
854 __nfs4_file_put_access(fp, O_WRONLY);
855 if (access & NFS4_SHARE_ACCESS_READ)
856 __nfs4_file_put_access(fp, O_RDONLY);
857}
858
859/*
860 * Allocate a new open/delegation state counter. This is needed for
861 * pNFS for proper return on close semantics.
862 *
863 * Note that we only allocate it for pNFS-enabled exports, otherwise
864 * all pointers to struct nfs4_clnt_odstate are always NULL.
865 */
866static struct nfs4_clnt_odstate *
867alloc_clnt_odstate(struct nfs4_client *clp)
868{
869 struct nfs4_clnt_odstate *co;
870
871 co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
872 if (co) {
873 co->co_client = clp;
874 refcount_set(&co->co_odcount, 1);
875 }
876 return co;
877}
878
879static void
880hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
881{
882 struct nfs4_file *fp = co->co_file;
883
884 lockdep_assert_held(&fp->fi_lock);
885 list_add(&co->co_perfile, &fp->fi_clnt_odstate);
886}
887
888static inline void
889get_clnt_odstate(struct nfs4_clnt_odstate *co)
890{
891 if (co)
892 refcount_inc(&co->co_odcount);
893}
894
895static void
896put_clnt_odstate(struct nfs4_clnt_odstate *co)
897{
898 struct nfs4_file *fp;
899
900 if (!co)
901 return;
902
903 fp = co->co_file;
904 if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
905 list_del(&co->co_perfile);
906 spin_unlock(&fp->fi_lock);
907
908 nfsd4_return_all_file_layouts(co->co_client, fp);
909 kmem_cache_free(odstate_slab, co);
910 }
911}
912
913static struct nfs4_clnt_odstate *
914find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
915{
916 struct nfs4_clnt_odstate *co;
917 struct nfs4_client *cl;
918
919 if (!new)
920 return NULL;
921
922 cl = new->co_client;
923
924 spin_lock(&fp->fi_lock);
925 list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
926 if (co->co_client == cl) {
927 get_clnt_odstate(co);
928 goto out;
929 }
930 }
931 co = new;
932 co->co_file = fp;
933 hash_clnt_odstate_locked(new);
934out:
935 spin_unlock(&fp->fi_lock);
936 return co;
937}
938
939struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
940 void (*sc_free)(struct nfs4_stid *))
941{
942 struct nfs4_stid *stid;
943 int new_id;
944
945 stid = kmem_cache_zalloc(slab, GFP_KERNEL);
946 if (!stid)
947 return NULL;
948
949 idr_preload(GFP_KERNEL);
950 spin_lock(&cl->cl_lock);
951 /* Reserving 0 for start of file in nfsdfs "states" file: */
952 new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
953 spin_unlock(&cl->cl_lock);
954 idr_preload_end();
955 if (new_id < 0)
956 goto out_free;
957
958 stid->sc_free = sc_free;
959 stid->sc_client = cl;
960 stid->sc_stateid.si_opaque.so_id = new_id;
961 stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
962 /* Will be incremented before return to client: */
963 refcount_set(&stid->sc_count, 1);
964 spin_lock_init(&stid->sc_lock);
965 INIT_LIST_HEAD(&stid->sc_cp_list);
966
967 /*
968 * It shouldn't be a problem to reuse an opaque stateid value.
969 * I don't think it is for 4.1. But with 4.0 I worry that, for
970 * example, a stray write retransmission could be accepted by
971 * the server when it should have been rejected. Therefore,
972 * adopt a trick from the sctp code to attempt to maximize the
973 * amount of time until an id is reused, by ensuring they always
974 * "increase" (mod INT_MAX):
975 */
976 return stid;
977out_free:
978 kmem_cache_free(slab, stid);
979 return NULL;
980}
981
982/*
983 * Create a unique stateid_t to represent each COPY.
984 */
985static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
986 unsigned char cs_type)
987{
988 int new_id;
989
990 stid->cs_stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
991 stid->cs_stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
992
993 idr_preload(GFP_KERNEL);
994 spin_lock(&nn->s2s_cp_lock);
995 new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
996 stid->cs_stid.si_opaque.so_id = new_id;
997 stid->cs_stid.si_generation = 1;
998 spin_unlock(&nn->s2s_cp_lock);
999 idr_preload_end();
1000 if (new_id < 0)
1001 return 0;
1002 stid->cs_type = cs_type;
1003 return 1;
1004}
1005
1006int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
1007{
1008 return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
1009}
1010
1011struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
1012 struct nfs4_stid *p_stid)
1013{
1014 struct nfs4_cpntf_state *cps;
1015
1016 cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
1017 if (!cps)
1018 return NULL;
1019 cps->cpntf_time = ktime_get_boottime_seconds();
1020 refcount_set(&cps->cp_stateid.cs_count, 1);
1021 if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
1022 goto out_free;
1023 spin_lock(&nn->s2s_cp_lock);
1024 list_add(&cps->cp_list, &p_stid->sc_cp_list);
1025 spin_unlock(&nn->s2s_cp_lock);
1026 return cps;
1027out_free:
1028 kfree(cps);
1029 return NULL;
1030}
1031
1032void nfs4_free_copy_state(struct nfsd4_copy *copy)
1033{
1034 struct nfsd_net *nn;
1035
1036 if (copy->cp_stateid.cs_type != NFS4_COPY_STID)
1037 return;
1038 nn = net_generic(copy->cp_clp->net, nfsd_net_id);
1039 spin_lock(&nn->s2s_cp_lock);
1040 idr_remove(&nn->s2s_cp_stateids,
1041 copy->cp_stateid.cs_stid.si_opaque.so_id);
1042 spin_unlock(&nn->s2s_cp_lock);
1043}
1044
1045static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
1046{
1047 struct nfs4_cpntf_state *cps;
1048 struct nfsd_net *nn;
1049
1050 nn = net_generic(net, nfsd_net_id);
1051 spin_lock(&nn->s2s_cp_lock);
1052 while (!list_empty(&stid->sc_cp_list)) {
1053 cps = list_first_entry(&stid->sc_cp_list,
1054 struct nfs4_cpntf_state, cp_list);
1055 _free_cpntf_state_locked(nn, cps);
1056 }
1057 spin_unlock(&nn->s2s_cp_lock);
1058}
1059
1060static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
1061{
1062 struct nfs4_stid *stid;
1063
1064 stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
1065 if (!stid)
1066 return NULL;
1067
1068 return openlockstateid(stid);
1069}
1070
1071static void nfs4_free_deleg(struct nfs4_stid *stid)
1072{
1073 struct nfs4_delegation *dp = delegstateid(stid);
1074
1075 WARN_ON_ONCE(!list_empty(&stid->sc_cp_list));
1076 WARN_ON_ONCE(!list_empty(&dp->dl_perfile));
1077 WARN_ON_ONCE(!list_empty(&dp->dl_perclnt));
1078 WARN_ON_ONCE(!list_empty(&dp->dl_recall_lru));
1079 kmem_cache_free(deleg_slab, stid);
1080 atomic_long_dec(&num_delegations);
1081}
1082
1083/*
1084 * When we recall a delegation, we should be careful not to hand it
1085 * out again straight away.
1086 * To ensure this we keep a pair of bloom filters ('new' and 'old')
1087 * in which the filehandles of recalled delegations are "stored".
1088 * If a filehandle appears in either filter, a delegation is blocked.
1089 * When a delegation is recalled, the filehandle is stored in the "new"
1090 * filter.
1091 * Every 30 seconds we swap the filters and clear the "new" one,
1092 * unless both are empty of course.
1093 *
1094 * Each filter is 256 bits. We hash the filehandle to 32bit and use the
1095 * low 3 bytes as hash-table indices.
1096 *
1097 * 'blocked_delegations_lock', which is always taken in block_delegations(),
1098 * is used to manage concurrent access. Testing does not need the lock
1099 * except when swapping the two filters.
1100 */
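/*
 * Editorial worked example (the hash value is hypothetical): for a
 * filehandle whose jhash() comes out as 0x00c0ffee, the three filter
 * indices are 0xee (hash & 255), 0xff ((hash >> 8) & 255) and
 * 0xc0 ((hash >> 16) & 255). block_delegations() sets those three bits
 * in the "new" filter; delegation_blocked() reports a block only when
 * all three bits are set in the same filter, 'new' or 'old'.
 */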
1101static DEFINE_SPINLOCK(blocked_delegations_lock);
1102static struct bloom_pair {
1103 int entries, old_entries;
1104 time64_t swap_time;
1105 int new; /* index into 'set' */
1106 DECLARE_BITMAP(set[2], 256);
1107} blocked_delegations;
1108
1109static int delegation_blocked(struct knfsd_fh *fh)
1110{
1111 u32 hash;
1112 struct bloom_pair *bd = &blocked_delegations;
1113
1114 if (bd->entries == 0)
1115 return 0;
1116 if (ktime_get_seconds() - bd->swap_time > 30) {
1117 spin_lock(&blocked_delegations_lock);
1118 if (ktime_get_seconds() - bd->swap_time > 30) {
1119 bd->entries -= bd->old_entries;
1120 bd->old_entries = bd->entries;
1121 memset(bd->set[bd->new], 0,
1122 sizeof(bd->set[0]));
1123 bd->new = 1-bd->new;
1124 bd->swap_time = ktime_get_seconds();
1125 }
1126 spin_unlock(&blocked_delegations_lock);
1127 }
1128 hash = jhash(&fh->fh_raw, fh->fh_size, 0);
1129 if (test_bit(hash&255, bd->set[0]) &&
1130 test_bit((hash>>8)&255, bd->set[0]) &&
1131 test_bit((hash>>16)&255, bd->set[0]))
1132 return 1;
1133
1134 if (test_bit(hash&255, bd->set[1]) &&
1135 test_bit((hash>>8)&255, bd->set[1]) &&
1136 test_bit((hash>>16)&255, bd->set[1]))
1137 return 1;
1138
1139 return 0;
1140}
1141
1142static void block_delegations(struct knfsd_fh *fh)
1143{
1144 u32 hash;
1145 struct bloom_pair *bd = &blocked_delegations;
1146
1147 hash = jhash(&fh->fh_raw, fh->fh_size, 0);
1148
1149 spin_lock(&blocked_delegations_lock);
1150 __set_bit(hash&255, bd->set[bd->new]);
1151 __set_bit((hash>>8)&255, bd->set[bd->new]);
1152 __set_bit((hash>>16)&255, bd->set[bd->new]);
1153 if (bd->entries == 0)
1154 bd->swap_time = ktime_get_seconds();
1155 bd->entries += 1;
1156 spin_unlock(&blocked_delegations_lock);
1157}
1158
1159static struct nfs4_delegation *
1160alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
1161 struct nfs4_clnt_odstate *odstate, u32 dl_type)
1162{
1163 struct nfs4_delegation *dp;
1164 struct nfs4_stid *stid;
1165 long n;
1166
1167 dprintk("NFSD alloc_init_deleg\n");
1168 n = atomic_long_inc_return(&num_delegations);
1169 if (n < 0 || n > max_delegations)
1170 goto out_dec;
1171 if (delegation_blocked(&fp->fi_fhandle))
1172 goto out_dec;
1173 stid = nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg);
1174 if (stid == NULL)
1175 goto out_dec;
1176 dp = delegstateid(stid);
1177
1178 /*
1179 * delegation seqid's are never incremented. The 4.1 special
1180 * meaning of seqid 0 isn't meaningful, really, but let's avoid
1181 * 0 anyway just for consistency and use 1:
1182 */
1183 dp->dl_stid.sc_stateid.si_generation = 1;
1184 INIT_LIST_HEAD(&dp->dl_perfile);
1185 INIT_LIST_HEAD(&dp->dl_perclnt);
1186 INIT_LIST_HEAD(&dp->dl_recall_lru);
1187 dp->dl_clnt_odstate = odstate;
1188 get_clnt_odstate(odstate);
1189 dp->dl_type = dl_type;
1190 dp->dl_retries = 1;
1191 dp->dl_recalled = false;
1192 nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
1193 &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
1194 nfsd4_init_cb(&dp->dl_cb_fattr.ncf_getattr, dp->dl_stid.sc_client,
1195 &nfsd4_cb_getattr_ops, NFSPROC4_CLNT_CB_GETATTR);
1196 dp->dl_cb_fattr.ncf_file_modified = false;
1197 dp->dl_cb_fattr.ncf_cb_bmap[0] = FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE;
1198 get_nfs4_file(fp);
1199 dp->dl_stid.sc_file = fp;
1200 return dp;
1201out_dec:
1202 atomic_long_dec(&num_delegations);
1203 return NULL;
1204}
1205
1206void
1207nfs4_put_stid(struct nfs4_stid *s)
1208{
1209 struct nfs4_file *fp = s->sc_file;
1210 struct nfs4_client *clp = s->sc_client;
1211
1212 might_lock(&clp->cl_lock);
1213
1214 if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
1215 wake_up_all(&close_wq);
1216 return;
1217 }
1218 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1219 if (s->sc_status & SC_STATUS_ADMIN_REVOKED)
1220 atomic_dec(&s->sc_client->cl_admin_revoked);
1221 nfs4_free_cpntf_statelist(clp->net, s);
1222 spin_unlock(&clp->cl_lock);
1223 s->sc_free(s);
1224 if (fp)
1225 put_nfs4_file(fp);
1226}
1227
1228void
1229nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
1230{
1231 stateid_t *src = &stid->sc_stateid;
1232
1233 spin_lock(&stid->sc_lock);
1234 if (unlikely(++src->si_generation == 0))
1235 src->si_generation = 1;
1236 memcpy(dst, src, sizeof(*dst));
1237 spin_unlock(&stid->sc_lock);
1238}
1239
1240static void put_deleg_file(struct nfs4_file *fp)
1241{
1242 struct nfsd_file *nf = NULL;
1243
1244 spin_lock(&fp->fi_lock);
1245 if (--fp->fi_delegees == 0)
1246 swap(nf, fp->fi_deleg_file);
1247 spin_unlock(&fp->fi_lock);
1248
1249 if (nf)
1250 nfsd_file_put(nf);
1251}
1252
1253static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
1254{
1255 struct nfs4_file *fp = dp->dl_stid.sc_file;
1256 struct nfsd_file *nf = fp->fi_deleg_file;
1257
1258 WARN_ON_ONCE(!fp->fi_delegees);
1259
1260 kernel_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
1261 put_deleg_file(fp);
1262}
1263
1264static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
1265{
1266 put_clnt_odstate(dp->dl_clnt_odstate);
1267 nfs4_unlock_deleg_lease(dp);
1268 nfs4_put_stid(&dp->dl_stid);
1269}
1270
1271/**
1272 * nfs4_delegation_exists - Discover if this delegation already exists
1273 * @clp: a pointer to the nfs4_client we're granting a delegation to
1274 * @fp: a pointer to the nfs4_file we're granting a delegation on
1275 *
1276 * Return:
1277 * On success: true iff an existing delegation is found
1278 */
1279
1280static bool
1281nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
1282{
1283 struct nfs4_delegation *searchdp = NULL;
1284 struct nfs4_client *searchclp = NULL;
1285
1286 lockdep_assert_held(&state_lock);
1287 lockdep_assert_held(&fp->fi_lock);
1288
1289 list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
1290 searchclp = searchdp->dl_stid.sc_client;
1291 if (clp == searchclp) {
1292 return true;
1293 }
1294 }
1295 return false;
1296}
1297
1298/**
1299 * hash_delegation_locked - Add a delegation to the appropriate lists
1300 * @dp: a pointer to the nfs4_delegation we are adding.
1301 * @fp: a pointer to the nfs4_file we're granting a delegation on
1302 *
1303 * Return:
1304 * On success: NULL if the delegation was successfully hashed.
1305 *
1306 * On error: -EAGAIN if one was previously granted to this
1307 * nfs4_client for this nfs4_file. Delegation is not hashed.
1308 *
1309 */
1310
1311static int
1312hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
1313{
1314 struct nfs4_client *clp = dp->dl_stid.sc_client;
1315
1316 lockdep_assert_held(&state_lock);
1317 lockdep_assert_held(&fp->fi_lock);
1318 lockdep_assert_held(&clp->cl_lock);
1319
1320 if (nfs4_delegation_exists(clp, fp))
1321 return -EAGAIN;
1322 refcount_inc(&dp->dl_stid.sc_count);
1323 dp->dl_stid.sc_type = SC_TYPE_DELEG;
1324 list_add(&dp->dl_perfile, &fp->fi_delegations);
1325 list_add(&dp->dl_perclnt, &clp->cl_delegations);
1326 return 0;
1327}
1328
1329static bool delegation_hashed(struct nfs4_delegation *dp)
1330{
1331 return !(list_empty(&dp->dl_perfile));
1332}
1333
1334static bool
1335unhash_delegation_locked(struct nfs4_delegation *dp, unsigned short statusmask)
1336{
1337 struct nfs4_file *fp = dp->dl_stid.sc_file;
1338
1339 lockdep_assert_held(&state_lock);
1340
1341 if (!delegation_hashed(dp))
1342 return false;
1343
1344 if (statusmask == SC_STATUS_REVOKED &&
1345 dp->dl_stid.sc_client->cl_minorversion == 0)
1346 statusmask = SC_STATUS_CLOSED;
1347 dp->dl_stid.sc_status |= statusmask;
1348 if (statusmask & SC_STATUS_ADMIN_REVOKED)
1349 atomic_inc(&dp->dl_stid.sc_client->cl_admin_revoked);
1350
1351 /* Ensure that deleg break won't try to requeue it */
1352 ++dp->dl_time;
1353 spin_lock(&fp->fi_lock);
1354 list_del_init(&dp->dl_perclnt);
1355 list_del_init(&dp->dl_recall_lru);
1356 list_del_init(&dp->dl_perfile);
1357 spin_unlock(&fp->fi_lock);
1358 return true;
1359}
1360
1361static void destroy_delegation(struct nfs4_delegation *dp)
1362{
1363 bool unhashed;
1364
1365 spin_lock(&state_lock);
1366 unhashed = unhash_delegation_locked(dp, SC_STATUS_CLOSED);
1367 spin_unlock(&state_lock);
1368 if (unhashed)
1369 destroy_unhashed_deleg(dp);
1370}
1371
1372static void revoke_delegation(struct nfs4_delegation *dp)
1373{
1374 struct nfs4_client *clp = dp->dl_stid.sc_client;
1375
1376 WARN_ON(!list_empty(&dp->dl_recall_lru));
1377
1378 trace_nfsd_stid_revoke(&dp->dl_stid);
1379
1380 if (dp->dl_stid.sc_status &
1381 (SC_STATUS_REVOKED | SC_STATUS_ADMIN_REVOKED)) {
1382 spin_lock(&clp->cl_lock);
1383 refcount_inc(&dp->dl_stid.sc_count);
1384 list_add(&dp->dl_recall_lru, &clp->cl_revoked);
1385 spin_unlock(&clp->cl_lock);
1386 }
1387 destroy_unhashed_deleg(dp);
1388}
1389
1390/*
1391 * SETCLIENTID state
1392 */
1393
1394static unsigned int clientid_hashval(u32 id)
1395{
1396 return id & CLIENT_HASH_MASK;
1397}
1398
1399static unsigned int clientstr_hashval(struct xdr_netobj name)
1400{
1401 return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
1402}
1403
1404/*
1405 * A stateid that had a deny mode associated with it is being released
1406 * or downgraded. Recalculate the deny mode on the file.
1407 */
1408static void
1409recalculate_deny_mode(struct nfs4_file *fp)
1410{
1411 struct nfs4_ol_stateid *stp;
1412
1413 spin_lock(&fp->fi_lock);
1414 fp->fi_share_deny = 0;
1415 list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
1416 fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
1417 spin_unlock(&fp->fi_lock);
1418}
1419
1420static void
1421reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
1422{
1423 int i;
1424 bool change = false;
1425
1426 for (i = 1; i < 4; i++) {
1427 if ((i & deny) != i) {
1428 change = true;
1429 clear_deny(i, stp);
1430 }
1431 }
1432
1433 /* Recalculate per-file deny mode if there was a change */
1434 if (change)
1435 recalculate_deny_mode(stp->st_stid.sc_file);
1436}
1437
1438/* release all access and file references for a given stateid */
1439static void
1440release_all_access(struct nfs4_ol_stateid *stp)
1441{
1442 int i;
1443 struct nfs4_file *fp = stp->st_stid.sc_file;
1444
1445 if (fp && stp->st_deny_bmap != 0)
1446 recalculate_deny_mode(fp);
1447
1448 for (i = 1; i < 4; i++) {
1449 if (test_access(i, stp))
1450 nfs4_file_put_access(stp->st_stid.sc_file, i);
1451 clear_access(i, stp);
1452 }
1453}
1454
1455static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
1456{
1457 kfree(sop->so_owner.data);
1458 sop->so_ops->so_free(sop);
1459}
1460
1461static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
1462{
1463 struct nfs4_client *clp = sop->so_client;
1464
1465 might_lock(&clp->cl_lock);
1466
1467 if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
1468 return;
1469 sop->so_ops->so_unhash(sop);
1470 spin_unlock(&clp->cl_lock);
1471 nfs4_free_stateowner(sop);
1472}
1473
1474static bool
1475nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
1476{
1477 return list_empty(&stp->st_perfile);
1478}
1479
1480static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
1481{
1482 struct nfs4_file *fp = stp->st_stid.sc_file;
1483
1484 lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
1485
1486 if (list_empty(&stp->st_perfile))
1487 return false;
1488
1489 spin_lock(&fp->fi_lock);
1490 list_del_init(&stp->st_perfile);
1491 spin_unlock(&fp->fi_lock);
1492 list_del(&stp->st_perstateowner);
1493 return true;
1494}
1495
1496static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
1497{
1498 struct nfs4_ol_stateid *stp = openlockstateid(stid);
1499
1500 put_clnt_odstate(stp->st_clnt_odstate);
1501 release_all_access(stp);
1502 if (stp->st_stateowner)
1503 nfs4_put_stateowner(stp->st_stateowner);
1504 WARN_ON(!list_empty(&stid->sc_cp_list));
1505 kmem_cache_free(stateid_slab, stid);
1506}
1507
1508static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
1509{
1510 struct nfs4_ol_stateid *stp = openlockstateid(stid);
1511 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
1512 struct nfsd_file *nf;
1513
1514 nf = find_any_file(stp->st_stid.sc_file);
1515 if (nf) {
1516 get_file(nf->nf_file);
1517 filp_close(nf->nf_file, (fl_owner_t)lo);
1518 nfsd_file_put(nf);
1519 }
1520 nfs4_free_ol_stateid(stid);
1521}
1522
1523/*
1524 * Put the persistent reference to an already unhashed generic stateid, while
1525 * holding the cl_lock. If it's the last reference, then put it onto the
1526 * reaplist for later destruction.
1527 */
1528static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
1529 struct list_head *reaplist)
1530{
1531 struct nfs4_stid *s = &stp->st_stid;
1532 struct nfs4_client *clp = s->sc_client;
1533
1534 lockdep_assert_held(&clp->cl_lock);
1535
1536 WARN_ON_ONCE(!list_empty(&stp->st_locks));
1537
1538 if (!refcount_dec_and_test(&s->sc_count)) {
1539 wake_up_all(&close_wq);
1540 return;
1541 }
1542
1543 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1544 if (s->sc_status & SC_STATUS_ADMIN_REVOKED)
1545 atomic_dec(&s->sc_client->cl_admin_revoked);
1546 list_add(&stp->st_locks, reaplist);
1547}
1548
1549static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
1550{
1551 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1552
1553 if (!unhash_ol_stateid(stp))
1554 return false;
1555 list_del_init(&stp->st_locks);
1556 stp->st_stid.sc_status |= SC_STATUS_CLOSED;
1557 return true;
1558}
1559
1560static void release_lock_stateid(struct nfs4_ol_stateid *stp)
1561{
1562 struct nfs4_client *clp = stp->st_stid.sc_client;
1563 bool unhashed;
1564
1565 spin_lock(&clp->cl_lock);
1566 unhashed = unhash_lock_stateid(stp);
1567 spin_unlock(&clp->cl_lock);
1568 if (unhashed)
1569 nfs4_put_stid(&stp->st_stid);
1570}
1571
1572static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
1573{
1574 struct nfs4_client *clp = lo->lo_owner.so_client;
1575
1576 lockdep_assert_held(&clp->cl_lock);
1577
1578 list_del_init(&lo->lo_owner.so_strhash);
1579}
1580
1581/*
1582 * Free a list of generic stateids that were collected earlier after being
1583 * fully unhashed.
1584 */
1585static void
1586free_ol_stateid_reaplist(struct list_head *reaplist)
1587{
1588 struct nfs4_ol_stateid *stp;
1589 struct nfs4_file *fp;
1590
1591 might_sleep();
1592
1593 while (!list_empty(reaplist)) {
1594 stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
1595 st_locks);
1596 list_del(&stp->st_locks);
1597 fp = stp->st_stid.sc_file;
1598 stp->st_stid.sc_free(&stp->st_stid);
1599 if (fp)
1600 put_nfs4_file(fp);
1601 }
1602}
1603
1604static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
1605 struct list_head *reaplist)
1606{
1607 struct nfs4_ol_stateid *stp;
1608
1609 lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
1610
1611 while (!list_empty(&open_stp->st_locks)) {
1612 stp = list_entry(open_stp->st_locks.next,
1613 struct nfs4_ol_stateid, st_locks);
1614 unhash_lock_stateid(stp);
1615 put_ol_stateid_locked(stp, reaplist);
1616 }
1617}
1618
1619static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
1620 struct list_head *reaplist)
1621{
1622 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1623
1624 if (!unhash_ol_stateid(stp))
1625 return false;
1626 release_open_stateid_locks(stp, reaplist);
1627 return true;
1628}
1629
1630static void release_open_stateid(struct nfs4_ol_stateid *stp)
1631{
1632 LIST_HEAD(reaplist);
1633
1634 spin_lock(&stp->st_stid.sc_client->cl_lock);
1635 stp->st_stid.sc_status |= SC_STATUS_CLOSED;
1636 if (unhash_open_stateid(stp, &reaplist))
1637 put_ol_stateid_locked(stp, &reaplist);
1638 spin_unlock(&stp->st_stid.sc_client->cl_lock);
1639 free_ol_stateid_reaplist(&reaplist);
1640}
1641
1642static void unhash_openowner_locked(struct nfs4_openowner *oo)
1643{
1644 struct nfs4_client *clp = oo->oo_owner.so_client;
1645
1646 lockdep_assert_held(&clp->cl_lock);
1647
1648 list_del_init(&oo->oo_owner.so_strhash);
1649 list_del_init(&oo->oo_perclient);
1650}
1651
1652static void release_last_closed_stateid(struct nfs4_openowner *oo)
1653{
1654 struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
1655 nfsd_net_id);
1656 struct nfs4_ol_stateid *s;
1657
1658 spin_lock(&nn->client_lock);
1659 s = oo->oo_last_closed_stid;
1660 if (s) {
1661 list_del_init(&oo->oo_close_lru);
1662 oo->oo_last_closed_stid = NULL;
1663 }
1664 spin_unlock(&nn->client_lock);
1665 if (s)
1666 nfs4_put_stid(&s->st_stid);
1667}
1668
1669static void release_openowner(struct nfs4_openowner *oo)
1670{
1671 struct nfs4_ol_stateid *stp;
1672 struct nfs4_client *clp = oo->oo_owner.so_client;
1673 struct list_head reaplist;
1674
1675 INIT_LIST_HEAD(&reaplist);
1676
1677 spin_lock(&clp->cl_lock);
1678 unhash_openowner_locked(oo);
1679 while (!list_empty(&oo->oo_owner.so_stateids)) {
1680 stp = list_first_entry(&oo->oo_owner.so_stateids,
1681 struct nfs4_ol_stateid, st_perstateowner);
1682 if (unhash_open_stateid(stp, &reaplist))
1683 put_ol_stateid_locked(stp, &reaplist);
1684 }
1685 spin_unlock(&clp->cl_lock);
1686 free_ol_stateid_reaplist(&reaplist);
1687 release_last_closed_stateid(oo);
1688 nfs4_put_stateowner(&oo->oo_owner);
1689}
1690
1691static struct nfs4_stid *find_one_sb_stid(struct nfs4_client *clp,
1692 struct super_block *sb,
1693 unsigned int sc_types)
1694{
1695 unsigned long id, tmp;
1696 struct nfs4_stid *stid;
1697
1698 spin_lock(&clp->cl_lock);
1699 idr_for_each_entry_ul(&clp->cl_stateids, stid, tmp, id)
1700 if ((stid->sc_type & sc_types) &&
1701 stid->sc_status == 0 &&
1702 stid->sc_file->fi_inode->i_sb == sb) {
1703 refcount_inc(&stid->sc_count);
1704 break;
1705 }
1706 spin_unlock(&clp->cl_lock);
1707 return stid;
1708}
1709
1710/**
1711 * nfsd4_revoke_states - revoke all nfsv4 states associated with given filesystem
1712 * @net: used to identify instance of nfsd (there is one per net namespace)
1713 * @sb: super_block used to identify target filesystem
1714 *
1715 * All nfs4 states (open, lock, delegation, layout) held by the server instance
1716 * and associated with a file on the given filesystem will be revoked resulting
1717 * in any files being closed and so all references from nfsd to the filesystem
1718 * being released. Thus nfsd will no longer prevent the filesystem from being
1719 * unmounted.
1720 *
1721 * The clients which own the states will subsequently be notified that the
1722 * states have been "admin-revoked".
1723 */
1724void nfsd4_revoke_states(struct net *net, struct super_block *sb)
1725{
1726 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1727 unsigned int idhashval;
1728 unsigned int sc_types;
1729
1730 sc_types = SC_TYPE_OPEN | SC_TYPE_LOCK | SC_TYPE_DELEG | SC_TYPE_LAYOUT;
1731
1732 spin_lock(&nn->client_lock);
1733 for (idhashval = 0; idhashval < CLIENT_HASH_MASK; idhashval++) {
1734 struct list_head *head = &nn->conf_id_hashtbl[idhashval];
1735 struct nfs4_client *clp;
1736 retry:
1737 list_for_each_entry(clp, head, cl_idhash) {
1738 struct nfs4_stid *stid = find_one_sb_stid(clp, sb,
1739 sc_types);
1740 if (stid) {
1741 struct nfs4_ol_stateid *stp;
1742 struct nfs4_delegation *dp;
1743 struct nfs4_layout_stateid *ls;
1744
1745 spin_unlock(&nn->client_lock);
1746 switch (stid->sc_type) {
1747 case SC_TYPE_OPEN:
1748 stp = openlockstateid(stid);
1749 mutex_lock_nested(&stp->st_mutex,
1750 OPEN_STATEID_MUTEX);
1751
1752 spin_lock(&clp->cl_lock);
1753 if (stid->sc_status == 0) {
1754 stid->sc_status |=
1755 SC_STATUS_ADMIN_REVOKED;
1756 atomic_inc(&clp->cl_admin_revoked);
1757 spin_unlock(&clp->cl_lock);
1758 release_all_access(stp);
1759 } else
1760 spin_unlock(&clp->cl_lock);
1761 mutex_unlock(&stp->st_mutex);
1762 break;
1763 case SC_TYPE_LOCK:
1764 stp = openlockstateid(stid);
1765 mutex_lock_nested(&stp->st_mutex,
1766 LOCK_STATEID_MUTEX);
1767 spin_lock(&clp->cl_lock);
1768 if (stid->sc_status == 0) {
1769 struct nfs4_lockowner *lo =
1770 lockowner(stp->st_stateowner);
1771 struct nfsd_file *nf;
1772
1773 stid->sc_status |=
1774 SC_STATUS_ADMIN_REVOKED;
1775 atomic_inc(&clp->cl_admin_revoked);
1776 spin_unlock(&clp->cl_lock);
1777 nf = find_any_file(stp->st_stid.sc_file);
1778 if (nf) {
1779 get_file(nf->nf_file);
1780 filp_close(nf->nf_file,
1781 (fl_owner_t)lo);
1782 nfsd_file_put(nf);
1783 }
1784 release_all_access(stp);
1785 } else
1786 spin_unlock(&clp->cl_lock);
1787 mutex_unlock(&stp->st_mutex);
1788 break;
1789 case SC_TYPE_DELEG:
1790 dp = delegstateid(stid);
1791 spin_lock(&state_lock);
1792 if (!unhash_delegation_locked(
1793 dp, SC_STATUS_ADMIN_REVOKED))
1794 dp = NULL;
1795 spin_unlock(&state_lock);
1796 if (dp)
1797 revoke_delegation(dp);
1798 break;
1799 case SC_TYPE_LAYOUT:
1800 ls = layoutstateid(stid);
1801 nfsd4_close_layout(ls);
1802 break;
1803 }
1804 nfs4_put_stid(stid);
1805 spin_lock(&nn->client_lock);
1806 if (clp->cl_minorversion == 0)
1807 /* Allow cleanup after a lease period.
1808 * store_release ensures cleanup will
1809 * see any newly revoked states if it
1810 * sees the time updated.
1811 */
1812 nn->nfs40_last_revoke =
1813 ktime_get_boottime_seconds();
1814 goto retry;
1815 }
1816 }
1817 }
1818 spin_unlock(&nn->client_lock);
1819}
1820
1821static inline int
1822hash_sessionid(struct nfs4_sessionid *sessionid)
1823{
1824 struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
1825
1826 return sid->sequence % SESSION_HASH_SIZE;
1827}
1828
1829#ifdef CONFIG_SUNRPC_DEBUG
1830static inline void
1831dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1832{
1833 u32 *ptr = (u32 *)(&sessionid->data[0]);
1834 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
1835}
1836#else
1837static inline void
1838dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1839{
1840}
1841#endif
1842
1843/*
1844 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1845 * won't be used for replay.
1846 */
1847void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
1848{
1849 struct nfs4_stateowner *so = cstate->replay_owner;
1850
1851 if (nfserr == nfserr_replay_me)
1852 return;
1853
1854 if (!seqid_mutating_err(ntohl(nfserr))) {
1855 nfsd4_cstate_clear_replay(cstate);
1856 return;
1857 }
1858 if (!so)
1859 return;
1860 if (so->so_is_open_owner)
1861 release_last_closed_stateid(openowner(so));
1862 so->so_seqid++;
1863 return;
1864}
1865
1866static void
1867gen_sessionid(struct nfsd4_session *ses)
1868{
1869 struct nfs4_client *clp = ses->se_client;
1870 struct nfsd4_sessionid *sid;
1871
1872 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
1873 sid->clientid = clp->cl_clientid;
1874 sid->sequence = current_sessionid++;
1875 sid->reserved = 0;
1876}
1877
1878/*
1879 * The protocol defines ca_maxresponsesize_cached to include the size of
1880 * the rpc header, but all we need to cache is the data starting after
1881 * the end of the initial SEQUENCE operation--the rest we regenerate
1882 * each time. Therefore we can advertise a ca_maxresponsesize_cached
1883 * value that is the number of bytes in our cache plus a few additional
1884 * bytes. In order to stay on the safe side, and not promise more than
1885 * we can cache, those additional bytes must be the minimum possible: 24
1886 * bytes of rpc header (xid through accept state, with AUTH_NULL
1887 * verifier), 12 for the compound header (with zero-length tag), and 44
1888 * for the SEQUENCE op response:
1889 */
1890#define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
1891
1892static void
1893free_session_slots(struct nfsd4_session *ses)
1894{
1895 int i;
1896
1897 for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
1898 free_svc_cred(&ses->se_slots[i]->sl_cred);
1899 kfree(ses->se_slots[i]);
1900 }
1901}
1902
1903/*
1904 * We don't actually need to cache the rpc and session headers, so we
1905 * can allocate a little less for each slot:
1906 */
1907static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1908{
1909 u32 size;
1910
1911 if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1912 size = 0;
1913 else
1914 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1915 return size + sizeof(struct nfsd4_slot);
1916}
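/*
 * Editorial worked example (the negotiated value is hypothetical):
 * NFSD_MIN_HDR_SEQ_SZ is 24 + 12 + 44 = 80 bytes. If a client
 * negotiates ca_maxresponsesize_cached = 1104, each slot caches
 * 1104 - 80 = 1024 bytes of reply data, and slot_bytes() returns
 * 1024 + sizeof(struct nfsd4_slot).
 */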
1917
1918/*
1919 * XXX: If we run out of reserved DRC memory we could (up to a point)
1920 * re-negotiate active sessions and reduce their slot usage to make
1921 * room for new connections. For now we just fail the create session.
1922 */
1923static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
1924{
1925 u32 slotsize = slot_bytes(ca);
1926 u32 num = ca->maxreqs;
1927 unsigned long avail, total_avail;
1928 unsigned int scale_factor;
1929
1930 spin_lock(&nfsd_drc_lock);
1931 if (nfsd_drc_max_mem > nfsd_drc_mem_used)
1932 total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
1933 else
1934 /* We have handed out more space than we chose in
1935 * set_max_drc() to allow. That isn't really a
1936 * problem as long as that doesn't make us think we
1937 * have lots more due to integer overflow.
1938 */
1939 total_avail = 0;
1940 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
1941 /*
1942 * Never use more than a fraction of the remaining memory,
1943 * unless it's the only way to give this client a slot.
1944 * The chosen fraction is either 1/8 or 1/number of threads,
1945 * whichever is smaller. This ensures there are adequate
1946 * slots to support multiple clients per thread.
1947 * Give the client one slot even if that would require
1948 * over-allocation--it is better than failure.
1949 */
1950 scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);
1951
1952 avail = clamp_t(unsigned long, avail, slotsize,
1953 total_avail/scale_factor);
1954 num = min_t(int, num, avail / slotsize);
1955 num = max_t(int, num, 1);
1956 nfsd_drc_mem_used += num * slotsize;
1957 spin_unlock(&nfsd_drc_lock);
1958
1959 return num;
1960}
1961
1962static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1963{
1964 int slotsize = slot_bytes(ca);
1965
1966 spin_lock(&nfsd_drc_lock);
1967 nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1968 spin_unlock(&nfsd_drc_lock);
1969}
1970
1971static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1972 struct nfsd4_channel_attrs *battrs)
1973{
1974 int numslots = fattrs->maxreqs;
1975 int slotsize = slot_bytes(fattrs);
1976 struct nfsd4_session *new;
1977 int i;
1978
1979 BUILD_BUG_ON(struct_size(new, se_slots, NFSD_MAX_SLOTS_PER_SESSION)
1980 > PAGE_SIZE);
1981
1982 new = kzalloc(struct_size(new, se_slots, numslots), GFP_KERNEL);
1983 if (!new)
1984 return NULL;
1985 /* allocate each struct nfsd4_slot and data cache in one piece */
1986 for (i = 0; i < numslots; i++) {
1987 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1988 if (!new->se_slots[i])
1989 goto out_free;
1990 }
1991
1992 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1993 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1994
1995 return new;
1996out_free:
1997 while (i--)
1998 kfree(new->se_slots[i]);
1999 kfree(new);
2000 return NULL;
2001}
2002
2003static void free_conn(struct nfsd4_conn *c)
2004{
2005 svc_xprt_put(c->cn_xprt);
2006 kfree(c);
2007}
2008
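/*
 * Transport-shutdown notification registered via register_xpt_user():
 * when the underlying svc_xprt goes away, drop the connection from its
 * session and re-probe the client's callback channel.
 */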
2009static void nfsd4_conn_lost(struct svc_xpt_user *u)
2010{
2011 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
2012 struct nfs4_client *clp = c->cn_session->se_client;
2013
2014 trace_nfsd_cb_lost(clp);
2015
2016 spin_lock(&clp->cl_lock);
2017 if (!list_empty(&c->cn_persession)) {
2018 list_del(&c->cn_persession);
2019 free_conn(c);
2020 }
2021 nfsd4_probe_callback(clp);
2022 spin_unlock(&clp->cl_lock);
2023}
2024
2025static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
2026{
2027 struct nfsd4_conn *conn;
2028
2029 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
2030 if (!conn)
2031 return NULL;
2032 svc_xprt_get(rqstp->rq_xprt);
2033 conn->cn_xprt = rqstp->rq_xprt;
2034 conn->cn_flags = flags;
2035 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
2036 return conn;
2037}
2038
2039static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
2040{
2041 conn->cn_session = ses;
2042 list_add(&conn->cn_persession, &ses->se_conns);
2043}
2044
2045static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
2046{
2047 struct nfs4_client *clp = ses->se_client;
2048
2049 spin_lock(&clp->cl_lock);
2050 __nfsd4_hash_conn(conn, ses);
2051 spin_unlock(&clp->cl_lock);
2052}
2053
2054static int nfsd4_register_conn(struct nfsd4_conn *conn)
2055{
2056 conn->cn_xpt_user.callback = nfsd4_conn_lost;
2057 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
2058}
2059
2060static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
2061{
2062 int ret;
2063
2064 nfsd4_hash_conn(conn, ses);
2065 ret = nfsd4_register_conn(conn);
2066 if (ret)
2067 /* oops; xprt is already down: */
2068 nfsd4_conn_lost(&conn->cn_xpt_user);
2069 /* We may have gained or lost a callback channel: */
2070 nfsd4_probe_callback_sync(ses->se_client);
2071}
2072
2073static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
2074{
2075 u32 dir = NFS4_CDFC4_FORE;
2076
2077 if (cses->flags & SESSION4_BACK_CHAN)
2078 dir |= NFS4_CDFC4_BACK;
2079 return alloc_conn(rqstp, dir);
2080}
2081
2082/* must be called under client_lock */
2083static void nfsd4_del_conns(struct nfsd4_session *s)
2084{
2085 struct nfs4_client *clp = s->se_client;
2086 struct nfsd4_conn *c;
2087
2088 spin_lock(&clp->cl_lock);
2089 while (!list_empty(&s->se_conns)) {
2090 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
2091 list_del_init(&c->cn_persession);
2092 spin_unlock(&clp->cl_lock);
2093
2094 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
2095 free_conn(c);
2096
2097 spin_lock(&clp->cl_lock);
2098 }
2099 spin_unlock(&clp->cl_lock);
2100}
2101
2102static void __free_session(struct nfsd4_session *ses)
2103{
2104 free_session_slots(ses);
2105 kfree(ses);
2106}
2107
2108static void free_session(struct nfsd4_session *ses)
2109{
2110 nfsd4_del_conns(ses);
2111 nfsd4_put_drc_mem(&ses->se_fchannel);
2112 __free_session(ses);
2113}
2114
2115static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
2116{
2117 int idx;
2118 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2119
2120 new->se_client = clp;
2121 gen_sessionid(new);
2122
2123 INIT_LIST_HEAD(&new->se_conns);
2124
2125 new->se_cb_seq_nr = 1;
2126 new->se_flags = cses->flags;
2127 new->se_cb_prog = cses->callback_prog;
2128 new->se_cb_sec = cses->cb_sec;
2129 atomic_set(&new->se_ref, 0);
2130 idx = hash_sessionid(&new->se_sessionid);
2131 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
2132 spin_lock(&clp->cl_lock);
2133 list_add(&new->se_perclnt, &clp->cl_sessions);
2134 spin_unlock(&clp->cl_lock);
2135
2136 {
2137 struct sockaddr *sa = svc_addr(rqstp);
2138 /*
2139 * This is a little silly; with sessions there's no real
2140 * use for the callback address. Use the peer address
2141 * as a reasonable default for now, but consider fixing
2142 * the rpc client not to require an address in the
2143 * future:
2144 */
2145 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
2146 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
2147 }
2148}
2149
2150/* caller must hold client_lock */
2151static struct nfsd4_session *
2152__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
2153{
2154 struct nfsd4_session *elem;
2155 int idx;
2156 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2157
2158 lockdep_assert_held(&nn->client_lock);
2159
2160 dump_sessionid(__func__, sessionid);
2161 idx = hash_sessionid(sessionid);
2162 /* Search in the appropriate list */
2163 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
2164 if (!memcmp(elem->se_sessionid.data, sessionid->data,
2165 NFS4_MAX_SESSIONID_LEN)) {
2166 return elem;
2167 }
2168 }
2169
2170 dprintk("%s: session not found\n", __func__);
2171 return NULL;
2172}
2173
2174static struct nfsd4_session *
2175find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
2176 __be32 *ret)
2177{
2178 struct nfsd4_session *session;
2179 __be32 status = nfserr_badsession;
2180
2181 session = __find_in_sessionid_hashtbl(sessionid, net);
2182 if (!session)
2183 goto out;
2184 status = nfsd4_get_session_locked(session);
2185 if (status)
2186 session = NULL;
2187out:
2188 *ret = status;
2189 return session;
2190}
2191
2192/* caller must hold client_lock */
2193static void
2194unhash_session(struct nfsd4_session *ses)
2195{
2196 struct nfs4_client *clp = ses->se_client;
2197 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2198
2199 lockdep_assert_held(&nn->client_lock);
2200
2201 list_del(&ses->se_hash);
2202 spin_lock(&ses->se_client->cl_lock);
2203 list_del(&ses->se_perclnt);
2204 spin_unlock(&ses->se_client->cl_lock);
2205}
2206
2207/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
2208static int
2209STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
2210{
2211 /*
2212 * We're assuming the clid was not given out from a boot
2213 * precisely 2^32 (about 136 years) before this one. That seems
2214 * a safe assumption:
2215 */
2216 if (clid->cl_boot == (u32)nn->boot_time)
2217 return 0;
2218 trace_nfsd_clid_stale(clid);
2219 return 1;
2220}
2221
2222/*
2223 * XXX Should we use a slab cache?
2224 * This type of memory management is somewhat inefficient, but we use it
2225 * anyway since SETCLIENTID is not a common operation.
2226 */
2227static struct nfs4_client *alloc_client(struct xdr_netobj name,
2228 struct nfsd_net *nn)
2229{
2230 struct nfs4_client *clp;
2231 int i;
2232
2233 if (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) {
2234 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
2235 return NULL;
2236 }
2237 clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
2238 if (clp == NULL)
2239 return NULL;
2240 xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
2241 if (clp->cl_name.data == NULL)
2242 goto err_no_name;
2243 clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
2244 sizeof(struct list_head),
2245 GFP_KERNEL);
2246 if (!clp->cl_ownerstr_hashtbl)
2247 goto err_no_hashtbl;
2248 for (i = 0; i < OWNER_HASH_SIZE; i++)
2249 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
2250 INIT_LIST_HEAD(&clp->cl_sessions);
2251 idr_init(&clp->cl_stateids);
2252 atomic_set(&clp->cl_rpc_users, 0);
2253 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
2254 clp->cl_state = NFSD4_ACTIVE;
2255 atomic_inc(&nn->nfs4_client_count);
2256 atomic_set(&clp->cl_delegs_in_recall, 0);
2257 INIT_LIST_HEAD(&clp->cl_idhash);
2258 INIT_LIST_HEAD(&clp->cl_openowners);
2259 INIT_LIST_HEAD(&clp->cl_delegations);
2260 INIT_LIST_HEAD(&clp->cl_lru);
2261 INIT_LIST_HEAD(&clp->cl_revoked);
2262#ifdef CONFIG_NFSD_PNFS
2263 INIT_LIST_HEAD(&clp->cl_lo_states);
2264#endif
2265 INIT_LIST_HEAD(&clp->async_copies);
2266 spin_lock_init(&clp->async_lock);
2267 spin_lock_init(&clp->cl_lock);
2268 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
2269 return clp;
2270err_no_hashtbl:
2271 kfree(clp->cl_name.data);
2272err_no_name:
2273 kmem_cache_free(client_slab, clp);
2274 return NULL;
2275}
2276
2277static void __free_client(struct kref *k)
2278{
2279 struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
2280 struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
2281
2282 free_svc_cred(&clp->cl_cred);
2283 kfree(clp->cl_ownerstr_hashtbl);
2284 kfree(clp->cl_name.data);
2285 kfree(clp->cl_nii_domain.data);
2286 kfree(clp->cl_nii_name.data);
2287 idr_destroy(&clp->cl_stateids);
2288 kfree(clp->cl_ra);
2289 kmem_cache_free(client_slab, clp);
2290}
2291
2292static void drop_client(struct nfs4_client *clp)
2293{
2294 kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
2295}
2296
2297static void
2298free_client(struct nfs4_client *clp)
2299{
2300 while (!list_empty(&clp->cl_sessions)) {
2301 struct nfsd4_session *ses;
2302 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
2303 se_perclnt);
2304 list_del(&ses->se_perclnt);
2305 WARN_ON_ONCE(atomic_read(&ses->se_ref));
2306 free_session(ses);
2307 }
2308 rpc_destroy_wait_queue(&clp->cl_cb_waitq);
2309 if (clp->cl_nfsd_dentry) {
2310 nfsd_client_rmdir(clp->cl_nfsd_dentry);
2311 clp->cl_nfsd_dentry = NULL;
2312 wake_up_all(&expiry_wq);
2313 }
2314 drop_client(clp);
2315}
2316
2317/* must be called under the client_lock */
2318static void
2319unhash_client_locked(struct nfs4_client *clp)
2320{
2321 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2322 struct nfsd4_session *ses;
2323
2324 lockdep_assert_held(&nn->client_lock);
2325
2326 /* Mark the client as expired! */
2327 clp->cl_time = 0;
2328 /* Make it invisible */
2329 if (!list_empty(&clp->cl_idhash)) {
2330 list_del_init(&clp->cl_idhash);
2331 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2332 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
2333 else
2334 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2335 }
2336 list_del_init(&clp->cl_lru);
2337 spin_lock(&clp->cl_lock);
2338 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
2339 list_del_init(&ses->se_hash);
2340 spin_unlock(&clp->cl_lock);
2341}
2342
2343static void
2344unhash_client(struct nfs4_client *clp)
2345{
2346 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2347
2348 spin_lock(&nn->client_lock);
2349 unhash_client_locked(clp);
2350 spin_unlock(&nn->client_lock);
2351}
2352
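/*
 * A client with RPCs still in flight (cl_rpc_users non-zero) cannot be
 * expired yet; return nfserr_jukebox (NFS4ERR_DELAY) so the caller
 * retries later, otherwise unhash the client now.
 */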
2353static __be32 mark_client_expired_locked(struct nfs4_client *clp)
2354{
2355 if (atomic_read(&clp->cl_rpc_users))
2356 return nfserr_jukebox;
2357 unhash_client_locked(clp);
2358 return nfs_ok;
2359}
2360
2361static void
2362__destroy_client(struct nfs4_client *clp)
2363{
2364 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2365 int i;
2366 struct nfs4_openowner *oo;
2367 struct nfs4_delegation *dp;
2368 struct list_head reaplist;
2369
2370 INIT_LIST_HEAD(&reaplist);
2371 spin_lock(&state_lock);
2372 while (!list_empty(&clp->cl_delegations)) {
2373 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
2374 unhash_delegation_locked(dp, SC_STATUS_CLOSED);
2375 list_add(&dp->dl_recall_lru, &reaplist);
2376 }
2377 spin_unlock(&state_lock);
2378 while (!list_empty(&reaplist)) {
2379 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
2380 list_del_init(&dp->dl_recall_lru);
2381 destroy_unhashed_deleg(dp);
2382 }
2383 while (!list_empty(&clp->cl_revoked)) {
2384 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
2385 list_del_init(&dp->dl_recall_lru);
2386 nfs4_put_stid(&dp->dl_stid);
2387 }
2388 while (!list_empty(&clp->cl_openowners)) {
2389 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
2390 nfs4_get_stateowner(&oo->oo_owner);
2391 release_openowner(oo);
2392 }
2393 for (i = 0; i < OWNER_HASH_SIZE; i++) {
2394 struct nfs4_stateowner *so, *tmp;
2395
2396 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
2397 so_strhash) {
2398 /* Should be no openowners at this point */
2399 WARN_ON_ONCE(so->so_is_open_owner);
2400 remove_blocked_locks(lockowner(so));
2401 }
2402 }
2403 nfsd4_return_all_client_layouts(clp);
2404 nfsd4_shutdown_copy(clp);
2405 nfsd4_shutdown_callback(clp);
2406 if (clp->cl_cb_conn.cb_xprt)
2407 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
2408 atomic_add_unless(&nn->nfs4_client_count, -1, 0);
2409 nfsd4_dec_courtesy_client_count(nn, clp);
2410 free_client(clp);
2411 wake_up_all(&expiry_wq);
2412}
2413
2414static void
2415destroy_client(struct nfs4_client *clp)
2416{
2417 unhash_client(clp);
2418 __destroy_client(clp);
2419}
2420
2421static void inc_reclaim_complete(struct nfs4_client *clp)
2422{
2423 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2424
2425 if (!nn->track_reclaim_completes)
2426 return;
2427 if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
2428 return;
2429 if (atomic_inc_return(&nn->nr_reclaim_complete) ==
2430 nn->reclaim_str_hashtbl_size) {
2431 printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
2432 clp->net->ns.inum);
2433 nfsd4_end_grace(nn);
2434 }
2435}
2436
2437static void expire_client(struct nfs4_client *clp)
2438{
2439 unhash_client(clp);
2440 nfsd4_client_record_remove(clp);
2441 __destroy_client(clp);
2442}
2443
2444static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
2445{
2446 memcpy(target->cl_verifier.data, source->data,
2447 sizeof(target->cl_verifier.data));
2448}
2449
2450static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
2451{
2452 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
2453 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
2454}
2455
2456static int copy_cred(struct svc_cred *target, struct svc_cred *source)
2457{
2458 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
2459 target->cr_raw_principal = kstrdup(source->cr_raw_principal,
2460 GFP_KERNEL);
2461 target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
2462 if ((source->cr_principal && !target->cr_principal) ||
2463 (source->cr_raw_principal && !target->cr_raw_principal) ||
2464 (source->cr_targ_princ && !target->cr_targ_princ))
2465 return -ENOMEM;
2466
2467 target->cr_flavor = source->cr_flavor;
2468 target->cr_uid = source->cr_uid;
2469 target->cr_gid = source->cr_gid;
2470 target->cr_group_info = source->cr_group_info;
2471 get_group_info(target->cr_group_info);
2472 target->cr_gss_mech = source->cr_gss_mech;
2473 if (source->cr_gss_mech)
2474 gss_mech_get(source->cr_gss_mech);
2475 return 0;
2476}
2477
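/*
 * Impose a total order on xdr_netobjs: shorter blobs sort first, and
 * equal-length blobs compare bytewise. Used to keep the conf/unconf
 * client-name rb-trees sorted.
 */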
2478static int
2479compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
2480{
2481 if (o1->len < o2->len)
2482 return -1;
2483 if (o1->len > o2->len)
2484 return 1;
2485 return memcmp(o1->data, o2->data, o1->len);
2486}
2487
2488static int
2489same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
2490{
2491 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
2492}
2493
2494static int
2495same_clid(clientid_t *cl1, clientid_t *cl2)
2496{
2497 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
2498}
2499
2500static bool groups_equal(struct group_info *g1, struct group_info *g2)
2501{
2502 int i;
2503
2504 if (g1->ngroups != g2->ngroups)
2505 return false;
2506 for (i=0; i<g1->ngroups; i++)
2507 if (!gid_eq(g1->gid[i], g2->gid[i]))
2508 return false;
2509 return true;
2510}
2511
2512/*
2513 * RFC 3530 language requires clid_inuse be returned when the
2514 * "principal" associated with a request differs from that previously
2515 * used. We use the uid, gids, and gss principal string as our best
2516 * approximation. We also don't want to allow non-gss use of a client
2517 * established using gss: in theory cr_principal should catch that
2518 * change, but in practice cr_principal can be null even in the gss case
2519 * since gssd doesn't always pass down a principal string.
2520 */
2521static bool is_gss_cred(struct svc_cred *cr)
2522{
2523 /* Is cr_flavor one of the gss "pseudoflavors"?: */
2524 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2525}
2526
2527
2528static bool
2529same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2530{
2531 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2532 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2533 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2534 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2535 return false;
2536 /* XXX: check that cr_targ_princ fields match ? */
2537 if (cr1->cr_principal == cr2->cr_principal)
2538 return true;
2539 if (!cr1->cr_principal || !cr2->cr_principal)
2540 return false;
2541 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2542}
2543
2544static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2545{
2546 struct svc_cred *cr = &rqstp->rq_cred;
2547 u32 service;
2548
2549 if (!cr->cr_gss_mech)
2550 return false;
2551 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2552 return service == RPC_GSS_SVC_INTEGRITY ||
2553 service == RPC_GSS_SVC_PRIVACY;
2554}
2555
2556bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2557{
2558 struct svc_cred *cr = &rqstp->rq_cred;
2559
2560 if (!cl->cl_mach_cred)
2561 return true;
2562 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2563 return false;
2564 if (!svc_rqst_integrity_protected(rqstp))
2565 return false;
2566 if (cl->cl_cred.cr_raw_principal)
2567 return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2568 cr->cr_raw_principal);
2569 if (!cr->cr_principal)
2570 return false;
2571 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2572}
2573
2574static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2575{
2576 __be32 verf[2];
2577
2578 /*
2579 * This is opaque to the client, so no need to byte-swap. Use
2580 * __force to keep sparse happy.
2581 */
2582 verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
2583 verf[1] = (__force __be32)nn->clverifier_counter++;
2584 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2585}
2586
2587static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2588{
2589 clp->cl_clientid.cl_boot = (u32)nn->boot_time;
2590 clp->cl_clientid.cl_id = nn->clientid_counter++;
2591 gen_confirm(clp, nn);
2592}
2593
2594static struct nfs4_stid *
2595find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2596{
2597 struct nfs4_stid *ret;
2598
2599 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2600 if (!ret || !ret->sc_type)
2601 return NULL;
2602 return ret;
2603}
2604
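/*
 * Look up a stateid and take a reference, but only if its sc_type is in
 * @typemask and all of its sc_status bits fall within @ok_states.
 */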
2605static struct nfs4_stid *
2606find_stateid_by_type(struct nfs4_client *cl, stateid_t *t,
2607 unsigned short typemask, unsigned short ok_states)
2608{
2609 struct nfs4_stid *s;
2610
2611 spin_lock(&cl->cl_lock);
2612 s = find_stateid_locked(cl, t);
2613 if (s != NULL) {
2614 if ((s->sc_status & ~ok_states) == 0 &&
2615 (typemask & s->sc_type))
2616 refcount_inc(&s->sc_count);
2617 else
2618 s = NULL;
2619 }
2620 spin_unlock(&cl->cl_lock);
2621 return s;
2622}
2623
2624static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
2625{
2626 struct nfsdfs_client *nc;
2627 nc = get_nfsdfs_client(inode);
2628 if (!nc)
2629 return NULL;
2630 return container_of(nc, struct nfs4_client, cl_nfsdfs);
2631}
2632
2633static void seq_quote_mem(struct seq_file *m, char *data, int len)
2634{
2635 seq_puts(m, "\"");
2636 seq_escape_mem(m, data, len, ESCAPE_HEX | ESCAPE_NAP | ESCAPE_APPEND, "\"\\");
2637 seq_puts(m, "\"");
2638}
2639
2640static const char *cb_state2str(int state)
2641{
2642 switch (state) {
2643 case NFSD4_CB_UP:
2644 return "UP";
2645 case NFSD4_CB_UNKNOWN:
2646 return "UNKNOWN";
2647 case NFSD4_CB_DOWN:
2648 return "DOWN";
2649 case NFSD4_CB_FAULT:
2650 return "FAULT";
2651 }
2652 return "UNDEFINED";
2653}
2654
2655static int client_info_show(struct seq_file *m, void *v)
2656{
2657 struct inode *inode = file_inode(m->file);
2658 struct nfs4_client *clp;
2659 u64 clid;
2660
2661 clp = get_nfsdfs_clp(inode);
2662 if (!clp)
2663 return -ENXIO;
2664 memcpy(&clid, &clp->cl_clientid, sizeof(clid));
2665 seq_printf(m, "clientid: 0x%llx\n", clid);
2666 seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
2667
2668 if (clp->cl_state == NFSD4_COURTESY)
2669 seq_puts(m, "status: courtesy\n");
2670 else if (clp->cl_state == NFSD4_EXPIRABLE)
2671 seq_puts(m, "status: expirable\n");
2672 else if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2673 seq_puts(m, "status: confirmed\n");
2674 else
2675 seq_puts(m, "status: unconfirmed\n");
2676 seq_printf(m, "seconds from last renew: %lld\n",
2677 ktime_get_boottime_seconds() - clp->cl_time);
2678 seq_puts(m, "name: ");
2679 seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
2680 seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
2681 if (clp->cl_nii_domain.data) {
2682 seq_puts(m, "Implementation domain: ");
2683 seq_quote_mem(m, clp->cl_nii_domain.data,
2684 clp->cl_nii_domain.len);
2685 seq_puts(m, "\nImplementation name: ");
2686 seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
2687 seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
2688 clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
2689 }
2690 seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state));
2691 seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr);
2692 seq_printf(m, "admin-revoked states: %d\n",
2693 atomic_read(&clp->cl_admin_revoked));
2694 drop_client(clp);
2695
2696 return 0;
2697}
2698
2699DEFINE_SHOW_ATTRIBUTE(client_info);
2700
2701static void *states_start(struct seq_file *s, loff_t *pos)
2702 __acquires(&clp->cl_lock)
2703{
2704 struct nfs4_client *clp = s->private;
2705 unsigned long id = *pos;
2706 void *ret;
2707
2708 spin_lock(&clp->cl_lock);
2709 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2710 *pos = id;
2711 return ret;
2712}
2713
2714static void *states_next(struct seq_file *s, void *v, loff_t *pos)
2715{
2716 struct nfs4_client *clp = s->private;
2717 unsigned long id = *pos;
2718 void *ret;
2719
2720 id = *pos;
2721 id++;
2722 ret = idr_get_next_ul(&clp->cl_stateids, &id);
2723 *pos = id;
2724 return ret;
2725}
2726
2727static void states_stop(struct seq_file *s, void *v)
2728 __releases(&clp->cl_lock)
2729{
2730 struct nfs4_client *clp = s->private;
2731
2732 spin_unlock(&clp->cl_lock);
2733}
2734
2735static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
2736{
2737 seq_printf(s, "filename: \"%pD2\"", f->nf_file);
2738}
2739
2740static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
2741{
2742 struct inode *inode = file_inode(f->nf_file);
2743
2744 seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
2745 MAJOR(inode->i_sb->s_dev),
2746 MINOR(inode->i_sb->s_dev),
2747 inode->i_ino);
2748}
2749
2750static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
2751{
2752 seq_puts(s, "owner: ");
2753 seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
2754}
2755
2756static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
2757{
2758 seq_printf(s, "0x%.8x", stid->si_generation);
2759 seq_printf(s, "%12phN", &stid->si_opaque);
2760}
2761
2762static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
2763{
2764 struct nfs4_ol_stateid *ols;
2765 struct nfs4_file *nf;
2766 struct nfsd_file *file;
2767 struct nfs4_stateowner *oo;
2768 unsigned int access, deny;
2769
2770 ols = openlockstateid(st);
2771 oo = ols->st_stateowner;
2772 nf = st->sc_file;
2773
2774 seq_puts(s, "- ");
2775 nfs4_show_stateid(s, &st->sc_stateid);
2776 seq_puts(s, ": { type: open, ");
2777
2778 access = bmap_to_share_mode(ols->st_access_bmap);
2779 deny = bmap_to_share_mode(ols->st_deny_bmap);
2780
2781 seq_printf(s, "access: %s%s, ",
2782 access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2783 access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2784 seq_printf(s, "deny: %s%s, ",
2785 deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2786 deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2787
2788 spin_lock(&nf->fi_lock);
2789 file = find_any_file_locked(nf);
2790 if (file) {
2791 nfs4_show_superblock(s, file);
2792 seq_puts(s, ", ");
2793 nfs4_show_fname(s, file);
2794 seq_puts(s, ", ");
2795 }
2796 spin_unlock(&nf->fi_lock);
2797 nfs4_show_owner(s, oo);
2798 if (st->sc_status & SC_STATUS_ADMIN_REVOKED)
2799 seq_puts(s, ", admin-revoked");
2800 seq_puts(s, " }\n");
2801 return 0;
2802}
2803
2804static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
2805{
2806 struct nfs4_ol_stateid *ols;
2807 struct nfs4_file *nf;
2808 struct nfsd_file *file;
2809 struct nfs4_stateowner *oo;
2810
2811 ols = openlockstateid(st);
2812 oo = ols->st_stateowner;
2813 nf = st->sc_file;
2814
2815 seq_puts(s, "- ");
2816 nfs4_show_stateid(s, &st->sc_stateid);
2817 seq_puts(s, ": { type: lock, ");
2818
2819 spin_lock(&nf->fi_lock);
2820 file = find_any_file_locked(nf);
2821 if (file) {
2822 /*
2823 * Note: a lock stateid isn't really the same thing as a lock,
2824 * it's the locking state held by one owner on a file, and there
2825 * may be multiple (or no) lock ranges associated with it.
2826 * (The same is true of open stateids.)
2827 */
2828
2829 nfs4_show_superblock(s, file);
2830 /* XXX: open stateid? */
2831 seq_puts(s, ", ");
2832 nfs4_show_fname(s, file);
2833 seq_puts(s, ", ");
2834 }
2835 nfs4_show_owner(s, oo);
2836 if (st->sc_status & SC_STATUS_ADMIN_REVOKED)
2837 seq_puts(s, ", admin-revoked");
2838 seq_puts(s, " }\n");
2839 spin_unlock(&nf->fi_lock);
2840 return 0;
2841}
2842
2843static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
2844{
2845 struct nfs4_delegation *ds;
2846 struct nfs4_file *nf;
2847 struct nfsd_file *file;
2848
2849 ds = delegstateid(st);
2850 nf = st->sc_file;
2851
2852 seq_puts(s, "- ");
2853 nfs4_show_stateid(s, &st->sc_stateid);
2854 seq_puts(s, ": { type: deleg, ");
2855
2856 seq_printf(s, "access: %s",
2857 ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
2858
2859 /* XXX: lease time, whether it's being recalled. */
2860
2861 spin_lock(&nf->fi_lock);
2862 file = nf->fi_deleg_file;
2863 if (file) {
2864 seq_puts(s, ", ");
2865 nfs4_show_superblock(s, file);
2866 seq_puts(s, ", ");
2867 nfs4_show_fname(s, file);
2868 }
2869 spin_unlock(&nf->fi_lock);
2870 if (st->sc_status & SC_STATUS_ADMIN_REVOKED)
2871 seq_puts(s, ", admin-revoked");
2872 seq_puts(s, " }\n");
2873 return 0;
2874}
2875
2876static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
2877{
2878 struct nfs4_layout_stateid *ls;
2879 struct nfsd_file *file;
2880
2881 ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
2882
2883 seq_puts(s, "- ");
2884 nfs4_show_stateid(s, &st->sc_stateid);
2885 seq_puts(s, ": { type: layout");
2886
2887 /* XXX: What else would be useful? */
2888
2889 spin_lock(&ls->ls_stid.sc_file->fi_lock);
2890 file = ls->ls_file;
2891 if (file) {
2892 seq_puts(s, ", ");
2893 nfs4_show_superblock(s, file);
2894 seq_puts(s, ", ");
2895 nfs4_show_fname(s, file);
2896 }
2897 spin_unlock(&ls->ls_stid.sc_file->fi_lock);
2898 if (st->sc_status & SC_STATUS_ADMIN_REVOKED)
2899 seq_puts(s, ", admin-revoked");
2900 seq_puts(s, " }\n");
2901
2902 return 0;
2903}
2904
2905static int states_show(struct seq_file *s, void *v)
2906{
2907 struct nfs4_stid *st = v;
2908
2909 switch (st->sc_type) {
2910 case SC_TYPE_OPEN:
2911 return nfs4_show_open(s, st);
2912 case SC_TYPE_LOCK:
2913 return nfs4_show_lock(s, st);
2914 case SC_TYPE_DELEG:
2915 return nfs4_show_deleg(s, st);
2916 case SC_TYPE_LAYOUT:
2917 return nfs4_show_layout(s, st);
2918 default:
2919 return 0; /* XXX: or SEQ_SKIP? */
2920 }
2921 /* XXX: copy stateids? */
2922}
2923
2924static struct seq_operations states_seq_ops = {
2925 .start = states_start,
2926 .next = states_next,
2927 .stop = states_stop,
2928 .show = states_show
2929};
2930
2931static int client_states_open(struct inode *inode, struct file *file)
2932{
2933 struct seq_file *s;
2934 struct nfs4_client *clp;
2935 int ret;
2936
2937 clp = get_nfsdfs_clp(inode);
2938 if (!clp)
2939 return -ENXIO;
2940
2941 ret = seq_open(file, &states_seq_ops);
2942 if (ret)
2943 return ret;
2944 s = file->private_data;
2945 s->private = clp;
2946 return 0;
2947}
2948
2949static int client_opens_release(struct inode *inode, struct file *file)
2950{
2951 struct seq_file *m = file->private_data;
2952 struct nfs4_client *clp = m->private;
2953
2954 /* XXX: alternatively, we could get/drop in seq start/stop */
2955 drop_client(clp);
2956 return seq_release(inode, file);
2957}
2958
2959static const struct file_operations client_states_fops = {
2960 .open = client_states_open,
2961 .read = seq_read,
2962 .llseek = seq_lseek,
2963 .release = client_opens_release,
2964};
2965
2966/*
2967 * Normally we refuse to destroy clients that are in use, but here the
2968 * administrator is telling us to just do it. We also want to wait
2969 * so the caller has a guarantee that the client's locks are gone by
2970 * the time the write returns:
2971 */
2972static void force_expire_client(struct nfs4_client *clp)
2973{
2974 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2975 bool already_expired;
2976
2977 trace_nfsd_clid_admin_expired(&clp->cl_clientid);
2978
2979 spin_lock(&nn->client_lock);
2980 clp->cl_time = 0;
2981 spin_unlock(&nn->client_lock);
2982
2983 wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
2984 spin_lock(&nn->client_lock);
2985 already_expired = list_empty(&clp->cl_lru);
2986 if (!already_expired)
2987 unhash_client_locked(clp);
2988 spin_unlock(&nn->client_lock);
2989
2990 if (!already_expired)
2991 expire_client(clp);
2992 else
2993 wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
2994}
2995
2996static ssize_t client_ctl_write(struct file *file, const char __user *buf,
2997 size_t size, loff_t *pos)
2998{
2999 char *data;
3000 struct nfs4_client *clp;
3001
3002 data = simple_transaction_get(file, buf, size);
3003 if (IS_ERR(data))
3004 return PTR_ERR(data);
3005 if (size != 7 || 0 != memcmp(data, "expire\n", 7))
3006 return -EINVAL;
3007 clp = get_nfsdfs_clp(file_inode(file));
3008 if (!clp)
3009 return -ENXIO;
3010 force_expire_client(clp);
3011 drop_client(clp);
3012 return 7;
3013}
3014
3015static const struct file_operations client_ctl_fops = {
3016 .write = client_ctl_write,
3017 .release = simple_transaction_release,
3018};
3019
3020static const struct tree_descr client_files[] = {
3021 [0] = {"info", &client_info_fops, S_IRUSR},
3022 [1] = {"states", &client_states_fops, S_IRUSR},
3023 [2] = {"ctl", &client_ctl_fops, S_IWUSR},
3024 [3] = {""},
3025};
3026
3027static int
3028nfsd4_cb_recall_any_done(struct nfsd4_callback *cb,
3029 struct rpc_task *task)
3030{
3031 trace_nfsd_cb_recall_any_done(cb, task);
3032 switch (task->tk_status) {
3033 case -NFS4ERR_DELAY:
3034 rpc_delay(task, 2 * HZ);
3035 return 0;
3036 default:
3037 return 1;
3038 }
3039}
3040
3041static void
3042nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
3043{
3044 struct nfs4_client *clp = cb->cb_clp;
3045
3046 clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
3047 drop_client(clp);
3048}
3049
3050static int
3051nfsd4_cb_getattr_done(struct nfsd4_callback *cb, struct rpc_task *task)
3052{
3053 struct nfs4_cb_fattr *ncf =
3054 container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
3055
3056 ncf->ncf_cb_status = task->tk_status;
3057 switch (task->tk_status) {
3058 case -NFS4ERR_DELAY:
3059 rpc_delay(task, 2 * HZ);
3060 return 0;
3061 default:
3062 return 1;
3063 }
3064}
3065
3066static void
3067nfsd4_cb_getattr_release(struct nfsd4_callback *cb)
3068{
3069 struct nfs4_cb_fattr *ncf =
3070 container_of(cb, struct nfs4_cb_fattr, ncf_getattr);
3071 struct nfs4_delegation *dp =
3072 container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
3073
3074 nfs4_put_stid(&dp->dl_stid);
3075 clear_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags);
3076 wake_up_bit(&ncf->ncf_cb_flags, CB_GETATTR_BUSY);
3077}
3078
3079static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = {
3080 .done = nfsd4_cb_recall_any_done,
3081 .release = nfsd4_cb_recall_any_release,
3082};
3083
3084static const struct nfsd4_callback_ops nfsd4_cb_getattr_ops = {
3085 .done = nfsd4_cb_getattr_done,
3086 .release = nfsd4_cb_getattr_release,
3087};
3088
3089static void nfs4_cb_getattr(struct nfs4_cb_fattr *ncf)
3090{
3091 struct nfs4_delegation *dp =
3092 container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
3093
3094 if (test_and_set_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags))
3095 return;
3096 /* set to proper status when nfsd4_cb_getattr_done runs */
3097 ncf->ncf_cb_status = NFS4ERR_IO;
3098
3099 refcount_inc(&dp->dl_stid.sc_count);
3100 nfsd4_run_cb(&ncf->ncf_getattr);
3101}
3102
3103static struct nfs4_client *create_client(struct xdr_netobj name,
3104 struct svc_rqst *rqstp, nfs4_verifier *verf)
3105{
3106 struct nfs4_client *clp;
3107 struct sockaddr *sa = svc_addr(rqstp);
3108 int ret;
3109 struct net *net = SVC_NET(rqstp);
3110 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3111 struct dentry *dentries[ARRAY_SIZE(client_files)];
3112
3113 clp = alloc_client(name, nn);
3114 if (clp == NULL)
3115 return NULL;
3116
3117 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
3118 if (ret) {
3119 free_client(clp);
3120 return NULL;
3121 }
3122 gen_clid(clp, nn);
3123 kref_init(&clp->cl_nfsdfs.cl_ref);
3124 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
3125 clp->cl_time = ktime_get_boottime_seconds();
3126 clear_bit(0, &clp->cl_cb_slot_busy);
3127 copy_verf(clp, verf);
3128 memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
3129 clp->cl_cb_session = NULL;
3130 clp->net = net;
3131 clp->cl_nfsd_dentry = nfsd_client_mkdir(
3132 nn, &clp->cl_nfsdfs,
3133 clp->cl_clientid.cl_id - nn->clientid_base,
3134 client_files, dentries);
3135 clp->cl_nfsd_info_dentry = dentries[0];
3136 if (!clp->cl_nfsd_dentry) {
3137 free_client(clp);
3138 return NULL;
3139 }
3140 clp->cl_ra = kzalloc(sizeof(*clp->cl_ra), GFP_KERNEL);
3141 if (!clp->cl_ra) {
3142 free_client(clp);
3143 return NULL;
3144 }
3145 clp->cl_ra_time = 0;
3146 nfsd4_init_cb(&clp->cl_ra->ra_cb, clp, &nfsd4_cb_recall_any_ops,
3147 NFSPROC4_CLNT_CB_RECALL_ANY);
3148 return clp;
3149}
3150
3151static void
3152add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
3153{
3154 struct rb_node **new = &(root->rb_node), *parent = NULL;
3155 struct nfs4_client *clp;
3156
3157 while (*new) {
3158 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
3159 parent = *new;
3160
3161 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
3162 new = &((*new)->rb_left);
3163 else
3164 new = &((*new)->rb_right);
3165 }
3166
3167 rb_link_node(&new_clp->cl_namenode, parent, new);
3168 rb_insert_color(&new_clp->cl_namenode, root);
3169}
3170
3171static struct nfs4_client *
3172find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
3173{
3174 int cmp;
3175 struct rb_node *node = root->rb_node;
3176 struct nfs4_client *clp;
3177
3178 while (node) {
3179 clp = rb_entry(node, struct nfs4_client, cl_namenode);
3180 cmp = compare_blob(&clp->cl_name, name);
3181 if (cmp > 0)
3182 node = node->rb_left;
3183 else if (cmp < 0)
3184 node = node->rb_right;
3185 else
3186 return clp;
3187 }
3188 return NULL;
3189}
3190
3191static void
3192add_to_unconfirmed(struct nfs4_client *clp)
3193{
3194 unsigned int idhashval;
3195 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
3196
3197 lockdep_assert_held(&nn->client_lock);
3198
3199 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
3200 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
3201 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
3202 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
3203 renew_client_locked(clp);
3204}
3205
3206static void
3207move_to_confirmed(struct nfs4_client *clp)
3208{
3209 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
3210 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
3211
3212 lockdep_assert_held(&nn->client_lock);
3213
3214 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
3215 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
3216 add_clp_to_name_tree(clp, &nn->conf_name_tree);
3217 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
3218 trace_nfsd_clid_confirmed(&clp->cl_clientid);
3219 renew_client_locked(clp);
3220}
3221
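/*
 * Look up a client by clientid in the given id hash table. The @sessions
 * flag must match whether the client uses sessions (NFSv4.1+); otherwise
 * the lookup fails. A successful lookup also renews the client's lease.
 */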
3222static struct nfs4_client *
3223find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
3224{
3225 struct nfs4_client *clp;
3226 unsigned int idhashval = clientid_hashval(clid->cl_id);
3227
3228 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
3229 if (same_clid(&clp->cl_clientid, clid)) {
3230 if ((bool)clp->cl_minorversion != sessions)
3231 return NULL;
3232 renew_client_locked(clp);
3233 return clp;
3234 }
3235 }
3236 return NULL;
3237}
3238
3239static struct nfs4_client *
3240find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
3241{
3242 struct list_head *tbl = nn->conf_id_hashtbl;
3243
3244 lockdep_assert_held(&nn->client_lock);
3245 return find_client_in_id_table(tbl, clid, sessions);
3246}
3247
3248static struct nfs4_client *
3249find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
3250{
3251 struct list_head *tbl = nn->unconf_id_hashtbl;
3252
3253 lockdep_assert_held(&nn->client_lock);
3254 return find_client_in_id_table(tbl, clid, sessions);
3255}
3256
3257static bool clp_used_exchangeid(struct nfs4_client *clp)
3258{
3259 return clp->cl_exchange_flags != 0;
3260}
3261
3262static struct nfs4_client *
3263find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
3264{
3265 lockdep_assert_held(&nn->client_lock);
3266 return find_clp_in_name_tree(name, &nn->conf_name_tree);
3267}
3268
3269static struct nfs4_client *
3270find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
3271{
3272 lockdep_assert_held(&nn->client_lock);
3273 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
3274}
3275
3276static void
3277gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
3278{
3279 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
3280 struct sockaddr *sa = svc_addr(rqstp);
3281 u32 scopeid = rpc_get_scope_id(sa);
3282 unsigned short expected_family;
3283
3284 /* Currently, we only support tcp and tcp6 for the callback channel */
3285 if (se->se_callback_netid_len == 3 &&
3286 !memcmp(se->se_callback_netid_val, "tcp", 3))
3287 expected_family = AF_INET;
3288 else if (se->se_callback_netid_len == 4 &&
3289 !memcmp(se->se_callback_netid_val, "tcp6", 4))
3290 expected_family = AF_INET6;
3291 else
3292 goto out_err;
3293
3294 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
3295 se->se_callback_addr_len,
3296 (struct sockaddr *)&conn->cb_addr,
3297 sizeof(conn->cb_addr));
3298
3299 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
3300 goto out_err;
3301
3302 if (conn->cb_addr.ss_family == AF_INET6)
3303 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
3304
3305 conn->cb_prog = se->se_callback_prog;
3306 conn->cb_ident = se->se_callback_ident;
3307 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
3308 trace_nfsd_cb_args(clp, conn);
3309 return;
3310out_err:
3311 conn->cb_addr.ss_family = AF_UNSPEC;
3312 conn->cb_addrlen = 0;
3313 trace_nfsd_cb_nodelegs(clp);
3314 return;
3315}
3316
3317/*
3318 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
3319 */
3320static void
3321nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
3322{
3323 struct xdr_buf *buf = resp->xdr->buf;
3324 struct nfsd4_slot *slot = resp->cstate.slot;
3325 unsigned int base;
3326
3327 dprintk("--> %s slot %p\n", __func__, slot);
3328
3329 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
3330 slot->sl_opcnt = resp->opcnt;
3331 slot->sl_status = resp->cstate.status;
3332 free_svc_cred(&slot->sl_cred);
3333 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
3334
3335 if (!nfsd4_cache_this(resp)) {
3336 slot->sl_flags &= ~NFSD4_SLOT_CACHED;
3337 return;
3338 }
3339 slot->sl_flags |= NFSD4_SLOT_CACHED;
3340
3341 base = resp->cstate.data_offset;
3342 slot->sl_datalen = buf->len - base;
3343 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
3344 WARN(1, "%s: sessions DRC could not cache compound\n",
3345 __func__);
3346 return;
3347}
3348
3349/*
3350 * Encode the replay sequence operation from the slot values.
3351 * If cachethis is FALSE, encode the uncached-reply error on the next
3352 * operation, which sets resp->p and increments resp->opcnt for
3353 * nfs4svc_encode_compoundres.
3354 *
3355 */
3356static __be32
3357nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
3358 struct nfsd4_compoundres *resp)
3359{
3360 struct nfsd4_op *op;
3361 struct nfsd4_slot *slot = resp->cstate.slot;
3362
3363 /* Encode the replayed sequence operation */
3364 op = &args->ops[resp->opcnt - 1];
3365 nfsd4_encode_operation(resp, op);
3366
3367 if (slot->sl_flags & NFSD4_SLOT_CACHED)
3368 return op->status;
3369 if (args->opcnt == 1) {
3370 /*
3371 * The original operation wasn't a solo sequence--we
3372 * always cache those--so this retry must not match the
3373 * original:
3374 */
3375 op->status = nfserr_seq_false_retry;
3376 } else {
3377 op = &args->ops[resp->opcnt++];
3378 op->status = nfserr_retry_uncached_rep;
3379 nfsd4_encode_operation(resp, op);
3380 }
3381 return op->status;
3382}
3383
3384/*
3385 * The sequence operation is not cached because we can use the slot and
3386 * session values.
3387 */
3388static __be32
3389nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
3390 struct nfsd4_sequence *seq)
3391{
3392 struct nfsd4_slot *slot = resp->cstate.slot;
3393 struct xdr_stream *xdr = resp->xdr;
3394 __be32 *p;
3395 __be32 status;
3396
3397 dprintk("--> %s slot %p\n", __func__, slot);
3398
3399 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
3400 if (status)
3401 return status;
3402
3403 p = xdr_reserve_space(xdr, slot->sl_datalen);
3404 if (!p) {
3405 WARN_ON_ONCE(1);
3406 return nfserr_serverfault;
3407 }
3408 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
3409 xdr_commit_encode(xdr);
3410
3411 resp->opcnt = slot->sl_opcnt;
3412 return slot->sl_status;
3413}
3414
3415/*
3416 * Set the exchange_id flags returned by the server.
3417 */
3418static void
3419nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
3420{
3421#ifdef CONFIG_NFSD_PNFS
3422 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
3423#else
3424 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
3425#endif
3426
3427 /* Referrals are supported, Migration is not. */
3428 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
3429
3430 /* set the wire flags to return to client. */
3431 clid->flags = new->cl_exchange_flags;
3432}
3433
3434static bool client_has_openowners(struct nfs4_client *clp)
3435{
3436 struct nfs4_openowner *oo;
3437
3438 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
3439 if (!list_empty(&oo->oo_owner.so_stateids))
3440 return true;
3441 }
3442 return false;
3443}
3444
3445static bool client_has_state(struct nfs4_client *clp)
3446{
3447 return client_has_openowners(clp)
3448#ifdef CONFIG_NFSD_PNFS
3449 || !list_empty(&clp->cl_lo_states)
3450#endif
3451 || !list_empty(&clp->cl_delegations)
3452 || !list_empty(&clp->cl_sessions)
3453 || !list_empty(&clp->async_copies);
3454}
3455
3456static __be32 copy_impl_id(struct nfs4_client *clp,
3457 struct nfsd4_exchange_id *exid)
3458{
3459 if (!exid->nii_domain.data)
3460 return 0;
3461 xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
3462 if (!clp->cl_nii_domain.data)
3463 return nfserr_jukebox;
3464 xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
3465 if (!clp->cl_nii_name.data)
3466 return nfserr_jukebox;
3467 clp->cl_nii_time = exid->nii_time;
3468 return 0;
3469}
3470
3471__be32
3472nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3473 union nfsd4_op_u *u)
3474{
3475 struct nfsd4_exchange_id *exid = &u->exchange_id;
3476 struct nfs4_client *conf, *new;
3477 struct nfs4_client *unconf = NULL;
3478 __be32 status;
3479 char addr_str[INET6_ADDRSTRLEN];
3480 nfs4_verifier verf = exid->verifier;
3481 struct sockaddr *sa = svc_addr(rqstp);
3482 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
3483 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3484
3485 rpc_ntop(sa, addr_str, sizeof(addr_str));
3486 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
3487 "ip_addr=%s flags %x, spa_how %u\n",
3488 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
3489 addr_str, exid->flags, exid->spa_how);
3490
3491 if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
3492 return nfserr_inval;
3493
3494 new = create_client(exid->clname, rqstp, &verf);
3495 if (new == NULL)
3496 return nfserr_jukebox;
3497 status = copy_impl_id(new, exid);
3498 if (status)
3499 goto out_nolock;
3500
3501 switch (exid->spa_how) {
3502 case SP4_MACH_CRED:
3503 exid->spo_must_enforce[0] = 0;
3504 exid->spo_must_enforce[1] = (
3505 1 << (OP_BIND_CONN_TO_SESSION - 32) |
3506 1 << (OP_EXCHANGE_ID - 32) |
3507 1 << (OP_CREATE_SESSION - 32) |
3508 1 << (OP_DESTROY_SESSION - 32) |
3509 1 << (OP_DESTROY_CLIENTID - 32));
3510
3511 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
3512 1 << (OP_OPEN_DOWNGRADE) |
3513 1 << (OP_LOCKU) |
3514 1 << (OP_DELEGRETURN));
3515
3516 exid->spo_must_allow[1] &= (
3517 1 << (OP_TEST_STATEID - 32) |
3518 1 << (OP_FREE_STATEID - 32));
3519 if (!svc_rqst_integrity_protected(rqstp)) {
3520 status = nfserr_inval;
3521 goto out_nolock;
3522 }
3523 /*
3524 * Sometimes userspace doesn't give us a principal.
3525 * Which is a bug, really. Anyway, we can't enforce
3526 * MACH_CRED in that case, better to give up now:
3527 */
3528 if (!new->cl_cred.cr_principal &&
3529 !new->cl_cred.cr_raw_principal) {
3530 status = nfserr_serverfault;
3531 goto out_nolock;
3532 }
3533 new->cl_mach_cred = true;
3534 break;
3535 case SP4_NONE:
3536 break;
3537 default: /* checked by xdr code */
3538 WARN_ON_ONCE(1);
3539 fallthrough;
3540 case SP4_SSV:
3541 status = nfserr_encr_alg_unsupp;
3542 goto out_nolock;
3543 }
3544
3545 /* Cases below refer to rfc 5661 section 18.35.4: */
3546 spin_lock(&nn->client_lock);
3547 conf = find_confirmed_client_by_name(&exid->clname, nn);
3548 if (conf) {
3549 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
3550 bool verfs_match = same_verf(&verf, &conf->cl_verifier);
3551
3552 if (update) {
3553 if (!clp_used_exchangeid(conf)) { /* buggy client */
3554 status = nfserr_inval;
3555 goto out;
3556 }
3557 if (!nfsd4_mach_creds_match(conf, rqstp)) {
3558 status = nfserr_wrong_cred;
3559 goto out;
3560 }
3561 if (!creds_match) { /* case 9 */
3562 status = nfserr_perm;
3563 goto out;
3564 }
3565 if (!verfs_match) { /* case 8 */
3566 status = nfserr_not_same;
3567 goto out;
3568 }
3569 /* case 6 */
3570 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
3571 trace_nfsd_clid_confirmed_r(conf);
3572 goto out_copy;
3573 }
3574 if (!creds_match) { /* case 3 */
3575 if (client_has_state(conf)) {
3576 status = nfserr_clid_inuse;
3577 trace_nfsd_clid_cred_mismatch(conf, rqstp);
3578 goto out;
3579 }
3580 goto out_new;
3581 }
3582 if (verfs_match) { /* case 2 */
3583 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
3584 trace_nfsd_clid_confirmed_r(conf);
3585 goto out_copy;
3586 }
3587 /* case 5, client reboot */
3588 trace_nfsd_clid_verf_mismatch(conf, rqstp, &verf);
3589 conf = NULL;
3590 goto out_new;
3591 }
3592
3593 if (update) { /* case 7 */
3594 status = nfserr_noent;
3595 goto out;
3596 }
3597
3598 unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
3599 if (unconf) /* case 4, possible retry or client restart */
3600 unhash_client_locked(unconf);
3601
3602 /* case 1, new owner ID */
3603 trace_nfsd_clid_fresh(new);
3604
3605out_new:
3606 if (conf) {
3607 status = mark_client_expired_locked(conf);
3608 if (status)
3609 goto out;
3610 trace_nfsd_clid_replaced(&conf->cl_clientid);
3611 }
3612 new->cl_minorversion = cstate->minorversion;
3613 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
3614 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
3615
3616 /* Contrived initial CREATE_SESSION response */
3617 new->cl_cs_slot.sl_status = nfserr_seq_misordered;
3618
3619 add_to_unconfirmed(new);
3620 swap(new, conf);
3621out_copy:
3622 exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
3623 exid->clientid.cl_id = conf->cl_clientid.cl_id;
3624
3625 exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
3626 nfsd4_set_ex_flags(conf, exid);
3627
3628 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
3629 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
3630 status = nfs_ok;
3631
3632out:
3633 spin_unlock(&nn->client_lock);
3634out_nolock:
3635 if (new)
3636 expire_client(new);
3637 if (unconf) {
3638 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
3639 expire_client(unconf);
3640 }
3641 return status;
3642}
3643
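/*
 * Compare a SEQUENCE or CREATE_SESSION seqid against the slot's recorded
 * seqid: a duplicate of a request still in progress gets NFS4ERR_DELAY,
 * the expected next seqid succeeds, a duplicate of the last completed
 * request is answered from the replay cache, and anything else is
 * NFS4ERR_SEQ_MISORDERED.
 */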
3644static __be32
3645check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
3646{
3647 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
3648 slot_seqid);
3649
3650 /* The slot is in use, and no response has been sent. */
3651 if (slot_inuse) {
3652 if (seqid == slot_seqid)
3653 return nfserr_jukebox;
3654 else
3655 return nfserr_seq_misordered;
3656 }
3657 /* Note unsigned 32-bit arithmetic handles wraparound: */
3658 if (likely(seqid == slot_seqid + 1))
3659 return nfs_ok;
3660 if (seqid == slot_seqid)
3661 return nfserr_replay_cache;
3662 return nfserr_seq_misordered;
3663}
3664
3665/*
3666 * Cache the create session result in the client's single CREATE_SESSION
3667 * DRC slot by saving the nfsd4_create_session structure; sl_seqid has
3668 * already been set. Do this for solo or embedded create session operations.
3669 */
3670static void
3671nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
3672 struct nfsd4_clid_slot *slot, __be32 nfserr)
3673{
3674 slot->sl_status = nfserr;
3675 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
3676}
3677
3678static __be32
3679nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
3680 struct nfsd4_clid_slot *slot)
3681{
3682 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
3683 return slot->sl_status;
3684}
3685
3686#define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
3687 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
3688 1 + /* MIN tag is length with zero, only length */ \
3689 3 + /* version, opcount, opcode */ \
3690 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3691 /* seqid, slotID, slotID, cache */ \
3692 4 ) * sizeof(__be32))
3693
3694#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
3695 2 + /* verifier: AUTH_NULL, length 0 */\
3696 1 + /* status */ \
3697 1 + /* MIN tag is length with zero, only length */ \
3698 3 + /* opcount, opcode, opstatus*/ \
3699 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3700 /* seqid, slotID, slotID, slotID, status */ \
3701 5 ) * sizeof(__be32))
3702
3703static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
3704{
3705 u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
3706
3707 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
3708 return nfserr_toosmall;
3709 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
3710 return nfserr_toosmall;
3711 ca->headerpadsz = 0;
3712 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
3713 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
3714 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
3715 ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
3716 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
3717 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
3718 /*
3719 * Note that decreasing the slot size below the client's request may
3720 * make it difficult for the client to function correctly, whereas
3721 * decreasing the number of slots will (just?) affect
3722 * performance. When short on memory we therefore prefer to
3723 * decrease the number of slots instead of their size. Clients that
3724 * request larger slots than they need will get poor results.
3725 * Note that we always allow at least one slot, because our
3726 * accounting is soft and provides no guarantees either way.
3727 */
3728 ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
3729
3730 return nfs_ok;
3731}
3732
3733/*
3734 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
3735 * These are based on similar macros in linux/sunrpc/msg_prot.h .
3736 */
3737#define RPC_MAX_HEADER_WITH_AUTH_SYS \
3738 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
3739
3740#define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
3741 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
3742
3743#define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
3744 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
3745#define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
3746 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
3747 sizeof(__be32))
3748
3749static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
3750{
3751 ca->headerpadsz = 0;
3752
3753 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
3754 return nfserr_toosmall;
3755 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
3756 return nfserr_toosmall;
3757 ca->maxresp_cached = 0;
3758 if (ca->maxops < 2)
3759 return nfserr_toosmall;
3760
3761 return nfs_ok;
3762}
3763
3764static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
3765{
3766 switch (cbs->flavor) {
3767 case RPC_AUTH_NULL:
3768 case RPC_AUTH_UNIX:
3769 return nfs_ok;
3770 default:
3771 /*
3772 * GSS case: the spec doesn't allow us to return this
3773 * error. But it also doesn't allow us not to support
3774 * GSS.
3775 * I'd rather this fail hard than return some error the
3776 * client might think it can already handle:
3777 */
3778 return nfserr_encr_alg_unsupp;
3779 }
3780}
3781
3782__be32
3783nfsd4_create_session(struct svc_rqst *rqstp,
3784 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3785{
3786 struct nfsd4_create_session *cr_ses = &u->create_session;
3787 struct sockaddr *sa = svc_addr(rqstp);
3788 struct nfs4_client *conf, *unconf;
3789 struct nfsd4_clid_slot *cs_slot;
3790 struct nfs4_client *old = NULL;
3791 struct nfsd4_session *new;
3792 struct nfsd4_conn *conn;
3793 __be32 status = 0;
3794 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3795
3796 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
3797 return nfserr_inval;
3798 status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
3799 if (status)
3800 return status;
3801 status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
3802 if (status)
3803 return status;
3804 status = check_backchannel_attrs(&cr_ses->back_channel);
3805 if (status)
3806 goto out_release_drc_mem;
3807 status = nfserr_jukebox;
3808 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
3809 if (!new)
3810 goto out_release_drc_mem;
3811 conn = alloc_conn_from_crses(rqstp, cr_ses);
3812 if (!conn)
3813 goto out_free_session;
3814
3815 spin_lock(&nn->client_lock);
3816
3817 /* RFC 8881 Section 18.36.4 Phase 1: Client record look-up. */
3818 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
3819 conf = find_confirmed_client(&cr_ses->clientid, true, nn);
3820 if (!conf && !unconf) {
3821 status = nfserr_stale_clientid;
3822 goto out_free_conn;
3823 }
3824
3825 /* RFC 8881 Section 18.36.4 Phase 2: Sequence ID processing. */
3826 if (conf)
3827 cs_slot = &conf->cl_cs_slot;
3828 else
3829 cs_slot = &unconf->cl_cs_slot;
3830 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3831 switch (status) {
3832 case nfs_ok:
3833 cs_slot->sl_seqid++;
3834 cr_ses->seqid = cs_slot->sl_seqid;
3835 break;
3836 case nfserr_replay_cache:
3837 status = nfsd4_replay_create_session(cr_ses, cs_slot);
3838 fallthrough;
3839 case nfserr_jukebox:
3840 /* The server MUST NOT cache NFS4ERR_DELAY */
3841 goto out_free_conn;
3842 default:
3843 goto out_cache_error;
3844 }
3845
3846 /* RFC 8881 Section 18.36.4 Phase 3: Client ID confirmation. */
3847 if (conf) {
3848 status = nfserr_wrong_cred;
3849 if (!nfsd4_mach_creds_match(conf, rqstp))
3850 goto out_cache_error;
3851 } else {
3852 status = nfserr_clid_inuse;
3853 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
3854 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
3855 trace_nfsd_clid_cred_mismatch(unconf, rqstp);
3856 goto out_cache_error;
3857 }
3858 status = nfserr_wrong_cred;
3859 if (!nfsd4_mach_creds_match(unconf, rqstp))
3860 goto out_cache_error;
3861 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3862 if (old) {
3863 status = mark_client_expired_locked(old);
3864 if (status)
3865 goto out_expired_error;
3866 trace_nfsd_clid_replaced(&old->cl_clientid);
3867 }
3868 move_to_confirmed(unconf);
3869 conf = unconf;
3870 }
3871
3872 /* RFC 8881 Section 18.36.4 Phase 4: Session creation. */
3873 status = nfs_ok;
3874 /* Persistent sessions are not supported */
3875 cr_ses->flags &= ~SESSION4_PERSIST;
3876 /* Upshifting from TCP to RDMA is not supported */
3877 cr_ses->flags &= ~SESSION4_RDMA;
3878
3879 init_session(rqstp, new, conf, cr_ses);
3880 nfsd4_get_session_locked(new);
3881
3882 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
3883 NFS4_MAX_SESSIONID_LEN);
3884
3885 /* cache solo and embedded create sessions under the client_lock */
3886 nfsd4_cache_create_session(cr_ses, cs_slot, status);
3887 spin_unlock(&nn->client_lock);
3888 if (conf == unconf)
3889 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
3890 /* init connection and backchannel */
3891 nfsd4_init_conn(rqstp, conn, new);
3892 nfsd4_put_session(new);
3893 if (old)
3894 expire_client(old);
3895 return status;
3896
3897out_expired_error:
3898 old = NULL;
3899 /*
3900 * Revert the slot seq_nr change so the server will process
3901 * the client's resend instead of returning a cached response.
3902 */
3903 if (status == nfserr_jukebox) {
3904 cs_slot->sl_seqid--;
3905 cr_ses->seqid = cs_slot->sl_seqid;
3906 goto out_free_conn;
3907 }
3908out_cache_error:
3909 nfsd4_cache_create_session(cr_ses, cs_slot, status);
3910out_free_conn:
3911 spin_unlock(&nn->client_lock);
3912 free_conn(conn);
3913 if (old)
3914 expire_client(old);
3915out_free_session:
3916 __free_session(new);
3917out_release_drc_mem:
3918 nfsd4_put_drc_mem(&cr_ses->fore_channel);
3919 return status;
3920}
3921
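/*
 * Map a requested BIND_CONN_TO_SESSION direction onto what the server
 * will grant: FORE and BACK are honored as-is, the *_OR_BOTH variants
 * are upgraded to BOTH, and anything else is NFS4ERR_INVAL.
 */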
3922static __be32 nfsd4_map_bcts_dir(u32 *dir)
3923{
3924 switch (*dir) {
3925 case NFS4_CDFC4_FORE:
3926 case NFS4_CDFC4_BACK:
3927 return nfs_ok;
3928 case NFS4_CDFC4_FORE_OR_BOTH:
3929 case NFS4_CDFC4_BACK_OR_BOTH:
3930 *dir = NFS4_CDFC4_BOTH;
3931 return nfs_ok;
3932 }
3933 return nfserr_inval;
3934}
3935
3936__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
3937 struct nfsd4_compound_state *cstate,
3938 union nfsd4_op_u *u)
3939{
3940 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
3941 struct nfsd4_session *session = cstate->session;
3942 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3943 __be32 status;
3944
3945 status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
3946 if (status)
3947 return status;
3948 spin_lock(&nn->client_lock);
3949 session->se_cb_prog = bc->bc_cb_program;
3950 session->se_cb_sec = bc->bc_cb_sec;
3951 spin_unlock(&nn->client_lock);
3952
3953 nfsd4_probe_callback(session->se_client);
3954
3955 return nfs_ok;
3956}
3957
3958static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
3959{
3960 struct nfsd4_conn *c;
3961
3962 list_for_each_entry(c, &s->se_conns, cn_persession) {
3963 if (c->cn_xprt == xpt) {
3964 return c;
3965 }
3966 }
3967 return NULL;
3968}
3969
3970static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
3971 struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
3972{
3973 struct nfs4_client *clp = session->se_client;
3974 struct svc_xprt *xpt = rqst->rq_xprt;
3975 struct nfsd4_conn *c;
3976 __be32 status;
3977
3978 /* Following the last paragraph of RFC 5661 Section 18.34.3: */
3979 spin_lock(&clp->cl_lock);
3980 c = __nfsd4_find_conn(xpt, session);
3981 if (!c)
3982 status = nfserr_noent;
3983 else if (req == c->cn_flags)
3984 status = nfs_ok;
3985 else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
3986 c->cn_flags != NFS4_CDFC4_BACK)
3987 status = nfs_ok;
3988 else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
3989 c->cn_flags != NFS4_CDFC4_FORE)
3990 status = nfs_ok;
3991 else
3992 status = nfserr_inval;
3993 spin_unlock(&clp->cl_lock);
3994 if (status == nfs_ok && conn)
3995 *conn = c;
3996 return status;
3997}
3998
3999__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
4000 struct nfsd4_compound_state *cstate,
4001 union nfsd4_op_u *u)
4002{
4003 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
4004 __be32 status;
4005 struct nfsd4_conn *conn;
4006 struct nfsd4_session *session;
4007 struct net *net = SVC_NET(rqstp);
4008 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4009
4010 if (!nfsd4_last_compound_op(rqstp))
4011 return nfserr_not_only_op;
4012 spin_lock(&nn->client_lock);
4013 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
4014 spin_unlock(&nn->client_lock);
4015 if (!session)
4016 goto out_no_session;
4017 status = nfserr_wrong_cred;
4018 if (!nfsd4_mach_creds_match(session->se_client, rqstp))
4019 goto out;
4020 status = nfsd4_match_existing_connection(rqstp, session,
4021 bcts->dir, &conn);
4022 if (status == nfs_ok) {
4023 if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
4024 bcts->dir == NFS4_CDFC4_BACK)
4025 conn->cn_flags |= NFS4_CDFC4_BACK;
4026 nfsd4_probe_callback(session->se_client);
4027 goto out;
4028 }
4029 if (status == nfserr_inval)
4030 goto out;
4031 status = nfsd4_map_bcts_dir(&bcts->dir);
4032 if (status)
4033 goto out;
4034 conn = alloc_conn(rqstp, bcts->dir);
4035 status = nfserr_jukebox;
4036 if (!conn)
4037 goto out;
4038 nfsd4_init_conn(rqstp, conn, session);
4039 status = nfs_ok;
4040out:
4041 nfsd4_put_session(session);
4042out_no_session:
4043 return status;
4044}
4045
4046static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
4047{
4048 if (!cstate->session)
4049 return false;
4050 return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
4051}
4052
4053__be32
4054nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
4055 union nfsd4_op_u *u)
4056{
4057 struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
4058 struct nfsd4_session *ses;
4059 __be32 status;
4060 int ref_held_by_me = 0;
4061 struct net *net = SVC_NET(r);
4062 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4063
4064 status = nfserr_not_only_op;
4065 if (nfsd4_compound_in_session(cstate, sessionid)) {
4066 if (!nfsd4_last_compound_op(r))
4067 goto out;
4068 ref_held_by_me++;
4069 }
4070 dump_sessionid(__func__, sessionid);
4071 spin_lock(&nn->client_lock);
4072 ses = find_in_sessionid_hashtbl(sessionid, net, &status);
4073 if (!ses)
4074 goto out_client_lock;
4075 status = nfserr_wrong_cred;
4076 if (!nfsd4_mach_creds_match(ses->se_client, r))
4077 goto out_put_session;
4078 status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
4079 if (status)
4080 goto out_put_session;
4081 unhash_session(ses);
4082 spin_unlock(&nn->client_lock);
4083
4084 nfsd4_probe_callback_sync(ses->se_client);
4085
4086 spin_lock(&nn->client_lock);
4087 status = nfs_ok;
4088out_put_session:
4089 nfsd4_put_session_locked(ses);
4090out_client_lock:
4091 spin_unlock(&nn->client_lock);
4092out:
4093 return status;
4094}
4095
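/*
 * Make sure the connection this SEQUENCE arrived on is bound to the
 * session: reuse an existing binding if one is found, otherwise hash in
 * the new conn -- unless the client requires machine-credential
 * enforcement (cl_mach_cred), in which case an unbound connection is
 * refused with NFS4ERR_CONN_NOT_BOUND_TO_SESSION.
 */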
4096static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
4097{
4098 struct nfs4_client *clp = ses->se_client;
4099 struct nfsd4_conn *c;
4100 __be32 status = nfs_ok;
4101 int ret;
4102
4103 spin_lock(&clp->cl_lock);
4104 c = __nfsd4_find_conn(new->cn_xprt, ses);
4105 if (c)
4106 goto out_free;
4107 status = nfserr_conn_not_bound_to_session;
4108 if (clp->cl_mach_cred)
4109 goto out_free;
4110 __nfsd4_hash_conn(new, ses);
4111 spin_unlock(&clp->cl_lock);
4112 ret = nfsd4_register_conn(new);
4113 if (ret)
4114 /* oops; xprt is already down: */
4115 nfsd4_conn_lost(&new->cn_xpt_user);
4116 return nfs_ok;
4117out_free:
4118 spin_unlock(&clp->cl_lock);
4119 free_conn(new);
4120 return status;
4121}
4122
4123static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
4124{
4125 struct nfsd4_compoundargs *args = rqstp->rq_argp;
4126
4127 return args->opcnt > session->se_fchannel.maxops;
4128}
4129
4130static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
4131 struct nfsd4_session *session)
4132{
4133 struct xdr_buf *xb = &rqstp->rq_arg;
4134
4135 return xb->len > session->se_fchannel.maxreq_sz;
4136}
4137
4138static bool replay_matches_cache(struct svc_rqst *rqstp,
4139 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
4140{
4141 struct nfsd4_compoundargs *argp = rqstp->rq_argp;
4142
4143 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
4144 (bool)seq->cachethis)
4145 return false;
4146 /*
4147 * If there's an error then the reply can have fewer ops than
4148 * the call.
4149 */
4150 if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
4151 return false;
4152 /*
4153 * But if we cached a reply with *more* ops than the call you're
4154 * sending us now, then this new call is clearly not really a
4155 * replay of the old one:
4156 */
4157 if (slot->sl_opcnt > argp->opcnt)
4158 return false;
4159 /* This is the only check explicitly called by spec: */
4160 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
4161 return false;
4162 /*
4163 * There may be more comparisons we could actually do, but the
4164 * spec doesn't require us to catch every case where the calls
4165 * don't match (that would require caching the call as well as
4166 * the reply), so we don't bother.
4167 */
4168 return true;
4169}
4170
4171__be32
4172nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4173 union nfsd4_op_u *u)
4174{
4175 struct nfsd4_sequence *seq = &u->sequence;
4176 struct nfsd4_compoundres *resp = rqstp->rq_resp;
4177 struct xdr_stream *xdr = resp->xdr;
4178 struct nfsd4_session *session;
4179 struct nfs4_client *clp;
4180 struct nfsd4_slot *slot;
4181 struct nfsd4_conn *conn;
4182 __be32 status;
4183 int buflen;
4184 struct net *net = SVC_NET(rqstp);
4185 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4186
4187 if (resp->opcnt != 1)
4188 return nfserr_sequence_pos;
4189
4190 /*
4191 * Will be either used or freed by nfsd4_sequence_check_conn
4192 * below.
4193 */
4194 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
4195 if (!conn)
4196 return nfserr_jukebox;
4197
4198 spin_lock(&nn->client_lock);
4199 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
4200 if (!session)
4201 goto out_no_session;
4202 clp = session->se_client;
4203
4204 status = nfserr_too_many_ops;
4205 if (nfsd4_session_too_many_ops(rqstp, session))
4206 goto out_put_session;
4207
4208 status = nfserr_req_too_big;
4209 if (nfsd4_request_too_big(rqstp, session))
4210 goto out_put_session;
4211
4212 status = nfserr_badslot;
4213 if (seq->slotid >= session->se_fchannel.maxreqs)
4214 goto out_put_session;
4215
4216 slot = session->se_slots[seq->slotid];
4217 dprintk("%s: slotid %d\n", __func__, seq->slotid);
4218
	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs, which is then used to encode
	 * both sr_highest_slotid and sr_target_slotid */
4222 seq->maxslots = session->se_fchannel.maxreqs;
4223
4224 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
4225 slot->sl_flags & NFSD4_SLOT_INUSE);
4226 if (status == nfserr_replay_cache) {
4227 status = nfserr_seq_misordered;
4228 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
4229 goto out_put_session;
4230 status = nfserr_seq_false_retry;
4231 if (!replay_matches_cache(rqstp, seq, slot))
4232 goto out_put_session;
4233 cstate->slot = slot;
4234 cstate->session = session;
4235 cstate->clp = clp;
4236 /* Return the cached reply status and set cstate->status
4237 * for nfsd4_proc_compound processing */
4238 status = nfsd4_replay_cache_entry(resp, seq);
4239 cstate->status = nfserr_replay_cache;
4240 goto out;
4241 }
4242 if (status)
4243 goto out_put_session;
4244
4245 status = nfsd4_sequence_check_conn(conn, session);
4246 conn = NULL;
4247 if (status)
4248 goto out_put_session;
4249
4250 buflen = (seq->cachethis) ?
4251 session->se_fchannel.maxresp_cached :
4252 session->se_fchannel.maxresp_sz;
4253 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
4254 nfserr_rep_too_big;
4255 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
4256 goto out_put_session;
4257 svc_reserve(rqstp, buflen);
4258
4259 status = nfs_ok;
4260 /* Success! bump slot seqid */
4261 slot->sl_seqid = seq->seqid;
4262 slot->sl_flags |= NFSD4_SLOT_INUSE;
4263 if (seq->cachethis)
4264 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
4265 else
4266 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
4267
4268 cstate->slot = slot;
4269 cstate->session = session;
4270 cstate->clp = clp;
4271
4272out:
4273 switch (clp->cl_cb_state) {
4274 case NFSD4_CB_DOWN:
4275 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
4276 break;
4277 case NFSD4_CB_FAULT:
4278 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
4279 break;
4280 default:
4281 seq->status_flags = 0;
4282 }
4283 if (!list_empty(&clp->cl_revoked))
4284 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
4285 if (atomic_read(&clp->cl_admin_revoked))
4286 seq->status_flags |= SEQ4_STATUS_ADMIN_STATE_REVOKED;
4287 trace_nfsd_seq4_status(rqstp, seq);
4288out_no_session:
4289 if (conn)
4290 free_conn(conn);
4291 spin_unlock(&nn->client_lock);
4292 return status;
4293out_put_session:
4294 nfsd4_put_session_locked(session);
4295 goto out_no_session;
4296}
4297
4298void
4299nfsd4_sequence_done(struct nfsd4_compoundres *resp)
4300{
4301 struct nfsd4_compound_state *cs = &resp->cstate;
4302
4303 if (nfsd4_has_session(cs)) {
4304 if (cs->status != nfserr_replay_cache) {
4305 nfsd4_store_cache_entry(resp);
4306 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
4307 }
4308 /* Drop session reference that was taken in nfsd4_sequence() */
4309 nfsd4_put_session(cs->session);
4310 } else if (cs->clp)
4311 put_client_renew(cs->clp);
4312}
4313
4314__be32
4315nfsd4_destroy_clientid(struct svc_rqst *rqstp,
4316 struct nfsd4_compound_state *cstate,
4317 union nfsd4_op_u *u)
4318{
4319 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
4320 struct nfs4_client *conf, *unconf;
4321 struct nfs4_client *clp = NULL;
4322 __be32 status = 0;
4323 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4324
4325 spin_lock(&nn->client_lock);
4326 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
4327 conf = find_confirmed_client(&dc->clientid, true, nn);
4328 WARN_ON_ONCE(conf && unconf);
4329
4330 if (conf) {
4331 if (client_has_state(conf)) {
4332 status = nfserr_clientid_busy;
4333 goto out;
4334 }
4335 status = mark_client_expired_locked(conf);
4336 if (status)
4337 goto out;
4338 clp = conf;
4339 } else if (unconf)
4340 clp = unconf;
4341 else {
4342 status = nfserr_stale_clientid;
4343 goto out;
4344 }
4345 if (!nfsd4_mach_creds_match(clp, rqstp)) {
4346 clp = NULL;
4347 status = nfserr_wrong_cred;
4348 goto out;
4349 }
4350 trace_nfsd_clid_destroyed(&clp->cl_clientid);
4351 unhash_client_locked(clp);
4352out:
4353 spin_unlock(&nn->client_lock);
4354 if (clp)
4355 expire_client(clp);
4356 return status;
4357}
4358
4359__be32
4360nfsd4_reclaim_complete(struct svc_rqst *rqstp,
4361 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
4362{
4363 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
4364 struct nfs4_client *clp = cstate->clp;
4365 __be32 status = 0;
4366
4367 if (rc->rca_one_fs) {
4368 if (!cstate->current_fh.fh_dentry)
4369 return nfserr_nofilehandle;
4370 /*
4371 * We don't take advantage of the rca_one_fs case.
4372 * That's OK, it's optional, we can safely ignore it.
4373 */
4374 return nfs_ok;
4375 }
4376
4377 status = nfserr_complete_already;
4378 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
4379 goto out;
4380
4381 status = nfserr_stale_clientid;
4382 if (is_client_expired(clp))
4383 /*
4384 * The following error isn't really legal.
		 * But we only get here if the client has just explicitly
		 * destroyed itself.  Surely it no longer cares what
4387 * error it gets back on an operation for the dead
4388 * client.
4389 */
4390 goto out;
4391
4392 status = nfs_ok;
4393 trace_nfsd_clid_reclaim_complete(&clp->cl_clientid);
4394 nfsd4_client_record_create(clp);
4395 inc_reclaim_complete(clp);
4396out:
4397 return status;
4398}
4399
4400__be32
4401nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4402 union nfsd4_op_u *u)
4403{
4404 struct nfsd4_setclientid *setclid = &u->setclientid;
4405 struct xdr_netobj clname = setclid->se_name;
4406 nfs4_verifier clverifier = setclid->se_verf;
4407 struct nfs4_client *conf, *new;
4408 struct nfs4_client *unconf = NULL;
4409 __be32 status;
4410 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4411
4412 new = create_client(clname, rqstp, &clverifier);
4413 if (new == NULL)
4414 return nfserr_jukebox;
4415 spin_lock(&nn->client_lock);
4416 conf = find_confirmed_client_by_name(&clname, nn);
4417 if (conf && client_has_state(conf)) {
4418 status = nfserr_clid_inuse;
4419 if (clp_used_exchangeid(conf))
4420 goto out;
4421 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
4422 trace_nfsd_clid_cred_mismatch(conf, rqstp);
4423 goto out;
4424 }
4425 }
4426 unconf = find_unconfirmed_client_by_name(&clname, nn);
4427 if (unconf)
4428 unhash_client_locked(unconf);
4429 if (conf) {
4430 if (same_verf(&conf->cl_verifier, &clverifier)) {
4431 copy_clid(new, conf);
4432 gen_confirm(new, nn);
4433 } else
4434 trace_nfsd_clid_verf_mismatch(conf, rqstp,
4435 &clverifier);
4436 } else
4437 trace_nfsd_clid_fresh(new);
4438 new->cl_minorversion = 0;
4439 gen_callback(new, setclid, rqstp);
4440 add_to_unconfirmed(new);
4441 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
4442 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
4443 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
4444 new = NULL;
4445 status = nfs_ok;
4446out:
4447 spin_unlock(&nn->client_lock);
4448 if (new)
4449 free_client(new);
4450 if (unconf) {
4451 trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
4452 expire_client(unconf);
4453 }
4454 return status;
4455}
4456
4457__be32
4458nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
4459 struct nfsd4_compound_state *cstate,
4460 union nfsd4_op_u *u)
4461{
4462 struct nfsd4_setclientid_confirm *setclientid_confirm =
4463 &u->setclientid_confirm;
4464 struct nfs4_client *conf, *unconf;
4465 struct nfs4_client *old = NULL;
4466 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
4467 clientid_t * clid = &setclientid_confirm->sc_clientid;
4468 __be32 status;
4469 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4470
4471 if (STALE_CLIENTID(clid, nn))
4472 return nfserr_stale_clientid;
4473
4474 spin_lock(&nn->client_lock);
4475 conf = find_confirmed_client(clid, false, nn);
4476 unconf = find_unconfirmed_client(clid, false, nn);
4477 /*
	 * We try hard to give out unique clientids, so if we get an
4479 * attempt to confirm the same clientid with a different cred,
4480 * the client may be buggy; this should never happen.
4481 *
4482 * Nevertheless, RFC 7530 recommends INUSE for this case:
4483 */
4484 status = nfserr_clid_inuse;
4485 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
4486 trace_nfsd_clid_cred_mismatch(unconf, rqstp);
4487 goto out;
4488 }
4489 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
4490 trace_nfsd_clid_cred_mismatch(conf, rqstp);
4491 goto out;
4492 }
4493 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
4494 if (conf && same_verf(&confirm, &conf->cl_confirm)) {
4495 status = nfs_ok;
4496 } else
4497 status = nfserr_stale_clientid;
4498 goto out;
4499 }
4500 status = nfs_ok;
4501 if (conf) {
4502 old = unconf;
4503 unhash_client_locked(old);
4504 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
4505 } else {
4506 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
4507 if (old) {
4508 status = nfserr_clid_inuse;
4509 if (client_has_state(old)
4510 && !same_creds(&unconf->cl_cred,
4511 &old->cl_cred)) {
4512 old = NULL;
4513 goto out;
4514 }
4515 status = mark_client_expired_locked(old);
4516 if (status) {
4517 old = NULL;
4518 goto out;
4519 }
4520 trace_nfsd_clid_replaced(&old->cl_clientid);
4521 }
4522 move_to_confirmed(unconf);
4523 conf = unconf;
4524 }
4525 get_client_locked(conf);
4526 spin_unlock(&nn->client_lock);
4527 if (conf == unconf)
4528 fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
4529 nfsd4_probe_callback(conf);
4530 spin_lock(&nn->client_lock);
4531 put_client_renew_locked(conf);
4532out:
4533 spin_unlock(&nn->client_lock);
4534 if (old)
4535 expire_client(old);
4536 return status;
4537}
4538
4539static struct nfs4_file *nfsd4_alloc_file(void)
4540{
4541 return kmem_cache_alloc(file_slab, GFP_KERNEL);
4542}
4543
4544/* OPEN Share state helper functions */
4545
4546static void nfsd4_file_init(const struct svc_fh *fh, struct nfs4_file *fp)
4547{
4548 refcount_set(&fp->fi_ref, 1);
4549 spin_lock_init(&fp->fi_lock);
4550 INIT_LIST_HEAD(&fp->fi_stateids);
4551 INIT_LIST_HEAD(&fp->fi_delegations);
4552 INIT_LIST_HEAD(&fp->fi_clnt_odstate);
4553 fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle);
4554 fp->fi_deleg_file = NULL;
4555 fp->fi_had_conflict = false;
4556 fp->fi_share_deny = 0;
4557 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
4558 memset(fp->fi_access, 0, sizeof(fp->fi_access));
4559 fp->fi_aliased = false;
4560 fp->fi_inode = d_inode(fh->fh_dentry);
4561#ifdef CONFIG_NFSD_PNFS
4562 INIT_LIST_HEAD(&fp->fi_lo_states);
4563 atomic_set(&fp->fi_lo_recalls, 0);
4564#endif
4565}
4566
4567void
4568nfsd4_free_slabs(void)
4569{
4570 kmem_cache_destroy(client_slab);
4571 kmem_cache_destroy(openowner_slab);
4572 kmem_cache_destroy(lockowner_slab);
4573 kmem_cache_destroy(file_slab);
4574 kmem_cache_destroy(stateid_slab);
4575 kmem_cache_destroy(deleg_slab);
4576 kmem_cache_destroy(odstate_slab);
4577}
4578
4579int
4580nfsd4_init_slabs(void)
4581{
4582 client_slab = KMEM_CACHE(nfs4_client, 0);
4583 if (client_slab == NULL)
4584 goto out;
4585 openowner_slab = KMEM_CACHE(nfs4_openowner, 0);
4586 if (openowner_slab == NULL)
4587 goto out_free_client_slab;
4588 lockowner_slab = KMEM_CACHE(nfs4_lockowner, 0);
4589 if (lockowner_slab == NULL)
4590 goto out_free_openowner_slab;
4591 file_slab = KMEM_CACHE(nfs4_file, 0);
4592 if (file_slab == NULL)
4593 goto out_free_lockowner_slab;
4594 stateid_slab = KMEM_CACHE(nfs4_ol_stateid, 0);
4595 if (stateid_slab == NULL)
4596 goto out_free_file_slab;
4597 deleg_slab = KMEM_CACHE(nfs4_delegation, 0);
4598 if (deleg_slab == NULL)
4599 goto out_free_stateid_slab;
4600 odstate_slab = KMEM_CACHE(nfs4_clnt_odstate, 0);
4601 if (odstate_slab == NULL)
4602 goto out_free_deleg_slab;
4603 return 0;
4604
4605out_free_deleg_slab:
4606 kmem_cache_destroy(deleg_slab);
4607out_free_stateid_slab:
4608 kmem_cache_destroy(stateid_slab);
4609out_free_file_slab:
4610 kmem_cache_destroy(file_slab);
4611out_free_lockowner_slab:
4612 kmem_cache_destroy(lockowner_slab);
4613out_free_openowner_slab:
4614 kmem_cache_destroy(openowner_slab);
4615out_free_client_slab:
4616 kmem_cache_destroy(client_slab);
4617out:
4618 return -ENOMEM;
4619}
4620
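/*
 * Shrinker hooks: the count callback reports courtesy clients (or, if
 * there are none, outstanding delegations) and schedules
 * nfsd_shrinker_work; the reclaim itself happens asynchronously in that
 * work item, so the scan callback always returns SHRINK_STOP.
 */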
4621static unsigned long
4622nfsd4_state_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
4623{
4624 int count;
4625 struct nfsd_net *nn = shrink->private_data;
4626
4627 count = atomic_read(&nn->nfsd_courtesy_clients);
4628 if (!count)
4629 count = atomic_long_read(&num_delegations);
4630 if (count)
4631 queue_work(laundry_wq, &nn->nfsd_shrinker_work);
4632 return (unsigned long)count;
4633}
4634
4635static unsigned long
4636nfsd4_state_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
4637{
4638 return SHRINK_STOP;
4639}
4640
4641void
4642nfsd4_init_leases_net(struct nfsd_net *nn)
4643{
4644 struct sysinfo si;
4645 u64 max_clients;
4646
4647 nn->nfsd4_lease = 90; /* default lease time */
4648 nn->nfsd4_grace = 90;
4649 nn->somebody_reclaimed = false;
4650 nn->track_reclaim_completes = false;
4651 nn->clverifier_counter = get_random_u32();
4652 nn->clientid_base = get_random_u32();
4653 nn->clientid_counter = nn->clientid_base + 1;
4654 nn->s2s_cp_cl_id = nn->clientid_counter++;
4655
4656 atomic_set(&nn->nfs4_client_count, 0);
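	/*
	 * Scale the default limit on the number of v4 clients with the
	 * amount of memory: NFS4_CLIENTS_PER_GB for each GiB of totalram,
	 * with that same value as the floor on small systems.
	 */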
4657 si_meminfo(&si);
4658 max_clients = (u64)si.totalram * si.mem_unit / (1024 * 1024 * 1024);
4659 max_clients *= NFS4_CLIENTS_PER_GB;
4660 nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB);
4661
4662 atomic_set(&nn->nfsd_courtesy_clients, 0);
4663}
4664
4665static void init_nfs4_replay(struct nfs4_replay *rp)
4666{
4667 rp->rp_status = nfserr_serverfault;
4668 rp->rp_buflen = 0;
4669 rp->rp_buf = rp->rp_ibuf;
4670 mutex_init(&rp->rp_mutex);
4671}
4672
4673static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
4674 struct nfs4_stateowner *so)
4675{
4676 if (!nfsd4_has_session(cstate)) {
4677 mutex_lock(&so->so_replay.rp_mutex);
4678 cstate->replay_owner = nfs4_get_stateowner(so);
4679 }
4680}
4681
4682void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
4683{
4684 struct nfs4_stateowner *so = cstate->replay_owner;
4685
4686 if (so != NULL) {
4687 cstate->replay_owner = NULL;
4688 mutex_unlock(&so->so_replay.rp_mutex);
4689 nfs4_put_stateowner(so);
4690 }
4691}
4692
4693static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
4694{
4695 struct nfs4_stateowner *sop;
4696
4697 sop = kmem_cache_alloc(slab, GFP_KERNEL);
4698 if (!sop)
4699 return NULL;
4700
4701 xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
4702 if (!sop->so_owner.data) {
4703 kmem_cache_free(slab, sop);
4704 return NULL;
4705 }
4706
4707 INIT_LIST_HEAD(&sop->so_stateids);
4708 sop->so_client = clp;
4709 init_nfs4_replay(&sop->so_replay);
4710 atomic_set(&sop->so_count, 1);
4711 return sop;
4712}
4713
4714static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
4715{
4716 lockdep_assert_held(&clp->cl_lock);
4717
4718 list_add(&oo->oo_owner.so_strhash,
4719 &clp->cl_ownerstr_hashtbl[strhashval]);
4720 list_add(&oo->oo_perclient, &clp->cl_openowners);
4721}
4722
4723static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
4724{
4725 unhash_openowner_locked(openowner(so));
4726}
4727
4728static void nfs4_free_openowner(struct nfs4_stateowner *so)
4729{
4730 struct nfs4_openowner *oo = openowner(so);
4731
4732 kmem_cache_free(openowner_slab, oo);
4733}
4734
4735static const struct nfs4_stateowner_operations openowner_ops = {
4736 .so_unhash = nfs4_unhash_openowner,
4737 .so_free = nfs4_free_openowner,
4738};
4739
4740static struct nfs4_ol_stateid *
4741nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4742{
4743 struct nfs4_ol_stateid *local, *ret = NULL;
4744 struct nfs4_openowner *oo = open->op_openowner;
4745
4746 lockdep_assert_held(&fp->fi_lock);
4747
4748 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
4749 /* ignore lock owners */
4750 if (local->st_stateowner->so_is_open_owner == 0)
4751 continue;
4752 if (local->st_stateowner != &oo->oo_owner)
4753 continue;
4754 if (local->st_stid.sc_type == SC_TYPE_OPEN &&
4755 !local->st_stid.sc_status) {
4756 ret = local;
4757 refcount_inc(&ret->st_stid.sc_count);
4758 break;
4759 }
4760 }
4761 return ret;
4762}
4763
4764static void nfsd4_drop_revoked_stid(struct nfs4_stid *s)
4765 __releases(&s->sc_client->cl_lock)
4766{
4767 struct nfs4_client *cl = s->sc_client;
4768 LIST_HEAD(reaplist);
4769 struct nfs4_ol_stateid *stp;
4770 struct nfs4_delegation *dp;
4771 bool unhashed;
4772
4773 switch (s->sc_type) {
4774 case SC_TYPE_OPEN:
4775 stp = openlockstateid(s);
4776 if (unhash_open_stateid(stp, &reaplist))
4777 put_ol_stateid_locked(stp, &reaplist);
4778 spin_unlock(&cl->cl_lock);
4779 free_ol_stateid_reaplist(&reaplist);
4780 break;
4781 case SC_TYPE_LOCK:
4782 stp = openlockstateid(s);
4783 unhashed = unhash_lock_stateid(stp);
4784 spin_unlock(&cl->cl_lock);
4785 if (unhashed)
4786 nfs4_put_stid(s);
4787 break;
4788 case SC_TYPE_DELEG:
4789 dp = delegstateid(s);
4790 list_del_init(&dp->dl_recall_lru);
4791 spin_unlock(&cl->cl_lock);
4792 nfs4_put_stid(s);
4793 break;
4794 default:
4795 spin_unlock(&cl->cl_lock);
4796 }
4797}
4798
4799static void nfsd40_drop_revoked_stid(struct nfs4_client *cl,
4800 stateid_t *stid)
4801{
4802 /* NFSv4.0 has no way for the client to tell the server
4803 * that it can forget an admin-revoked stateid.
4804 * So we keep it around until the first time that the
4805 * client uses it, and drop it the first time
4806 * nfserr_admin_revoked is returned.
4807 * For v4.1 and later we wait until explicitly told
4808 * to free the stateid.
4809 */
4810 if (cl->cl_minorversion == 0) {
4811 struct nfs4_stid *st;
4812
4813 spin_lock(&cl->cl_lock);
4814 st = find_stateid_locked(cl, stid);
4815 if (st)
4816 nfsd4_drop_revoked_stid(st);
4817 else
4818 spin_unlock(&cl->cl_lock);
4819 }
4820}
4821
4822static __be32
4823nfsd4_verify_open_stid(struct nfs4_stid *s)
4824{
4825 __be32 ret = nfs_ok;
4826
4827 if (s->sc_status & SC_STATUS_ADMIN_REVOKED)
4828 ret = nfserr_admin_revoked;
4829 else if (s->sc_status & SC_STATUS_REVOKED)
4830 ret = nfserr_deleg_revoked;
4831 else if (s->sc_status & SC_STATUS_CLOSED)
4832 ret = nfserr_bad_stateid;
4833 return ret;
4834}
4835
4836/* Lock the stateid st_mutex, and deal with races with CLOSE */
4837static __be32
4838nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
4839{
4840 __be32 ret;
4841
4842 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
4843 ret = nfsd4_verify_open_stid(&stp->st_stid);
4844 if (ret == nfserr_admin_revoked)
4845 nfsd40_drop_revoked_stid(stp->st_stid.sc_client,
4846 &stp->st_stid.sc_stateid);
4847
4848 if (ret != nfs_ok)
4849 mutex_unlock(&stp->st_mutex);
4850 return ret;
4851}
4852
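/*
 * Find an existing open stateid for this (file, openowner) pair and take
 * its st_mutex, retrying if a racing CLOSE marks the stateid closed or
 * revoked before we manage to lock it.
 */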
4853static struct nfs4_ol_stateid *
4854nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4855{
4856 struct nfs4_ol_stateid *stp;
4857 for (;;) {
4858 spin_lock(&fp->fi_lock);
4859 stp = nfsd4_find_existing_open(fp, open);
4860 spin_unlock(&fp->fi_lock);
4861 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
4862 break;
4863 nfs4_put_stid(&stp->st_stid);
4864 }
4865 return stp;
4866}
4867
4868static struct nfs4_openowner *
4869alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
4870 struct nfsd4_compound_state *cstate)
4871{
4872 struct nfs4_client *clp = cstate->clp;
4873 struct nfs4_openowner *oo, *ret;
4874
4875 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
4876 if (!oo)
4877 return NULL;
4878 oo->oo_owner.so_ops = &openowner_ops;
4879 oo->oo_owner.so_is_open_owner = 1;
4880 oo->oo_owner.so_seqid = open->op_seqid;
4881 oo->oo_flags = 0;
4882 if (nfsd4_has_session(cstate))
4883 oo->oo_flags |= NFS4_OO_CONFIRMED;
4884 oo->oo_time = 0;
4885 oo->oo_last_closed_stid = NULL;
4886 INIT_LIST_HEAD(&oo->oo_close_lru);
4887 spin_lock(&clp->cl_lock);
4888 ret = find_openstateowner_str_locked(strhashval, open, clp);
4889 if (ret == NULL) {
4890 hash_openowner(oo, clp, strhashval);
4891 ret = oo;
4892 } else
4893 nfs4_free_stateowner(&oo->oo_owner);
4894
4895 spin_unlock(&clp->cl_lock);
4896 return ret;
4897}
4898
4899static struct nfs4_ol_stateid *
4900init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
4901{
4902
4903 struct nfs4_openowner *oo = open->op_openowner;
4904 struct nfs4_ol_stateid *retstp = NULL;
4905 struct nfs4_ol_stateid *stp;
4906
4907 stp = open->op_stp;
	/* Initialize and take st_mutex outside the spinlocks; mutex_lock() can sleep */
4909 mutex_init(&stp->st_mutex);
4910 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
4911
4912retry:
4913 spin_lock(&oo->oo_owner.so_client->cl_lock);
4914 spin_lock(&fp->fi_lock);
4915
4916 retstp = nfsd4_find_existing_open(fp, open);
4917 if (retstp)
4918 goto out_unlock;
4919
4920 open->op_stp = NULL;
4921 refcount_inc(&stp->st_stid.sc_count);
4922 stp->st_stid.sc_type = SC_TYPE_OPEN;
4923 INIT_LIST_HEAD(&stp->st_locks);
4924 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
4925 get_nfs4_file(fp);
4926 stp->st_stid.sc_file = fp;
4927 stp->st_access_bmap = 0;
4928 stp->st_deny_bmap = 0;
4929 stp->st_openstp = NULL;
4930 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
4931 list_add(&stp->st_perfile, &fp->fi_stateids);
4932
4933out_unlock:
4934 spin_unlock(&fp->fi_lock);
4935 spin_unlock(&oo->oo_owner.so_client->cl_lock);
4936 if (retstp) {
4937 /* Handle races with CLOSE */
4938 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
4939 nfs4_put_stid(&retstp->st_stid);
4940 goto retry;
4941 }
4942 /* To keep mutex tracking happy */
4943 mutex_unlock(&stp->st_mutex);
4944 stp = retstp;
4945 }
4946 return stp;
4947}
4948
4949/*
4950 * In the 4.0 case we need to keep the owners around a little while to handle
4951 * CLOSE replay. We still do need to release any file access that is held by
4952 * them before returning however.
4953 */
4954static void
4955move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
4956{
4957 struct nfs4_ol_stateid *last;
4958 struct nfs4_openowner *oo = openowner(s->st_stateowner);
4959 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
4960 nfsd_net_id);
4961
4962 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
4963
4964 /*
4965 * We know that we hold one reference via nfsd4_close, and another
4966 * "persistent" reference for the client. If the refcount is higher
4967 * than 2, then there are still calls in progress that are using this
4968 * stateid. We can't put the sc_file reference until they are finished.
4969 * Wait for the refcount to drop to 2. Since it has been unhashed,
4970 * there should be no danger of the refcount going back up again at
4971 * this point.
4972 */
4973 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
4974
4975 release_all_access(s);
4976 if (s->st_stid.sc_file) {
4977 put_nfs4_file(s->st_stid.sc_file);
4978 s->st_stid.sc_file = NULL;
4979 }
4980
4981 spin_lock(&nn->client_lock);
4982 last = oo->oo_last_closed_stid;
4983 oo->oo_last_closed_stid = s;
4984 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
4985 oo->oo_time = ktime_get_boottime_seconds();
4986 spin_unlock(&nn->client_lock);
4987 if (last)
4988 nfs4_put_stid(&last->st_stid);
4989}
4990
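/*
 * Look up the nfs4_file for this filehandle. Several nfs4_files may hash
 * to the same inode (aliased filehandles), so match on the filehandle
 * itself and only return an entry whose refcount can still be raised.
 */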
4991static noinline_for_stack struct nfs4_file *
4992nfsd4_file_hash_lookup(const struct svc_fh *fhp)
4993{
4994 struct inode *inode = d_inode(fhp->fh_dentry);
4995 struct rhlist_head *tmp, *list;
4996 struct nfs4_file *fi;
4997
4998 rcu_read_lock();
4999 list = rhltable_lookup(&nfs4_file_rhltable, &inode,
5000 nfs4_file_rhash_params);
5001 rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) {
5002 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
5003 if (refcount_inc_not_zero(&fi->fi_ref)) {
5004 rcu_read_unlock();
5005 return fi;
5006 }
5007 }
5008 }
5009 rcu_read_unlock();
5010 return NULL;
5011}
5012
5013/*
5014 * On hash insertion, identify entries with the same inode but
5015 * distinct filehandles. They will all be on the list returned
5016 * by rhltable_lookup().
5017 *
5018 * inode->i_lock prevents racing insertions from adding an entry
5019 * for the same inode/fhp pair twice.
5020 */
5021static noinline_for_stack struct nfs4_file *
5022nfsd4_file_hash_insert(struct nfs4_file *new, const struct svc_fh *fhp)
5023{
5024 struct inode *inode = d_inode(fhp->fh_dentry);
5025 struct rhlist_head *tmp, *list;
5026 struct nfs4_file *ret = NULL;
5027 bool alias_found = false;
5028 struct nfs4_file *fi;
5029 int err;
5030
5031 rcu_read_lock();
5032 spin_lock(&inode->i_lock);
5033
5034 list = rhltable_lookup(&nfs4_file_rhltable, &inode,
5035 nfs4_file_rhash_params);
5036 rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) {
5037 if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
5038 if (refcount_inc_not_zero(&fi->fi_ref))
5039 ret = fi;
5040 } else
5041 fi->fi_aliased = alias_found = true;
5042 }
5043 if (ret)
5044 goto out_unlock;
5045
5046 nfsd4_file_init(fhp, new);
5047 err = rhltable_insert(&nfs4_file_rhltable, &new->fi_rlist,
5048 nfs4_file_rhash_params);
5049 if (err)
5050 goto out_unlock;
5051
5052 new->fi_aliased = alias_found;
5053 ret = new;
5054
5055out_unlock:
5056 spin_unlock(&inode->i_lock);
5057 rcu_read_unlock();
5058 return ret;
5059}
5060
5061static noinline_for_stack void nfsd4_file_hash_remove(struct nfs4_file *fi)
5062{
5063 rhltable_remove(&nfs4_file_rhltable, &fi->fi_rlist,
5064 nfs4_file_rhash_params);
5065}
5066
/*
 * Called to check for a conflicting share deny mode when a READ uses the
 * all-zero stateid, or a WRITE uses the all-zero or all-ones stateid.
 */
5071static __be32
5072nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
5073{
5074 struct nfs4_file *fp;
5075 __be32 ret = nfs_ok;
5076
5077 fp = nfsd4_file_hash_lookup(current_fh);
5078 if (!fp)
5079 return ret;
5080
5081 /* Check for conflicting share reservations */
5082 spin_lock(&fp->fi_lock);
5083 if (fp->fi_share_deny & deny_type)
5084 ret = nfserr_locked;
5085 spin_unlock(&fp->fi_lock);
5086 put_nfs4_file(fp);
5087 return ret;
5088}
5089
5090static bool nfsd4_deleg_present(const struct inode *inode)
5091{
5092 struct file_lock_context *ctx = locks_inode_context(inode);
5093
5094 return ctx && !list_empty_careful(&ctx->flc_lease);
5095}
5096
5097/**
5098 * nfsd_wait_for_delegreturn - wait for delegations to be returned
5099 * @rqstp: the RPC transaction being executed
5100 * @inode: in-core inode of the file being waited for
5101 *
5102 * The timeout prevents deadlock if all nfsd threads happen to be
 * tied up waiting for delegations to be returned.
5104 *
5105 * Return values:
5106 * %true: delegation was returned
5107 * %false: timed out waiting for delegreturn
5108 */
5109bool nfsd_wait_for_delegreturn(struct svc_rqst *rqstp, struct inode *inode)
5110{
5111 long __maybe_unused timeo;
5112
5113 timeo = wait_var_event_timeout(inode, !nfsd4_deleg_present(inode),
5114 NFSD_DELEGRETURN_TIMEOUT);
5115 trace_nfsd_delegret_wakeup(rqstp, inode, timeo);
5116 return timeo > 0;
5117}
5118
5119static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
5120{
5121 struct nfs4_delegation *dp = cb_to_delegation(cb);
5122 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
5123 nfsd_net_id);
5124
5125 block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
5126
5127 /*
5128 * We can't do this in nfsd_break_deleg_cb because it is
5129 * already holding inode->i_lock.
5130 *
5131 * If the dl_time != 0, then we know that it has already been
5132 * queued for a lease break. Don't queue it again.
5133 */
5134 spin_lock(&state_lock);
5135 if (delegation_hashed(dp) && dp->dl_time == 0) {
5136 dp->dl_time = ktime_get_boottime_seconds();
5137 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
5138 }
5139 spin_unlock(&state_lock);
5140}
5141
5142static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
5143 struct rpc_task *task)
5144{
5145 struct nfs4_delegation *dp = cb_to_delegation(cb);
5146
5147 trace_nfsd_cb_recall_done(&dp->dl_stid.sc_stateid, task);
5148
5149 if (dp->dl_stid.sc_status)
5150 /* CLOSED or REVOKED */
5151 return 1;
5152
5153 switch (task->tk_status) {
5154 case 0:
5155 return 1;
5156 case -NFS4ERR_DELAY:
5157 rpc_delay(task, 2 * HZ);
5158 return 0;
5159 case -EBADHANDLE:
5160 case -NFS4ERR_BAD_STATEID:
5161 /*
5162 * Race: client probably got cb_recall before open reply
5163 * granting delegation.
5164 */
5165 if (dp->dl_retries--) {
5166 rpc_delay(task, 2 * HZ);
5167 return 0;
5168 }
5169 fallthrough;
5170 default:
5171 return 1;
5172 }
5173}
5174
5175static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
5176{
5177 struct nfs4_delegation *dp = cb_to_delegation(cb);
5178
5179 nfs4_put_stid(&dp->dl_stid);
5180}
5181
5182static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
5183 .prepare = nfsd4_cb_recall_prepare,
5184 .done = nfsd4_cb_recall_done,
5185 .release = nfsd4_cb_recall_release,
5186};
5187
5188static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
5189{
5190 /*
5191 * We're assuming the state code never drops its reference
5192 * without first removing the lease. Since we're in this lease
5193 * callback (and since the lease code is serialized by the
5194 * flc_lock) we know the server hasn't removed the lease yet, and
5195 * we know it's safe to take a reference.
5196 */
5197 refcount_inc(&dp->dl_stid.sc_count);
5198 WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall));
5199}
5200
5201/* Called from break_lease() with flc_lock held. */
5202static bool
5203nfsd_break_deleg_cb(struct file_lease *fl)
5204{
5205 struct nfs4_delegation *dp = (struct nfs4_delegation *) fl->c.flc_owner;
5206 struct nfs4_file *fp = dp->dl_stid.sc_file;
5207 struct nfs4_client *clp = dp->dl_stid.sc_client;
5208 struct nfsd_net *nn;
5209
5210 trace_nfsd_cb_recall(&dp->dl_stid);
5211
5212 dp->dl_recalled = true;
5213 atomic_inc(&clp->cl_delegs_in_recall);
5214 if (try_to_expire_client(clp)) {
5215 nn = net_generic(clp->net, nfsd_net_id);
5216 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
5217 }
5218
5219 /*
	 * We don't want the locks code to time out the lease for us;
	 * we'll remove it ourselves if a delegation isn't returned
5222 * in time:
5223 */
5224 fl->fl_break_time = 0;
5225
5226 fp->fi_had_conflict = true;
5227 nfsd_break_one_deleg(dp);
5228 return false;
5229}
5230
5231/**
5232 * nfsd_breaker_owns_lease - Check if lease conflict was resolved
5233 * @fl: Lock state to check
5234 *
5235 * Return values:
5236 * %true: Lease conflict was resolved
5237 * %false: Lease conflict was not resolved.
5238 */
5239static bool nfsd_breaker_owns_lease(struct file_lease *fl)
5240{
5241 struct nfs4_delegation *dl = fl->c.flc_owner;
5242 struct svc_rqst *rqst;
5243 struct nfs4_client *clp;
5244
5245 if (!i_am_nfsd())
5246 return false;
5247 rqst = kthread_data(current);
5248 /* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
5249 if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
5250 return false;
5251 clp = *(rqst->rq_lease_breaker);
5252 return dl->dl_stid.sc_client == clp;
5253}
5254
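/*
 * lm_change callback for delegation leases: only F_UNLCK (removal of the
 * lease) is permitted; any other modification gets -EAGAIN.
 */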
5255static int
5256nfsd_change_deleg_cb(struct file_lease *onlist, int arg,
5257 struct list_head *dispose)
5258{
5259 struct nfs4_delegation *dp = (struct nfs4_delegation *) onlist->c.flc_owner;
5260 struct nfs4_client *clp = dp->dl_stid.sc_client;
5261
5262 if (arg & F_UNLCK) {
5263 if (dp->dl_recalled)
5264 atomic_dec(&clp->cl_delegs_in_recall);
5265 return lease_modify(onlist, arg, dispose);
5266 } else
5267 return -EAGAIN;
5268}
5269
5270static const struct lease_manager_operations nfsd_lease_mng_ops = {
5271 .lm_breaker_owns_lease = nfsd_breaker_owns_lease,
5272 .lm_break = nfsd_break_deleg_cb,
5273 .lm_change = nfsd_change_deleg_cb,
5274};
5275
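/*
 * NFSv4.0 open-owner/lock-owner seqid checking: a seqid one less than
 * the owner's current value is treated as a retransmission and replayed
 * from the owner's reply cache; the current value is the expected next
 * request; anything else is NFS4ERR_BAD_SEQID. Sessions (v4.1+) make
 * owner seqids irrelevant.
 */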
5276static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
5277{
5278 if (nfsd4_has_session(cstate))
5279 return nfs_ok;
5280 if (seqid == so->so_seqid - 1)
5281 return nfserr_replay_me;
5282 if (seqid == so->so_seqid)
5283 return nfs_ok;
5284 return nfserr_bad_seqid;
5285}
5286
5287static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions,
5288 struct nfsd_net *nn)
5289{
5290 struct nfs4_client *found;
5291
5292 spin_lock(&nn->client_lock);
5293 found = find_confirmed_client(clid, sessions, nn);
5294 if (found)
5295 atomic_inc(&found->cl_rpc_users);
5296 spin_unlock(&nn->client_lock);
5297 return found;
5298}
5299
5300static __be32 set_client(clientid_t *clid,
5301 struct nfsd4_compound_state *cstate,
5302 struct nfsd_net *nn)
5303{
5304 if (cstate->clp) {
5305 if (!same_clid(&cstate->clp->cl_clientid, clid))
5306 return nfserr_stale_clientid;
5307 return nfs_ok;
5308 }
5309 if (STALE_CLIENTID(clid, nn))
5310 return nfserr_stale_clientid;
5311 /*
5312 * We're in the 4.0 case (otherwise the SEQUENCE op would have
5313 * set cstate->clp), so session = false:
5314 */
5315 cstate->clp = lookup_clientid(clid, false, nn);
5316 if (!cstate->clp)
5317 return nfserr_expired;
5318 return nfs_ok;
5319}
5320
5321__be32
5322nfsd4_process_open1(struct nfsd4_compound_state *cstate,
5323 struct nfsd4_open *open, struct nfsd_net *nn)
5324{
5325 clientid_t *clientid = &open->op_clientid;
5326 struct nfs4_client *clp = NULL;
5327 unsigned int strhashval;
5328 struct nfs4_openowner *oo = NULL;
5329 __be32 status;
5330
5331 /*
5332 * In case we need it later, after we've already created the
5333 * file and don't want to risk a further failure:
5334 */
5335 open->op_file = nfsd4_alloc_file();
5336 if (open->op_file == NULL)
5337 return nfserr_jukebox;
5338
5339 status = set_client(clientid, cstate, nn);
5340 if (status)
5341 return status;
5342 clp = cstate->clp;
5343
5344 strhashval = ownerstr_hashval(&open->op_owner);
5345 oo = find_openstateowner_str(strhashval, open, clp);
5346 open->op_openowner = oo;
5347 if (!oo) {
5348 goto new_owner;
5349 }
5350 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
5351 /* Replace unconfirmed owners without checking for replay. */
5352 release_openowner(oo);
5353 open->op_openowner = NULL;
5354 goto new_owner;
5355 }
5356 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
5357 if (status)
5358 return status;
5359 goto alloc_stateid;
5360new_owner:
5361 oo = alloc_init_open_stateowner(strhashval, open, cstate);
5362 if (oo == NULL)
5363 return nfserr_jukebox;
5364 open->op_openowner = oo;
5365alloc_stateid:
5366 open->op_stp = nfs4_alloc_open_stateid(clp);
5367 if (!open->op_stp)
5368 return nfserr_jukebox;
5369
5370 if (nfsd4_has_session(cstate) &&
5371 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
5372 open->op_odstate = alloc_clnt_odstate(clp);
5373 if (!open->op_odstate)
5374 return nfserr_jukebox;
5375 }
5376
5377 return nfs_ok;
5378}
5379
5380static inline __be32
5381nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
5382{
5383 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
5384 return nfserr_openmode;
5385 else
5386 return nfs_ok;
5387}
5388
5389static int share_access_to_flags(u32 share_access)
5390{
5391 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
5392}
5393
5394static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl,
5395 stateid_t *s)
5396{
5397 struct nfs4_stid *ret;
5398
5399 ret = find_stateid_by_type(cl, s, SC_TYPE_DELEG, SC_STATUS_REVOKED);
5400 if (!ret)
5401 return NULL;
5402 return delegstateid(ret);
5403}
5404
5405static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
5406{
5407 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
5408 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
5409}
5410
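/*
 * Look up and sanity-check the delegation stateid carried in the OPEN
 * arguments. For DELEGATE_CUR-style claims a failure here is returned
 * to the client and success confirms the openowner; for other claim
 * types any lookup failure is ignored.
 */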
5411static __be32
5412nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
5413 struct nfs4_delegation **dp)
5414{
5415 int flags;
5416 __be32 status = nfserr_bad_stateid;
5417 struct nfs4_delegation *deleg;
5418
5419 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
5420 if (deleg == NULL)
5421 goto out;
5422 if (deleg->dl_stid.sc_status & SC_STATUS_ADMIN_REVOKED) {
5423 nfs4_put_stid(&deleg->dl_stid);
5424 status = nfserr_admin_revoked;
5425 goto out;
5426 }
5427 if (deleg->dl_stid.sc_status & SC_STATUS_REVOKED) {
5428 nfs4_put_stid(&deleg->dl_stid);
5429 nfsd40_drop_revoked_stid(cl, &open->op_delegate_stateid);
5430 status = nfserr_deleg_revoked;
5431 goto out;
5432 }
5433 flags = share_access_to_flags(open->op_share_access);
5434 status = nfs4_check_delegmode(deleg, flags);
5435 if (status) {
5436 nfs4_put_stid(&deleg->dl_stid);
5437 goto out;
5438 }
5439 *dp = deleg;
5440out:
5441 if (!nfsd4_is_deleg_cur(open))
5442 return nfs_ok;
5443 if (status)
5444 return status;
5445 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
5446 return nfs_ok;
5447}
5448
5449static inline int nfs4_access_to_access(u32 nfs4_access)
5450{
5451 int flags = 0;
5452
5453 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
5454 flags |= NFSD_MAY_READ;
5455 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
5456 flags |= NFSD_MAY_WRITE;
5457 return flags;
5458}
5459
5460static inline __be32
5461nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
5462 struct nfsd4_open *open)
5463{
5464 struct iattr iattr = {
5465 .ia_valid = ATTR_SIZE,
5466 .ia_size = 0,
5467 };
5468 struct nfsd_attrs attrs = {
5469 .na_iattr = &iattr,
5470 };
5471 if (!open->op_truncate)
5472 return 0;
5473 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
5474 return nfserr_inval;
5475 return nfsd_setattr(rqstp, fh, &attrs, NULL);
5476}
5477
5478static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
5479 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
5480 struct nfsd4_open *open, bool new_stp)
5481{
5482 struct nfsd_file *nf = NULL;
5483 __be32 status;
5484 int oflag = nfs4_access_to_omode(open->op_share_access);
5485 int access = nfs4_access_to_access(open->op_share_access);
5486 unsigned char old_access_bmap, old_deny_bmap;
5487
5488 spin_lock(&fp->fi_lock);
5489
5490 /*
5491 * Are we trying to set a deny mode that would conflict with
5492 * current access?
5493 */
5494 status = nfs4_file_check_deny(fp, open->op_share_deny);
5495 if (status != nfs_ok) {
5496 if (status != nfserr_share_denied) {
5497 spin_unlock(&fp->fi_lock);
5498 goto out;
5499 }
5500 if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
5501 stp, open->op_share_deny, false))
5502 status = nfserr_jukebox;
5503 spin_unlock(&fp->fi_lock);
5504 goto out;
5505 }
5506
5507 /* set access to the file */
5508 status = nfs4_file_get_access(fp, open->op_share_access);
5509 if (status != nfs_ok) {
5510 if (status != nfserr_share_denied) {
5511 spin_unlock(&fp->fi_lock);
5512 goto out;
5513 }
5514 if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
5515 stp, open->op_share_access, true))
5516 status = nfserr_jukebox;
5517 spin_unlock(&fp->fi_lock);
5518 goto out;
5519 }
5520
5521 /* Set access bits in stateid */
5522 old_access_bmap = stp->st_access_bmap;
5523 set_access(open->op_share_access, stp);
5524
5525 /* Set new deny mask */
5526 old_deny_bmap = stp->st_deny_bmap;
5527 set_deny(open->op_share_deny, stp);
5528 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
5529
5530 if (!fp->fi_fds[oflag]) {
5531 spin_unlock(&fp->fi_lock);
5532
5533 status = nfsd_file_acquire_opened(rqstp, cur_fh, access,
5534 open->op_filp, &nf);
5535 if (status != nfs_ok)
5536 goto out_put_access;
5537
5538 spin_lock(&fp->fi_lock);
5539 if (!fp->fi_fds[oflag]) {
5540 fp->fi_fds[oflag] = nf;
5541 nf = NULL;
5542 }
5543 }
5544 spin_unlock(&fp->fi_lock);
5545 if (nf)
5546 nfsd_file_put(nf);
5547
5548 status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
5549 access));
5550 if (status)
5551 goto out_put_access;
5552
5553 status = nfsd4_truncate(rqstp, cur_fh, open);
5554 if (status)
5555 goto out_put_access;
5556out:
5557 return status;
5558out_put_access:
5559 stp->st_access_bmap = old_access_bmap;
5560 nfs4_file_put_access(fp, open->op_share_access);
5561 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
5562 goto out;
5563}
5564
5565static __be32
5566nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp,
5567 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
5568 struct nfsd4_open *open)
5569{
5570 __be32 status;
5571 unsigned char old_deny_bmap = stp->st_deny_bmap;
5572
5573 if (!test_access(open->op_share_access, stp))
5574 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open, false);
5575
5576 /* test and set deny mode */
5577 spin_lock(&fp->fi_lock);
5578 status = nfs4_file_check_deny(fp, open->op_share_deny);
5579 switch (status) {
5580 case nfs_ok:
5581 set_deny(open->op_share_deny, stp);
5582 fp->fi_share_deny |=
5583 (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
5584 break;
5585 case nfserr_share_denied:
5586 if (nfs4_resolve_deny_conflicts_locked(fp, false,
5587 stp, open->op_share_deny, false))
5588 status = nfserr_jukebox;
5589 break;
5590 }
5591 spin_unlock(&fp->fi_lock);
5592
5593 if (status != nfs_ok)
5594 return status;
5595
5596 status = nfsd4_truncate(rqstp, cur_fh, open);
5597 if (status != nfs_ok)
5598 reset_union_bmap_deny(old_deny_bmap, stp);
5599 return status;
5600}
5601
5602/* Should we give out recallable state?: */
5603static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
5604{
5605 if (clp->cl_cb_state == NFSD4_CB_UP)
5606 return true;
5607 /*
5608 * In the sessions case, since we don't have to establish a
5609 * separate connection for callbacks, we assume it's OK
5610 * until we hear otherwise:
5611 */
5612 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
5613}
5614
5615static struct file_lease *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
5616 int flag)
5617{
5618 struct file_lease *fl;
5619
5620 fl = locks_alloc_lease();
5621 if (!fl)
5622 return NULL;
5623 fl->fl_lmops = &nfsd_lease_mng_ops;
5624 fl->c.flc_flags = FL_DELEG;
5625 fl->c.flc_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
5626 fl->c.flc_owner = (fl_owner_t)dp;
5627 fl->c.flc_pid = current->tgid;
5628 fl->c.flc_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
5629 return fl;
5630}
5631
5632static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
5633 struct nfs4_file *fp)
5634{
5635 struct nfs4_ol_stateid *st;
5636 struct file *f = fp->fi_deleg_file->nf_file;
5637 struct inode *ino = file_inode(f);
5638 int writes;
5639
5640 writes = atomic_read(&ino->i_writecount);
5641 if (!writes)
5642 return 0;
5643 /*
5644 * There could be multiple filehandles (hence multiple
5645 * nfs4_files) referencing this file, but that's not too
5646 * common; let's just give up in that case rather than
5647 * trying to go look up all the clients using that other
5648 * nfs4_file as well:
5649 */
5650 if (fp->fi_aliased)
5651 return -EAGAIN;
5652 /*
5653 * If there's a close in progress, make sure that we see it
5654 * clear any fi_fds[] entries before we see it decrement
5655 * i_writecount:
5656 */
5657 smp_mb__after_atomic();
5658
5659 if (fp->fi_fds[O_WRONLY])
5660 writes--;
5661 if (fp->fi_fds[O_RDWR])
5662 writes--;
5663 if (writes > 0)
5664 return -EAGAIN; /* There may be non-NFSv4 writers */
5665 /*
5666 * It's possible there are non-NFSv4 write opens in progress,
5667 * but if they haven't incremented i_writecount yet then they
5668 * also haven't called break lease yet; so, they'll break this
5669 * lease soon enough. So, all that's left to check for is NFSv4
5670 * opens:
5671 */
5672 spin_lock(&fp->fi_lock);
5673 list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
5674 if (st->st_openstp == NULL /* it's an open */ &&
5675 access_permit_write(st) &&
5676 st->st_stid.sc_client != clp) {
5677 spin_unlock(&fp->fi_lock);
5678 return -EAGAIN;
5679 }
5680 }
5681 spin_unlock(&fp->fi_lock);
5682 /*
5683 * There's a small chance that we could be racing with another
5684 * NFSv4 open. However, any open that hasn't added itself to
5685 * the fi_stateids list also hasn't called break_lease yet; so,
5686 * they'll break this lease soon enough.
5687 */
5688 return 0;
5689}
5690
5691/*
 * It's possible that, between opening the dentry and setting the delegation,
 * the file has been renamed or unlinked. Redo the lookup to verify that this
5694 * hasn't happened.
5695 */
5696static int
5697nfsd4_verify_deleg_dentry(struct nfsd4_open *open, struct nfs4_file *fp,
5698 struct svc_fh *parent)
5699{
5700 struct svc_export *exp;
5701 struct dentry *child;
5702 __be32 err;
5703
5704 err = nfsd_lookup_dentry(open->op_rqstp, parent,
5705 open->op_fname, open->op_fnamelen,
5706 &exp, &child);
5707
5708 if (err)
5709 return -EAGAIN;
5710
5711 exp_put(exp);
5712 dput(child);
5713 if (child != file_dentry(fp->fi_deleg_file->nf_file))
5714 return -EAGAIN;
5715
5716 return 0;
5717}
5718
5719/*
5720 * We avoid breaking delegations held by a client due to its own activity, but
5721 * clearing setuid/setgid bits on a write is an implicit activity and the client
5722 * may not notice and continue using the old mode. Avoid giving out a delegation
5723 * on setuid/setgid files when the client is requesting an open for write.
5724 */
5725static int
5726nfsd4_verify_setuid_write(struct nfsd4_open *open, struct nfsd_file *nf)
5727{
5728 struct inode *inode = file_inode(nf->nf_file);
5729
5730 if ((open->op_share_access & NFS4_SHARE_ACCESS_WRITE) &&
5731 (inode->i_mode & (S_ISUID|S_ISGID)))
5732 return -EAGAIN;
5733 return 0;
5734}
5735
5736static struct nfs4_delegation *
5737nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
5738 struct svc_fh *parent)
5739{
5740 int status = 0;
5741 struct nfs4_client *clp = stp->st_stid.sc_client;
5742 struct nfs4_file *fp = stp->st_stid.sc_file;
5743 struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate;
5744 struct nfs4_delegation *dp;
5745 struct nfsd_file *nf = NULL;
5746 struct file_lease *fl;
5747 u32 dl_type;
5748
5749 /*
5750 * The fi_had_conflict and nfs_get_existing_delegation checks
5751 * here are just optimizations; we'll need to recheck them at
5752 * the end:
5753 */
5754 if (fp->fi_had_conflict)
5755 return ERR_PTR(-EAGAIN);
5756
5757 /*
5758 * Try for a write delegation first. RFC8881 section 10.4 says:
5759 *
5760 * "An OPEN_DELEGATE_WRITE delegation allows the client to handle,
5761 * on its own, all opens."
5762 *
5763 * Furthermore the client can use a write delegation for most READ
5764	 * operations as well, so we require an O_RDWR file here.
5765 *
5766 * Offer a write delegation in the case of a BOTH open, and ensure
5767 * we get the O_RDWR descriptor.
5768 */
5769 if ((open->op_share_access & NFS4_SHARE_ACCESS_BOTH) == NFS4_SHARE_ACCESS_BOTH) {
5770 nf = find_rw_file(fp);
5771 dl_type = NFS4_OPEN_DELEGATE_WRITE;
5772 }
5773
5774 /*
5775	 * If the file is being opened O_RDONLY or we couldn't get an O_RDWR
5776 * file for some reason, then try for a read delegation instead.
5777 */
5778 if (!nf && (open->op_share_access & NFS4_SHARE_ACCESS_READ)) {
5779 nf = find_readable_file(fp);
5780 dl_type = NFS4_OPEN_DELEGATE_READ;
5781 }
5782
5783 if (!nf)
5784 return ERR_PTR(-EAGAIN);
5785
5786 spin_lock(&state_lock);
5787 spin_lock(&fp->fi_lock);
5788 if (nfs4_delegation_exists(clp, fp))
5789 status = -EAGAIN;
5790 else if (nfsd4_verify_setuid_write(open, nf))
5791 status = -EAGAIN;
5792 else if (!fp->fi_deleg_file) {
5793 fp->fi_deleg_file = nf;
5794 /* increment early to prevent fi_deleg_file from being
5795 * cleared */
5796 fp->fi_delegees = 1;
5797 nf = NULL;
5798 } else
5799 fp->fi_delegees++;
5800 spin_unlock(&fp->fi_lock);
5801 spin_unlock(&state_lock);
5802 if (nf)
5803 nfsd_file_put(nf);
5804 if (status)
5805 return ERR_PTR(status);
5806
5807 status = -ENOMEM;
5808 dp = alloc_init_deleg(clp, fp, odstate, dl_type);
5809 if (!dp)
5810 goto out_delegees;
5811
5812 fl = nfs4_alloc_init_lease(dp, dl_type);
5813 if (!fl)
5814 goto out_clnt_odstate;
5815
5816 status = kernel_setlease(fp->fi_deleg_file->nf_file,
5817 fl->c.flc_type, &fl, NULL);
5818 if (fl)
5819 locks_free_lease(fl);
5820 if (status)
5821 goto out_clnt_odstate;
5822
5823 if (parent) {
5824 status = nfsd4_verify_deleg_dentry(open, fp, parent);
5825 if (status)
5826 goto out_unlock;
5827 }
5828
5829 status = nfsd4_check_conflicting_opens(clp, fp);
5830 if (status)
5831 goto out_unlock;
5832
5833 /*
5834 * Now that the deleg is set, check again to ensure that nothing
5835	 * raced in and changed the mode while we weren't looking.
5836 */
5837 status = nfsd4_verify_setuid_write(open, fp->fi_deleg_file);
5838 if (status)
5839 goto out_unlock;
5840
5841 status = -EAGAIN;
5842 if (fp->fi_had_conflict)
5843 goto out_unlock;
5844
5845 spin_lock(&state_lock);
5846 spin_lock(&clp->cl_lock);
5847 spin_lock(&fp->fi_lock);
5848 status = hash_delegation_locked(dp, fp);
5849 spin_unlock(&fp->fi_lock);
5850 spin_unlock(&clp->cl_lock);
5851 spin_unlock(&state_lock);
5852
5853 if (status)
5854 goto out_unlock;
5855
5856 return dp;
5857out_unlock:
5858 kernel_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
5859out_clnt_odstate:
5860 put_clnt_odstate(dp->dl_clnt_odstate);
5861 nfs4_put_stid(&dp->dl_stid);
5862out_delegees:
5863 put_deleg_file(fp);
5864 return ERR_PTR(status);
5865}
5866
5867static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
5868{
5869 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5870 if (status == -EAGAIN)
5871 open->op_why_no_deleg = WND4_CONTENTION;
5872 else {
5873 open->op_why_no_deleg = WND4_RESOURCE;
5874 switch (open->op_deleg_want) {
5875 case NFS4_SHARE_WANT_READ_DELEG:
5876 case NFS4_SHARE_WANT_WRITE_DELEG:
5877 case NFS4_SHARE_WANT_ANY_DELEG:
5878 break;
5879 case NFS4_SHARE_WANT_CANCEL:
5880 open->op_why_no_deleg = WND4_CANCELLED;
5881 break;
5882 case NFS4_SHARE_WANT_NO_DELEG:
5883 WARN_ON_ONCE(1);
5884 }
5885 }
5886}
5887
5888/*
5889 * The Linux NFS server does not offer write delegations to NFSv4.0
5890 * clients in order to avoid conflicts between write delegations and
5891 * GETATTRs requesting CHANGE or SIZE attributes.
5892 *
5893 * With NFSv4.1 and later minorversions, the SEQUENCE operation that
5894 * begins each COMPOUND contains a client ID. Delegation recall can
5895 * be avoided when the server recognizes the client sending a
5896 * GETATTR also holds write delegation it conflicts with.
5897 * GETATTR also holds the write delegation it conflicts with.
5898 * However, the NFSv4.0 protocol does not enable a server to
5899 * determine that a GETATTR originated from the client holding the
5900 * conflicting delegation versus coming from some other client. Per
5901 * RFC 7530 Section 16.7.5, the server must recall or send a
5902 * CB_GETATTR even when the GETATTR originates from the client that
5903 * holds the conflicting delegation.
5904 *
5905 * An NFSv4.0 client can trigger a pathological situation if it
5906 * always sends a DELEGRETURN preceded by a conflicting GETATTR in
5907 * the same COMPOUND. COMPOUND execution will always stop at the
5908 * GETATTR and the DELEGRETURN will never get executed. The server
5909 * eventually revokes the delegation, which can result in loss of
5910 * open or lock state.
5911 */
5912static void
5913nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
5914 struct svc_fh *currentfh)
5915{
5916 struct nfs4_delegation *dp;
5917 struct nfs4_openowner *oo = openowner(stp->st_stateowner);
5918 struct nfs4_client *clp = stp->st_stid.sc_client;
5919 struct svc_fh *parent = NULL;
5920 int cb_up;
5921 int status = 0;
5922 struct kstat stat;
5923 struct path path;
5924
5925 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
5926 open->op_recall = false;
5927 switch (open->op_claim_type) {
5928 case NFS4_OPEN_CLAIM_PREVIOUS:
5929 if (!cb_up)
5930 open->op_recall = true;
5931 break;
5932 case NFS4_OPEN_CLAIM_NULL:
5933 parent = currentfh;
5934 fallthrough;
5935 case NFS4_OPEN_CLAIM_FH:
5936 /*
5937 * Let's not give out any delegations till everyone's
5938 * had the chance to reclaim theirs, *and* until
5939 * NLM locks have all been reclaimed:
5940 */
5941 if (locks_in_grace(clp->net))
5942 goto out_no_deleg;
5943 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
5944 goto out_no_deleg;
5945 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE &&
5946 !clp->cl_minorversion)
5947 goto out_no_deleg;
5948 break;
5949 default:
5950 goto out_no_deleg;
5951 }
5952 dp = nfs4_set_delegation(open, stp, parent);
5953 if (IS_ERR(dp))
5954 goto out_no_deleg;
5955
5956 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
5957
5958 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) {
5959 open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE;
5960 trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid);
5961 path.mnt = currentfh->fh_export->ex_path.mnt;
5962 path.dentry = currentfh->fh_dentry;
5963 if (vfs_getattr(&path, &stat,
5964 (STATX_SIZE | STATX_CTIME | STATX_CHANGE_COOKIE),
5965 AT_STATX_SYNC_AS_STAT)) {
5966 nfs4_put_stid(&dp->dl_stid);
5967 destroy_delegation(dp);
5968 goto out_no_deleg;
5969 }
5970 dp->dl_cb_fattr.ncf_cur_fsize = stat.size;
5971 dp->dl_cb_fattr.ncf_initial_cinfo =
5972 nfsd4_change_attribute(&stat, d_inode(currentfh->fh_dentry));
5973 } else {
5974 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
5975 trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
5976 }
5977 nfs4_put_stid(&dp->dl_stid);
5978 return;
5979out_no_deleg:
5980 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
5981 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
5982 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
5983 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
5984 open->op_recall = true;
5985 }
5986
5987 /* 4.1 client asking for a delegation? */
5988 if (open->op_deleg_want)
5989 nfsd4_open_deleg_none_ext(open, status);
5990 return;
5991}
5992
5993static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
5994 struct nfs4_delegation *dp)
5995{
5996 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
5997 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5998 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5999 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
6000 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
6001 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
6002 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
6003 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
6004 }
6005 /* Otherwise the client must be confused wanting a delegation
6006 * it already has, therefore we don't return
6007	 * NFS4_OPEN_DELEGATE_NONE_EXT and a reason.
6008 */
6009}
6010
6011/**
6012 * nfsd4_process_open2 - finish open processing
6013 * @rqstp: the RPC transaction being executed
6014 * @current_fh: NFSv4 COMPOUND's current filehandle
6015 * @open: OPEN arguments
6016 *
6017 * If successful, (1) truncate the file if open->op_truncate was
6018 * set, (2) set open->op_stateid, (3) set open->op_delegation.
6019 *
6020 * Returns %nfs_ok on success; otherwise an nfs4stat value in
6021 * network byte order is returned.
6022 */
6023__be32
6024nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
6025{
6026 struct nfsd4_compoundres *resp = rqstp->rq_resp;
6027 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
6028 struct nfs4_file *fp = NULL;
6029 struct nfs4_ol_stateid *stp = NULL;
6030 struct nfs4_delegation *dp = NULL;
6031 __be32 status;
6032 bool new_stp = false;
6033
6034 /*
6035	 * Look up the file; if found, look up the stateid and check the open
6036	 * request, and check for delegations in the process of being recalled.
6037	 * If not found, create the nfs4_file struct.
6038 */
6039 fp = nfsd4_file_hash_insert(open->op_file, current_fh);
6040 if (unlikely(!fp))
6041 return nfserr_jukebox;
6042 if (fp != open->op_file) {
6043 status = nfs4_check_deleg(cl, open, &dp);
6044 if (status)
6045 goto out;
6046 stp = nfsd4_find_and_lock_existing_open(fp, open);
6047 } else {
6048 open->op_file = NULL;
6049 status = nfserr_bad_stateid;
6050 if (nfsd4_is_deleg_cur(open))
6051 goto out;
6052 }
6053
6054 if (!stp) {
6055 stp = init_open_stateid(fp, open);
6056 if (!open->op_stp)
6057 new_stp = true;
6058 }
6059
6060 /*
6061 * OPEN the file, or upgrade an existing OPEN.
6062 * If truncate fails, the OPEN fails.
6063 *
6064 * stp is already locked.
6065 */
6066 if (!new_stp) {
6067 /* Stateid was found, this is an OPEN upgrade */
6068 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
6069 if (status) {
6070 mutex_unlock(&stp->st_mutex);
6071 goto out;
6072 }
6073 } else {
6074 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open, true);
6075 if (status) {
6076 release_open_stateid(stp);
6077 mutex_unlock(&stp->st_mutex);
6078 goto out;
6079 }
6080
6081 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
6082 open->op_odstate);
6083 if (stp->st_clnt_odstate == open->op_odstate)
6084 open->op_odstate = NULL;
6085 }
6086
6087 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
6088 mutex_unlock(&stp->st_mutex);
6089
6090 if (nfsd4_has_session(&resp->cstate)) {
6091 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
6092 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
6093 open->op_why_no_deleg = WND4_NOT_WANTED;
6094 goto nodeleg;
6095 }
6096 }
6097
6098 /*
6099 * Attempt to hand out a delegation. No error return, because the
6100 * OPEN succeeds even if we fail.
6101 */
6102 nfs4_open_delegation(open, stp, &resp->cstate.current_fh);
6103nodeleg:
6104 status = nfs_ok;
6105 trace_nfsd_open(&stp->st_stid.sc_stateid);
6106out:
6107 /* 4.1 client trying to upgrade/downgrade delegation? */
6108 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
6109 open->op_deleg_want)
6110 nfsd4_deleg_xgrade_none_ext(open, dp);
6111
6112 if (fp)
6113 put_nfs4_file(fp);
6114 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
6115 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
6116 /*
6117 * To finish the open response, we just need to set the rflags.
6118 */
6119 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
6120 if (nfsd4_has_session(&resp->cstate))
6121 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
6122 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
6123 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
6124
6125 if (dp)
6126 nfs4_put_stid(&dp->dl_stid);
6127 if (stp)
6128 nfs4_put_stid(&stp->st_stid);
6129
6130 return status;
6131}
6132
6133void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
6134 struct nfsd4_open *open)
6135{
6136 if (open->op_openowner) {
6137 struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
6138
6139 nfsd4_cstate_assign_replay(cstate, so);
6140 nfs4_put_stateowner(so);
6141 }
6142 if (open->op_file)
6143 kmem_cache_free(file_slab, open->op_file);
6144 if (open->op_stp)
6145 nfs4_put_stid(&open->op_stp->st_stid);
6146 if (open->op_odstate)
6147 kmem_cache_free(odstate_slab, open->op_odstate);
6148}
6149
6150__be32
6151nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6152 union nfsd4_op_u *u)
6153{
6154 clientid_t *clid = &u->renew;
6155 struct nfs4_client *clp;
6156 __be32 status;
6157 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6158
6159 trace_nfsd_clid_renew(clid);
6160 status = set_client(clid, cstate, nn);
6161 if (status)
6162 return status;
6163 clp = cstate->clp;
6164 if (!list_empty(&clp->cl_delegations)
6165 && clp->cl_cb_state != NFSD4_CB_UP)
6166 return nfserr_cb_path_down;
6167 return nfs_ok;
6168}
6169
6170void
6171nfsd4_end_grace(struct nfsd_net *nn)
6172{
6173 /* do nothing if grace period already ended */
6174 if (nn->grace_ended)
6175 return;
6176
6177 trace_nfsd_grace_complete(nn);
6178 nn->grace_ended = true;
6179 /*
6180 * If the server goes down again right now, an NFSv4
6181 * client will still be allowed to reclaim after it comes back up,
6182 * even if it hasn't yet had a chance to reclaim state this time.
6183 *
6184 */
6185 nfsd4_record_grace_done(nn);
6186 /*
6187 * At this point, NFSv4 clients can still reclaim. But if the
6188 * server crashes, any that have not yet reclaimed will be out
6189 * of luck on the next boot.
6190 *
6191 * (NFSv4.1+ clients are considered to have reclaimed once they
6192 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
6193 * have reclaimed after their first OPEN.)
6194 */
6195 locks_end_grace(&nn->nfsd4_manager);
6196 /*
6197 * At this point, and once lockd and/or any other containers
6198 * exit their grace period, further reclaims will fail and
6199 * regular locking can resume.
6200 */
6201}
6202
6203/*
6204 * If we've waited a lease period but there are still clients trying to
6205 * reclaim, wait a little longer to give them a chance to finish.
6206 */
6207static bool clients_still_reclaiming(struct nfsd_net *nn)
6208{
6209 time64_t double_grace_period_end = nn->boot_time +
6210 2 * nn->nfsd4_lease;
6211
6212 if (nn->track_reclaim_completes &&
6213 atomic_read(&nn->nr_reclaim_complete) ==
6214 nn->reclaim_str_hashtbl_size)
6215 return false;
6216 if (!nn->somebody_reclaimed)
6217 return false;
6218 nn->somebody_reclaimed = false;
6219 /*
6220 * If we've given them *two* lease times to reclaim, and they're
6221 * still not done, give up:
6222 */
6223 if (ktime_get_boottime_seconds() > double_grace_period_end)
6224 return false;
6225 return true;
6226}
6227
6228struct laundry_time {
6229 time64_t cutoff;
6230 time64_t new_timeo;
6231};
6232
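/*
 * Laundromat helper: returns true if an item last refreshed at
 * @last_refresh has passed the cutoff. Otherwise the remaining time
 * is folded into lt->new_timeo so the laundromat gets rescheduled
 * before the item expires.
 */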
6233static bool state_expired(struct laundry_time *lt, time64_t last_refresh)
6234{
6235 time64_t time_remaining;
6236
6237 if (last_refresh < lt->cutoff)
6238 return true;
6239 time_remaining = last_refresh - lt->cutoff;
6240 lt->new_timeo = min(lt->new_timeo, time_remaining);
6241 return false;
6242}
6243
6244#ifdef CONFIG_NFSD_V4_2_INTER_SSC
6245void nfsd4_ssc_init_umount_work(struct nfsd_net *nn)
6246{
6247 spin_lock_init(&nn->nfsd_ssc_lock);
6248 INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list);
6249 init_waitqueue_head(&nn->nfsd_ssc_waitq);
6250}
6251EXPORT_SYMBOL_GPL(nfsd4_ssc_init_umount_work);
6252
6253/*
6254 * This is called when nfsd is being shut down, after all inter_ssc
6255 * cleanup is done, to destroy the ssc delayed unmount list.
6256 */
6257static void nfsd4_ssc_shutdown_umount(struct nfsd_net *nn)
6258{
6259 struct nfsd4_ssc_umount_item *ni = NULL;
6260 struct nfsd4_ssc_umount_item *tmp;
6261
6262 spin_lock(&nn->nfsd_ssc_lock);
6263 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
6264 list_del(&ni->nsui_list);
6265 spin_unlock(&nn->nfsd_ssc_lock);
6266 mntput(ni->nsui_vfsmount);
6267 kfree(ni);
6268 spin_lock(&nn->nfsd_ssc_lock);
6269 }
6270 spin_unlock(&nn->nfsd_ssc_lock);
6271}
6272
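/*
 * Unmount any entry on the delayed-unmount list whose hold time has
 * elapsed and that has no remaining users, then wake up any
 * ssc_connect waiters so they rescan the list from the beginning.
 */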
6273static void nfsd4_ssc_expire_umount(struct nfsd_net *nn)
6274{
6275 bool do_wakeup = false;
6276 struct nfsd4_ssc_umount_item *ni = NULL;
6277 struct nfsd4_ssc_umount_item *tmp;
6278
6279 spin_lock(&nn->nfsd_ssc_lock);
6280 list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
6281 if (time_after(jiffies, ni->nsui_expire)) {
6282 if (refcount_read(&ni->nsui_refcnt) > 1)
6283 continue;
6284
6285 /* mark being unmount */
6286			/* mark as being unmounted */
6287 spin_unlock(&nn->nfsd_ssc_lock);
6288 mntput(ni->nsui_vfsmount);
6289 spin_lock(&nn->nfsd_ssc_lock);
6290
6291			/* waiters need to start from the beginning of the list */
6292 list_del(&ni->nsui_list);
6293 kfree(ni);
6294
6295 /* wakeup ssc_connect waiters */
6296 do_wakeup = true;
6297 continue;
6298 }
6299 break;
6300 }
6301 if (do_wakeup)
6302 wake_up_all(&nn->nfsd_ssc_waitq);
6303 spin_unlock(&nn->nfsd_ssc_lock);
6304}
6305#endif
6306
6307/* Check if any lock belonging to this lockowner has any blockers */
6308static bool
6309nfs4_lockowner_has_blockers(struct nfs4_lockowner *lo)
6310{
6311 struct file_lock_context *ctx;
6312 struct nfs4_ol_stateid *stp;
6313 struct nfs4_file *nf;
6314
6315 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
6316 nf = stp->st_stid.sc_file;
6317 ctx = locks_inode_context(nf->fi_inode);
6318 if (!ctx)
6319 continue;
6320 if (locks_owner_has_blockers(ctx, lo))
6321 return true;
6322 }
6323 return false;
6324}
6325
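/*
 * Returns true if this client has delegations currently being
 * recalled, or if any other lock request is blocked on a lock held
 * by one of its lockowners.
 */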
6326static bool
6327nfs4_anylock_blockers(struct nfs4_client *clp)
6328{
6329 int i;
6330 struct nfs4_stateowner *so;
6331 struct nfs4_lockowner *lo;
6332
6333 if (atomic_read(&clp->cl_delegs_in_recall))
6334 return true;
6335 spin_lock(&clp->cl_lock);
6336 for (i = 0; i < OWNER_HASH_SIZE; i++) {
6337 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[i],
6338 so_strhash) {
6339 if (so->so_is_open_owner)
6340 continue;
6341 lo = lockowner(so);
6342 if (nfs4_lockowner_has_blockers(lo)) {
6343 spin_unlock(&clp->cl_lock);
6344 return true;
6345 }
6346 }
6347 }
6348 spin_unlock(&clp->cl_lock);
6349 return false;
6350}
6351
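/*
 * Build the list of clients to expire. Clients already marked
 * NFSD4_EXPIRABLE are always reaped. Clients whose lease has run out
 * become courtesy clients; those are reaped when they hold no state,
 * when they block other lock requests, or (up to
 * NFSD_CLIENT_MAX_TRIM_PER_RUN per run) when the server is over its
 * nfs4_max_clients limit.
 */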
6352static void
6353nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
6354 struct laundry_time *lt)
6355{
6356 unsigned int maxreap, reapcnt = 0;
6357 struct list_head *pos, *next;
6358 struct nfs4_client *clp;
6359
6360 maxreap = (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) ?
6361 NFSD_CLIENT_MAX_TRIM_PER_RUN : 0;
6362 INIT_LIST_HEAD(reaplist);
6363 spin_lock(&nn->client_lock);
6364 list_for_each_safe(pos, next, &nn->client_lru) {
6365 clp = list_entry(pos, struct nfs4_client, cl_lru);
6366 if (clp->cl_state == NFSD4_EXPIRABLE)
6367 goto exp_client;
6368 if (!state_expired(lt, clp->cl_time))
6369 break;
6370 if (!atomic_read(&clp->cl_rpc_users)) {
6371 if (clp->cl_state == NFSD4_ACTIVE)
6372 atomic_inc(&nn->nfsd_courtesy_clients);
6373 clp->cl_state = NFSD4_COURTESY;
6374 }
6375 if (!client_has_state(clp))
6376 goto exp_client;
6377 if (!nfs4_anylock_blockers(clp))
6378 if (reapcnt >= maxreap)
6379 continue;
6380exp_client:
6381 if (!mark_client_expired_locked(clp)) {
6382 list_add(&clp->cl_lru, reaplist);
6383 reapcnt++;
6384 }
6385 }
6386 spin_unlock(&nn->client_lock);
6387}
6388
6389static void
6390nfs4_get_courtesy_client_reaplist(struct nfsd_net *nn,
6391 struct list_head *reaplist)
6392{
6393 unsigned int maxreap = 0, reapcnt = 0;
6394 struct list_head *pos, *next;
6395 struct nfs4_client *clp;
6396
6397 maxreap = NFSD_CLIENT_MAX_TRIM_PER_RUN;
6398 INIT_LIST_HEAD(reaplist);
6399
6400 spin_lock(&nn->client_lock);
6401 list_for_each_safe(pos, next, &nn->client_lru) {
6402 clp = list_entry(pos, struct nfs4_client, cl_lru);
6403 if (clp->cl_state == NFSD4_ACTIVE)
6404 break;
6405 if (reapcnt >= maxreap)
6406 break;
6407 if (!mark_client_expired_locked(clp)) {
6408 list_add(&clp->cl_lru, reaplist);
6409 reapcnt++;
6410 }
6411 }
6412 spin_unlock(&nn->client_lock);
6413}
6414
6415static void
6416nfs4_process_client_reaplist(struct list_head *reaplist)
6417{
6418 struct list_head *pos, *next;
6419 struct nfs4_client *clp;
6420
6421 list_for_each_safe(pos, next, reaplist) {
6422 clp = list_entry(pos, struct nfs4_client, cl_lru);
6423 trace_nfsd_clid_purged(&clp->cl_clientid);
6424 list_del_init(&clp->cl_lru);
6425 expire_client(clp);
6426 }
6427}
6428
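/*
 * NFSv4.0 clients have no FREE_STATEID operation, so they cannot free
 * administratively-revoked state themselves. Once a lease period has
 * passed since the last such revocation, drop any remaining
 * admin-revoked stateids on their behalf.
 */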
6429static void nfs40_clean_admin_revoked(struct nfsd_net *nn,
6430 struct laundry_time *lt)
6431{
6432 struct nfs4_client *clp;
6433
6434 spin_lock(&nn->client_lock);
6435 if (nn->nfs40_last_revoke == 0 ||
6436 nn->nfs40_last_revoke > lt->cutoff) {
6437 spin_unlock(&nn->client_lock);
6438 return;
6439 }
6440 nn->nfs40_last_revoke = 0;
6441
6442retry:
6443 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6444 unsigned long id, tmp;
6445 struct nfs4_stid *stid;
6446
6447 if (atomic_read(&clp->cl_admin_revoked) == 0)
6448 continue;
6449
6450 spin_lock(&clp->cl_lock);
6451 idr_for_each_entry_ul(&clp->cl_stateids, stid, tmp, id)
6452 if (stid->sc_status & SC_STATUS_ADMIN_REVOKED) {
6453 refcount_inc(&stid->sc_count);
6454 spin_unlock(&nn->client_lock);
6455 /* this function drops ->cl_lock */
6456 nfsd4_drop_revoked_stid(stid);
6457 nfs4_put_stid(stid);
6458 spin_lock(&nn->client_lock);
6459 goto retry;
6460 }
6461 spin_unlock(&clp->cl_lock);
6462 }
6463 spin_unlock(&nn->client_lock);
6464}
6465
6466static time64_t
6467nfs4_laundromat(struct nfsd_net *nn)
6468{
6469 struct nfs4_openowner *oo;
6470 struct nfs4_delegation *dp;
6471 struct nfs4_ol_stateid *stp;
6472 struct nfsd4_blocked_lock *nbl;
6473 struct list_head *pos, *next, reaplist;
6474 struct laundry_time lt = {
6475 .cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease,
6476 .new_timeo = nn->nfsd4_lease
6477 };
6478 struct nfs4_cpntf_state *cps;
6479 copy_stateid_t *cps_t;
6480 int i;
6481
6482 if (clients_still_reclaiming(nn)) {
6483 lt.new_timeo = 0;
6484 goto out;
6485 }
6486 nfsd4_end_grace(nn);
6487
6488 spin_lock(&nn->s2s_cp_lock);
6489 idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
6490 cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
6491 if (cps->cp_stateid.cs_type == NFS4_COPYNOTIFY_STID &&
6492		    state_expired(&lt, cps->cpntf_time))
6493 _free_cpntf_state_locked(nn, cps);
6494 }
6495 spin_unlock(&nn->s2s_cp_lock);
6496	nfs4_get_client_reaplist(nn, &reaplist, &lt);
6497 nfs4_process_client_reaplist(&reaplist);
6498
6499	nfs40_clean_admin_revoked(nn, &lt);
6500
6501 spin_lock(&state_lock);
6502 list_for_each_safe(pos, next, &nn->del_recall_lru) {
6503 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
6504		if (!state_expired(&lt, dp->dl_time))
6505 break;
6506 unhash_delegation_locked(dp, SC_STATUS_REVOKED);
6507 list_add(&dp->dl_recall_lru, &reaplist);
6508 }
6509 spin_unlock(&state_lock);
6510 while (!list_empty(&reaplist)) {
6511 dp = list_first_entry(&reaplist, struct nfs4_delegation,
6512 dl_recall_lru);
6513 list_del_init(&dp->dl_recall_lru);
6514 revoke_delegation(dp);
6515 }
6516
6517 spin_lock(&nn->client_lock);
6518 while (!list_empty(&nn->close_lru)) {
6519 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
6520 oo_close_lru);
6521		if (!state_expired(&lt, oo->oo_time))
6522 break;
6523 list_del_init(&oo->oo_close_lru);
6524 stp = oo->oo_last_closed_stid;
6525 oo->oo_last_closed_stid = NULL;
6526 spin_unlock(&nn->client_lock);
6527 nfs4_put_stid(&stp->st_stid);
6528 spin_lock(&nn->client_lock);
6529 }
6530 spin_unlock(&nn->client_lock);
6531
6532 /*
6533 * It's possible for a client to try and acquire an already held lock
6534 * that is being held for a long time, and then lose interest in it.
6535 * So, we clean out any un-revisited request after a lease period
6536 * under the assumption that the client is no longer interested.
6537 *
6538 * RFC5661, sec. 9.6 states that the client must not rely on getting
6539 * notifications and must continue to poll for locks, even when the
6540 * server supports them. Thus this shouldn't lead to clients blocking
6541 * indefinitely once the lock does become free.
6542 */
6543 BUG_ON(!list_empty(&reaplist));
6544 spin_lock(&nn->blocked_locks_lock);
6545 while (!list_empty(&nn->blocked_locks_lru)) {
6546 nbl = list_first_entry(&nn->blocked_locks_lru,
6547 struct nfsd4_blocked_lock, nbl_lru);
6548		if (!state_expired(&lt, nbl->nbl_time))
6549 break;
6550 list_move(&nbl->nbl_lru, &reaplist);
6551 list_del_init(&nbl->nbl_list);
6552 }
6553 spin_unlock(&nn->blocked_locks_lock);
6554
6555 while (!list_empty(&reaplist)) {
6556 nbl = list_first_entry(&reaplist,
6557 struct nfsd4_blocked_lock, nbl_lru);
6558 list_del_init(&nbl->nbl_lru);
6559 free_blocked_lock(nbl);
6560 }
6561#ifdef CONFIG_NFSD_V4_2_INTER_SSC
6562 /* service the server-to-server copy delayed unmount list */
6563 nfsd4_ssc_expire_umount(nn);
6564#endif
6565 if (atomic_long_read(&num_delegations) >= max_delegations)
6566 deleg_reaper(nn);
6567out:
6568 return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
6569}
6570
6571static void laundromat_main(struct work_struct *);
6572
6573static void
6574laundromat_main(struct work_struct *laundry)
6575{
6576 time64_t t;
6577 struct delayed_work *dwork = to_delayed_work(laundry);
6578 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
6579 laundromat_work);
6580
6581 t = nfs4_laundromat(nn);
6582 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
6583}
6584
6585static void
6586courtesy_client_reaper(struct nfsd_net *nn)
6587{
6588 struct list_head reaplist;
6589
6590 nfs4_get_courtesy_client_reaplist(nn, &reaplist);
6591 nfs4_process_client_reaplist(&reaplist);
6592}
6593
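/*
 * Ask active clients to voluntarily return their read and write data
 * delegations via a CB_RECALL_ANY callback. Clients with a recall
 * already in progress, or that were asked within the last few
 * seconds, are skipped. Called from the laundromat when the number of
 * delegations gets high, and from the state shrinker worker.
 */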
6594static void
6595deleg_reaper(struct nfsd_net *nn)
6596{
6597 struct list_head *pos, *next;
6598 struct nfs4_client *clp;
6599 struct list_head cblist;
6600
6601 INIT_LIST_HEAD(&cblist);
6602 spin_lock(&nn->client_lock);
6603 list_for_each_safe(pos, next, &nn->client_lru) {
6604 clp = list_entry(pos, struct nfs4_client, cl_lru);
6605 if (clp->cl_state != NFSD4_ACTIVE ||
6606 list_empty(&clp->cl_delegations) ||
6607 atomic_read(&clp->cl_delegs_in_recall) ||
6608 test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags) ||
6609 (ktime_get_boottime_seconds() -
6610 clp->cl_ra_time < 5)) {
6611 continue;
6612 }
6613 list_add(&clp->cl_ra_cblist, &cblist);
6614
6615 /* release in nfsd4_cb_recall_any_release */
6616 kref_get(&clp->cl_nfsdfs.cl_ref);
6617 set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
6618 clp->cl_ra_time = ktime_get_boottime_seconds();
6619 }
6620 spin_unlock(&nn->client_lock);
6621
6622 while (!list_empty(&cblist)) {
6623 clp = list_first_entry(&cblist, struct nfs4_client,
6624 cl_ra_cblist);
6625 list_del_init(&clp->cl_ra_cblist);
6626 clp->cl_ra->ra_keep = 0;
6627 clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG);
6628		clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG) |
6629						BIT(RCA4_TYPE_MASK_WDATA_DLG);
6631 nfsd4_run_cb(&clp->cl_ra->ra_cb);
6632 }
6633}
6634
6635static void
6636nfsd4_state_shrinker_worker(struct work_struct *work)
6637{
6638 struct nfsd_net *nn = container_of(work, struct nfsd_net,
6639 nfsd_shrinker_work);
6640
6641 courtesy_client_reaper(nn);
6642 deleg_reaper(nn);
6643}
6644
6645static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
6646{
6647 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
6648 return nfserr_bad_stateid;
6649 return nfs_ok;
6650}
6651
6652static
6653__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
6654{
6655 __be32 status = nfserr_openmode;
6656
6657 /* For lock stateid's, we test the parent open, not the lock: */
6658 if (stp->st_openstp)
6659 stp = stp->st_openstp;
6660 if ((flags & WR_STATE) && !access_permit_write(stp))
6661 goto out;
6662 if ((flags & RD_STATE) && !access_permit_read(stp))
6663 goto out;
6664 status = nfs_ok;
6665out:
6666 return status;
6667}
6668
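/*
 * Handle the special all-zeros and all-ones stateids: a READ using
 * the all-ones stateid is always allowed; otherwise anonymous I/O
 * must wait out the grace period and must not conflict with an
 * existing share deny.
 */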
6669static inline __be32
6670check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
6671{
6672 if (ONE_STATEID(stateid) && (flags & RD_STATE))
6673 return nfs_ok;
6674 else if (opens_in_grace(net)) {
6675 /* Answer in remaining cases depends on existence of
6676 * conflicting state; so we must wait out the grace period. */
6677 return nfserr_grace;
6678 } else if (flags & WR_STATE)
6679 return nfs4_share_conflict(current_fh,
6680 NFS4_SHARE_DENY_WRITE);
6681 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
6682 return nfs4_share_conflict(current_fh,
6683 NFS4_SHARE_DENY_READ);
6684}
6685
6686static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
6687{
6688 /*
6689 * When sessions are used the stateid generation number is ignored
6690 * when it is zero.
6691 */
6692 if (has_session && in->si_generation == 0)
6693 return nfs_ok;
6694
6695 if (in->si_generation == ref->si_generation)
6696 return nfs_ok;
6697
6698 /* If the client sends us a stateid from the future, it's buggy: */
6699 if (nfsd4_stateid_generation_after(in, ref))
6700 return nfserr_bad_stateid;
6701 /*
6702 * However, we could see a stateid from the past, even from a
6703 * non-buggy client. For example, if the client sends a lock
6704 * while some IO is outstanding, the lock may bump si_generation
6705 * while the IO is still in flight. The client could avoid that
6706 * situation by waiting for responses on all the IO requests,
6707 * but better performance may result in retrying IO that
6708 * receives an old_stateid error if requests are rarely
6709 * reordered in flight:
6710 */
6711 return nfserr_old_stateid;
6712}
6713
6714static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
6715{
6716 __be32 ret;
6717
6718 spin_lock(&s->sc_lock);
6719 ret = nfsd4_verify_open_stid(s);
6720 if (ret == nfs_ok)
6721 ret = check_stateid_generation(in, &s->sc_stateid, has_session);
6722 spin_unlock(&s->sc_lock);
6723 if (ret == nfserr_admin_revoked)
6724 nfsd40_drop_revoked_stid(s->sc_client,
6725 &s->sc_stateid);
6726 return ret;
6727}
6728
6729static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
6730{
6731 if (ols->st_stateowner->so_is_open_owner &&
6732 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
6733 return nfserr_bad_stateid;
6734 return nfs_ok;
6735}
6736
6737static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
6738{
6739 struct nfs4_stid *s;
6740 __be32 status = nfserr_bad_stateid;
6741
6742 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
6743 CLOSE_STATEID(stateid))
6744 return status;
6745 spin_lock(&cl->cl_lock);
6746 s = find_stateid_locked(cl, stateid);
6747 if (!s)
6748 goto out_unlock;
6749 status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
6750 if (status)
6751 goto out_unlock;
6752 status = nfsd4_verify_open_stid(s);
6753 if (status)
6754 goto out_unlock;
6755
6756 switch (s->sc_type) {
6757 case SC_TYPE_DELEG:
6758 status = nfs_ok;
6759 break;
6760 case SC_TYPE_OPEN:
6761 case SC_TYPE_LOCK:
6762 status = nfsd4_check_openowner_confirmed(openlockstateid(s));
6763 break;
6764 default:
6765 printk("unknown stateid type %x\n", s->sc_type);
6766 status = nfserr_bad_stateid;
6767 }
6768out_unlock:
6769 spin_unlock(&cl->cl_lock);
6770 if (status == nfserr_admin_revoked)
6771 nfsd40_drop_revoked_stid(cl, stateid);
6772 return status;
6773}
6774
6775__be32
6776nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
6777 stateid_t *stateid,
6778 unsigned short typemask, unsigned short statusmask,
6779 struct nfs4_stid **s, struct nfsd_net *nn)
6780{
6781 __be32 status;
6782 struct nfs4_stid *stid;
6783 bool return_revoked = false;
6784
6785 /*
6786 * only return revoked delegations if explicitly asked.
6787 * otherwise we report revoked or bad_stateid status.
6788 */
6789 if (statusmask & SC_STATUS_REVOKED)
6790 return_revoked = true;
6791 if (typemask & SC_TYPE_DELEG)
6792 /* Always allow REVOKED for DELEG so we can
6793		 * return the appropriate error.
6794 */
6795 statusmask |= SC_STATUS_REVOKED;
6796
6797 statusmask |= SC_STATUS_ADMIN_REVOKED;
6798
6799 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
6800 CLOSE_STATEID(stateid))
6801 return nfserr_bad_stateid;
6802 status = set_client(&stateid->si_opaque.so_clid, cstate, nn);
6803 if (status == nfserr_stale_clientid) {
6804 if (cstate->session)
6805 return nfserr_bad_stateid;
6806 return nfserr_stale_stateid;
6807 }
6808 if (status)
6809 return status;
6810 stid = find_stateid_by_type(cstate->clp, stateid, typemask, statusmask);
6811 if (!stid)
6812 return nfserr_bad_stateid;
6813 if ((stid->sc_status & SC_STATUS_REVOKED) && !return_revoked) {
6814 nfs4_put_stid(stid);
6815 return nfserr_deleg_revoked;
6816 }
6817 if (stid->sc_status & SC_STATUS_ADMIN_REVOKED) {
6818 nfsd40_drop_revoked_stid(cstate->clp, stateid);
6819 nfs4_put_stid(stid);
6820 return nfserr_admin_revoked;
6821 }
6822 *s = stid;
6823 return nfs_ok;
6824}
6825
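/*
 * Return a referenced nfsd_file for I/O under the given stateid: the
 * delegated file for a delegation stateid, otherwise a readable or
 * writeable open file depending on @flags. Returns NULL if the
 * stateid is unusable or no suitable file is available.
 */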
6826static struct nfsd_file *
6827nfs4_find_file(struct nfs4_stid *s, int flags)
6828{
6829 struct nfsd_file *ret = NULL;
6830
6831 if (!s || s->sc_status)
6832 return NULL;
6833
6834 switch (s->sc_type) {
6835 case SC_TYPE_DELEG:
6836 spin_lock(&s->sc_file->fi_lock);
6837 ret = nfsd_file_get(s->sc_file->fi_deleg_file);
6838 spin_unlock(&s->sc_file->fi_lock);
6839 break;
6840 case SC_TYPE_OPEN:
6841 case SC_TYPE_LOCK:
6842 if (flags & RD_STATE)
6843 ret = find_readable_file(s->sc_file);
6844 else
6845 ret = find_writeable_file(s->sc_file);
6846 }
6847
6848 return ret;
6849}
6850
6851static __be32
6852nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
6853{
6854 __be32 status;
6855
6856 status = nfsd4_check_openowner_confirmed(ols);
6857 if (status)
6858 return status;
6859 return nfs4_check_openmode(ols, flags);
6860}
6861
6862static __be32
6863nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
6864 struct nfsd_file **nfp, int flags)
6865{
6866 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
6867 struct nfsd_file *nf;
6868 __be32 status;
6869
6870 nf = nfs4_find_file(s, flags);
6871 if (nf) {
6872 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
6873 acc | NFSD_MAY_OWNER_OVERRIDE);
6874 if (status) {
6875 nfsd_file_put(nf);
6876 goto out;
6877 }
6878 } else {
6879 status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
6880 if (status)
6881 return status;
6882 }
6883 *nfp = nf;
6884out:
6885 return status;
6886}
6887static void
6888_free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
6889{
6890 WARN_ON_ONCE(cps->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID);
6891 if (!refcount_dec_and_test(&cps->cp_stateid.cs_count))
6892 return;
6893 list_del(&cps->cp_list);
6894 idr_remove(&nn->s2s_cp_stateids,
6895 cps->cp_stateid.cs_stid.si_opaque.so_id);
6896 kfree(cps);
6897}
6898/*
6899 * A READ from an inter server to server COPY will have a
6900 * copy stateid. Look up the copy notify stateid from the
6901 * idr structure and take a reference on it.
6902 */
6903__be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
6904 struct nfs4_client *clp,
6905 struct nfs4_cpntf_state **cps)
6906{
6907 copy_stateid_t *cps_t;
6908 struct nfs4_cpntf_state *state = NULL;
6909
6910 if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
6911 return nfserr_bad_stateid;
6912 spin_lock(&nn->s2s_cp_lock);
6913 cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
6914 if (cps_t) {
6915 state = container_of(cps_t, struct nfs4_cpntf_state,
6916 cp_stateid);
6917 if (state->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID) {
6918 state = NULL;
6919 goto unlock;
6920 }
6921 if (!clp)
6922 refcount_inc(&state->cp_stateid.cs_count);
6923 else
6924 _free_cpntf_state_locked(nn, state);
6925 }
6926unlock:
6927 spin_unlock(&nn->s2s_cp_lock);
6928 if (!state)
6929 return nfserr_bad_stateid;
6930 if (!clp)
6931 *cps = state;
6932 return 0;
6933}
6934
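/*
 * Resolve a copy-notify stateid: look up the nfs4_cpntf_state, then
 * the parent open/lock/delegation stateid it was derived from, so a
 * READ on the destination server can be checked against that state.
 */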
6935static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
6936 struct nfs4_stid **stid)
6937{
6938 __be32 status;
6939 struct nfs4_cpntf_state *cps = NULL;
6940 struct nfs4_client *found;
6941
6942 status = manage_cpntf_state(nn, st, NULL, &cps);
6943 if (status)
6944 return status;
6945
6946 cps->cpntf_time = ktime_get_boottime_seconds();
6947
6948 status = nfserr_expired;
6949 found = lookup_clientid(&cps->cp_p_clid, true, nn);
6950 if (!found)
6951 goto out;
6952
6953 *stid = find_stateid_by_type(found, &cps->cp_p_stateid,
6954 SC_TYPE_DELEG|SC_TYPE_OPEN|SC_TYPE_LOCK,
6955 0);
6956 if (*stid)
6957 status = nfs_ok;
6958 else
6959 status = nfserr_bad_stateid;
6960
6961 put_client_renew(found);
6962out:
6963 nfs4_put_cpntf_state(nn, cps);
6964 return status;
6965}
6966
6967void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
6968{
6969 spin_lock(&nn->s2s_cp_lock);
6970 _free_cpntf_state_locked(nn, cps);
6971 spin_unlock(&nn->s2s_cp_lock);
6972}
6973
6974/**
6975 * nfs4_preprocess_stateid_op - find and prep stateid for an operation
6976 * @rqstp: incoming request from client
6977 * @cstate: current compound state
6978 * @fhp: filehandle associated with requested stateid
6979 * @stateid: stateid (provided by client)
6980 * @flags: flags describing type of operation to be done
6981 * @nfp: optional nfsd_file return pointer (may be NULL)
6982 * @cstid: optional returned nfs4_stid pointer (may be NULL)
6983 *
6984 * Given info from the client, look up a nfs4_stid for the operation. On
6985 * success, it returns a reference to the nfs4_stid and/or the nfsd_file
6986 * associated with it.
6987 */
6988__be32
6989nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
6990 struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
6991 stateid_t *stateid, int flags, struct nfsd_file **nfp,
6992 struct nfs4_stid **cstid)
6993{
6994 struct net *net = SVC_NET(rqstp);
6995 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6996 struct nfs4_stid *s = NULL;
6997 __be32 status;
6998
6999 if (nfp)
7000 *nfp = NULL;
7001
7002 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
7003 if (cstid)
7004 status = nfserr_bad_stateid;
7005 else
7006 status = check_special_stateids(net, fhp, stateid,
7007 flags);
7008 goto done;
7009 }
7010
7011 status = nfsd4_lookup_stateid(cstate, stateid,
7012 SC_TYPE_DELEG|SC_TYPE_OPEN|SC_TYPE_LOCK,
7013 0, &s, nn);
7014 if (status == nfserr_bad_stateid)
7015 status = find_cpntf_state(nn, stateid, &s);
7016 if (status)
7017 return status;
7018 status = nfsd4_stid_check_stateid_generation(stateid, s,
7019 nfsd4_has_session(cstate));
7020 if (status)
7021 goto out;
7022
7023 switch (s->sc_type) {
7024 case SC_TYPE_DELEG:
7025 status = nfs4_check_delegmode(delegstateid(s), flags);
7026 break;
7027 case SC_TYPE_OPEN:
7028 case SC_TYPE_LOCK:
7029 status = nfs4_check_olstateid(openlockstateid(s), flags);
7030 break;
7031 }
7032 if (status)
7033 goto out;
7034 status = nfs4_check_fh(fhp, s);
7035
7036done:
7037 if (status == nfs_ok && nfp)
7038 status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
7039out:
7040 if (s) {
7041 if (!status && cstid)
7042 *cstid = s;
7043 else
7044 nfs4_put_stid(s);
7045 }
7046 return status;
7047}
7048
7049/*
7050 * Test if the stateid is valid
7051 */
7052__be32
7053nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7054 union nfsd4_op_u *u)
7055{
7056 struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
7057 struct nfsd4_test_stateid_id *stateid;
7058 struct nfs4_client *cl = cstate->clp;
7059
7060 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
7061 stateid->ts_id_status =
7062 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
7063
7064 return nfs_ok;
7065}
7066
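/*
 * FREE_STATEID handling for a lock stateid: fails with
 * nfserr_locks_held if the lockowner still holds locks on the file,
 * otherwise releases the stateid. Consumes the caller's reference.
 */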
7067static __be32
7068nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
7069{
7070 struct nfs4_ol_stateid *stp = openlockstateid(s);
7071 __be32 ret;
7072
7073 ret = nfsd4_lock_ol_stateid(stp);
7074 if (ret)
7075 goto out_put_stid;
7076
7077 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
7078 if (ret)
7079 goto out;
7080
7081 ret = nfserr_locks_held;
7082 if (check_for_locks(stp->st_stid.sc_file,
7083 lockowner(stp->st_stateowner)))
7084 goto out;
7085
7086 release_lock_stateid(stp);
7087 ret = nfs_ok;
7088
7089out:
7090 mutex_unlock(&stp->st_mutex);
7091out_put_stid:
7092 nfs4_put_stid(s);
7093 return ret;
7094}
7095
7096__be32
7097nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7098 union nfsd4_op_u *u)
7099{
7100 struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
7101 stateid_t *stateid = &free_stateid->fr_stateid;
7102 struct nfs4_stid *s;
7103 struct nfs4_delegation *dp;
7104 struct nfs4_client *cl = cstate->clp;
7105 __be32 ret = nfserr_bad_stateid;
7106
7107 spin_lock(&cl->cl_lock);
7108 s = find_stateid_locked(cl, stateid);
7109 if (!s || s->sc_status & SC_STATUS_CLOSED)
7110 goto out_unlock;
7111 if (s->sc_status & SC_STATUS_ADMIN_REVOKED) {
7112 nfsd4_drop_revoked_stid(s);
7113 ret = nfs_ok;
7114 goto out;
7115 }
7116 spin_lock(&s->sc_lock);
7117 switch (s->sc_type) {
7118 case SC_TYPE_DELEG:
7119 if (s->sc_status & SC_STATUS_REVOKED) {
7120 spin_unlock(&s->sc_lock);
7121 dp = delegstateid(s);
7122 list_del_init(&dp->dl_recall_lru);
7123 spin_unlock(&cl->cl_lock);
7124 nfs4_put_stid(s);
7125 ret = nfs_ok;
7126 goto out;
7127 }
7128 ret = nfserr_locks_held;
7129 break;
7130 case SC_TYPE_OPEN:
7131 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
7132 if (ret)
7133 break;
7134 ret = nfserr_locks_held;
7135 break;
7136 case SC_TYPE_LOCK:
7137 spin_unlock(&s->sc_lock);
7138 refcount_inc(&s->sc_count);
7139 spin_unlock(&cl->cl_lock);
7140 ret = nfsd4_free_lock_stateid(stateid, s);
7141 goto out;
7142 }
7143 spin_unlock(&s->sc_lock);
7144out_unlock:
7145 spin_unlock(&cl->cl_lock);
7146out:
7147 return ret;
7148}
7149
7150static inline int
7151setlkflg (int type)
7152{
7153 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
7154 RD_STATE : WR_STATE;
7155}
7156
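/*
 * Common checks for seqid-morphing operations: verify the seqid for
 * this stateowner, lock the stateid's st_mutex, then check the
 * stateid generation and that it matches the current filehandle.
 * On failure after locking, st_mutex is dropped again.
 */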
7157static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
7158{
7159 struct svc_fh *current_fh = &cstate->current_fh;
7160 struct nfs4_stateowner *sop = stp->st_stateowner;
7161 __be32 status;
7162
7163 status = nfsd4_check_seqid(cstate, sop, seqid);
7164 if (status)
7165 return status;
7166 status = nfsd4_lock_ol_stateid(stp);
7167 if (status != nfs_ok)
7168 return status;
7169 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
7170 if (status == nfs_ok)
7171 status = nfs4_check_fh(current_fh, &stp->st_stid);
7172 if (status != nfs_ok)
7173 mutex_unlock(&stp->st_mutex);
7174 return status;
7175}
7176
7177/**
7178 * nfs4_preprocess_seqid_op - find and prep an ol_stateid for a seqid-morphing op
7179 * @cstate: compound state
7180 * @seqid: seqid (provided by client)
7181 * @stateid: stateid (provided by client)
7182 * @typemask: mask of allowable types for this operation
7183 * @statusmask: mask of allowed states: 0 or SC_STATUS_CLOSED
7184 * @stpp: return pointer for the stateid found
7185 * @nn: net namespace for request
7186 *
7187 * Given a stateid+seqid from a client, look up an nfs4_ol_stateid and
7188 * return it in @stpp. On an nfs_ok return, the returned stateid will
7189 * have its st_mutex locked.
7190 */
7191static __be32
7192nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
7193 stateid_t *stateid,
7194 unsigned short typemask, unsigned short statusmask,
7195 struct nfs4_ol_stateid **stpp,
7196 struct nfsd_net *nn)
7197{
7198 __be32 status;
7199 struct nfs4_stid *s;
7200 struct nfs4_ol_stateid *stp = NULL;
7201
7202 trace_nfsd_preprocess(seqid, stateid);
7203
7204 *stpp = NULL;
7205 status = nfsd4_lookup_stateid(cstate, stateid,
7206 typemask, statusmask, &s, nn);
7207 if (status)
7208 return status;
7209 stp = openlockstateid(s);
7210 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
7211
7212 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
7213 if (!status)
7214 *stpp = stp;
7215 else
7216 nfs4_put_stid(&stp->st_stid);
7217 return status;
7218}
7219
7220static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
7221 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
7222{
7223 __be32 status;
7224 struct nfs4_openowner *oo;
7225 struct nfs4_ol_stateid *stp;
7226
7227 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
7228 SC_TYPE_OPEN, 0, &stp, nn);
7229 if (status)
7230 return status;
7231 oo = openowner(stp->st_stateowner);
7232 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
7233 mutex_unlock(&stp->st_mutex);
7234 nfs4_put_stid(&stp->st_stid);
7235 return nfserr_bad_stateid;
7236 }
7237 *stpp = stp;
7238 return nfs_ok;
7239}
7240
7241__be32
7242nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7243 union nfsd4_op_u *u)
7244{
7245 struct nfsd4_open_confirm *oc = &u->open_confirm;
7246 __be32 status;
7247 struct nfs4_openowner *oo;
7248 struct nfs4_ol_stateid *stp;
7249 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7250
7251 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
7252 cstate->current_fh.fh_dentry);
7253
7254 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
7255 if (status)
7256 return status;
7257
7258 status = nfs4_preprocess_seqid_op(cstate,
7259 oc->oc_seqid, &oc->oc_req_stateid,
7260 SC_TYPE_OPEN, 0, &stp, nn);
7261 if (status)
7262 goto out;
7263 oo = openowner(stp->st_stateowner);
7264 status = nfserr_bad_stateid;
7265 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
7266 mutex_unlock(&stp->st_mutex);
7267 goto put_stateid;
7268 }
7269 oo->oo_flags |= NFS4_OO_CONFIRMED;
7270 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
7271 mutex_unlock(&stp->st_mutex);
7272 trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
7273 nfsd4_client_record_create(oo->oo_owner.so_client);
7274 status = nfs_ok;
7275put_stateid:
7276 nfs4_put_stid(&stp->st_stid);
7277out:
7278 nfsd4_bump_seqid(cstate, status);
7279 return status;
7280}
7281
7282static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
7283{
7284 if (!test_access(access, stp))
7285 return;
7286 nfs4_file_put_access(stp->st_stid.sc_file, access);
7287 clear_access(access, stp);
7288}
7289
7290static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
7291{
7292 switch (to_access) {
7293 case NFS4_SHARE_ACCESS_READ:
7294 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
7295 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
7296 break;
7297 case NFS4_SHARE_ACCESS_WRITE:
7298 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
7299 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
7300 break;
7301 case NFS4_SHARE_ACCESS_BOTH:
7302 break;
7303 default:
7304 WARN_ON_ONCE(1);
7305 }
7306}
7307
7308__be32
7309nfsd4_open_downgrade(struct svc_rqst *rqstp,
7310 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
7311{
7312 struct nfsd4_open_downgrade *od = &u->open_downgrade;
7313 __be32 status;
7314 struct nfs4_ol_stateid *stp;
7315 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7316
7317 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
7318 cstate->current_fh.fh_dentry);
7319
7320 /* We don't yet support WANT bits: */
7321 if (od->od_deleg_want)
7322 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
7323 od->od_deleg_want);
7324
7325 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
7326 &od->od_stateid, &stp, nn);
7327 if (status)
7328 goto out;
7329 status = nfserr_inval;
7330 if (!test_access(od->od_share_access, stp)) {
7331 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
7332 stp->st_access_bmap, od->od_share_access);
7333 goto put_stateid;
7334 }
7335 if (!test_deny(od->od_share_deny, stp)) {
7336 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
7337 stp->st_deny_bmap, od->od_share_deny);
7338 goto put_stateid;
7339 }
7340 nfs4_stateid_downgrade(stp, od->od_share_access);
7341 reset_union_bmap_deny(od->od_share_deny, stp);
7342 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
7343 status = nfs_ok;
7344put_stateid:
7345 mutex_unlock(&stp->st_mutex);
7346 nfs4_put_stid(&stp->st_stid);
7347out:
7348 nfsd4_bump_seqid(cstate, status);
7349 return status;
7350}
7351
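/*
 * Unhash an open stateid being CLOSEd. For NFSv4.1+ the stateid (and
 * any lock stateids hanging off it) is released immediately; for
 * NFSv4.0 it is moved to the close_lru, where it remains available
 * for a while and is reaped later by the laundromat.
 */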
7352static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
7353{
7354 struct nfs4_client *clp = s->st_stid.sc_client;
7355 bool unhashed;
7356 LIST_HEAD(reaplist);
7357 struct nfs4_ol_stateid *stp;
7358
7359 spin_lock(&clp->cl_lock);
7360 unhashed = unhash_open_stateid(s, &reaplist);
7361
7362 if (clp->cl_minorversion) {
7363 if (unhashed)
7364 put_ol_stateid_locked(s, &reaplist);
7365 spin_unlock(&clp->cl_lock);
7366 list_for_each_entry(stp, &reaplist, st_locks)
7367 nfs4_free_cpntf_statelist(clp->net, &stp->st_stid);
7368 free_ol_stateid_reaplist(&reaplist);
7369 } else {
7370 spin_unlock(&clp->cl_lock);
7371 free_ol_stateid_reaplist(&reaplist);
7372 if (unhashed)
7373 move_to_close_lru(s, clp->net);
7374 }
7375}
7376
7377/*
7378 * nfs4_unlock_state() called after encode
7379 */
7380__be32
7381nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7382 union nfsd4_op_u *u)
7383{
7384 struct nfsd4_close *close = &u->close;
7385 __be32 status;
7386 struct nfs4_ol_stateid *stp;
7387 struct net *net = SVC_NET(rqstp);
7388 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7389
7390 dprintk("NFSD: nfsd4_close on file %pd\n",
7391 cstate->current_fh.fh_dentry);
7392
7393 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
7394 &close->cl_stateid,
7395 SC_TYPE_OPEN, SC_STATUS_CLOSED,
7396 &stp, nn);
7397 nfsd4_bump_seqid(cstate, status);
7398 if (status)
7399 goto out;
7400
7401 spin_lock(&stp->st_stid.sc_client->cl_lock);
7402 stp->st_stid.sc_status |= SC_STATUS_CLOSED;
7403 spin_unlock(&stp->st_stid.sc_client->cl_lock);
7404
7405 /*
7406 * Technically we don't _really_ have to increment or copy it, since
7407 * it should just be gone after this operation and we clobber the
7408 * copied value below, but we continue to do so here just to ensure
7409 * that racing ops see that there was a state change.
7410 */
7411 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
7412
7413 nfsd4_close_open_stateid(stp);
7414 mutex_unlock(&stp->st_mutex);
7415
7416 /* v4.1+ suggests that we send a special stateid in here, since the
7417 * clients should just ignore this anyway. Since this is not useful
7418 * for v4.0 clients either, we set it to the special close_stateid
7419 * universally.
7420 *
7421 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
7422 */
7423 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));
7424
7425 /* put reference from nfs4_preprocess_seqid_op */
7426 nfs4_put_stid(&stp->st_stid);
7427out:
7428 return status;
7429}
7430
7431__be32
7432nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7433 union nfsd4_op_u *u)
7434{
7435 struct nfsd4_delegreturn *dr = &u->delegreturn;
7436 struct nfs4_delegation *dp;
7437 stateid_t *stateid = &dr->dr_stateid;
7438 struct nfs4_stid *s;
7439 __be32 status;
7440 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7441
7442 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
7443 return status;
7444
7445 status = nfsd4_lookup_stateid(cstate, stateid, SC_TYPE_DELEG, 0, &s, nn);
7446 if (status)
7447 goto out;
7448 dp = delegstateid(s);
7449 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
7450 if (status)
7451 goto put_stateid;
7452
7453 trace_nfsd_deleg_return(stateid);
7454 wake_up_var(d_inode(cstate->current_fh.fh_dentry));
7455 destroy_delegation(dp);
7456put_stateid:
7457 nfs4_put_stid(&dp->dl_stid);
7458out:
7459 return status;
7460}
7461
7462/* last octet in a range */
7463static inline u64
7464last_byte_offset(u64 start, u64 len)
7465{
7466 u64 end;
7467
7468 WARN_ON_ONCE(!len);
7469 end = start + len;
7470 return end > start ? end - 1: NFS4_MAX_UINT64;
7471}
7472
7473/*
7474 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
7475 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
7476 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
7477 * locking, this prevents us from being completely protocol-compliant. The
7478 * real solution to this problem is to start using unsigned file offsets in
7479 * the VFS, but this is a very deep change!
7480 */
7481static inline void
7482nfs4_transform_lock_offset(struct file_lock *lock)
7483{
7484 if (lock->fl_start < 0)
7485 lock->fl_start = OFFSET_MAX;
7486 if (lock->fl_end < 0)
7487 lock->fl_end = OFFSET_MAX;
7488}
7489
7490static fl_owner_t
7491nfsd4_lm_get_owner(fl_owner_t owner)
7492{
7493 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
7494
7495 nfs4_get_stateowner(&lo->lo_owner);
7496 return owner;
7497}
7498
7499static void
7500nfsd4_lm_put_owner(fl_owner_t owner)
7501{
7502 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
7503
7504 if (lo)
7505 nfs4_put_stateowner(&lo->lo_owner);
7506}
7507
7508/* return true if the client holding this lock is expirable */
7509static bool
7510nfsd4_lm_lock_expirable(struct file_lock *cfl)
7511{
7512 struct nfs4_lockowner *lo = (struct nfs4_lockowner *) cfl->c.flc_owner;
7513 struct nfs4_client *clp = lo->lo_owner.so_client;
7514 struct nfsd_net *nn;
7515
7516 if (try_to_expire_client(clp)) {
7517 nn = net_generic(clp->net, nfsd_net_id);
7518 mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
7519 return true;
7520 }
7521 return false;
7522}
7523
7524/* schedule laundromat to run immediately and wait for it to complete */
7525static void
7526nfsd4_lm_expire_lock(void)
7527{
7528 flush_workqueue(laundry_wq);
7529}
7530
7531static void
7532nfsd4_lm_notify(struct file_lock *fl)
7533{
7534 struct nfs4_lockowner *lo = (struct nfs4_lockowner *) fl->c.flc_owner;
7535 struct net *net = lo->lo_owner.so_client->net;
7536 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7537 struct nfsd4_blocked_lock *nbl = container_of(fl,
7538 struct nfsd4_blocked_lock, nbl_lock);
7539 bool queue = false;
7540
7541 /* An empty list means that something else is going to be using it */
7542 spin_lock(&nn->blocked_locks_lock);
7543 if (!list_empty(&nbl->nbl_list)) {
7544 list_del_init(&nbl->nbl_list);
7545 list_del_init(&nbl->nbl_lru);
7546 queue = true;
7547 }
7548 spin_unlock(&nn->blocked_locks_lock);
7549
7550 if (queue) {
7551 trace_nfsd_cb_notify_lock(lo, nbl);
7552 nfsd4_run_cb(&nbl->nbl_cb);
7553 }
7554}
7555
7556static const struct lock_manager_operations nfsd_posix_mng_ops = {
7557 .lm_mod_owner = THIS_MODULE,
7558 .lm_notify = nfsd4_lm_notify,
7559 .lm_get_owner = nfsd4_lm_get_owner,
7560 .lm_put_owner = nfsd4_lm_put_owner,
7561 .lm_lock_expirable = nfsd4_lm_lock_expirable,
7562 .lm_expire_lock = nfsd4_lm_expire_lock,
7563};
7564
7565static inline void
7566nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
7567{
7568 struct nfs4_lockowner *lo;
7569
7570 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
7571 lo = (struct nfs4_lockowner *) fl->c.flc_owner;
7572 xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
7573 GFP_KERNEL);
7574 if (!deny->ld_owner.data)
7575 /* We just don't care that much */
7576 goto nevermind;
7577 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
7578 } else {
7579nevermind:
7580 deny->ld_owner.len = 0;
7581 deny->ld_owner.data = NULL;
7582 deny->ld_clientid.cl_boot = 0;
7583 deny->ld_clientid.cl_id = 0;
7584 }
7585 deny->ld_start = fl->fl_start;
7586 deny->ld_length = NFS4_MAX_UINT64;
7587 if (fl->fl_end != NFS4_MAX_UINT64)
7588 deny->ld_length = fl->fl_end - fl->fl_start + 1;
7589 deny->ld_type = NFS4_READ_LT;
7590 if (fl->c.flc_type != F_RDLCK)
7591 deny->ld_type = NFS4_WRITE_LT;
7592}
7593
7594static struct nfs4_lockowner *
7595find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
7596{
7597 unsigned int strhashval = ownerstr_hashval(owner);
7598 struct nfs4_stateowner *so;
7599
7600 lockdep_assert_held(&clp->cl_lock);
7601
7602 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
7603 so_strhash) {
7604 if (so->so_is_open_owner)
7605 continue;
7606 if (same_owner_str(so, owner))
7607 return lockowner(nfs4_get_stateowner(so));
7608 }
7609 return NULL;
7610}
7611
7612static struct nfs4_lockowner *
7613find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
7614{
7615 struct nfs4_lockowner *lo;
7616
7617 spin_lock(&clp->cl_lock);
7618 lo = find_lockowner_str_locked(clp, owner);
7619 spin_unlock(&clp->cl_lock);
7620 return lo;
7621}
7622
7623static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
7624{
7625 unhash_lockowner_locked(lockowner(sop));
7626}
7627
7628static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
7629{
7630 struct nfs4_lockowner *lo = lockowner(sop);
7631
7632 kmem_cache_free(lockowner_slab, lo);
7633}
7634
7635static const struct nfs4_stateowner_operations lockowner_ops = {
7636 .so_unhash = nfs4_unhash_lockowner,
7637 .so_free = nfs4_free_lockowner,
7638};
7639
7640/*
7641 * Allocate a lockowner structure.
7642 * Called from nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
7643 * occurred.
7644 *
7645 * strhashval = ownerstr_hashval
7646 */
7647static struct nfs4_lockowner *
7648alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
7649 struct nfs4_ol_stateid *open_stp,
7650 struct nfsd4_lock *lock)
7651{
7652 struct nfs4_lockowner *lo, *ret;
7653
7654 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
7655 if (!lo)
7656 return NULL;
7657 INIT_LIST_HEAD(&lo->lo_blocked);
7658 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
7659 lo->lo_owner.so_is_open_owner = 0;
7660 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
7661 lo->lo_owner.so_ops = &lockowner_ops;
7662 spin_lock(&clp->cl_lock);
7663 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
7664 if (ret == NULL) {
7665 list_add(&lo->lo_owner.so_strhash,
7666 &clp->cl_ownerstr_hashtbl[strhashval]);
7667 ret = lo;
7668 } else
7669 nfs4_free_stateowner(&lo->lo_owner);
7670
7671 spin_unlock(&clp->cl_lock);
7672 return ret;
7673}
7674
7675static struct nfs4_ol_stateid *
7676find_lock_stateid(const struct nfs4_lockowner *lo,
7677 const struct nfs4_ol_stateid *ost)
7678{
7679 struct nfs4_ol_stateid *lst;
7680
7681 lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
7682
7683 /* If ost is not hashed, ost->st_locks will not be valid */
7684 if (!nfs4_ol_stateid_unhashed(ost))
7685 list_for_each_entry(lst, &ost->st_locks, st_locks) {
7686 if (lst->st_stateowner == &lo->lo_owner) {
7687 refcount_inc(&lst->st_stid.sc_count);
7688 return lst;
7689 }
7690 }
7691 return NULL;
7692}
7693
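/*
 * Initialize and hash a new lock stateid under the client lock.  If the open
 * stateid has been unhashed, return NULL; if another thread raced and already
 * hashed a lock stateid for this lockowner, return that one instead.
 */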
7694static struct nfs4_ol_stateid *
7695init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
7696 struct nfs4_file *fp, struct inode *inode,
7697 struct nfs4_ol_stateid *open_stp)
7698{
7699 struct nfs4_client *clp = lo->lo_owner.so_client;
7700 struct nfs4_ol_stateid *retstp;
7701
7702 mutex_init(&stp->st_mutex);
7703 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
7704retry:
7705 spin_lock(&clp->cl_lock);
7706 if (nfs4_ol_stateid_unhashed(open_stp))
7707 goto out_close;
7708 retstp = find_lock_stateid(lo, open_stp);
7709 if (retstp)
7710 goto out_found;
7711 refcount_inc(&stp->st_stid.sc_count);
7712 stp->st_stid.sc_type = SC_TYPE_LOCK;
7713 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
7714 get_nfs4_file(fp);
7715 stp->st_stid.sc_file = fp;
7716 stp->st_access_bmap = 0;
7717 stp->st_deny_bmap = open_stp->st_deny_bmap;
7718 stp->st_openstp = open_stp;
7719 spin_lock(&fp->fi_lock);
7720 list_add(&stp->st_locks, &open_stp->st_locks);
7721 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
7722 list_add(&stp->st_perfile, &fp->fi_stateids);
7723 spin_unlock(&fp->fi_lock);
7724 spin_unlock(&clp->cl_lock);
7725 return stp;
7726out_found:
7727 spin_unlock(&clp->cl_lock);
7728 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
7729 nfs4_put_stid(&retstp->st_stid);
7730 goto retry;
7731 }
7732 /* To keep mutex tracking happy */
7733 mutex_unlock(&stp->st_mutex);
7734 return retstp;
7735out_close:
7736 spin_unlock(&clp->cl_lock);
7737 mutex_unlock(&stp->st_mutex);
7738 return NULL;
7739}
7740
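/*
 * Find an existing lock stateid for this lockowner and open stateid, or
 * allocate and initialize a new one.  *new is set to true only when a
 * freshly allocated stateid was hashed.
 */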
7741static struct nfs4_ol_stateid *
7742find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
7743 struct inode *inode, struct nfs4_ol_stateid *ost,
7744 bool *new)
7745{
7746 struct nfs4_stid *ns = NULL;
7747 struct nfs4_ol_stateid *lst;
7748 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
7749 struct nfs4_client *clp = oo->oo_owner.so_client;
7750
7751 *new = false;
7752 spin_lock(&clp->cl_lock);
7753 lst = find_lock_stateid(lo, ost);
7754 spin_unlock(&clp->cl_lock);
7755 if (lst != NULL) {
7756 if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
7757 goto out;
7758 nfs4_put_stid(&lst->st_stid);
7759 }
7760 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
7761 if (ns == NULL)
7762 return NULL;
7763
7764 lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
7765 if (lst == openlockstateid(ns))
7766 *new = true;
7767 else
7768 nfs4_put_stid(ns);
7769out:
7770 return lst;
7771}
7772
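/*
 * Sanity-check an NFSv4 byte range: a zero length is invalid, and a finite
 * length (anything other than NFS4_MAX_UINT64, which means "to EOF") must
 * not overflow the 64-bit offset space.  Returns non-zero on error.
 */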
7773static int
7774check_lock_length(u64 offset, u64 length)
7775{
7776 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
7777 (length > ~offset)));
7778}
7779
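/*
 * Take a share-access reference on the file that matches this lock type,
 * at most once per lock stateid (the caller holds fp->fi_lock).
 */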
7780static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
7781{
7782 struct nfs4_file *fp = lock_stp->st_stid.sc_file;
7783
7784 lockdep_assert_held(&fp->fi_lock);
7785
7786 if (test_access(access, lock_stp))
7787 return;
7788 __nfs4_file_get_access(fp, access);
7789 set_access(access, lock_stp);
7790}
7791
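/*
 * Find the lockowner named in a new-lockowner LOCK request, creating one if
 * needed, and then find or create the lock stateid to use for the lock.
 */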
7792static __be32
7793lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
7794 struct nfs4_ol_stateid *ost,
7795 struct nfsd4_lock *lock,
7796 struct nfs4_ol_stateid **plst, bool *new)
7797{
7798 __be32 status;
7799 struct nfs4_file *fi = ost->st_stid.sc_file;
7800 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
7801 struct nfs4_client *cl = oo->oo_owner.so_client;
7802 struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
7803 struct nfs4_lockowner *lo;
7804 struct nfs4_ol_stateid *lst;
7805 unsigned int strhashval;
7806
7807 lo = find_lockowner_str(cl, &lock->lk_new_owner);
7808 if (!lo) {
7809 strhashval = ownerstr_hashval(&lock->lk_new_owner);
7810 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
7811 if (lo == NULL)
7812 return nfserr_jukebox;
7813 } else {
7814 /* with an existing lockowner, seqids must be the same */
7815 status = nfserr_bad_seqid;
7816 if (!cstate->minorversion &&
7817 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
7818 goto out;
7819 }
7820
7821 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
7822 if (lst == NULL) {
7823 status = nfserr_jukebox;
7824 goto out;
7825 }
7826
7827 status = nfs_ok;
7828 *plst = lst;
7829out:
7830 nfs4_put_stateowner(&lo->lo_owner);
7831 return status;
7832}
7833
7834/*
7835 * LOCK operation
7836 */
7837__be32
7838nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7839 union nfsd4_op_u *u)
7840{
7841 struct nfsd4_lock *lock = &u->lock;
7842 struct nfs4_openowner *open_sop = NULL;
7843 struct nfs4_lockowner *lock_sop = NULL;
7844 struct nfs4_ol_stateid *lock_stp = NULL;
7845 struct nfs4_ol_stateid *open_stp = NULL;
7846 struct nfs4_file *fp;
7847 struct nfsd_file *nf = NULL;
7848 struct nfsd4_blocked_lock *nbl = NULL;
7849 struct file_lock *file_lock = NULL;
7850 struct file_lock *conflock = NULL;
7851 struct super_block *sb;
7852 __be32 status = 0;
7853 int lkflg;
7854 int err;
7855 bool new = false;
7856 unsigned char type;
7857 unsigned int flags = FL_POSIX;
7858 struct net *net = SVC_NET(rqstp);
7859 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7860
7861 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
7862 (long long) lock->lk_offset,
7863 (long long) lock->lk_length);
7864
7865 if (check_lock_length(lock->lk_offset, lock->lk_length))
7866 return nfserr_inval;
7867
7868 if ((status = fh_verify(rqstp, &cstate->current_fh,
7869 S_IFREG, NFSD_MAY_LOCK))) {
7870 dprintk("NFSD: nfsd4_lock: permission denied!\n");
7871 return status;
7872 }
7873 sb = cstate->current_fh.fh_dentry->d_sb;
7874
7875 if (lock->lk_is_new) {
7876 if (nfsd4_has_session(cstate))
7877 /* See rfc 5661 18.10.3: given clientid is ignored: */
7878 memcpy(&lock->lk_new_clientid,
7879 &cstate->clp->cl_clientid,
7880 sizeof(clientid_t));
7881
7882 /* validate and update open stateid and open seqid */
7883 status = nfs4_preprocess_confirmed_seqid_op(cstate,
7884 lock->lk_new_open_seqid,
7885 &lock->lk_new_open_stateid,
7886 &open_stp, nn);
7887 if (status)
7888 goto out;
7889 mutex_unlock(&open_stp->st_mutex);
7890 open_sop = openowner(open_stp->st_stateowner);
7891 status = nfserr_bad_stateid;
7892 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
7893 &lock->lk_new_clientid))
7894 goto out;
7895 status = lookup_or_create_lock_state(cstate, open_stp, lock,
7896 &lock_stp, &new);
7897 } else {
7898 status = nfs4_preprocess_seqid_op(cstate,
7899 lock->lk_old_lock_seqid,
7900 &lock->lk_old_lock_stateid,
7901 SC_TYPE_LOCK, 0, &lock_stp,
7902 nn);
7903 }
7904 if (status)
7905 goto out;
7906 lock_sop = lockowner(lock_stp->st_stateowner);
7907
7908 lkflg = setlkflg(lock->lk_type);
7909 status = nfs4_check_openmode(lock_stp, lkflg);
7910 if (status)
7911 goto out;
7912
7913 status = nfserr_grace;
7914 if (locks_in_grace(net) && !lock->lk_reclaim)
7915 goto out;
7916 status = nfserr_no_grace;
7917 if (!locks_in_grace(net) && lock->lk_reclaim)
7918 goto out;
7919
7920 if (lock->lk_reclaim)
7921 flags |= FL_RECLAIM;
7922
7923 fp = lock_stp->st_stid.sc_file;
7924 switch (lock->lk_type) {
7925 case NFS4_READW_LT:
7926 if (nfsd4_has_session(cstate) ||
7927 exportfs_lock_op_is_async(sb->s_export_op))
7928 flags |= FL_SLEEP;
7929 fallthrough;
7930 case NFS4_READ_LT:
7931 spin_lock(&fp->fi_lock);
7932 nf = find_readable_file_locked(fp);
7933 if (nf)
7934 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
7935 spin_unlock(&fp->fi_lock);
7936 type = F_RDLCK;
7937 break;
7938 case NFS4_WRITEW_LT:
7939 if (nfsd4_has_session(cstate) ||
7940 exportfs_lock_op_is_async(sb->s_export_op))
7941 flags |= FL_SLEEP;
7942 fallthrough;
7943 case NFS4_WRITE_LT:
7944 spin_lock(&fp->fi_lock);
7945 nf = find_writeable_file_locked(fp);
7946 if (nf)
7947 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
7948 spin_unlock(&fp->fi_lock);
7949 type = F_WRLCK;
7950 break;
7951 default:
7952 status = nfserr_inval;
7953 goto out;
7954 }
7955
7956 if (!nf) {
7957 status = nfserr_openmode;
7958 goto out;
7959 }
7960
7961 /*
7962 * Most filesystems with their own ->lock operations will block
7963 * the nfsd thread waiting to acquire the lock. That leads to
7964 * deadlocks (we don't want every nfsd thread tied up waiting
7965 * for file locks), so don't attempt blocking lock notifications
7966 * on those filesystems:
7967 */
7968 if (!exportfs_lock_op_is_async(sb->s_export_op))
7969 flags &= ~FL_SLEEP;
7970
7971 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
7972 if (!nbl) {
7973 dprintk("NFSD: %s: unable to allocate block!\n", __func__);
7974 status = nfserr_jukebox;
7975 goto out;
7976 }
7977
7978 file_lock = &nbl->nbl_lock;
7979 file_lock->c.flc_type = type;
7980 file_lock->c.flc_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
7981 file_lock->c.flc_pid = current->tgid;
7982 file_lock->c.flc_file = nf->nf_file;
7983 file_lock->c.flc_flags = flags;
7984 file_lock->fl_lmops = &nfsd_posix_mng_ops;
7985 file_lock->fl_start = lock->lk_offset;
7986 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
7987 nfs4_transform_lock_offset(file_lock);
7988
7989 conflock = locks_alloc_lock();
7990 if (!conflock) {
7991 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7992 status = nfserr_jukebox;
7993 goto out;
7994 }
7995
7996 if (flags & FL_SLEEP) {
7997 nbl->nbl_time = ktime_get_boottime_seconds();
7998 spin_lock(&nn->blocked_locks_lock);
7999 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
8000 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
8001 kref_get(&nbl->nbl_kref);
8002 spin_unlock(&nn->blocked_locks_lock);
8003 }
8004
8005 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
8006 switch (err) {
8007 case 0: /* success! */
8008 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
8009 status = 0;
8010 if (lock->lk_reclaim)
8011 nn->somebody_reclaimed = true;
8012 break;
8013 case FILE_LOCK_DEFERRED:
8014 kref_put(&nbl->nbl_kref, free_nbl);
8015 nbl = NULL;
8016 fallthrough;
8017 case -EAGAIN: /* conflock holds conflicting lock */
8018 status = nfserr_denied;
8019 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
8020 nfs4_set_lock_denied(conflock, &lock->lk_denied);
8021 break;
8022 case -EDEADLK:
8023 status = nfserr_deadlock;
8024 break;
8025 default:
8026 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
8027 status = nfserrno(err);
8028 break;
8029 }
8030out:
8031 if (nbl) {
8032 /* dequeue it if we queued it before */
8033 if (flags & FL_SLEEP) {
8034 spin_lock(&nn->blocked_locks_lock);
8035 if (!list_empty(&nbl->nbl_list) &&
8036 !list_empty(&nbl->nbl_lru)) {
8037 list_del_init(&nbl->nbl_list);
8038 list_del_init(&nbl->nbl_lru);
8039 kref_put(&nbl->nbl_kref, free_nbl);
8040 }
8041			/* nbl may already be linked into a reaplist via one of these lists */
8042 spin_unlock(&nn->blocked_locks_lock);
8043 }
8044 free_blocked_lock(nbl);
8045 }
8046 if (nf)
8047 nfsd_file_put(nf);
8048 if (lock_stp) {
8049		/* Bump the lockowner's seqid manually if the 4.0 replay owner is the openowner */
8050 if (cstate->replay_owner &&
8051 cstate->replay_owner != &lock_sop->lo_owner &&
8052 seqid_mutating_err(ntohl(status)))
8053 lock_sop->lo_owner.so_seqid++;
8054
8055 /*
8056 * If this is a new, never-before-used stateid, and we are
8057 * returning an error, then just go ahead and release it.
8058 */
8059 if (status && new)
8060 release_lock_stateid(lock_stp);
8061
8062 mutex_unlock(&lock_stp->st_mutex);
8063
8064 nfs4_put_stid(&lock_stp->st_stid);
8065 }
8066 if (open_stp)
8067 nfs4_put_stid(&open_stp->st_stid);
8068 nfsd4_bump_seqid(cstate, status);
8069 if (conflock)
8070 locks_free_lock(conflock);
8071 return status;
8072}
8073
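/* Free the lock-denied owner data duplicated by nfs4_set_lock_denied(). */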
8074void nfsd4_lock_release(union nfsd4_op_u *u)
8075{
8076 struct nfsd4_lock *lock = &u->lock;
8077 struct nfsd4_lock_denied *deny = &lock->lk_denied;
8078
8079 kfree(deny->ld_owner.data);
8080}
8081
8082/*
8083 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
8084 * so we do a temporary open here just to get an open file to pass to
8085 * vfs_test_lock.
8086 */
8087static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
8088{
8089 struct nfsd_file *nf;
8090 struct inode *inode;
8091 __be32 err;
8092
8093 err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
8094 if (err)
8095 return err;
8096 inode = fhp->fh_dentry->d_inode;
8097 inode_lock(inode); /* to block new leases till after test_lock: */
8098 err = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
8099 if (err)
8100 goto out;
8101 lock->c.flc_file = nf->nf_file;
8102 err = nfserrno(vfs_test_lock(nf->nf_file, lock));
8103 lock->c.flc_file = NULL;
8104out:
8105 inode_unlock(inode);
8106 nfsd_file_put(nf);
8107 return err;
8108}
8109
8110/*
8111 * LOCKT operation
8112 */
8113__be32
8114nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
8115 union nfsd4_op_u *u)
8116{
8117 struct nfsd4_lockt *lockt = &u->lockt;
8118 struct file_lock *file_lock = NULL;
8119 struct nfs4_lockowner *lo = NULL;
8120 __be32 status;
8121 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
8122
8123 if (locks_in_grace(SVC_NET(rqstp)))
8124 return nfserr_grace;
8125
8126 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
8127 return nfserr_inval;
8128
8129 if (!nfsd4_has_session(cstate)) {
8130 status = set_client(&lockt->lt_clientid, cstate, nn);
8131 if (status)
8132 goto out;
8133 }
8134
8135 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
8136 goto out;
8137
8138 file_lock = locks_alloc_lock();
8139 if (!file_lock) {
8140 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
8141 status = nfserr_jukebox;
8142 goto out;
8143 }
8144
8145 switch (lockt->lt_type) {
8146 case NFS4_READ_LT:
8147 case NFS4_READW_LT:
8148 file_lock->c.flc_type = F_RDLCK;
8149 break;
8150 case NFS4_WRITE_LT:
8151 case NFS4_WRITEW_LT:
8152 file_lock->c.flc_type = F_WRLCK;
8153 break;
8154 default:
8155 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
8156 status = nfserr_inval;
8157 goto out;
8158 }
8159
8160 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
8161 if (lo)
8162 file_lock->c.flc_owner = (fl_owner_t)lo;
8163 file_lock->c.flc_pid = current->tgid;
8164 file_lock->c.flc_flags = FL_POSIX;
8165
8166 file_lock->fl_start = lockt->lt_offset;
8167 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
8168
8169 nfs4_transform_lock_offset(file_lock);
8170
8171 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
8172 if (status)
8173 goto out;
8174
8175 if (file_lock->c.flc_type != F_UNLCK) {
8176 status = nfserr_denied;
8177 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
8178 }
8179out:
8180 if (lo)
8181 nfs4_put_stateowner(&lo->lo_owner);
8182 if (file_lock)
8183 locks_free_lock(file_lock);
8184 return status;
8185}
8186
8187void nfsd4_lockt_release(union nfsd4_op_u *u)
8188{
8189 struct nfsd4_lockt *lockt = &u->lockt;
8190 struct nfsd4_lock_denied *deny = &lockt->lt_denied;
8191
8192 kfree(deny->ld_owner.data);
8193}
8194
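/*
 * LOCKU operation: release the byte range by setting an F_UNLCK lock on the
 * file backing the lock stateid, then bump and copy out the stateid.
 */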
8195__be32
8196nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
8197 union nfsd4_op_u *u)
8198{
8199 struct nfsd4_locku *locku = &u->locku;
8200 struct nfs4_ol_stateid *stp;
8201 struct nfsd_file *nf = NULL;
8202 struct file_lock *file_lock = NULL;
8203 __be32 status;
8204 int err;
8205 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
8206
8207 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
8208 (long long) locku->lu_offset,
8209 (long long) locku->lu_length);
8210
8211 if (check_lock_length(locku->lu_offset, locku->lu_length))
8212 return nfserr_inval;
8213
8214 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
8215 &locku->lu_stateid, SC_TYPE_LOCK, 0,
8216 &stp, nn);
8217 if (status)
8218 goto out;
8219 nf = find_any_file(stp->st_stid.sc_file);
8220 if (!nf) {
8221 status = nfserr_lock_range;
8222 goto put_stateid;
8223 }
8224 file_lock = locks_alloc_lock();
8225 if (!file_lock) {
8226 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
8227 status = nfserr_jukebox;
8228 goto put_file;
8229 }
8230
8231 file_lock->c.flc_type = F_UNLCK;
8232 file_lock->c.flc_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
8233 file_lock->c.flc_pid = current->tgid;
8234 file_lock->c.flc_file = nf->nf_file;
8235 file_lock->c.flc_flags = FL_POSIX;
8236 file_lock->fl_lmops = &nfsd_posix_mng_ops;
8237 file_lock->fl_start = locku->lu_offset;
8238
8239 file_lock->fl_end = last_byte_offset(locku->lu_offset,
8240 locku->lu_length);
8241 nfs4_transform_lock_offset(file_lock);
8242
8243 err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
8244 if (err) {
8245 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
8246 goto out_nfserr;
8247 }
8248 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
8249put_file:
8250 nfsd_file_put(nf);
8251put_stateid:
8252 mutex_unlock(&stp->st_mutex);
8253 nfs4_put_stid(&stp->st_stid);
8254out:
8255 nfsd4_bump_seqid(cstate, status);
8256 if (file_lock)
8257 locks_free_lock(file_lock);
8258 return status;
8259
8260out_nfserr:
8261 status = nfserrno(err);
8262 goto put_file;
8263}
8264
8265/*
8266 * returns
8267 * true: locks held by lockowner
8268 * false: no locks held by lockowner
8269 */
8270static bool
8271check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
8272{
8273 struct file_lock *fl;
8274 int status = false;
8275 struct nfsd_file *nf;
8276 struct inode *inode;
8277 struct file_lock_context *flctx;
8278
8279 spin_lock(&fp->fi_lock);
8280 nf = find_any_file_locked(fp);
8281 if (!nf) {
8282 /* Any valid lock stateid should have some sort of access */
8283 WARN_ON_ONCE(1);
8284 goto out;
8285 }
8286
8287 inode = file_inode(nf->nf_file);
8288 flctx = locks_inode_context(inode);
8289
8290 if (flctx && !list_empty_careful(&flctx->flc_posix)) {
8291 spin_lock(&flctx->flc_lock);
8292 for_each_file_lock(fl, &flctx->flc_posix) {
8293 if (fl->c.flc_owner == (fl_owner_t)lowner) {
8294 status = true;
8295 break;
8296 }
8297 }
8298 spin_unlock(&flctx->flc_lock);
8299 }
8300out:
8301 spin_unlock(&fp->fi_lock);
8302 return status;
8303}
8304
8305/**
8306 * nfsd4_release_lockowner - process NFSv4.0 RELEASE_LOCKOWNER operations
8307 * @rqstp: RPC transaction
8308 * @cstate: NFSv4 COMPOUND state
8309 * @u: RELEASE_LOCKOWNER arguments
8310 *
8311 * Check if there are any locks still held and, if not, free the lockowner
8312 * and any lock state it owns.
8313 *
8314 * Return values:
8315 * %nfs_ok: lockowner released or not found
8316 * %nfserr_locks_held: lockowner still in use
8317 * %nfserr_stale_clientid: clientid no longer active
8318 * %nfserr_expired: clientid not recognized
8319 */
8320__be32
8321nfsd4_release_lockowner(struct svc_rqst *rqstp,
8322 struct nfsd4_compound_state *cstate,
8323 union nfsd4_op_u *u)
8324{
8325 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
8326 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
8327 clientid_t *clid = &rlockowner->rl_clientid;
8328 struct nfs4_ol_stateid *stp;
8329 struct nfs4_lockowner *lo;
8330 struct nfs4_client *clp;
8331 LIST_HEAD(reaplist);
8332 __be32 status;
8333
8334 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
8335 clid->cl_boot, clid->cl_id);
8336
8337 status = set_client(clid, cstate, nn);
8338 if (status)
8339 return status;
8340 clp = cstate->clp;
8341
8342 spin_lock(&clp->cl_lock);
8343 lo = find_lockowner_str_locked(clp, &rlockowner->rl_owner);
8344 if (!lo) {
8345 spin_unlock(&clp->cl_lock);
8346 return nfs_ok;
8347 }
8348
8349 list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
8350 if (check_for_locks(stp->st_stid.sc_file, lo)) {
8351 spin_unlock(&clp->cl_lock);
8352 nfs4_put_stateowner(&lo->lo_owner);
8353 return nfserr_locks_held;
8354 }
8355 }
8356 unhash_lockowner_locked(lo);
8357 while (!list_empty(&lo->lo_owner.so_stateids)) {
8358 stp = list_first_entry(&lo->lo_owner.so_stateids,
8359 struct nfs4_ol_stateid,
8360 st_perstateowner);
8361 unhash_lock_stateid(stp);
8362 put_ol_stateid_locked(stp, &reaplist);
8363 }
8364 spin_unlock(&clp->cl_lock);
8365
8366 free_ol_stateid_reaplist(&reaplist);
8367 remove_blocked_locks(lo);
8368 nfs4_put_stateowner(&lo->lo_owner);
8369 return nfs_ok;
8370}
8371
8372static inline struct nfs4_client_reclaim *
8373alloc_reclaim(void)
8374{
8375 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
8376}
8377
8378bool
8379nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
8380{
8381 struct nfs4_client_reclaim *crp;
8382
8383 crp = nfsd4_find_reclaim_client(name, nn);
8384 return (crp && crp->cr_clp);
8385}
8386
8387/*
8388 * failure => all reset bets are off, nfserr_no_grace...
8389 *
8390 * The caller is responsible for freeing name.data if NULL is returned (it
8391 * will be freed in nfs4_remove_reclaim_record in the normal case).
8392 */
8393struct nfs4_client_reclaim *
8394nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
8395 struct nfsd_net *nn)
8396{
8397 unsigned int strhashval;
8398 struct nfs4_client_reclaim *crp;
8399
8400 crp = alloc_reclaim();
8401 if (crp) {
8402 strhashval = clientstr_hashval(name);
8403 INIT_LIST_HEAD(&crp->cr_strhash);
8404 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
8405 crp->cr_name.data = name.data;
8406 crp->cr_name.len = name.len;
8407 crp->cr_princhash.data = princhash.data;
8408 crp->cr_princhash.len = princhash.len;
8409 crp->cr_clp = NULL;
8410 nn->reclaim_str_hashtbl_size++;
8411 }
8412 return crp;
8413}
8414
8415void
8416nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
8417{
8418 list_del(&crp->cr_strhash);
8419 kfree(crp->cr_name.data);
8420 kfree(crp->cr_princhash.data);
8421 kfree(crp);
8422 nn->reclaim_str_hashtbl_size--;
8423}
8424
8425void
8426nfs4_release_reclaim(struct nfsd_net *nn)
8427{
8428 struct nfs4_client_reclaim *crp = NULL;
8429 int i;
8430
8431 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8432 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
8433 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
8434 struct nfs4_client_reclaim, cr_strhash);
8435 nfs4_remove_reclaim_record(crp, nn);
8436 }
8437 }
8438 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
8439}
8440
8441/* Called from OPEN, CLAIM_PREVIOUS with a new clientid. */
8443struct nfs4_client_reclaim *
8444nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
8445{
8446 unsigned int strhashval;
8447 struct nfs4_client_reclaim *crp = NULL;
8448
8449 strhashval = clientstr_hashval(name);
8450 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
8451 if (compare_blob(&crp->cr_name, &name) == 0) {
8452 return crp;
8453 }
8454 }
8455 return NULL;
8456}
8457
8458__be32
8459nfs4_check_open_reclaim(struct nfs4_client *clp)
8460{
8461 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
8462 return nfserr_no_grace;
8463
8464 if (nfsd4_client_record_check(clp))
8465 return nfserr_reclaim_bad;
8466
8467 return nfs_ok;
8468}
8469
8470/*
8471 * Since the lifetime of a delegation isn't limited to that of an open, a
8472 * client may quite reasonably hang on to a delegation as long as it has
8473 * the inode cached. This becomes an obvious problem the first time a
8474 * client's inode cache approaches the size of the server's total memory.
8475 *
8476 * For now we avoid this problem by imposing a hard limit on the number
8477 * of delegations, which varies according to the server's memory size.
8478 */
8479static void
8480set_max_delegations(void)
8481{
8482 /*
8483 * Allow at most 4 delegations per megabyte of RAM. Quick
8484 * estimates suggest that in the worst case (where every delegation
8485 * is for a different inode), a delegation could take about 1.5K,
8486 * giving a worst case usage of about 6% of memory.
8487 */
8488 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
8489}
8490
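/*
 * Allocate and initialize the per-net-namespace NFSv4 state: client and
 * session hash tables, LRU lists, locks, the laundromat work, and the
 * client shrinker.
 */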
8491static int nfs4_state_create_net(struct net *net)
8492{
8493 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8494 int i;
8495
8496 nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
8497 sizeof(struct list_head),
8498 GFP_KERNEL);
8499 if (!nn->conf_id_hashtbl)
8500 goto err;
8501 nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
8502 sizeof(struct list_head),
8503 GFP_KERNEL);
8504 if (!nn->unconf_id_hashtbl)
8505 goto err_unconf_id;
8506 nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
8507 sizeof(struct list_head),
8508 GFP_KERNEL);
8509 if (!nn->sessionid_hashtbl)
8510 goto err_sessionid;
8511
8512 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8513 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
8514 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
8515 }
8516 for (i = 0; i < SESSION_HASH_SIZE; i++)
8517 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
8518 nn->conf_name_tree = RB_ROOT;
8519 nn->unconf_name_tree = RB_ROOT;
8520 nn->boot_time = ktime_get_real_seconds();
8521 nn->grace_ended = false;
8522 nn->nfsd4_manager.block_opens = true;
8523 INIT_LIST_HEAD(&nn->nfsd4_manager.list);
8524 INIT_LIST_HEAD(&nn->client_lru);
8525 INIT_LIST_HEAD(&nn->close_lru);
8526 INIT_LIST_HEAD(&nn->del_recall_lru);
8527 spin_lock_init(&nn->client_lock);
8528 spin_lock_init(&nn->s2s_cp_lock);
8529 idr_init(&nn->s2s_cp_stateids);
8530
8531 spin_lock_init(&nn->blocked_locks_lock);
8532 INIT_LIST_HEAD(&nn->blocked_locks_lru);
8533
8534 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
8535 INIT_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker);
8536 get_net(net);
8537
8538 nn->nfsd_client_shrinker = shrinker_alloc(0, "nfsd-client");
8539 if (!nn->nfsd_client_shrinker)
8540 goto err_shrinker;
8541
8542 nn->nfsd_client_shrinker->scan_objects = nfsd4_state_shrinker_scan;
8543 nn->nfsd_client_shrinker->count_objects = nfsd4_state_shrinker_count;
8544 nn->nfsd_client_shrinker->private_data = nn;
8545
8546 shrinker_register(nn->nfsd_client_shrinker);
8547
8548 return 0;
8549
8550err_shrinker:
8551 put_net(net);
8552 kfree(nn->sessionid_hashtbl);
8553err_sessionid:
8554 kfree(nn->unconf_id_hashtbl);
8555err_unconf_id:
8556 kfree(nn->conf_id_hashtbl);
8557err:
8558 return -ENOMEM;
8559}
8560
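/* Destroy all remaining confirmed and unconfirmed clients and free the hash tables. */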
8561static void
8562nfs4_state_destroy_net(struct net *net)
8563{
8564 int i;
8565 struct nfs4_client *clp = NULL;
8566 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8567
8568 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8569 while (!list_empty(&nn->conf_id_hashtbl[i])) {
8570 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
8571 destroy_client(clp);
8572 }
8573 }
8574
8575 WARN_ON(!list_empty(&nn->blocked_locks_lru));
8576
8577 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8578 while (!list_empty(&nn->unconf_id_hashtbl[i])) {
8579 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
8580 destroy_client(clp);
8581 }
8582 }
8583
8584 kfree(nn->sessionid_hashtbl);
8585 kfree(nn->unconf_id_hashtbl);
8586 kfree(nn->conf_id_hashtbl);
8587 put_net(net);
8588}
8589
8590int
8591nfs4_state_start_net(struct net *net)
8592{
8593 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8594 int ret;
8595
8596 ret = nfs4_state_create_net(net);
8597 if (ret)
8598 return ret;
8599 locks_start_grace(net, &nn->nfsd4_manager);
8600 nfsd4_client_tracking_init(net);
8601 if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
8602 goto skip_grace;
8603 printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
8604 nn->nfsd4_grace, net->ns.inum);
8605 trace_nfsd_grace_start(nn);
8606 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
8607 return 0;
8608
8609skip_grace:
8610 printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
8611 net->ns.inum);
8612 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
8613 nfsd4_end_grace(nn);
8614 return 0;
8615}
8616
8617/* initialization to perform when the nfsd service is started: */
8618
8619int
8620nfs4_state_start(void)
8621{
8622 int ret;
8623
8624 ret = rhltable_init(&nfs4_file_rhltable, &nfs4_file_rhash_params);
8625 if (ret)
8626 return ret;
8627
8628 ret = nfsd4_create_callback_queue();
8629 if (ret) {
8630 rhltable_destroy(&nfs4_file_rhltable);
8631 return ret;
8632 }
8633
8634 set_max_delegations();
8635 return 0;
8636}
8637
8638void
8639nfs4_state_shutdown_net(struct net *net)
8640{
8641 struct nfs4_delegation *dp = NULL;
8642 struct list_head *pos, *next, reaplist;
8643 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8644
8645 shrinker_free(nn->nfsd_client_shrinker);
8646 cancel_work(&nn->nfsd_shrinker_work);
8647 cancel_delayed_work_sync(&nn->laundromat_work);
8648 locks_end_grace(&nn->nfsd4_manager);
8649
8650 INIT_LIST_HEAD(&reaplist);
8651 spin_lock(&state_lock);
8652 list_for_each_safe(pos, next, &nn->del_recall_lru) {
8653 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
8654 unhash_delegation_locked(dp, SC_STATUS_CLOSED);
8655 list_add(&dp->dl_recall_lru, &reaplist);
8656 }
8657 spin_unlock(&state_lock);
8658 list_for_each_safe(pos, next, &reaplist) {
8659 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
8660 list_del_init(&dp->dl_recall_lru);
8661 destroy_unhashed_deleg(dp);
8662 }
8663
8664 nfsd4_client_tracking_exit(net);
8665 nfs4_state_destroy_net(net);
8666#ifdef CONFIG_NFSD_V4_2_INTER_SSC
8667 nfsd4_ssc_shutdown_umount(nn);
8668#endif
8669}
8670
8671void
8672nfs4_state_shutdown(void)
8673{
8674 nfsd4_destroy_callback_queue();
8675 rhltable_destroy(&nfs4_file_rhltable);
8676}
8677
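/*
 * "Current stateid" helpers: put_stateid saves an operation's result stateid
 * in the compound state (NFSv4.1+ only); get_stateid substitutes the saved
 * stateid when a later operation passes the special current-stateid value.
 */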
8678static void
8679get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
8680{
8681 if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
8682 CURRENT_STATEID(stateid))
8683 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
8684}
8685
8686static void
8687put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
8688{
8689 if (cstate->minorversion) {
8690 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
8691 SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
8692 }
8693}
8694
8695void
8696clear_current_stateid(struct nfsd4_compound_state *cstate)
8697{
8698 CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
8699}
8700
8701/*
8702 * functions to set current state id
8703 */
8704void
8705nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
8706 union nfsd4_op_u *u)
8707{
8708 put_stateid(cstate, &u->open_downgrade.od_stateid);
8709}
8710
8711void
8712nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
8713 union nfsd4_op_u *u)
8714{
8715 put_stateid(cstate, &u->open.op_stateid);
8716}
8717
8718void
8719nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
8720 union nfsd4_op_u *u)
8721{
8722 put_stateid(cstate, &u->close.cl_stateid);
8723}
8724
8725void
8726nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
8727 union nfsd4_op_u *u)
8728{
8729 put_stateid(cstate, &u->lock.lk_resp_stateid);
8730}
8731
8732/*
8733 * functions to consume current state id
8734 */
8735
8736void
8737nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
8738 union nfsd4_op_u *u)
8739{
8740 get_stateid(cstate, &u->open_downgrade.od_stateid);
8741}
8742
8743void
8744nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
8745 union nfsd4_op_u *u)
8746{
8747 get_stateid(cstate, &u->delegreturn.dr_stateid);
8748}
8749
8750void
8751nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
8752 union nfsd4_op_u *u)
8753{
8754 get_stateid(cstate, &u->free_stateid.fr_stateid);
8755}
8756
8757void
8758nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
8759 union nfsd4_op_u *u)
8760{
8761 get_stateid(cstate, &u->setattr.sa_stateid);
8762}
8763
8764void
8765nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
8766 union nfsd4_op_u *u)
8767{
8768 get_stateid(cstate, &u->close.cl_stateid);
8769}
8770
8771void
8772nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
8773 union nfsd4_op_u *u)
8774{
8775 get_stateid(cstate, &u->locku.lu_stateid);
8776}
8777
8778void
8779nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
8780 union nfsd4_op_u *u)
8781{
8782 get_stateid(cstate, &u->read.rd_stateid);
8783}
8784
8785void
8786nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
8787 union nfsd4_op_u *u)
8788{
8789 get_stateid(cstate, &u->write.wr_stateid);
8790}
8791
8792/**
8793 * nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict
8794 * @rqstp: RPC transaction context
8795 * @inode: file to be checked for a conflict
8796 * @modified: return true if file was modified
8797 * @size: new size of file if modified is true
8798 *
8799 * This function is called when there is a conflict between a write
8800 * delegation and a change/size GETATTR from another client. The server
8801 * must either use the CB_GETATTR to get the current values of the
8802 * attributes from the client that holds the delegation or recall the
8803 * delegation before replying to the GETATTR. See RFC 8881 section
8804 * 18.7.4.
8805 *
8806 * Returns 0 if there is no conflict; otherwise an nfs_stat
8807 * code is returned.
8808 */
8809__be32
8810nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode,
8811 bool *modified, u64 *size)
8812{
8813 __be32 status;
8814 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
8815 struct file_lock_context *ctx;
8816 struct file_lease *fl;
8817 struct nfs4_delegation *dp;
8818 struct iattr attrs;
8819 struct nfs4_cb_fattr *ncf;
8820
8821 *modified = false;
8822 ctx = locks_inode_context(inode);
8823 if (!ctx)
8824 return 0;
8825 spin_lock(&ctx->flc_lock);
8826 for_each_file_lock(fl, &ctx->flc_lease) {
8827 unsigned char type = fl->c.flc_type;
8828
8829 if (fl->c.flc_flags == FL_LAYOUT)
8830 continue;
8831 if (fl->fl_lmops != &nfsd_lease_mng_ops) {
8832 /*
8833			 * A lease not managed by nfsd: if it's a read lease
8834			 * (F_RDLCK) then we are done; there cannot be a write
8835			 * delegation on this inode
8836 */
8837 if (type == F_RDLCK)
8838 break;
8839 goto break_lease;
8840 }
8841 if (type == F_WRLCK) {
8842 dp = fl->c.flc_owner;
8843 if (dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) {
8844 spin_unlock(&ctx->flc_lock);
8845 return 0;
8846 }
8847break_lease:
8848 nfsd_stats_wdeleg_getattr_inc(nn);
8849 dp = fl->c.flc_owner;
8850 ncf = &dp->dl_cb_fattr;
8851 nfs4_cb_getattr(&dp->dl_cb_fattr);
8852 spin_unlock(&ctx->flc_lock);
8853 wait_on_bit_timeout(&ncf->ncf_cb_flags, CB_GETATTR_BUSY,
8854 TASK_INTERRUPTIBLE, NFSD_CB_GETATTR_TIMEOUT);
8855 if (ncf->ncf_cb_status) {
8856 /* Recall delegation only if client didn't respond */
8857 status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
8858 if (status != nfserr_jukebox ||
8859 !nfsd_wait_for_delegreturn(rqstp, inode))
8860 return status;
8861 }
8862 if (!ncf->ncf_file_modified &&
8863 (ncf->ncf_initial_cinfo != ncf->ncf_cb_change ||
8864 ncf->ncf_cur_fsize != ncf->ncf_cb_fsize))
8865 ncf->ncf_file_modified = true;
8866 if (ncf->ncf_file_modified) {
8867 /*
8868 * Per section 10.4.3 of RFC 8881, the server would
8869 * not update the file's metadata with the client's
8870 * modified size
8871 */
8872 attrs.ia_mtime = attrs.ia_ctime = current_time(inode);
8873 attrs.ia_valid = ATTR_MTIME | ATTR_CTIME;
8874 setattr_copy(&nop_mnt_idmap, inode, &attrs);
8875 mark_inode_dirty(inode);
8876 ncf->ncf_cur_fsize = ncf->ncf_cb_fsize;
8877 *size = ncf->ncf_cur_fsize;
8878 *modified = true;
8879 }
8880 return 0;
8881 }
8882 break;
8883 }
8884 spin_unlock(&ctx->flc_lock);
8885 return 0;
8886}
35#include <linux/file.h>
36#include <linux/fs.h>
37#include <linux/slab.h>
38#include <linux/namei.h>
39#include <linux/swap.h>
40#include <linux/pagemap.h>
41#include <linux/ratelimit.h>
42#include <linux/sunrpc/svcauth_gss.h>
43#include <linux/sunrpc/addr.h>
44#include <linux/jhash.h>
45#include "xdr4.h"
46#include "xdr4cb.h"
47#include "vfs.h"
48#include "current_stateid.h"
49
50#include "netns.h"
51#include "pnfs.h"
52
53#define NFSDDBG_FACILITY NFSDDBG_PROC
54
55#define all_ones {{~0,~0},~0}
56static const stateid_t one_stateid = {
57 .si_generation = ~0,
58 .si_opaque = all_ones,
59};
60static const stateid_t zero_stateid = {
61 /* all fields zero */
62};
63static const stateid_t currentstateid = {
64 .si_generation = 1,
65};
66static const stateid_t close_stateid = {
67 .si_generation = 0xffffffffU,
68};
69
70static u64 current_sessionid = 1;
71
72#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
73#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
74#define CURRENT_STATEID(stateid) (!memcmp((stateid), ¤tstateid, sizeof(stateid_t)))
75#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
76
77/* forward declarations */
78static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
79static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
80
81/* Locking: */
82
83/*
84 * Currently used for the del_recall_lru and file hash table. In an
85 * effort to decrease the scope of the client_mutex, this spinlock may
86 * eventually cover more:
87 */
88static DEFINE_SPINLOCK(state_lock);
89
90enum nfsd4_st_mutex_lock_subclass {
91 OPEN_STATEID_MUTEX = 0,
92 LOCK_STATEID_MUTEX = 1,
93};
94
95/*
96 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
97 * the refcount on the open stateid to drop.
98 */
99static DECLARE_WAIT_QUEUE_HEAD(close_wq);
100
101static struct kmem_cache *client_slab;
102static struct kmem_cache *openowner_slab;
103static struct kmem_cache *lockowner_slab;
104static struct kmem_cache *file_slab;
105static struct kmem_cache *stateid_slab;
106static struct kmem_cache *deleg_slab;
107static struct kmem_cache *odstate_slab;
108
109static void free_session(struct nfsd4_session *);
110
111static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
112static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
113
114static bool is_session_dead(struct nfsd4_session *ses)
115{
116 return ses->se_flags & NFS4_SESSION_DEAD;
117}
118
119static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
120{
121 if (atomic_read(&ses->se_ref) > ref_held_by_me)
122 return nfserr_jukebox;
123 ses->se_flags |= NFS4_SESSION_DEAD;
124 return nfs_ok;
125}
126
127static bool is_client_expired(struct nfs4_client *clp)
128{
129 return clp->cl_time == 0;
130}
131
132static __be32 get_client_locked(struct nfs4_client *clp)
133{
134 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
135
136 lockdep_assert_held(&nn->client_lock);
137
138 if (is_client_expired(clp))
139 return nfserr_expired;
140 atomic_inc(&clp->cl_refcount);
141 return nfs_ok;
142}
143
144/* must be called under the client_lock */
145static inline void
146renew_client_locked(struct nfs4_client *clp)
147{
148 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
149
150 if (is_client_expired(clp)) {
151 WARN_ON(1);
152 printk("%s: client (clientid %08x/%08x) already expired\n",
153 __func__,
154 clp->cl_clientid.cl_boot,
155 clp->cl_clientid.cl_id);
156 return;
157 }
158
159 dprintk("renewing client (clientid %08x/%08x)\n",
160 clp->cl_clientid.cl_boot,
161 clp->cl_clientid.cl_id);
162 list_move_tail(&clp->cl_lru, &nn->client_lru);
163 clp->cl_time = get_seconds();
164}
165
166static void put_client_renew_locked(struct nfs4_client *clp)
167{
168 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
169
170 lockdep_assert_held(&nn->client_lock);
171
172 if (!atomic_dec_and_test(&clp->cl_refcount))
173 return;
174 if (!is_client_expired(clp))
175 renew_client_locked(clp);
176}
177
178static void put_client_renew(struct nfs4_client *clp)
179{
180 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
181
182 if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
183 return;
184 if (!is_client_expired(clp))
185 renew_client_locked(clp);
186 spin_unlock(&nn->client_lock);
187}
188
189static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
190{
191 __be32 status;
192
193 if (is_session_dead(ses))
194 return nfserr_badsession;
195 status = get_client_locked(ses->se_client);
196 if (status)
197 return status;
198 atomic_inc(&ses->se_ref);
199 return nfs_ok;
200}
201
202static void nfsd4_put_session_locked(struct nfsd4_session *ses)
203{
204 struct nfs4_client *clp = ses->se_client;
205 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
206
207 lockdep_assert_held(&nn->client_lock);
208
209 if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
210 free_session(ses);
211 put_client_renew_locked(clp);
212}
213
214static void nfsd4_put_session(struct nfsd4_session *ses)
215{
216 struct nfs4_client *clp = ses->se_client;
217 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
218
219 spin_lock(&nn->client_lock);
220 nfsd4_put_session_locked(ses);
221 spin_unlock(&nn->client_lock);
222}
223
224static struct nfsd4_blocked_lock *
225find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
226 struct nfsd_net *nn)
227{
228 struct nfsd4_blocked_lock *cur, *found = NULL;
229
230 spin_lock(&nn->blocked_locks_lock);
231 list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
232 if (fh_match(fh, &cur->nbl_fh)) {
233 list_del_init(&cur->nbl_list);
234 list_del_init(&cur->nbl_lru);
235 found = cur;
236 break;
237 }
238 }
239 spin_unlock(&nn->blocked_locks_lock);
240 if (found)
241 posix_unblock_lock(&found->nbl_lock);
242 return found;
243}
244
245static struct nfsd4_blocked_lock *
246find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
247 struct nfsd_net *nn)
248{
249 struct nfsd4_blocked_lock *nbl;
250
251 nbl = find_blocked_lock(lo, fh, nn);
252 if (!nbl) {
253 nbl= kmalloc(sizeof(*nbl), GFP_KERNEL);
254 if (nbl) {
255 fh_copy_shallow(&nbl->nbl_fh, fh);
256 locks_init_lock(&nbl->nbl_lock);
257 nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
258 &nfsd4_cb_notify_lock_ops,
259 NFSPROC4_CLNT_CB_NOTIFY_LOCK);
260 }
261 }
262 return nbl;
263}
264
265static void
266free_blocked_lock(struct nfsd4_blocked_lock *nbl)
267{
268 locks_release_private(&nbl->nbl_lock);
269 kfree(nbl);
270}
271
272static void
273remove_blocked_locks(struct nfs4_lockowner *lo)
274{
275 struct nfs4_client *clp = lo->lo_owner.so_client;
276 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
277 struct nfsd4_blocked_lock *nbl;
278 LIST_HEAD(reaplist);
279
280 /* Dequeue all blocked locks */
281 spin_lock(&nn->blocked_locks_lock);
282 while (!list_empty(&lo->lo_blocked)) {
283 nbl = list_first_entry(&lo->lo_blocked,
284 struct nfsd4_blocked_lock,
285 nbl_list);
286 list_del_init(&nbl->nbl_list);
287 list_move(&nbl->nbl_lru, &reaplist);
288 }
289 spin_unlock(&nn->blocked_locks_lock);
290
291 /* Now free them */
292 while (!list_empty(&reaplist)) {
293 nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
294 nbl_lru);
295 list_del_init(&nbl->nbl_lru);
296 posix_unblock_lock(&nbl->nbl_lock);
297 free_blocked_lock(nbl);
298 }
299}
300
301static int
302nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
303{
304 /*
305 * Since this is just an optimization, we don't try very hard if it
306 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
307 * just quit trying on anything else.
308 */
309 switch (task->tk_status) {
310 case -NFS4ERR_DELAY:
311 rpc_delay(task, 1 * HZ);
312 return 0;
313 default:
314 return 1;
315 }
316}
317
318static void
319nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
320{
321 struct nfsd4_blocked_lock *nbl = container_of(cb,
322 struct nfsd4_blocked_lock, nbl_cb);
323
324 free_blocked_lock(nbl);
325}
326
327static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
328 .done = nfsd4_cb_notify_lock_done,
329 .release = nfsd4_cb_notify_lock_release,
330};
331
332static inline struct nfs4_stateowner *
333nfs4_get_stateowner(struct nfs4_stateowner *sop)
334{
335 atomic_inc(&sop->so_count);
336 return sop;
337}
338
339static int
340same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
341{
342 return (sop->so_owner.len == owner->len) &&
343 0 == memcmp(sop->so_owner.data, owner->data, owner->len);
344}
345
346static struct nfs4_openowner *
347find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
348 struct nfs4_client *clp)
349{
350 struct nfs4_stateowner *so;
351
352 lockdep_assert_held(&clp->cl_lock);
353
354 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
355 so_strhash) {
356 if (!so->so_is_open_owner)
357 continue;
358 if (same_owner_str(so, &open->op_owner))
359 return openowner(nfs4_get_stateowner(so));
360 }
361 return NULL;
362}
363
364static struct nfs4_openowner *
365find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
366 struct nfs4_client *clp)
367{
368 struct nfs4_openowner *oo;
369
370 spin_lock(&clp->cl_lock);
371 oo = find_openstateowner_str_locked(hashval, open, clp);
372 spin_unlock(&clp->cl_lock);
373 return oo;
374}
375
376static inline u32
377opaque_hashval(const void *ptr, int nbytes)
378{
379 unsigned char *cptr = (unsigned char *) ptr;
380
381 u32 x = 0;
382 while (nbytes--) {
383 x *= 37;
384 x += *cptr++;
385 }
386 return x;
387}
388
389static void nfsd4_free_file_rcu(struct rcu_head *rcu)
390{
391 struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
392
393 kmem_cache_free(file_slab, fp);
394}
395
396void
397put_nfs4_file(struct nfs4_file *fi)
398{
399 might_lock(&state_lock);
400
401 if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) {
402 hlist_del_rcu(&fi->fi_hash);
403 spin_unlock(&state_lock);
404 WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
405 WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
406 call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
407 }
408}
409
410static struct file *
411__nfs4_get_fd(struct nfs4_file *f, int oflag)
412{
413 if (f->fi_fds[oflag])
414 return get_file(f->fi_fds[oflag]);
415 return NULL;
416}
417
418static struct file *
419find_writeable_file_locked(struct nfs4_file *f)
420{
421 struct file *ret;
422
423 lockdep_assert_held(&f->fi_lock);
424
425 ret = __nfs4_get_fd(f, O_WRONLY);
426 if (!ret)
427 ret = __nfs4_get_fd(f, O_RDWR);
428 return ret;
429}
430
431static struct file *
432find_writeable_file(struct nfs4_file *f)
433{
434 struct file *ret;
435
436 spin_lock(&f->fi_lock);
437 ret = find_writeable_file_locked(f);
438 spin_unlock(&f->fi_lock);
439
440 return ret;
441}
442
443static struct file *find_readable_file_locked(struct nfs4_file *f)
444{
445 struct file *ret;
446
447 lockdep_assert_held(&f->fi_lock);
448
449 ret = __nfs4_get_fd(f, O_RDONLY);
450 if (!ret)
451 ret = __nfs4_get_fd(f, O_RDWR);
452 return ret;
453}
454
455static struct file *
456find_readable_file(struct nfs4_file *f)
457{
458 struct file *ret;
459
460 spin_lock(&f->fi_lock);
461 ret = find_readable_file_locked(f);
462 spin_unlock(&f->fi_lock);
463
464 return ret;
465}
466
467struct file *
468find_any_file(struct nfs4_file *f)
469{
470 struct file *ret;
471
472 spin_lock(&f->fi_lock);
473 ret = __nfs4_get_fd(f, O_RDWR);
474 if (!ret) {
475 ret = __nfs4_get_fd(f, O_WRONLY);
476 if (!ret)
477 ret = __nfs4_get_fd(f, O_RDONLY);
478 }
479 spin_unlock(&f->fi_lock);
480 return ret;
481}
482
483static atomic_long_t num_delegations;
484unsigned long max_delegations;
485
486/*
487 * Open owner state (share locks)
488 */
489
490/* hash tables for lock and open owners */
491#define OWNER_HASH_BITS 8
492#define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
493#define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
494
495static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
496{
497 unsigned int ret;
498
499 ret = opaque_hashval(ownername->data, ownername->len);
500 return ret & OWNER_HASH_MASK;
501}
502
503/* hash table for nfs4_file */
504#define FILE_HASH_BITS 8
505#define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
506
507static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
508{
509 return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
510}
511
512static unsigned int file_hashval(struct knfsd_fh *fh)
513{
514 return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
515}
516
517static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
518
519static void
520__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
521{
522 lockdep_assert_held(&fp->fi_lock);
523
524 if (access & NFS4_SHARE_ACCESS_WRITE)
525 atomic_inc(&fp->fi_access[O_WRONLY]);
526 if (access & NFS4_SHARE_ACCESS_READ)
527 atomic_inc(&fp->fi_access[O_RDONLY]);
528}
529
530static __be32
531nfs4_file_get_access(struct nfs4_file *fp, u32 access)
532{
533 lockdep_assert_held(&fp->fi_lock);
534
535 /* Does this access mode make sense? */
536 if (access & ~NFS4_SHARE_ACCESS_BOTH)
537 return nfserr_inval;
538
539 /* Does it conflict with a deny mode already set? */
540 if ((access & fp->fi_share_deny) != 0)
541 return nfserr_share_denied;
542
543 __nfs4_file_get_access(fp, access);
544 return nfs_ok;
545}
546
547static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
548{
549 /* Common case is that there is no deny mode. */
550 if (deny) {
551 /* Does this deny mode make sense? */
552 if (deny & ~NFS4_SHARE_DENY_BOTH)
553 return nfserr_inval;
554
555 if ((deny & NFS4_SHARE_DENY_READ) &&
556 atomic_read(&fp->fi_access[O_RDONLY]))
557 return nfserr_share_denied;
558
559 if ((deny & NFS4_SHARE_DENY_WRITE) &&
560 atomic_read(&fp->fi_access[O_WRONLY]))
561 return nfserr_share_denied;
562 }
563 return nfs_ok;
564}
565
566static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
567{
568 might_lock(&fp->fi_lock);
569
570 if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
571 struct file *f1 = NULL;
572 struct file *f2 = NULL;
573
574 swap(f1, fp->fi_fds[oflag]);
575 if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
576 swap(f2, fp->fi_fds[O_RDWR]);
577 spin_unlock(&fp->fi_lock);
578 if (f1)
579 fput(f1);
580 if (f2)
581 fput(f2);
582 }
583}
584
585static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
586{
587 WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
588
589 if (access & NFS4_SHARE_ACCESS_WRITE)
590 __nfs4_file_put_access(fp, O_WRONLY);
591 if (access & NFS4_SHARE_ACCESS_READ)
592 __nfs4_file_put_access(fp, O_RDONLY);
593}
594
595/*
596 * Allocate a new open/delegation state counter. This is needed for
597 * pNFS for proper return on close semantics.
598 *
599 * Note that we only allocate it for pNFS-enabled exports, otherwise
600 * all pointers to struct nfs4_clnt_odstate are always NULL.
601 */
602static struct nfs4_clnt_odstate *
603alloc_clnt_odstate(struct nfs4_client *clp)
604{
605 struct nfs4_clnt_odstate *co;
606
607 co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
608 if (co) {
609 co->co_client = clp;
610 refcount_set(&co->co_odcount, 1);
611 }
612 return co;
613}
614
615static void
616hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
617{
618 struct nfs4_file *fp = co->co_file;
619
620 lockdep_assert_held(&fp->fi_lock);
621 list_add(&co->co_perfile, &fp->fi_clnt_odstate);
622}
623
624static inline void
625get_clnt_odstate(struct nfs4_clnt_odstate *co)
626{
627 if (co)
628 refcount_inc(&co->co_odcount);
629}
630
631static void
632put_clnt_odstate(struct nfs4_clnt_odstate *co)
633{
634 struct nfs4_file *fp;
635
636 if (!co)
637 return;
638
639 fp = co->co_file;
640 if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
641 list_del(&co->co_perfile);
642 spin_unlock(&fp->fi_lock);
643
644 nfsd4_return_all_file_layouts(co->co_client, fp);
645 kmem_cache_free(odstate_slab, co);
646 }
647}
648
649static struct nfs4_clnt_odstate *
650find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
651{
652 struct nfs4_clnt_odstate *co;
653 struct nfs4_client *cl;
654
655 if (!new)
656 return NULL;
657
658 cl = new->co_client;
659
660 spin_lock(&fp->fi_lock);
661 list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
662 if (co->co_client == cl) {
663 get_clnt_odstate(co);
664 goto out;
665 }
666 }
667 co = new;
668 co->co_file = fp;
669 hash_clnt_odstate_locked(new);
670out:
671 spin_unlock(&fp->fi_lock);
672 return co;
673}
674
675struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
676 void (*sc_free)(struct nfs4_stid *))
677{
678 struct nfs4_stid *stid;
679 int new_id;
680
681 stid = kmem_cache_zalloc(slab, GFP_KERNEL);
682 if (!stid)
683 return NULL;
684
685 idr_preload(GFP_KERNEL);
686 spin_lock(&cl->cl_lock);
687 new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
688 spin_unlock(&cl->cl_lock);
689 idr_preload_end();
690 if (new_id < 0)
691 goto out_free;
692
693 stid->sc_free = sc_free;
694 stid->sc_client = cl;
695 stid->sc_stateid.si_opaque.so_id = new_id;
696 stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
697 /* Will be incremented before return to client: */
698 refcount_set(&stid->sc_count, 1);
699 spin_lock_init(&stid->sc_lock);
700
701 /*
702 * It shouldn't be a problem to reuse an opaque stateid value.
703 * I don't think it is for 4.1. But with 4.0 I worry that, for
704 * example, a stray write retransmission could be accepted by
705 * the server when it should have been rejected. Therefore,
706 * adopt a trick from the sctp code to attempt to maximize the
707 * amount of time until an id is reused, by ensuring they always
708 * "increase" (mod INT_MAX):
709 */
710 return stid;
711out_free:
712 kmem_cache_free(slab, stid);
713 return NULL;
714}
715
716static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
717{
718 struct nfs4_stid *stid;
719
720 stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
721 if (!stid)
722 return NULL;
723
724 return openlockstateid(stid);
725}
726
727static void nfs4_free_deleg(struct nfs4_stid *stid)
728{
729 kmem_cache_free(deleg_slab, stid);
730 atomic_long_dec(&num_delegations);
731}
732
733/*
734 * When we recall a delegation, we should be careful not to hand it
735 * out again straight away.
736 * To ensure this we keep a pair of bloom filters ('new' and 'old')
737 * in which the filehandles of recalled delegations are "stored".
 738 * If a filehandle appears in either filter, a delegation on it is blocked.
739 * When a delegation is recalled, the filehandle is stored in the "new"
740 * filter.
741 * Every 30 seconds we swap the filters and clear the "new" one,
742 * unless both are empty of course.
743 *
 744 * Each filter is 256 bits. We hash the filehandle to a 32-bit value and
 745 * use its three low-order bytes as bit indices into the filter.
746 *
747 * 'blocked_delegations_lock', which is always taken in block_delegations(),
 748 * is used to manage concurrent access. Testing in delegation_blocked()
 749 * does not need the lock except when swapping the two filters.
750 */
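/*
 * Illustrative example: a filehandle hashing to 0x00a1b2c3 sets (or
 * tests) bits 0xc3, 0xb2 and 0xa1; a delegation is blocked only when
 * all three bits are set in the same filter.
 */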
751static DEFINE_SPINLOCK(blocked_delegations_lock);
752static struct bloom_pair {
753 int entries, old_entries;
754 time_t swap_time;
755 int new; /* index into 'set' */
756 DECLARE_BITMAP(set[2], 256);
757} blocked_delegations;
758
759static int delegation_blocked(struct knfsd_fh *fh)
760{
761 u32 hash;
762 struct bloom_pair *bd = &blocked_delegations;
763
764 if (bd->entries == 0)
765 return 0;
766 if (seconds_since_boot() - bd->swap_time > 30) {
767 spin_lock(&blocked_delegations_lock);
768 if (seconds_since_boot() - bd->swap_time > 30) {
769 bd->entries -= bd->old_entries;
770 bd->old_entries = bd->entries;
771 memset(bd->set[bd->new], 0,
772 sizeof(bd->set[0]));
773 bd->new = 1-bd->new;
774 bd->swap_time = seconds_since_boot();
775 }
776 spin_unlock(&blocked_delegations_lock);
777 }
778 hash = jhash(&fh->fh_base, fh->fh_size, 0);
779 if (test_bit(hash&255, bd->set[0]) &&
780 test_bit((hash>>8)&255, bd->set[0]) &&
781 test_bit((hash>>16)&255, bd->set[0]))
782 return 1;
783
784 if (test_bit(hash&255, bd->set[1]) &&
785 test_bit((hash>>8)&255, bd->set[1]) &&
786 test_bit((hash>>16)&255, bd->set[1]))
787 return 1;
788
789 return 0;
790}
791
792static void block_delegations(struct knfsd_fh *fh)
793{
794 u32 hash;
795 struct bloom_pair *bd = &blocked_delegations;
796
797 hash = jhash(&fh->fh_base, fh->fh_size, 0);
798
799 spin_lock(&blocked_delegations_lock);
800 __set_bit(hash&255, bd->set[bd->new]);
801 __set_bit((hash>>8)&255, bd->set[bd->new]);
802 __set_bit((hash>>16)&255, bd->set[bd->new]);
803 if (bd->entries == 0)
804 bd->swap_time = seconds_since_boot();
805 bd->entries += 1;
806 spin_unlock(&blocked_delegations_lock);
807}
808
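/*
 * Allocate and initialize a (read) delegation stateid for 'clp' on 'fp',
 * unless the global delegation limit has been reached or delegations on
 * this filehandle are currently blocked following a recent recall.
 */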
809static struct nfs4_delegation *
810alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
811 struct svc_fh *current_fh,
812 struct nfs4_clnt_odstate *odstate)
813{
814 struct nfs4_delegation *dp;
815 long n;
816
817 dprintk("NFSD alloc_init_deleg\n");
818 n = atomic_long_inc_return(&num_delegations);
819 if (n < 0 || n > max_delegations)
820 goto out_dec;
 821 if (delegation_blocked(&current_fh->fh_handle))
822 goto out_dec;
823 dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg));
824 if (dp == NULL)
825 goto out_dec;
826
827 /*
 828 * Delegation seqids are never incremented. The 4.1 special
 829 * meaning of seqid 0 isn't relevant here, but let's avoid 0
 830 * anyway just for consistency and use 1:
831 */
832 dp->dl_stid.sc_stateid.si_generation = 1;
833 INIT_LIST_HEAD(&dp->dl_perfile);
834 INIT_LIST_HEAD(&dp->dl_perclnt);
835 INIT_LIST_HEAD(&dp->dl_recall_lru);
836 dp->dl_clnt_odstate = odstate;
837 get_clnt_odstate(odstate);
838 dp->dl_type = NFS4_OPEN_DELEGATE_READ;
839 dp->dl_retries = 1;
840 nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
841 &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
842 get_nfs4_file(fp);
843 dp->dl_stid.sc_file = fp;
844 return dp;
845out_dec:
846 atomic_long_dec(&num_delegations);
847 return NULL;
848}
849
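/*
 * Drop a reference on a generic stateid. On the final put, remove it
 * from the client's stateid idr, call its type-specific destructor and
 * drop the reference on the associated nfs4_file; otherwise just wake
 * any CLOSE waiters on close_wq.
 */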
850void
851nfs4_put_stid(struct nfs4_stid *s)
852{
853 struct nfs4_file *fp = s->sc_file;
854 struct nfs4_client *clp = s->sc_client;
855
856 might_lock(&clp->cl_lock);
857
858 if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
859 wake_up_all(&close_wq);
860 return;
861 }
862 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
863 spin_unlock(&clp->cl_lock);
864 s->sc_free(s);
865 if (fp)
866 put_nfs4_file(fp);
867}
868
869void
870nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
871{
872 stateid_t *src = &stid->sc_stateid;
873
874 spin_lock(&stid->sc_lock);
875 if (unlikely(++src->si_generation == 0))
876 src->si_generation = 1;
877 memcpy(dst, src, sizeof(*dst));
878 spin_unlock(&stid->sc_lock);
879}
880
881static void put_deleg_file(struct nfs4_file *fp)
882{
883 struct file *filp = NULL;
884
885 spin_lock(&fp->fi_lock);
886 if (--fp->fi_delegees == 0)
887 swap(filp, fp->fi_deleg_file);
888 spin_unlock(&fp->fi_lock);
889
890 if (filp)
891 fput(filp);
892}
893
894static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
895{
896 struct nfs4_file *fp = dp->dl_stid.sc_file;
897 struct file *filp = fp->fi_deleg_file;
898
899 WARN_ON_ONCE(!fp->fi_delegees);
900
901 vfs_setlease(filp, F_UNLCK, NULL, (void **)&dp);
902 put_deleg_file(fp);
903}
904
905static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
906{
907 put_clnt_odstate(dp->dl_clnt_odstate);
908 nfs4_unlock_deleg_lease(dp);
909 nfs4_put_stid(&dp->dl_stid);
910}
911
912void nfs4_unhash_stid(struct nfs4_stid *s)
913{
914 s->sc_type = 0;
915}
916
917/**
918 * nfs4_delegation_exists - Discover if this delegation already exists
919 * @clp: a pointer to the nfs4_client we're granting a delegation to
920 * @fp: a pointer to the nfs4_file we're granting a delegation on
921 *
922 * Return:
 923 * true if this client already holds a delegation on this file; otherwise false
924 */
925
926static bool
927nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
928{
929 struct nfs4_delegation *searchdp = NULL;
930 struct nfs4_client *searchclp = NULL;
931
932 lockdep_assert_held(&state_lock);
933 lockdep_assert_held(&fp->fi_lock);
934
935 list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
936 searchclp = searchdp->dl_stid.sc_client;
937 if (clp == searchclp) {
938 return true;
939 }
940 }
941 return false;
942}
943
944/**
945 * hash_delegation_locked - Add a delegation to the appropriate lists
946 * @dp: a pointer to the nfs4_delegation we are adding.
947 * @fp: a pointer to the nfs4_file we're granting a delegation on
948 *
949 * Return:
 950 * On success: 0 if the delegation was successfully hashed.
951 *
952 * On error: -EAGAIN if one was previously granted to this
953 * nfs4_client for this nfs4_file. Delegation is not hashed.
954 *
955 */
956
957static int
958hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
959{
960 struct nfs4_client *clp = dp->dl_stid.sc_client;
961
962 lockdep_assert_held(&state_lock);
963 lockdep_assert_held(&fp->fi_lock);
964
965 if (nfs4_delegation_exists(clp, fp))
966 return -EAGAIN;
967 refcount_inc(&dp->dl_stid.sc_count);
968 dp->dl_stid.sc_type = NFS4_DELEG_STID;
969 list_add(&dp->dl_perfile, &fp->fi_delegations);
970 list_add(&dp->dl_perclnt, &clp->cl_delegations);
971 return 0;
972}
973
974static bool
975unhash_delegation_locked(struct nfs4_delegation *dp)
976{
977 struct nfs4_file *fp = dp->dl_stid.sc_file;
978
979 lockdep_assert_held(&state_lock);
980
981 if (list_empty(&dp->dl_perfile))
982 return false;
983
984 dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
985 /* Ensure that deleg break won't try to requeue it */
986 ++dp->dl_time;
987 spin_lock(&fp->fi_lock);
988 list_del_init(&dp->dl_perclnt);
989 list_del_init(&dp->dl_recall_lru);
990 list_del_init(&dp->dl_perfile);
991 spin_unlock(&fp->fi_lock);
992 return true;
993}
994
995static void destroy_delegation(struct nfs4_delegation *dp)
996{
997 bool unhashed;
998
999 spin_lock(&state_lock);
1000 unhashed = unhash_delegation_locked(dp);
1001 spin_unlock(&state_lock);
1002 if (unhashed)
1003 destroy_unhashed_deleg(dp);
1004}
1005
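/*
 * Revoke a delegation that could not be returned normally. For a 4.1+
 * client the revoked stateid is kept on cl_revoked so the client can
 * discover it (TEST_STATEID) and free it (FREE_STATEID); 4.0 has no such
 * mechanism, so the state is simply torn down.
 */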
1006static void revoke_delegation(struct nfs4_delegation *dp)
1007{
1008 struct nfs4_client *clp = dp->dl_stid.sc_client;
1009
1010 WARN_ON(!list_empty(&dp->dl_recall_lru));
1011
1012 if (clp->cl_minorversion) {
1013 dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
1014 refcount_inc(&dp->dl_stid.sc_count);
1015 spin_lock(&clp->cl_lock);
1016 list_add(&dp->dl_recall_lru, &clp->cl_revoked);
1017 spin_unlock(&clp->cl_lock);
1018 }
1019 destroy_unhashed_deleg(dp);
1020}
1021
1022/*
1023 * SETCLIENTID state
1024 */
1025
1026static unsigned int clientid_hashval(u32 id)
1027{
1028 return id & CLIENT_HASH_MASK;
1029}
1030
1031static unsigned int clientstr_hashval(const char *name)
1032{
1033 return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
1034}
1035
1036/*
1037 * We store the NONE, READ, WRITE, and BOTH bits separately in the
1038 * st_{access,deny}_bmap field of the stateid, in order to track not
1039 * only what share bits are currently in force, but also what
1040 * combinations of share bits previous opens have used. This allows us
1041 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 1042 * return an error if the client attempts to downgrade to a combination
1043 * of share bits not explicable by closing some of its previous opens.
1044 *
1045 * XXX: This enforcement is actually incomplete, since we don't keep
1046 * track of access/deny bit combinations; so, e.g., we allow:
1047 *
1048 * OPEN allow read, deny write
1049 * OPEN allow both, deny none
1050 * DOWNGRADE allow read, deny none
1051 *
1052 * which we should reject.
1053 */
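/*
 * Bit 1 of the bmap corresponds to NFS4_SHARE_{ACCESS,DENY}_READ (1),
 * bit 2 to ..._WRITE (2) and bit 3 to ..._BOTH (3), so bmap_to_share_mode()
 * can simply OR together the indices of the set bits: e.g. bits 1 and 3
 * set yields 1 | 3 == NFS4_SHARE_ACCESS_BOTH.
 */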
1054static unsigned int
1055bmap_to_share_mode(unsigned long bmap) {
1056 int i;
1057 unsigned int access = 0;
1058
1059 for (i = 1; i < 4; i++) {
1060 if (test_bit(i, &bmap))
1061 access |= i;
1062 }
1063 return access;
1064}
1065
1066/* set share access for a given stateid */
1067static inline void
1068set_access(u32 access, struct nfs4_ol_stateid *stp)
1069{
1070 unsigned char mask = 1 << access;
1071
1072 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
1073 stp->st_access_bmap |= mask;
1074}
1075
1076/* clear share access for a given stateid */
1077static inline void
1078clear_access(u32 access, struct nfs4_ol_stateid *stp)
1079{
1080 unsigned char mask = 1 << access;
1081
1082 WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
1083 stp->st_access_bmap &= ~mask;
1084}
1085
1086/* test whether a given stateid has access */
1087static inline bool
1088test_access(u32 access, struct nfs4_ol_stateid *stp)
1089{
1090 unsigned char mask = 1 << access;
1091
1092 return (bool)(stp->st_access_bmap & mask);
1093}
1094
1095/* set share deny for a given stateid */
1096static inline void
1097set_deny(u32 deny, struct nfs4_ol_stateid *stp)
1098{
1099 unsigned char mask = 1 << deny;
1100
1101 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
1102 stp->st_deny_bmap |= mask;
1103}
1104
1105/* clear share deny for a given stateid */
1106static inline void
1107clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
1108{
1109 unsigned char mask = 1 << deny;
1110
1111 WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
1112 stp->st_deny_bmap &= ~mask;
1113}
1114
1115/* test whether a given stateid is denying specific access */
1116static inline bool
1117test_deny(u32 deny, struct nfs4_ol_stateid *stp)
1118{
1119 unsigned char mask = 1 << deny;
1120
1121 return (bool)(stp->st_deny_bmap & mask);
1122}
1123
1124static int nfs4_access_to_omode(u32 access)
1125{
1126 switch (access & NFS4_SHARE_ACCESS_BOTH) {
1127 case NFS4_SHARE_ACCESS_READ:
1128 return O_RDONLY;
1129 case NFS4_SHARE_ACCESS_WRITE:
1130 return O_WRONLY;
1131 case NFS4_SHARE_ACCESS_BOTH:
1132 return O_RDWR;
1133 }
1134 WARN_ON_ONCE(1);
1135 return O_RDONLY;
1136}
1137
1138/*
1139 * A stateid that had a deny mode associated with it is being released
1140 * or downgraded. Recalculate the deny mode on the file.
1141 */
1142static void
1143recalculate_deny_mode(struct nfs4_file *fp)
1144{
1145 struct nfs4_ol_stateid *stp;
1146
1147 spin_lock(&fp->fi_lock);
1148 fp->fi_share_deny = 0;
1149 list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
1150 fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
1151 spin_unlock(&fp->fi_lock);
1152}
1153
1154static void
1155reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
1156{
1157 int i;
1158 bool change = false;
1159
1160 for (i = 1; i < 4; i++) {
1161 if ((i & deny) != i) {
1162 change = true;
1163 clear_deny(i, stp);
1164 }
1165 }
1166
1167 /* Recalculate per-file deny mode if there was a change */
1168 if (change)
1169 recalculate_deny_mode(stp->st_stid.sc_file);
1170}
1171
1172/* release all access and file references for a given stateid */
1173static void
1174release_all_access(struct nfs4_ol_stateid *stp)
1175{
1176 int i;
1177 struct nfs4_file *fp = stp->st_stid.sc_file;
1178
1179 if (fp && stp->st_deny_bmap != 0)
1180 recalculate_deny_mode(fp);
1181
1182 for (i = 1; i < 4; i++) {
1183 if (test_access(i, stp))
1184 nfs4_file_put_access(stp->st_stid.sc_file, i);
1185 clear_access(i, stp);
1186 }
1187}
1188
1189static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
1190{
1191 kfree(sop->so_owner.data);
1192 sop->so_ops->so_free(sop);
1193}
1194
1195static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
1196{
1197 struct nfs4_client *clp = sop->so_client;
1198
1199 might_lock(&clp->cl_lock);
1200
1201 if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
1202 return;
1203 sop->so_ops->so_unhash(sop);
1204 spin_unlock(&clp->cl_lock);
1205 nfs4_free_stateowner(sop);
1206}
1207
1208static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
1209{
1210 struct nfs4_file *fp = stp->st_stid.sc_file;
1211
1212 lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
1213
1214 if (list_empty(&stp->st_perfile))
1215 return false;
1216
1217 spin_lock(&fp->fi_lock);
1218 list_del_init(&stp->st_perfile);
1219 spin_unlock(&fp->fi_lock);
1220 list_del(&stp->st_perstateowner);
1221 return true;
1222}
1223
1224static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
1225{
1226 struct nfs4_ol_stateid *stp = openlockstateid(stid);
1227
1228 put_clnt_odstate(stp->st_clnt_odstate);
1229 release_all_access(stp);
1230 if (stp->st_stateowner)
1231 nfs4_put_stateowner(stp->st_stateowner);
1232 kmem_cache_free(stateid_slab, stid);
1233}
1234
1235static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
1236{
1237 struct nfs4_ol_stateid *stp = openlockstateid(stid);
1238 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
1239 struct file *file;
1240
1241 file = find_any_file(stp->st_stid.sc_file);
1242 if (file)
1243 filp_close(file, (fl_owner_t)lo);
1244 nfs4_free_ol_stateid(stid);
1245}
1246
1247/*
1248 * Put the persistent reference to an already unhashed generic stateid, while
1249 * holding the cl_lock. If it's the last reference, then put it onto the
1250 * reaplist for later destruction.
1251 */
1252static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
1253 struct list_head *reaplist)
1254{
1255 struct nfs4_stid *s = &stp->st_stid;
1256 struct nfs4_client *clp = s->sc_client;
1257
1258 lockdep_assert_held(&clp->cl_lock);
1259
1260 WARN_ON_ONCE(!list_empty(&stp->st_locks));
1261
1262 if (!refcount_dec_and_test(&s->sc_count)) {
1263 wake_up_all(&close_wq);
1264 return;
1265 }
1266
1267 idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1268 list_add(&stp->st_locks, reaplist);
1269}
1270
1271static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
1272{
1273 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1274
1275 list_del_init(&stp->st_locks);
1276 nfs4_unhash_stid(&stp->st_stid);
1277 return unhash_ol_stateid(stp);
1278}
1279
1280static void release_lock_stateid(struct nfs4_ol_stateid *stp)
1281{
1282 struct nfs4_client *clp = stp->st_stid.sc_client;
1283 bool unhashed;
1284
1285 spin_lock(&clp->cl_lock);
1286 unhashed = unhash_lock_stateid(stp);
1287 spin_unlock(&clp->cl_lock);
1288 if (unhashed)
1289 nfs4_put_stid(&stp->st_stid);
1290}
1291
1292static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
1293{
1294 struct nfs4_client *clp = lo->lo_owner.so_client;
1295
1296 lockdep_assert_held(&clp->cl_lock);
1297
1298 list_del_init(&lo->lo_owner.so_strhash);
1299}
1300
1301/*
1302 * Free a list of generic stateids that were collected earlier after being
1303 * fully unhashed.
1304 */
1305static void
1306free_ol_stateid_reaplist(struct list_head *reaplist)
1307{
1308 struct nfs4_ol_stateid *stp;
1309 struct nfs4_file *fp;
1310
1311 might_sleep();
1312
1313 while (!list_empty(reaplist)) {
1314 stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
1315 st_locks);
1316 list_del(&stp->st_locks);
1317 fp = stp->st_stid.sc_file;
1318 stp->st_stid.sc_free(&stp->st_stid);
1319 if (fp)
1320 put_nfs4_file(fp);
1321 }
1322}
1323
1324static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
1325 struct list_head *reaplist)
1326{
1327 struct nfs4_ol_stateid *stp;
1328
1329 lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
1330
1331 while (!list_empty(&open_stp->st_locks)) {
1332 stp = list_entry(open_stp->st_locks.next,
1333 struct nfs4_ol_stateid, st_locks);
1334 WARN_ON(!unhash_lock_stateid(stp));
1335 put_ol_stateid_locked(stp, reaplist);
1336 }
1337}
1338
1339static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
1340 struct list_head *reaplist)
1341{
1342 bool unhashed;
1343
1344 lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1345
1346 unhashed = unhash_ol_stateid(stp);
1347 release_open_stateid_locks(stp, reaplist);
1348 return unhashed;
1349}
1350
1351static void release_open_stateid(struct nfs4_ol_stateid *stp)
1352{
1353 LIST_HEAD(reaplist);
1354
1355 spin_lock(&stp->st_stid.sc_client->cl_lock);
1356 if (unhash_open_stateid(stp, &reaplist))
1357 put_ol_stateid_locked(stp, &reaplist);
1358 spin_unlock(&stp->st_stid.sc_client->cl_lock);
1359 free_ol_stateid_reaplist(&reaplist);
1360}
1361
1362static void unhash_openowner_locked(struct nfs4_openowner *oo)
1363{
1364 struct nfs4_client *clp = oo->oo_owner.so_client;
1365
1366 lockdep_assert_held(&clp->cl_lock);
1367
1368 list_del_init(&oo->oo_owner.so_strhash);
1369 list_del_init(&oo->oo_perclient);
1370}
1371
1372static void release_last_closed_stateid(struct nfs4_openowner *oo)
1373{
1374 struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
1375 nfsd_net_id);
1376 struct nfs4_ol_stateid *s;
1377
1378 spin_lock(&nn->client_lock);
1379 s = oo->oo_last_closed_stid;
1380 if (s) {
1381 list_del_init(&oo->oo_close_lru);
1382 oo->oo_last_closed_stid = NULL;
1383 }
1384 spin_unlock(&nn->client_lock);
1385 if (s)
1386 nfs4_put_stid(&s->st_stid);
1387}
1388
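/*
 * Unhash an openowner and every open stateid (plus any lock stateids
 * hanging off them) that it still owns, free the collected stateids
 * outside the client lock, then drop the caller's reference on the owner.
 */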
1389static void release_openowner(struct nfs4_openowner *oo)
1390{
1391 struct nfs4_ol_stateid *stp;
1392 struct nfs4_client *clp = oo->oo_owner.so_client;
1393 struct list_head reaplist;
1394
1395 INIT_LIST_HEAD(&reaplist);
1396
1397 spin_lock(&clp->cl_lock);
1398 unhash_openowner_locked(oo);
1399 while (!list_empty(&oo->oo_owner.so_stateids)) {
1400 stp = list_first_entry(&oo->oo_owner.so_stateids,
1401 struct nfs4_ol_stateid, st_perstateowner);
1402 if (unhash_open_stateid(stp, &reaplist))
1403 put_ol_stateid_locked(stp, &reaplist);
1404 }
1405 spin_unlock(&clp->cl_lock);
1406 free_ol_stateid_reaplist(&reaplist);
1407 release_last_closed_stateid(oo);
1408 nfs4_put_stateowner(&oo->oo_owner);
1409}
1410
1411static inline int
1412hash_sessionid(struct nfs4_sessionid *sessionid)
1413{
1414 struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
1415
1416 return sid->sequence % SESSION_HASH_SIZE;
1417}
1418
1419#ifdef CONFIG_SUNRPC_DEBUG
1420static inline void
1421dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1422{
1423 u32 *ptr = (u32 *)(&sessionid->data[0]);
1424 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
1425}
1426#else
1427static inline void
1428dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1429{
1430}
1431#endif
1432
1433/*
1434 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1435 * won't be used for replay.
1436 */
1437void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
1438{
1439 struct nfs4_stateowner *so = cstate->replay_owner;
1440
1441 if (nfserr == nfserr_replay_me)
1442 return;
1443
1444 if (!seqid_mutating_err(ntohl(nfserr))) {
1445 nfsd4_cstate_clear_replay(cstate);
1446 return;
1447 }
1448 if (!so)
1449 return;
1450 if (so->so_is_open_owner)
1451 release_last_closed_stateid(openowner(so));
1452 so->so_seqid++;
1453 return;
1454}
1455
1456static void
1457gen_sessionid(struct nfsd4_session *ses)
1458{
1459 struct nfs4_client *clp = ses->se_client;
1460 struct nfsd4_sessionid *sid;
1461
1462 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
1463 sid->clientid = clp->cl_clientid;
1464 sid->sequence = current_sessionid++;
1465 sid->reserved = 0;
1466}
1467
1468/*
 1469 * The protocol defines ca_maxresponsesize_cached to include the size of
 1470 * the rpc header, but all we need to cache is the data starting after
 1471 * the end of the initial SEQUENCE operation--the rest we regenerate
 1472 * each time. Therefore we can advertise a ca_maxresponsesize_cached
1473 * value that is the number of bytes in our cache plus a few additional
1474 * bytes. In order to stay on the safe side, and not promise more than
1475 * we can cache, those additional bytes must be the minimum possible: 24
1476 * bytes of rpc header (xid through accept state, with AUTH_NULL
1477 * verifier), 12 for the compound header (with zero-length tag), and 44
1478 * for the SEQUENCE op response:
1479 */
1480#define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
1481
1482static void
1483free_session_slots(struct nfsd4_session *ses)
1484{
1485 int i;
1486
1487 for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
1488 free_svc_cred(&ses->se_slots[i]->sl_cred);
1489 kfree(ses->se_slots[i]);
1490 }
1491}
1492
1493/*
1494 * We don't actually need to cache the rpc and session headers, so we
1495 * can allocate a little less for each slot:
1496 */
1497static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1498{
1499 u32 size;
1500
1501 if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1502 size = 0;
1503 else
1504 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1505 return size + sizeof(struct nfsd4_slot);
1506}
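
/*
 * For example (numbers purely illustrative): a negotiated maxresp_cached
 * of 1104 bytes leaves 1104 - 80 = 1024 bytes of cacheable reply data per
 * slot, plus sizeof(struct nfsd4_slot) of bookkeeping.
 */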
1507
1508/*
1509 * XXX: If we run out of reserved DRC memory we could (up to a point)
1510 * re-negotiate active sessions and reduce their slot usage to make
1511 * room for new connections. For now we just fail the create session.
1512 */
1513static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
1514{
1515 u32 slotsize = slot_bytes(ca);
1516 u32 num = ca->maxreqs;
1517 int avail;
1518
1519 spin_lock(&nfsd_drc_lock);
1520 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
1521 nfsd_drc_max_mem - nfsd_drc_mem_used);
1522 /*
1523 * Never use more than a third of the remaining memory,
1524 * unless it's the only way to give this client a slot:
1525 */
1526 avail = clamp_t(int, avail, slotsize, avail/3);
1527 num = min_t(int, num, avail / slotsize);
1528 nfsd_drc_mem_used += num * slotsize;
1529 spin_unlock(&nfsd_drc_lock);
1530
1531 return num;
1532}
1533
1534static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1535{
1536 int slotsize = slot_bytes(ca);
1537
1538 spin_lock(&nfsd_drc_lock);
1539 nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1540 spin_unlock(&nfsd_drc_lock);
1541}
1542
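/*
 * Allocate a session and its slot pointer array in one allocation, then
 * allocate each slot (struct nfsd4_slot plus its reply-cache area) sized
 * according to the already-bounded fore channel attributes.
 */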
1543static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1544 struct nfsd4_channel_attrs *battrs)
1545{
1546 int numslots = fattrs->maxreqs;
1547 int slotsize = slot_bytes(fattrs);
1548 struct nfsd4_session *new;
1549 int mem, i;
1550
1551 BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
1552 + sizeof(struct nfsd4_session) > PAGE_SIZE);
1553 mem = numslots * sizeof(struct nfsd4_slot *);
1554
1555 new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
1556 if (!new)
1557 return NULL;
1558 /* allocate each struct nfsd4_slot and data cache in one piece */
1559 for (i = 0; i < numslots; i++) {
1560 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1561 if (!new->se_slots[i])
1562 goto out_free;
1563 }
1564
1565 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1566 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1567
1568 return new;
1569out_free:
1570 while (i--)
1571 kfree(new->se_slots[i]);
1572 kfree(new);
1573 return NULL;
1574}
1575
1576static void free_conn(struct nfsd4_conn *c)
1577{
1578 svc_xprt_put(c->cn_xprt);
1579 kfree(c);
1580}
1581
1582static void nfsd4_conn_lost(struct svc_xpt_user *u)
1583{
1584 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1585 struct nfs4_client *clp = c->cn_session->se_client;
1586
1587 spin_lock(&clp->cl_lock);
1588 if (!list_empty(&c->cn_persession)) {
1589 list_del(&c->cn_persession);
1590 free_conn(c);
1591 }
1592 nfsd4_probe_callback(clp);
1593 spin_unlock(&clp->cl_lock);
1594}
1595
1596static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1597{
1598 struct nfsd4_conn *conn;
1599
1600 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1601 if (!conn)
1602 return NULL;
1603 svc_xprt_get(rqstp->rq_xprt);
1604 conn->cn_xprt = rqstp->rq_xprt;
1605 conn->cn_flags = flags;
1606 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1607 return conn;
1608}
1609
1610static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1611{
1612 conn->cn_session = ses;
1613 list_add(&conn->cn_persession, &ses->se_conns);
1614}
1615
1616static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1617{
1618 struct nfs4_client *clp = ses->se_client;
1619
1620 spin_lock(&clp->cl_lock);
1621 __nfsd4_hash_conn(conn, ses);
1622 spin_unlock(&clp->cl_lock);
1623}
1624
1625static int nfsd4_register_conn(struct nfsd4_conn *conn)
1626{
1627 conn->cn_xpt_user.callback = nfsd4_conn_lost;
1628 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1629}
1630
1631static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
1632{
1633 int ret;
1634
1635 nfsd4_hash_conn(conn, ses);
1636 ret = nfsd4_register_conn(conn);
1637 if (ret)
1638 /* oops; xprt is already down: */
1639 nfsd4_conn_lost(&conn->cn_xpt_user);
1640 /* We may have gained or lost a callback channel: */
1641 nfsd4_probe_callback_sync(ses->se_client);
1642}
1643
1644static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1645{
1646 u32 dir = NFS4_CDFC4_FORE;
1647
1648 if (cses->flags & SESSION4_BACK_CHAN)
1649 dir |= NFS4_CDFC4_BACK;
1650 return alloc_conn(rqstp, dir);
1651}
1652
1653/* must be called under client_lock */
1654static void nfsd4_del_conns(struct nfsd4_session *s)
1655{
1656 struct nfs4_client *clp = s->se_client;
1657 struct nfsd4_conn *c;
1658
1659 spin_lock(&clp->cl_lock);
1660 while (!list_empty(&s->se_conns)) {
1661 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1662 list_del_init(&c->cn_persession);
1663 spin_unlock(&clp->cl_lock);
1664
1665 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1666 free_conn(c);
1667
1668 spin_lock(&clp->cl_lock);
1669 }
1670 spin_unlock(&clp->cl_lock);
1671}
1672
1673static void __free_session(struct nfsd4_session *ses)
1674{
1675 free_session_slots(ses);
1676 kfree(ses);
1677}
1678
1679static void free_session(struct nfsd4_session *ses)
1680{
1681 nfsd4_del_conns(ses);
1682 nfsd4_put_drc_mem(&ses->se_fchannel);
1683 __free_session(ses);
1684}
1685
1686static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1687{
1688 int idx;
1689 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1690
1691 new->se_client = clp;
1692 gen_sessionid(new);
1693
1694 INIT_LIST_HEAD(&new->se_conns);
1695
1696 new->se_cb_seq_nr = 1;
1697 new->se_flags = cses->flags;
1698 new->se_cb_prog = cses->callback_prog;
1699 new->se_cb_sec = cses->cb_sec;
1700 atomic_set(&new->se_ref, 0);
1701 idx = hash_sessionid(&new->se_sessionid);
1702 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1703 spin_lock(&clp->cl_lock);
1704 list_add(&new->se_perclnt, &clp->cl_sessions);
1705 spin_unlock(&clp->cl_lock);
1706
1707 {
1708 struct sockaddr *sa = svc_addr(rqstp);
1709 /*
1710 * This is a little silly; with sessions there's no real
1711 * use for the callback address. Use the peer address
1712 * as a reasonable default for now, but consider fixing
1713 * the rpc client not to require an address in the
1714 * future:
1715 */
1716 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
1717 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
1718 }
1719}
1720
1721/* caller must hold client_lock */
1722static struct nfsd4_session *
1723__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1724{
1725 struct nfsd4_session *elem;
1726 int idx;
1727 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1728
1729 lockdep_assert_held(&nn->client_lock);
1730
1731 dump_sessionid(__func__, sessionid);
1732 idx = hash_sessionid(sessionid);
1733 /* Search in the appropriate list */
1734 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
1735 if (!memcmp(elem->se_sessionid.data, sessionid->data,
1736 NFS4_MAX_SESSIONID_LEN)) {
1737 return elem;
1738 }
1739 }
1740
1741 dprintk("%s: session not found\n", __func__);
1742 return NULL;
1743}
1744
1745static struct nfsd4_session *
1746find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1747 __be32 *ret)
1748{
1749 struct nfsd4_session *session;
1750 __be32 status = nfserr_badsession;
1751
1752 session = __find_in_sessionid_hashtbl(sessionid, net);
1753 if (!session)
1754 goto out;
1755 status = nfsd4_get_session_locked(session);
1756 if (status)
1757 session = NULL;
1758out:
1759 *ret = status;
1760 return session;
1761}
1762
1763/* caller must hold client_lock */
1764static void
1765unhash_session(struct nfsd4_session *ses)
1766{
1767 struct nfs4_client *clp = ses->se_client;
1768 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1769
1770 lockdep_assert_held(&nn->client_lock);
1771
1772 list_del(&ses->se_hash);
1773 spin_lock(&ses->se_client->cl_lock);
1774 list_del(&ses->se_perclnt);
1775 spin_unlock(&ses->se_client->cl_lock);
1776}
1777
1778/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1779static int
1780STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1781{
1782 /*
1783 * We're assuming the clid was not given out from a boot
1784 * precisely 2^32 (about 136 years) before this one. That seems
1785 * a safe assumption:
1786 */
1787 if (clid->cl_boot == (u32)nn->boot_time)
1788 return 0;
1789 dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
1790 clid->cl_boot, clid->cl_id, nn->boot_time);
1791 return 1;
1792}
1793
1794/*
 1795 * XXX Should we use a slab cache?
1796 * This type of memory management is somewhat inefficient, but we use it
1797 * anyway since SETCLIENTID is not a common operation.
1798 */
1799static struct nfs4_client *alloc_client(struct xdr_netobj name)
1800{
1801 struct nfs4_client *clp;
1802 int i;
1803
1804 clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
1805 if (clp == NULL)
1806 return NULL;
1807 clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
1808 if (clp->cl_name.data == NULL)
1809 goto err_no_name;
1810 clp->cl_ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
1811 OWNER_HASH_SIZE, GFP_KERNEL);
1812 if (!clp->cl_ownerstr_hashtbl)
1813 goto err_no_hashtbl;
1814 for (i = 0; i < OWNER_HASH_SIZE; i++)
1815 INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
1816 clp->cl_name.len = name.len;
1817 INIT_LIST_HEAD(&clp->cl_sessions);
1818 idr_init(&clp->cl_stateids);
1819 atomic_set(&clp->cl_refcount, 0);
1820 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1821 INIT_LIST_HEAD(&clp->cl_idhash);
1822 INIT_LIST_HEAD(&clp->cl_openowners);
1823 INIT_LIST_HEAD(&clp->cl_delegations);
1824 INIT_LIST_HEAD(&clp->cl_lru);
1825 INIT_LIST_HEAD(&clp->cl_revoked);
1826#ifdef CONFIG_NFSD_PNFS
1827 INIT_LIST_HEAD(&clp->cl_lo_states);
1828#endif
1829 spin_lock_init(&clp->cl_lock);
1830 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1831 return clp;
1832err_no_hashtbl:
1833 kfree(clp->cl_name.data);
1834err_no_name:
1835 kmem_cache_free(client_slab, clp);
1836 return NULL;
1837}
1838
1839static void
1840free_client(struct nfs4_client *clp)
1841{
1842 while (!list_empty(&clp->cl_sessions)) {
1843 struct nfsd4_session *ses;
1844 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
1845 se_perclnt);
1846 list_del(&ses->se_perclnt);
1847 WARN_ON_ONCE(atomic_read(&ses->se_ref));
1848 free_session(ses);
1849 }
1850 rpc_destroy_wait_queue(&clp->cl_cb_waitq);
1851 free_svc_cred(&clp->cl_cred);
1852 kfree(clp->cl_ownerstr_hashtbl);
1853 kfree(clp->cl_name.data);
1854 idr_destroy(&clp->cl_stateids);
1855 kmem_cache_free(client_slab, clp);
1856}
1857
1858/* must be called under the client_lock */
1859static void
1860unhash_client_locked(struct nfs4_client *clp)
1861{
1862 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1863 struct nfsd4_session *ses;
1864
1865 lockdep_assert_held(&nn->client_lock);
1866
1867 /* Mark the client as expired! */
1868 clp->cl_time = 0;
1869 /* Make it invisible */
1870 if (!list_empty(&clp->cl_idhash)) {
1871 list_del_init(&clp->cl_idhash);
1872 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
1873 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
1874 else
1875 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1876 }
1877 list_del_init(&clp->cl_lru);
1878 spin_lock(&clp->cl_lock);
1879 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
1880 list_del_init(&ses->se_hash);
1881 spin_unlock(&clp->cl_lock);
1882}
1883
1884static void
1885unhash_client(struct nfs4_client *clp)
1886{
1887 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1888
1889 spin_lock(&nn->client_lock);
1890 unhash_client_locked(clp);
1891 spin_unlock(&nn->client_lock);
1892}
1893
1894static __be32 mark_client_expired_locked(struct nfs4_client *clp)
1895{
1896 if (atomic_read(&clp->cl_refcount))
1897 return nfserr_jukebox;
1898 unhash_client_locked(clp);
1899 return nfs_ok;
1900}
1901
1902static void
1903__destroy_client(struct nfs4_client *clp)
1904{
1905 int i;
1906 struct nfs4_openowner *oo;
1907 struct nfs4_delegation *dp;
1908 struct list_head reaplist;
1909
1910 INIT_LIST_HEAD(&reaplist);
1911 spin_lock(&state_lock);
1912 while (!list_empty(&clp->cl_delegations)) {
1913 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
1914 WARN_ON(!unhash_delegation_locked(dp));
1915 list_add(&dp->dl_recall_lru, &reaplist);
1916 }
1917 spin_unlock(&state_lock);
1918 while (!list_empty(&reaplist)) {
1919 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
1920 list_del_init(&dp->dl_recall_lru);
1921 destroy_unhashed_deleg(dp);
1922 }
1923 while (!list_empty(&clp->cl_revoked)) {
1924 dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
1925 list_del_init(&dp->dl_recall_lru);
1926 nfs4_put_stid(&dp->dl_stid);
1927 }
1928 while (!list_empty(&clp->cl_openowners)) {
1929 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
1930 nfs4_get_stateowner(&oo->oo_owner);
1931 release_openowner(oo);
1932 }
1933 for (i = 0; i < OWNER_HASH_SIZE; i++) {
1934 struct nfs4_stateowner *so, *tmp;
1935
1936 list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
1937 so_strhash) {
1938 /* Should be no openowners at this point */
1939 WARN_ON_ONCE(so->so_is_open_owner);
1940 remove_blocked_locks(lockowner(so));
1941 }
1942 }
1943 nfsd4_return_all_client_layouts(clp);
1944 nfsd4_shutdown_callback(clp);
1945 if (clp->cl_cb_conn.cb_xprt)
1946 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
1947 free_client(clp);
1948}
1949
1950static void
1951destroy_client(struct nfs4_client *clp)
1952{
1953 unhash_client(clp);
1954 __destroy_client(clp);
1955}
1956
1957static void expire_client(struct nfs4_client *clp)
1958{
1959 unhash_client(clp);
1960 nfsd4_client_record_remove(clp);
1961 __destroy_client(clp);
1962}
1963
1964static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
1965{
1966 memcpy(target->cl_verifier.data, source->data,
1967 sizeof(target->cl_verifier.data));
1968}
1969
1970static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
1971{
1972 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
1973 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
1974}
1975
1976static int copy_cred(struct svc_cred *target, struct svc_cred *source)
1977{
1978 target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
1979 target->cr_raw_principal = kstrdup(source->cr_raw_principal,
1980 GFP_KERNEL);
1981 if ((source->cr_principal && ! target->cr_principal) ||
1982 (source->cr_raw_principal && ! target->cr_raw_principal))
1983 return -ENOMEM;
1984
1985 target->cr_flavor = source->cr_flavor;
1986 target->cr_uid = source->cr_uid;
1987 target->cr_gid = source->cr_gid;
1988 target->cr_group_info = source->cr_group_info;
1989 get_group_info(target->cr_group_info);
1990 target->cr_gss_mech = source->cr_gss_mech;
1991 if (source->cr_gss_mech)
1992 gss_mech_get(source->cr_gss_mech);
1993 return 0;
1994}
1995
1996static int
1997compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
1998{
1999 if (o1->len < o2->len)
2000 return -1;
2001 if (o1->len > o2->len)
2002 return 1;
2003 return memcmp(o1->data, o2->data, o1->len);
2004}
2005
2006static int same_name(const char *n1, const char *n2)
2007{
2008 return 0 == memcmp(n1, n2, HEXDIR_LEN);
2009}
2010
2011static int
2012same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
2013{
2014 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
2015}
2016
2017static int
2018same_clid(clientid_t *cl1, clientid_t *cl2)
2019{
2020 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
2021}
2022
2023static bool groups_equal(struct group_info *g1, struct group_info *g2)
2024{
2025 int i;
2026
2027 if (g1->ngroups != g2->ngroups)
2028 return false;
2029 for (i=0; i<g1->ngroups; i++)
2030 if (!gid_eq(g1->gid[i], g2->gid[i]))
2031 return false;
2032 return true;
2033}
2034
2035/*
2036 * RFC 3530 language requires clid_inuse be returned when the
2037 * "principal" associated with a requests differs from that previously
2038 * used. We use uid, gid's, and gss principal string as our best
2039 * approximation. We also don't want to allow non-gss use of a client
2040 * established using gss: in theory cr_principal should catch that
2041 * change, but in practice cr_principal can be null even in the gss case
2042 * since gssd doesn't always pass down a principal string.
2043 */
2044static bool is_gss_cred(struct svc_cred *cr)
2045{
 2046 /* Is cr_flavor one of the gss "pseudoflavors"? */
2047 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2048}
2049
2050
2051static bool
2052same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2053{
2054 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2055 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2056 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2057 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2058 return false;
2059 if (cr1->cr_principal == cr2->cr_principal)
2060 return true;
2061 if (!cr1->cr_principal || !cr2->cr_principal)
2062 return false;
2063 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2064}
2065
2066static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2067{
2068 struct svc_cred *cr = &rqstp->rq_cred;
2069 u32 service;
2070
2071 if (!cr->cr_gss_mech)
2072 return false;
2073 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2074 return service == RPC_GSS_SVC_INTEGRITY ||
2075 service == RPC_GSS_SVC_PRIVACY;
2076}
2077
2078bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2079{
2080 struct svc_cred *cr = &rqstp->rq_cred;
2081
2082 if (!cl->cl_mach_cred)
2083 return true;
2084 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2085 return false;
2086 if (!svc_rqst_integrity_protected(rqstp))
2087 return false;
2088 if (cl->cl_cred.cr_raw_principal)
2089 return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2090 cr->cr_raw_principal);
2091 if (!cr->cr_principal)
2092 return false;
2093 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2094}
2095
2096static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2097{
2098 __be32 verf[2];
2099
2100 /*
2101 * This is opaque to client, so no need to byte-swap. Use
2102 * __force to keep sparse happy
2103 */
2104 verf[0] = (__force __be32)get_seconds();
2105 verf[1] = (__force __be32)nn->clverifier_counter++;
2106 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2107}
2108
2109static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2110{
2111 clp->cl_clientid.cl_boot = nn->boot_time;
2112 clp->cl_clientid.cl_id = nn->clientid_counter++;
2113 gen_confirm(clp, nn);
2114}
2115
2116static struct nfs4_stid *
2117find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2118{
2119 struct nfs4_stid *ret;
2120
2121 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2122 if (!ret || !ret->sc_type)
2123 return NULL;
2124 return ret;
2125}
2126
2127static struct nfs4_stid *
2128find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
2129{
2130 struct nfs4_stid *s;
2131
2132 spin_lock(&cl->cl_lock);
2133 s = find_stateid_locked(cl, t);
2134 if (s != NULL) {
2135 if (typemask & s->sc_type)
2136 refcount_inc(&s->sc_count);
2137 else
2138 s = NULL;
2139 }
2140 spin_unlock(&cl->cl_lock);
2141 return s;
2142}
2143
2144static struct nfs4_client *create_client(struct xdr_netobj name,
2145 struct svc_rqst *rqstp, nfs4_verifier *verf)
2146{
2147 struct nfs4_client *clp;
2148 struct sockaddr *sa = svc_addr(rqstp);
2149 int ret;
2150 struct net *net = SVC_NET(rqstp);
2151
2152 clp = alloc_client(name);
2153 if (clp == NULL)
2154 return NULL;
2155
2156 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2157 if (ret) {
2158 free_client(clp);
2159 return NULL;
2160 }
2161 nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
2162 clp->cl_time = get_seconds();
2163 clear_bit(0, &clp->cl_cb_slot_busy);
2164 copy_verf(clp, verf);
2165 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
2166 clp->cl_cb_session = NULL;
2167 clp->net = net;
2168 return clp;
2169}
2170
2171static void
2172add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
2173{
2174 struct rb_node **new = &(root->rb_node), *parent = NULL;
2175 struct nfs4_client *clp;
2176
2177 while (*new) {
2178 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2179 parent = *new;
2180
2181 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2182 new = &((*new)->rb_left);
2183 else
2184 new = &((*new)->rb_right);
2185 }
2186
2187 rb_link_node(&new_clp->cl_namenode, parent, new);
2188 rb_insert_color(&new_clp->cl_namenode, root);
2189}
2190
2191static struct nfs4_client *
2192find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2193{
2194 int cmp;
2195 struct rb_node *node = root->rb_node;
2196 struct nfs4_client *clp;
2197
2198 while (node) {
2199 clp = rb_entry(node, struct nfs4_client, cl_namenode);
2200 cmp = compare_blob(&clp->cl_name, name);
2201 if (cmp > 0)
2202 node = node->rb_left;
2203 else if (cmp < 0)
2204 node = node->rb_right;
2205 else
2206 return clp;
2207 }
2208 return NULL;
2209}
2210
2211static void
2212add_to_unconfirmed(struct nfs4_client *clp)
2213{
2214 unsigned int idhashval;
2215 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2216
2217 lockdep_assert_held(&nn->client_lock);
2218
2219 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2220 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
2221 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2222 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
2223 renew_client_locked(clp);
2224}
2225
2226static void
2227move_to_confirmed(struct nfs4_client *clp)
2228{
2229 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
2230 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2231
2232 lockdep_assert_held(&nn->client_lock);
2233
2234 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
2235 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
2236 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2237 add_clp_to_name_tree(clp, &nn->conf_name_tree);
2238 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
2239 renew_client_locked(clp);
2240}
2241
2242static struct nfs4_client *
2243find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
2244{
2245 struct nfs4_client *clp;
2246 unsigned int idhashval = clientid_hashval(clid->cl_id);
2247
2248 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
2249 if (same_clid(&clp->cl_clientid, clid)) {
2250 if ((bool)clp->cl_minorversion != sessions)
2251 return NULL;
2252 renew_client_locked(clp);
2253 return clp;
2254 }
2255 }
2256 return NULL;
2257}
2258
2259static struct nfs4_client *
2260find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2261{
2262 struct list_head *tbl = nn->conf_id_hashtbl;
2263
2264 lockdep_assert_held(&nn->client_lock);
2265 return find_client_in_id_table(tbl, clid, sessions);
2266}
2267
2268static struct nfs4_client *
2269find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
2270{
2271 struct list_head *tbl = nn->unconf_id_hashtbl;
2272
2273 lockdep_assert_held(&nn->client_lock);
2274 return find_client_in_id_table(tbl, clid, sessions);
2275}
2276
2277static bool clp_used_exchangeid(struct nfs4_client *clp)
2278{
2279 return clp->cl_exchange_flags != 0;
2280}
2281
2282static struct nfs4_client *
2283find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2284{
2285 lockdep_assert_held(&nn->client_lock);
2286 return find_clp_in_name_tree(name, &nn->conf_name_tree);
2287}
2288
2289static struct nfs4_client *
2290find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
2291{
2292 lockdep_assert_held(&nn->client_lock);
2293 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
2294}
2295
2296static void
2297gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
2298{
2299 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
2300 struct sockaddr *sa = svc_addr(rqstp);
2301 u32 scopeid = rpc_get_scope_id(sa);
2302 unsigned short expected_family;
2303
2304 /* Currently, we only support tcp and tcp6 for the callback channel */
2305 if (se->se_callback_netid_len == 3 &&
2306 !memcmp(se->se_callback_netid_val, "tcp", 3))
2307 expected_family = AF_INET;
2308 else if (se->se_callback_netid_len == 4 &&
2309 !memcmp(se->se_callback_netid_val, "tcp6", 4))
2310 expected_family = AF_INET6;
2311 else
2312 goto out_err;
2313
2314 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
2315 se->se_callback_addr_len,
2316 (struct sockaddr *)&conn->cb_addr,
2317 sizeof(conn->cb_addr));
2318
2319 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
2320 goto out_err;
2321
2322 if (conn->cb_addr.ss_family == AF_INET6)
2323 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
2324
2325 conn->cb_prog = se->se_callback_prog;
2326 conn->cb_ident = se->se_callback_ident;
2327 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
2328 return;
2329out_err:
2330 conn->cb_addr.ss_family = AF_UNSPEC;
2331 conn->cb_addrlen = 0;
2332 dprintk("NFSD: this client (clientid %08x/%08x) "
2333 "will not receive delegations\n",
2334 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
2335
2336 return;
2337}
2338
2339/*
2340 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
2341 */
2342static void
2343nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
2344{
2345 struct xdr_buf *buf = resp->xdr.buf;
2346 struct nfsd4_slot *slot = resp->cstate.slot;
2347 unsigned int base;
2348
2349 dprintk("--> %s slot %p\n", __func__, slot);
2350
2351 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
2352 slot->sl_opcnt = resp->opcnt;
2353 slot->sl_status = resp->cstate.status;
2354 free_svc_cred(&slot->sl_cred);
2355 copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
2356
2357 if (!nfsd4_cache_this(resp)) {
2358 slot->sl_flags &= ~NFSD4_SLOT_CACHED;
2359 return;
2360 }
2361 slot->sl_flags |= NFSD4_SLOT_CACHED;
2362
2363 base = resp->cstate.data_offset;
2364 slot->sl_datalen = buf->len - base;
2365 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
2366 WARN(1, "%s: sessions DRC could not cache compound\n",
2367 __func__);
2368 return;
2369}
2370
2371/*
 2372 * Encode the replayed SEQUENCE operation from the slot values.
 2373 * If the reply was not cached, encode the uncached-reply error on the
 2374 * next operation; this sets resp->p and increments resp->opcnt for
 2375 * nfs4svc_encode_compoundres.
2376 *
2377 */
2378static __be32
2379nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
2380 struct nfsd4_compoundres *resp)
2381{
2382 struct nfsd4_op *op;
2383 struct nfsd4_slot *slot = resp->cstate.slot;
2384
2385 /* Encode the replayed sequence operation */
2386 op = &args->ops[resp->opcnt - 1];
2387 nfsd4_encode_operation(resp, op);
2388
2389 if (slot->sl_flags & NFSD4_SLOT_CACHED)
2390 return op->status;
2391 if (args->opcnt == 1) {
2392 /*
2393 * The original operation wasn't a solo sequence--we
2394 * always cache those--so this retry must not match the
2395 * original:
2396 */
2397 op->status = nfserr_seq_false_retry;
2398 } else {
2399 op = &args->ops[resp->opcnt++];
2400 op->status = nfserr_retry_uncached_rep;
2401 nfsd4_encode_operation(resp, op);
2402 }
2403 return op->status;
2404}
2405
2406/*
2407 * The sequence operation is not cached because we can use the slot and
2408 * session values.
2409 */
2410static __be32
2411nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
2412 struct nfsd4_sequence *seq)
2413{
2414 struct nfsd4_slot *slot = resp->cstate.slot;
2415 struct xdr_stream *xdr = &resp->xdr;
2416 __be32 *p;
2417 __be32 status;
2418
2419 dprintk("--> %s slot %p\n", __func__, slot);
2420
2421 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
2422 if (status)
2423 return status;
2424
2425 p = xdr_reserve_space(xdr, slot->sl_datalen);
2426 if (!p) {
2427 WARN_ON_ONCE(1);
2428 return nfserr_serverfault;
2429 }
2430 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
2431 xdr_commit_encode(xdr);
2432
2433 resp->opcnt = slot->sl_opcnt;
2434 return slot->sl_status;
2435}
2436
2437/*
2438 * Set the exchange_id flags returned by the server.
2439 */
2440static void
2441nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
2442{
2443#ifdef CONFIG_NFSD_PNFS
2444 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
2445#else
2446 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
2447#endif
2448
2449 /* Referrals are supported, Migration is not. */
2450 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
2451
2452 /* set the wire flags to return to client. */
2453 clid->flags = new->cl_exchange_flags;
2454}
2455
2456static bool client_has_openowners(struct nfs4_client *clp)
2457{
2458 struct nfs4_openowner *oo;
2459
2460 list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
2461 if (!list_empty(&oo->oo_owner.so_stateids))
2462 return true;
2463 }
2464 return false;
2465}
2466
2467static bool client_has_state(struct nfs4_client *clp)
2468{
2469 return client_has_openowners(clp)
2470#ifdef CONFIG_NFSD_PNFS
2471 || !list_empty(&clp->cl_lo_states)
2472#endif
2473 || !list_empty(&clp->cl_delegations)
2474 || !list_empty(&clp->cl_sessions);
2475}
2476
2477__be32
2478nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2479 union nfsd4_op_u *u)
2480{
2481 struct nfsd4_exchange_id *exid = &u->exchange_id;
2482 struct nfs4_client *conf, *new;
2483 struct nfs4_client *unconf = NULL;
2484 __be32 status;
2485 char addr_str[INET6_ADDRSTRLEN];
2486 nfs4_verifier verf = exid->verifier;
2487 struct sockaddr *sa = svc_addr(rqstp);
2488 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
2489 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2490
2491 rpc_ntop(sa, addr_str, sizeof(addr_str));
2492 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
2493 "ip_addr=%s flags %x, spa_how %d\n",
2494 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
2495 addr_str, exid->flags, exid->spa_how);
2496
2497 if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
2498 return nfserr_inval;
2499
2500 new = create_client(exid->clname, rqstp, &verf);
2501 if (new == NULL)
2502 return nfserr_jukebox;
2503
2504 switch (exid->spa_how) {
2505 case SP4_MACH_CRED:
2506 exid->spo_must_enforce[0] = 0;
2507 exid->spo_must_enforce[1] = (
2508 1 << (OP_BIND_CONN_TO_SESSION - 32) |
2509 1 << (OP_EXCHANGE_ID - 32) |
2510 1 << (OP_CREATE_SESSION - 32) |
2511 1 << (OP_DESTROY_SESSION - 32) |
2512 1 << (OP_DESTROY_CLIENTID - 32));
2513
2514 exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
2515 1 << (OP_OPEN_DOWNGRADE) |
2516 1 << (OP_LOCKU) |
2517 1 << (OP_DELEGRETURN));
2518
2519 exid->spo_must_allow[1] &= (
2520 1 << (OP_TEST_STATEID - 32) |
2521 1 << (OP_FREE_STATEID - 32));
2522 if (!svc_rqst_integrity_protected(rqstp)) {
2523 status = nfserr_inval;
2524 goto out_nolock;
2525 }
2526 /*
 2527 * Sometimes userspace doesn't give us a principal,
 2528 * which is really a bug. We can't enforce MACH_CRED
 2529 * in that case, so better to give up now:
2530 */
2531 if (!new->cl_cred.cr_principal &&
2532 !new->cl_cred.cr_raw_principal) {
2533 status = nfserr_serverfault;
2534 goto out_nolock;
2535 }
2536 new->cl_mach_cred = true;
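		/* deliberate fall through to SP4_NONE */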
2537 case SP4_NONE:
2538 break;
2539 default: /* checked by xdr code */
2540 WARN_ON_ONCE(1);
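		/* deliberate fall through: treat unknown spa_how like SP4_SSV */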
2541 case SP4_SSV:
2542 status = nfserr_encr_alg_unsupp;
2543 goto out_nolock;
2544 }
2545
2546 /* Cases below refer to rfc 5661 section 18.35.4: */
2547 spin_lock(&nn->client_lock);
2548 conf = find_confirmed_client_by_name(&exid->clname, nn);
2549 if (conf) {
2550 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
2551 bool verfs_match = same_verf(&verf, &conf->cl_verifier);
2552
2553 if (update) {
2554 if (!clp_used_exchangeid(conf)) { /* buggy client */
2555 status = nfserr_inval;
2556 goto out;
2557 }
2558 if (!nfsd4_mach_creds_match(conf, rqstp)) {
2559 status = nfserr_wrong_cred;
2560 goto out;
2561 }
2562 if (!creds_match) { /* case 9 */
2563 status = nfserr_perm;
2564 goto out;
2565 }
2566 if (!verfs_match) { /* case 8 */
2567 status = nfserr_not_same;
2568 goto out;
2569 }
2570 /* case 6 */
2571 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
2572 goto out_copy;
2573 }
2574 if (!creds_match) { /* case 3 */
2575 if (client_has_state(conf)) {
2576 status = nfserr_clid_inuse;
2577 goto out;
2578 }
2579 goto out_new;
2580 }
2581 if (verfs_match) { /* case 2 */
2582 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
2583 goto out_copy;
2584 }
2585 /* case 5, client reboot */
2586 conf = NULL;
2587 goto out_new;
2588 }
2589
2590 if (update) { /* case 7 */
2591 status = nfserr_noent;
2592 goto out;
2593 }
2594
2595 unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
2596 if (unconf) /* case 4, possible retry or client restart */
2597 unhash_client_locked(unconf);
2598
2599 /* case 1 (normal case) */
2600out_new:
2601 if (conf) {
2602 status = mark_client_expired_locked(conf);
2603 if (status)
2604 goto out;
2605 }
2606 new->cl_minorversion = cstate->minorversion;
2607 new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
2608 new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
2609
2610 gen_clid(new, nn);
2611 add_to_unconfirmed(new);
2612 swap(new, conf);
2613out_copy:
2614 exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
2615 exid->clientid.cl_id = conf->cl_clientid.cl_id;
2616
2617 exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
2618 nfsd4_set_ex_flags(conf, exid);
2619
2620 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
2621 conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
2622 status = nfs_ok;
2623
2624out:
2625 spin_unlock(&nn->client_lock);
2626out_nolock:
2627 if (new)
2628 expire_client(new);
2629 if (unconf)
2630 expire_client(unconf);
2631 return status;
2632}
2633
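/*
 * Check a SEQUENCE or CREATE_SESSION seqid against the seqid cached in
 * the slot.  A request for a slot that is still in use is either a
 * retransmission of the outstanding request (ask the client to retry
 * later) or misordered.  Otherwise the seqid must be exactly one past
 * the cached value; a seqid equal to the cached value is a replay.
 */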
2634static __be32
2635check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
2636{
2637 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
2638 slot_seqid);
2639
2640 /* The slot is in use, and no response has been sent. */
2641 if (slot_inuse) {
2642 if (seqid == slot_seqid)
2643 return nfserr_jukebox;
2644 else
2645 return nfserr_seq_misordered;
2646 }
2647 /* Note unsigned 32-bit arithmetic handles wraparound: */
2648 if (likely(seqid == slot_seqid + 1))
2649 return nfs_ok;
2650 if (seqid == slot_seqid)
2651 return nfserr_replay_cache;
2652 return nfserr_seq_misordered;
2653}
2654
2655/*
2656 * Cache the create session result into the create session single DRC
2657 * slot cache by saving the xdr structure. sl_seqid has been set.
2658 * Do this for solo or embedded create session operations.
2659 */
2660static void
2661nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
2662 struct nfsd4_clid_slot *slot, __be32 nfserr)
2663{
2664 slot->sl_status = nfserr;
2665 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
2666}
2667
2668static __be32
2669nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
2670 struct nfsd4_clid_slot *slot)
2671{
2672 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
2673 return slot->sl_status;
2674}
2675
2676#define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
2677 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
2678 1 + /* minimal tag: zero-length string, so just the length word */ \
2679 3 + /* version, opcount, opcode */ \
2680 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2681 /* seqid, slotID, slotID, cache */ \
2682 4 ) * sizeof(__be32))
2683
2684#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
2685 2 + /* verifier: AUTH_NULL, length 0 */\
2686 1 + /* status */ \
2687 1 + /* minimal tag: zero-length string, so just the length word */ \
2688 3 + /* opcount, opcode, opstatus*/ \
2689 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
2690 /* seqid, slotID, slotID, slotID, status */ \
2691 5 ) * sizeof(__be32))
2692
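/*
 * Sanity-check the client's requested fore channel attributes, clamp
 * them to what this server supports, and reserve DRC memory for the
 * slot table.  Fails with nfserr_toosmall if the request or reply size
 * is below what a bare SEQUENCE compound needs, and with nfserr_jukebox
 * if no slot memory is currently available.
 */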
2693static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
2694{
2695 u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
2696
2697 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
2698 return nfserr_toosmall;
2699 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
2700 return nfserr_toosmall;
2701 ca->headerpadsz = 0;
2702 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
2703 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
2704 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
2705 ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
2706 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
2707 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
2708 /*
2709 * Note decreasing slot size below the client's request may make it
2710 * difficult for the client to function correctly, whereas
2711 * decreasing the number of slots will (just?) affect
2712 * performance. When short on memory we therefore prefer to
2713 * decrease number of slots instead of their size. Clients that
2714 * request larger slots than they need will get poor results:
2715 */
2716 ca->maxreqs = nfsd4_get_drc_mem(ca);
2717 if (!ca->maxreqs)
2718 return nfserr_jukebox;
2719
2720 return nfs_ok;
2721}
2722
2723/*
2724 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
2725 * These are based on similar macros in linux/sunrpc/msg_prot.h .
2726 */
2727#define RPC_MAX_HEADER_WITH_AUTH_SYS \
2728 (RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
2729
2730#define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
2731 (RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
2732
2733#define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
2734 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
2735#define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
2736 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
2737 sizeof(__be32))
2738
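/*
 * The back channel only needs to be large enough to carry a CB_RECALL
 * over AUTH_SYS and must allow at least two operations per compound;
 * nothing is cached on the back channel.
 */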
2739static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
2740{
2741 ca->headerpadsz = 0;
2742
2743 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
2744 return nfserr_toosmall;
2745 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
2746 return nfserr_toosmall;
2747 ca->maxresp_cached = 0;
2748 if (ca->maxops < 2)
2749 return nfserr_toosmall;
2750
2751 return nfs_ok;
2752}
2753
2754static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
2755{
2756 switch (cbs->flavor) {
2757 case RPC_AUTH_NULL:
2758 case RPC_AUTH_UNIX:
2759 return nfs_ok;
2760 default:
2761 /*
2762 * GSS case: the spec doesn't allow us to return this
2763 * error. But it also doesn't allow us not to support
2764 * GSS.
2765 * I'd rather this fail hard than return some error the
2766 * client might think it can already handle:
2767 */
2768 return nfserr_encr_alg_unsupp;
2769 }
2770}
2771
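/*
 * CREATE_SESSION: validate the requested channel attributes and callback
 * security, then either replay the cached response (a confirmed client
 * retransmitting the same seqid), create a session for an already
 * confirmed client, or confirm an unconfirmed client and create its
 * first session.
 */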
2772__be32
2773nfsd4_create_session(struct svc_rqst *rqstp,
2774 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
2775{
2776 struct nfsd4_create_session *cr_ses = &u->create_session;
2777 struct sockaddr *sa = svc_addr(rqstp);
2778 struct nfs4_client *conf, *unconf;
2779 struct nfs4_client *old = NULL;
2780 struct nfsd4_session *new;
2781 struct nfsd4_conn *conn;
2782 struct nfsd4_clid_slot *cs_slot = NULL;
2783 __be32 status = 0;
2784 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2785
2786 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
2787 return nfserr_inval;
2788 status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
2789 if (status)
2790 return status;
2791 status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
2792 if (status)
2793 return status;
2794 status = check_backchannel_attrs(&cr_ses->back_channel);
2795 if (status)
2796 goto out_release_drc_mem;
2797 status = nfserr_jukebox;
2798 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
2799 if (!new)
2800 goto out_release_drc_mem;
2801 conn = alloc_conn_from_crses(rqstp, cr_ses);
2802 if (!conn)
2803 goto out_free_session;
2804
2805 spin_lock(&nn->client_lock);
2806 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
2807 conf = find_confirmed_client(&cr_ses->clientid, true, nn);
2808 WARN_ON_ONCE(conf && unconf);
2809
2810 if (conf) {
2811 status = nfserr_wrong_cred;
2812 if (!nfsd4_mach_creds_match(conf, rqstp))
2813 goto out_free_conn;
2814 cs_slot = &conf->cl_cs_slot;
2815 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2816 if (status) {
2817 if (status == nfserr_replay_cache)
2818 status = nfsd4_replay_create_session(cr_ses, cs_slot);
2819 goto out_free_conn;
2820 }
2821 } else if (unconf) {
2822 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
2823 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
2824 status = nfserr_clid_inuse;
2825 goto out_free_conn;
2826 }
2827 status = nfserr_wrong_cred;
2828 if (!nfsd4_mach_creds_match(unconf, rqstp))
2829 goto out_free_conn;
2830 cs_slot = &unconf->cl_cs_slot;
2831 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2832 if (status) {
2833 /* an unconfirmed replay returns misordered */
2834 status = nfserr_seq_misordered;
2835 goto out_free_conn;
2836 }
2837 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
2838 if (old) {
2839 status = mark_client_expired_locked(old);
2840 if (status) {
2841 old = NULL;
2842 goto out_free_conn;
2843 }
2844 }
2845 move_to_confirmed(unconf);
2846 conf = unconf;
2847 } else {
2848 status = nfserr_stale_clientid;
2849 goto out_free_conn;
2850 }
2851 status = nfs_ok;
2852 /* Persistent sessions are not supported */
2853 cr_ses->flags &= ~SESSION4_PERSIST;
2854 /* Upshifting from TCP to RDMA is not supported */
2855 cr_ses->flags &= ~SESSION4_RDMA;
2856
2857 init_session(rqstp, new, conf, cr_ses);
2858 nfsd4_get_session_locked(new);
2859
2860 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
2861 NFS4_MAX_SESSIONID_LEN);
2862 cs_slot->sl_seqid++;
2863 cr_ses->seqid = cs_slot->sl_seqid;
2864
2865 /* cache solo and embedded create sessions under the client_lock */
2866 nfsd4_cache_create_session(cr_ses, cs_slot, status);
2867 spin_unlock(&nn->client_lock);
2868 /* init connection and backchannel */
2869 nfsd4_init_conn(rqstp, conn, new);
2870 nfsd4_put_session(new);
2871 if (old)
2872 expire_client(old);
2873 return status;
2874out_free_conn:
2875 spin_unlock(&nn->client_lock);
2876 free_conn(conn);
2877 if (old)
2878 expire_client(old);
2879out_free_session:
2880 __free_session(new);
2881out_release_drc_mem:
2882 nfsd4_put_drc_mem(&cr_ses->fore_channel);
2883 return status;
2884}
2885
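/*
 * Normalize the BIND_CONN_TO_SESSION direction argument: "fore or both"
 * and "back or both" requests are granted both directions.
 */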
2886static __be32 nfsd4_map_bcts_dir(u32 *dir)
2887{
2888 switch (*dir) {
2889 case NFS4_CDFC4_FORE:
2890 case NFS4_CDFC4_BACK:
2891 return nfs_ok;
2892 case NFS4_CDFC4_FORE_OR_BOTH:
2893 case NFS4_CDFC4_BACK_OR_BOTH:
2894 *dir = NFS4_CDFC4_BOTH;
2895 return nfs_ok;
2896 }
2897 return nfserr_inval;
2898}
2899
2900__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
2901 struct nfsd4_compound_state *cstate,
2902 union nfsd4_op_u *u)
2903{
2904 struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
2905 struct nfsd4_session *session = cstate->session;
2906 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2907 __be32 status;
2908
2909 status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
2910 if (status)
2911 return status;
2912 spin_lock(&nn->client_lock);
2913 session->se_cb_prog = bc->bc_cb_program;
2914 session->se_cb_sec = bc->bc_cb_sec;
2915 spin_unlock(&nn->client_lock);
2916
2917 nfsd4_probe_callback(session->se_client);
2918
2919 return nfs_ok;
2920}
2921
2922__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
2923 struct nfsd4_compound_state *cstate,
2924 union nfsd4_op_u *u)
2925{
2926 struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
2927 __be32 status;
2928 struct nfsd4_conn *conn;
2929 struct nfsd4_session *session;
2930 struct net *net = SVC_NET(rqstp);
2931 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2932
2933 if (!nfsd4_last_compound_op(rqstp))
2934 return nfserr_not_only_op;
2935 spin_lock(&nn->client_lock);
2936 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
2937 spin_unlock(&nn->client_lock);
2938 if (!session)
2939 goto out_no_session;
2940 status = nfserr_wrong_cred;
2941 if (!nfsd4_mach_creds_match(session->se_client, rqstp))
2942 goto out;
2943 status = nfsd4_map_bcts_dir(&bcts->dir);
2944 if (status)
2945 goto out;
2946 conn = alloc_conn(rqstp, bcts->dir);
2947 status = nfserr_jukebox;
2948 if (!conn)
2949 goto out;
2950 nfsd4_init_conn(rqstp, conn, session);
2951 status = nfs_ok;
2952out:
2953 nfsd4_put_session(session);
2954out_no_session:
2955 return status;
2956}
2957
2958static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
2959{
2960 if (!session)
2961 return false;
2962 return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
2963}
2964
2965__be32
2966nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
2967 union nfsd4_op_u *u)
2968{
2969 struct nfsd4_destroy_session *sessionid = &u->destroy_session;
2970 struct nfsd4_session *ses;
2971 __be32 status;
2972 int ref_held_by_me = 0;
2973 struct net *net = SVC_NET(r);
2974 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2975
2976 status = nfserr_not_only_op;
2977 if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
2978 if (!nfsd4_last_compound_op(r))
2979 goto out;
2980 ref_held_by_me++;
2981 }
2982 dump_sessionid(__func__, &sessionid->sessionid);
2983 spin_lock(&nn->client_lock);
2984 ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
2985 if (!ses)
2986 goto out_client_lock;
2987 status = nfserr_wrong_cred;
2988 if (!nfsd4_mach_creds_match(ses->se_client, r))
2989 goto out_put_session;
2990 status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
2991 if (status)
2992 goto out_put_session;
2993 unhash_session(ses);
2994 spin_unlock(&nn->client_lock);
2995
2996 nfsd4_probe_callback_sync(ses->se_client);
2997
2998 spin_lock(&nn->client_lock);
2999 status = nfs_ok;
3000out_put_session:
3001 nfsd4_put_session_locked(ses);
3002out_client_lock:
3003 spin_unlock(&nn->client_lock);
3004out:
3005 return status;
3006}
3007
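/*
 * Find the connection bound to this session for the given transport,
 * if any.  Called with the client's cl_lock held.
 */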
3008static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
3009{
3010 struct nfsd4_conn *c;
3011
3012 list_for_each_entry(c, &s->se_conns, cn_persession) {
3013 if (c->cn_xprt == xpt) {
3014 return c;
3015 }
3016 }
3017 return NULL;
3018}
3019
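/*
 * Make sure the connection this request arrived on is bound to the
 * session.  If it is not, bind it implicitly, unless the client
 * requires machine credentials (SP4_MACH_CRED), in which case an
 * explicit BIND_CONN_TO_SESSION is required and the request fails.
 */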
3020static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
3021{
3022 struct nfs4_client *clp = ses->se_client;
3023 struct nfsd4_conn *c;
3024 __be32 status = nfs_ok;
3025 int ret;
3026
3027 spin_lock(&clp->cl_lock);
3028 c = __nfsd4_find_conn(new->cn_xprt, ses);
3029 if (c)
3030 goto out_free;
3031 status = nfserr_conn_not_bound_to_session;
3032 if (clp->cl_mach_cred)
3033 goto out_free;
3034 __nfsd4_hash_conn(new, ses);
3035 spin_unlock(&clp->cl_lock);
3036 ret = nfsd4_register_conn(new);
3037 if (ret)
3038 /* oops; xprt is already down: */
3039 nfsd4_conn_lost(&new->cn_xpt_user);
3040 return nfs_ok;
3041out_free:
3042 spin_unlock(&clp->cl_lock);
3043 free_conn(new);
3044 return status;
3045}
3046
3047static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
3048{
3049 struct nfsd4_compoundargs *args = rqstp->rq_argp;
3050
3051 return args->opcnt > session->se_fchannel.maxops;
3052}
3053
3054static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
3055 struct nfsd4_session *session)
3056{
3057 struct xdr_buf *xb = &rqstp->rq_arg;
3058
3059 return xb->len > session->se_fchannel.maxreq_sz;
3060}
3061
3062static bool replay_matches_cache(struct svc_rqst *rqstp,
3063 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
3064{
3065 struct nfsd4_compoundargs *argp = rqstp->rq_argp;
3066
3067 if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
3068 (bool)seq->cachethis)
3069 return false;
3070 /*
3071 * If there's an error then the reply can have fewer ops than
3072 * the call. But if we cached a reply with *more* ops than the
3073 * call you're sending us now, then this new call is clearly not
3074 * really a replay of the old one:
3075 */
3076 if (slot->sl_opcnt < argp->opcnt)
3077 return false;
3078 /* This is the only check the spec explicitly calls for: */
3079 if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
3080 return false;
3081 /*
3082 * There may be more comparisons we could actually do, but the
3083 * spec doesn't require us to catch every case where the calls
3084 * don't match (that would require caching the call as well as
3085 * the reply), so we don't bother.
3086 */
3087 return true;
3088}
3089
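/*
 * SEQUENCE: look up the session and slot, return the cached reply for a
 * replay, implicitly bind the connection to the session, and cap the
 * reply size depending on whether the result will be cached.  On
 * success the slot is marked in use and the session, slot and client
 * are stashed in cstate for the rest of the compound.
 */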
3090__be32
3091nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3092 union nfsd4_op_u *u)
3093{
3094 struct nfsd4_sequence *seq = &u->sequence;
3095 struct nfsd4_compoundres *resp = rqstp->rq_resp;
3096 struct xdr_stream *xdr = &resp->xdr;
3097 struct nfsd4_session *session;
3098 struct nfs4_client *clp;
3099 struct nfsd4_slot *slot;
3100 struct nfsd4_conn *conn;
3101 __be32 status;
3102 int buflen;
3103 struct net *net = SVC_NET(rqstp);
3104 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3105
3106 if (resp->opcnt != 1)
3107 return nfserr_sequence_pos;
3108
3109 /*
3110 * Will be either used or freed by nfsd4_sequence_check_conn
3111 * below.
3112 */
3113 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
3114 if (!conn)
3115 return nfserr_jukebox;
3116
3117 spin_lock(&nn->client_lock);
3118 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
3119 if (!session)
3120 goto out_no_session;
3121 clp = session->se_client;
3122
3123 status = nfserr_too_many_ops;
3124 if (nfsd4_session_too_many_ops(rqstp, session))
3125 goto out_put_session;
3126
3127 status = nfserr_req_too_big;
3128 if (nfsd4_request_too_big(rqstp, session))
3129 goto out_put_session;
3130
3131 status = nfserr_badslot;
3132 if (seq->slotid >= session->se_fchannel.maxreqs)
3133 goto out_put_session;
3134
3135 slot = session->se_slots[seq->slotid];
3136 dprintk("%s: slotid %d\n", __func__, seq->slotid);
3137
3138 /* We do not negotiate the number of slots yet, so set the
3139 * maxslots to the session maxreqs which is used to encode
3140 * sr_highest_slotid and the sr_target_slotid to maxslots */
3141 seq->maxslots = session->se_fchannel.maxreqs;
3142
3143 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
3144 slot->sl_flags & NFSD4_SLOT_INUSE);
3145 if (status == nfserr_replay_cache) {
3146 status = nfserr_seq_misordered;
3147 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
3148 goto out_put_session;
3149 status = nfserr_seq_false_retry;
3150 if (!replay_matches_cache(rqstp, seq, slot))
3151 goto out_put_session;
3152 cstate->slot = slot;
3153 cstate->session = session;
3154 cstate->clp = clp;
3155 /* Return the cached reply status and set cstate->status
3156 * for nfsd4_proc_compound processing */
3157 status = nfsd4_replay_cache_entry(resp, seq);
3158 cstate->status = nfserr_replay_cache;
3159 goto out;
3160 }
3161 if (status)
3162 goto out_put_session;
3163
3164 status = nfsd4_sequence_check_conn(conn, session);
3165 conn = NULL;
3166 if (status)
3167 goto out_put_session;
3168
3169 buflen = (seq->cachethis) ?
3170 session->se_fchannel.maxresp_cached :
3171 session->se_fchannel.maxresp_sz;
3172 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
3173 nfserr_rep_too_big;
3174 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
3175 goto out_put_session;
3176 svc_reserve(rqstp, buflen);
3177
3178 status = nfs_ok;
3179 /* Success! bump slot seqid */
3180 slot->sl_seqid = seq->seqid;
3181 slot->sl_flags |= NFSD4_SLOT_INUSE;
3182 if (seq->cachethis)
3183 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
3184 else
3185 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
3186
3187 cstate->slot = slot;
3188 cstate->session = session;
3189 cstate->clp = clp;
3190
3191out:
3192 switch (clp->cl_cb_state) {
3193 case NFSD4_CB_DOWN:
3194 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
3195 break;
3196 case NFSD4_CB_FAULT:
3197 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
3198 break;
3199 default:
3200 seq->status_flags = 0;
3201 }
3202 if (!list_empty(&clp->cl_revoked))
3203 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
3204out_no_session:
3205 if (conn)
3206 free_conn(conn);
3207 spin_unlock(&nn->client_lock);
3208 return status;
3209out_put_session:
3210 nfsd4_put_session_locked(session);
3211 goto out_no_session;
3212}
3213
3214void
3215nfsd4_sequence_done(struct nfsd4_compoundres *resp)
3216{
3217 struct nfsd4_compound_state *cs = &resp->cstate;
3218
3219 if (nfsd4_has_session(cs)) {
3220 if (cs->status != nfserr_replay_cache) {
3221 nfsd4_store_cache_entry(resp);
3222 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
3223 }
3224 /* Drop session reference that was taken in nfsd4_sequence() */
3225 nfsd4_put_session(cs->session);
3226 } else if (cs->clp)
3227 put_client_renew(cs->clp);
3228}
3229
3230__be32
3231nfsd4_destroy_clientid(struct svc_rqst *rqstp,
3232 struct nfsd4_compound_state *cstate,
3233 union nfsd4_op_u *u)
3234{
3235 struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
3236 struct nfs4_client *conf, *unconf;
3237 struct nfs4_client *clp = NULL;
3238 __be32 status = 0;
3239 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3240
3241 spin_lock(&nn->client_lock);
3242 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
3243 conf = find_confirmed_client(&dc->clientid, true, nn);
3244 WARN_ON_ONCE(conf && unconf);
3245
3246 if (conf) {
3247 if (client_has_state(conf)) {
3248 status = nfserr_clientid_busy;
3249 goto out;
3250 }
3251 status = mark_client_expired_locked(conf);
3252 if (status)
3253 goto out;
3254 clp = conf;
3255 } else if (unconf)
3256 clp = unconf;
3257 else {
3258 status = nfserr_stale_clientid;
3259 goto out;
3260 }
3261 if (!nfsd4_mach_creds_match(clp, rqstp)) {
3262 clp = NULL;
3263 status = nfserr_wrong_cred;
3264 goto out;
3265 }
3266 unhash_client_locked(clp);
3267out:
3268 spin_unlock(&nn->client_lock);
3269 if (clp)
3270 expire_client(clp);
3271 return status;
3272}
3273
3274__be32
3275nfsd4_reclaim_complete(struct svc_rqst *rqstp,
3276 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3277{
3278 struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
3279 __be32 status = 0;
3280
3281 if (rc->rca_one_fs) {
3282 if (!cstate->current_fh.fh_dentry)
3283 return nfserr_nofilehandle;
3284 /*
3285 * We don't take advantage of the rca_one_fs case.
3286 * That's OK, it's optional, we can safely ignore it.
3287 */
3288 return nfs_ok;
3289 }
3290
3291 status = nfserr_complete_already;
3292 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
3293 &cstate->session->se_client->cl_flags))
3294 goto out;
3295
3296 status = nfserr_stale_clientid;
3297 if (is_client_expired(cstate->session->se_client))
3298 /*
3299 * The following error isn't really legal.
3300 * But we only get here if the client has just explicitly
3301 * destroyed its own clientid. Surely it no longer cares what
3302 * error it gets back on an operation for the dead
3303 * client.
3304 */
3305 goto out;
3306
3307 status = nfs_ok;
3308 nfsd4_client_record_create(cstate->session->se_client);
3309out:
3310 return status;
3311}
3312
3313__be32
3314nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3315 union nfsd4_op_u *u)
3316{
3317 struct nfsd4_setclientid *setclid = &u->setclientid;
3318 struct xdr_netobj clname = setclid->se_name;
3319 nfs4_verifier clverifier = setclid->se_verf;
3320 struct nfs4_client *conf, *new;
3321 struct nfs4_client *unconf = NULL;
3322 __be32 status;
3323 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3324
3325 new = create_client(clname, rqstp, &clverifier);
3326 if (new == NULL)
3327 return nfserr_jukebox;
3328 /* Cases below refer to rfc 3530 section 14.2.33: */
3329 spin_lock(&nn->client_lock);
3330 conf = find_confirmed_client_by_name(&clname, nn);
3331 if (conf && client_has_state(conf)) {
3332 /* case 0: */
3333 status = nfserr_clid_inuse;
3334 if (clp_used_exchangeid(conf))
3335 goto out;
3336 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
3337 char addr_str[INET6_ADDRSTRLEN];
3338 rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
3339 sizeof(addr_str));
3340 dprintk("NFSD: setclientid: string in use by client "
3341 "at %s\n", addr_str);
3342 goto out;
3343 }
3344 }
3345 unconf = find_unconfirmed_client_by_name(&clname, nn);
3346 if (unconf)
3347 unhash_client_locked(unconf);
3348 if (conf && same_verf(&conf->cl_verifier, &clverifier)) {
3349 /* case 1: probable callback update */
3350 copy_clid(new, conf);
3351 gen_confirm(new, nn);
3352 } else /* case 4 (new client) or cases 2, 3 (client reboot): */
3353 gen_clid(new, nn);
3354 new->cl_minorversion = 0;
3355 gen_callback(new, setclid, rqstp);
3356 add_to_unconfirmed(new);
3357 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
3358 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
3359 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
3360 new = NULL;
3361 status = nfs_ok;
3362out:
3363 spin_unlock(&nn->client_lock);
3364 if (new)
3365 free_client(new);
3366 if (unconf)
3367 expire_client(unconf);
3368 return status;
3369}
3370
3371
3372__be32
3373nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
3374 struct nfsd4_compound_state *cstate,
3375 union nfsd4_op_u *u)
3376{
3377 struct nfsd4_setclientid_confirm *setclientid_confirm =
3378 &u->setclientid_confirm;
3379 struct nfs4_client *conf, *unconf;
3380 struct nfs4_client *old = NULL;
3381 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
3382 clientid_t * clid = &setclientid_confirm->sc_clientid;
3383 __be32 status;
3384 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3385
3386 if (STALE_CLIENTID(clid, nn))
3387 return nfserr_stale_clientid;
3388
3389 spin_lock(&nn->client_lock);
3390 conf = find_confirmed_client(clid, false, nn);
3391 unconf = find_unconfirmed_client(clid, false, nn);
3392 /*
3393 * We try hard to give out unique clientids, so if we get an
3394 * attempt to confirm the same clientid with a different cred,
3395 * the client may be buggy; this should never happen.
3396 *
3397 * Nevertheless, RFC 7530 recommends INUSE for this case:
3398 */
3399 status = nfserr_clid_inuse;
3400 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
3401 goto out;
3402 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
3403 goto out;
3404 /* cases below refer to rfc 3530 section 14.2.34: */
3405 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
3406 if (conf && same_verf(&confirm, &conf->cl_confirm)) {
3407 /* case 2: probable retransmit */
3408 status = nfs_ok;
3409 } else /* case 4: client hasn't noticed we rebooted yet? */
3410 status = nfserr_stale_clientid;
3411 goto out;
3412 }
3413 status = nfs_ok;
3414 if (conf) { /* case 1: callback update */
3415 old = unconf;
3416 unhash_client_locked(old);
3417 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
3418 } else { /* case 3: normal case; new or rebooted client */
3419 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3420 if (old) {
3421 status = nfserr_clid_inuse;
3422 if (client_has_state(old)
3423 && !same_creds(&unconf->cl_cred,
3424 &old->cl_cred))
3425 goto out;
3426 status = mark_client_expired_locked(old);
3427 if (status) {
3428 old = NULL;
3429 goto out;
3430 }
3431 }
3432 move_to_confirmed(unconf);
3433 conf = unconf;
3434 }
3435 get_client_locked(conf);
3436 spin_unlock(&nn->client_lock);
3437 nfsd4_probe_callback(conf);
3438 spin_lock(&nn->client_lock);
3439 put_client_renew_locked(conf);
3440out:
3441 spin_unlock(&nn->client_lock);
3442 if (old)
3443 expire_client(old);
3444 return status;
3445}
3446
3447static struct nfs4_file *nfsd4_alloc_file(void)
3448{
3449 return kmem_cache_alloc(file_slab, GFP_KERNEL);
3450}
3451
3452/* OPEN Share state helper functions */
3453static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
3454 struct nfs4_file *fp)
3455{
3456 lockdep_assert_held(&state_lock);
3457
3458 refcount_set(&fp->fi_ref, 1);
3459 spin_lock_init(&fp->fi_lock);
3460 INIT_LIST_HEAD(&fp->fi_stateids);
3461 INIT_LIST_HEAD(&fp->fi_delegations);
3462 INIT_LIST_HEAD(&fp->fi_clnt_odstate);
3463 fh_copy_shallow(&fp->fi_fhandle, fh);
3464 fp->fi_deleg_file = NULL;
3465 fp->fi_had_conflict = false;
3466 fp->fi_share_deny = 0;
3467 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
3468 memset(fp->fi_access, 0, sizeof(fp->fi_access));
3469#ifdef CONFIG_NFSD_PNFS
3470 INIT_LIST_HEAD(&fp->fi_lo_states);
3471 atomic_set(&fp->fi_lo_recalls, 0);
3472#endif
3473 hlist_add_head_rcu(&fp->fi_hash, &file_hashtbl[hashval]);
3474}
3475
3476void
3477nfsd4_free_slabs(void)
3478{
3479 kmem_cache_destroy(client_slab);
3480 kmem_cache_destroy(openowner_slab);
3481 kmem_cache_destroy(lockowner_slab);
3482 kmem_cache_destroy(file_slab);
3483 kmem_cache_destroy(stateid_slab);
3484 kmem_cache_destroy(deleg_slab);
3485 kmem_cache_destroy(odstate_slab);
3486}
3487
3488int
3489nfsd4_init_slabs(void)
3490{
3491 client_slab = kmem_cache_create("nfsd4_clients",
3492 sizeof(struct nfs4_client), 0, 0, NULL);
3493 if (client_slab == NULL)
3494 goto out;
3495 openowner_slab = kmem_cache_create("nfsd4_openowners",
3496 sizeof(struct nfs4_openowner), 0, 0, NULL);
3497 if (openowner_slab == NULL)
3498 goto out_free_client_slab;
3499 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
3500 sizeof(struct nfs4_lockowner), 0, 0, NULL);
3501 if (lockowner_slab == NULL)
3502 goto out_free_openowner_slab;
3503 file_slab = kmem_cache_create("nfsd4_files",
3504 sizeof(struct nfs4_file), 0, 0, NULL);
3505 if (file_slab == NULL)
3506 goto out_free_lockowner_slab;
3507 stateid_slab = kmem_cache_create("nfsd4_stateids",
3508 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
3509 if (stateid_slab == NULL)
3510 goto out_free_file_slab;
3511 deleg_slab = kmem_cache_create("nfsd4_delegations",
3512 sizeof(struct nfs4_delegation), 0, 0, NULL);
3513 if (deleg_slab == NULL)
3514 goto out_free_stateid_slab;
3515 odstate_slab = kmem_cache_create("nfsd4_odstate",
3516 sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
3517 if (odstate_slab == NULL)
3518 goto out_free_deleg_slab;
3519 return 0;
3520
3521out_free_deleg_slab:
3522 kmem_cache_destroy(deleg_slab);
3523out_free_stateid_slab:
3524 kmem_cache_destroy(stateid_slab);
3525out_free_file_slab:
3526 kmem_cache_destroy(file_slab);
3527out_free_lockowner_slab:
3528 kmem_cache_destroy(lockowner_slab);
3529out_free_openowner_slab:
3530 kmem_cache_destroy(openowner_slab);
3531out_free_client_slab:
3532 kmem_cache_destroy(client_slab);
3533out:
3534 dprintk("nfsd4: out of memory while initializing nfsv4\n");
3535 return -ENOMEM;
3536}
3537
3538static void init_nfs4_replay(struct nfs4_replay *rp)
3539{
3540 rp->rp_status = nfserr_serverfault;
3541 rp->rp_buflen = 0;
3542 rp->rp_buf = rp->rp_ibuf;
3543 mutex_init(&rp->rp_mutex);
3544}
3545
3546static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
3547 struct nfs4_stateowner *so)
3548{
3549 if (!nfsd4_has_session(cstate)) {
3550 mutex_lock(&so->so_replay.rp_mutex);
3551 cstate->replay_owner = nfs4_get_stateowner(so);
3552 }
3553}
3554
3555void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
3556{
3557 struct nfs4_stateowner *so = cstate->replay_owner;
3558
3559 if (so != NULL) {
3560 cstate->replay_owner = NULL;
3561 mutex_unlock(&so->so_replay.rp_mutex);
3562 nfs4_put_stateowner(so);
3563 }
3564}
3565
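/*
 * Common allocation for open owners and lock owners: copy the
 * client-supplied owner name and initialize the replay and refcount
 * state shared by both owner types.
 */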
3566static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
3567{
3568 struct nfs4_stateowner *sop;
3569
3570 sop = kmem_cache_alloc(slab, GFP_KERNEL);
3571 if (!sop)
3572 return NULL;
3573
3574 sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
3575 if (!sop->so_owner.data) {
3576 kmem_cache_free(slab, sop);
3577 return NULL;
3578 }
3579 sop->so_owner.len = owner->len;
3580
3581 INIT_LIST_HEAD(&sop->so_stateids);
3582 sop->so_client = clp;
3583 init_nfs4_replay(&sop->so_replay);
3584 atomic_set(&sop->so_count, 1);
3585 return sop;
3586}
3587
3588static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
3589{
3590 lockdep_assert_held(&clp->cl_lock);
3591
3592 list_add(&oo->oo_owner.so_strhash,
3593 &clp->cl_ownerstr_hashtbl[strhashval]);
3594 list_add(&oo->oo_perclient, &clp->cl_openowners);
3595}
3596
3597static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
3598{
3599 unhash_openowner_locked(openowner(so));
3600}
3601
3602static void nfs4_free_openowner(struct nfs4_stateowner *so)
3603{
3604 struct nfs4_openowner *oo = openowner(so);
3605
3606 kmem_cache_free(openowner_slab, oo);
3607}
3608
3609static const struct nfs4_stateowner_operations openowner_ops = {
3610 .so_unhash = nfs4_unhash_openowner,
3611 .so_free = nfs4_free_openowner,
3612};
3613
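/*
 * Search the file's stateid list for an open stateid belonging to this
 * open's openowner.  Called with fi_lock held; returns the stateid with
 * an extra reference taken, or NULL if none is found.
 */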
3614static struct nfs4_ol_stateid *
3615nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3616{
3617 struct nfs4_ol_stateid *local, *ret = NULL;
3618 struct nfs4_openowner *oo = open->op_openowner;
3619
3620 lockdep_assert_held(&fp->fi_lock);
3621
3622 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
3623 /* ignore lock owners */
3624 if (local->st_stateowner->so_is_open_owner == 0)
3625 continue;
3626 if (local->st_stateowner != &oo->oo_owner)
3627 continue;
3628 if (local->st_stid.sc_type == NFS4_OPEN_STID) {
3629 ret = local;
3630 refcount_inc(&ret->st_stid.sc_count);
3631 break;
3632 }
3633 }
3634 return ret;
3635}
3636
3637static __be32
3638nfsd4_verify_open_stid(struct nfs4_stid *s)
3639{
3640 __be32 ret = nfs_ok;
3641
3642 switch (s->sc_type) {
3643 default:
3644 break;
3645 case 0:
3646 case NFS4_CLOSED_STID:
3647 case NFS4_CLOSED_DELEG_STID:
3648 ret = nfserr_bad_stateid;
3649 break;
3650 case NFS4_REVOKED_DELEG_STID:
3651 ret = nfserr_deleg_revoked;
3652 }
3653 return ret;
3654}
3655
3656/* Lock the stateid st_mutex, and deal with races with CLOSE */
3657static __be32
3658nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
3659{
3660 __be32 ret;
3661
3662 mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
3663 ret = nfsd4_verify_open_stid(&stp->st_stid);
3664 if (ret != nfs_ok)
3665 mutex_unlock(&stp->st_mutex);
3666 return ret;
3667}
3668
3669static struct nfs4_ol_stateid *
3670nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
3671{
3672 struct nfs4_ol_stateid *stp;
3673 for (;;) {
3674 spin_lock(&fp->fi_lock);
3675 stp = nfsd4_find_existing_open(fp, open);
3676 spin_unlock(&fp->fi_lock);
3677 if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
3678 break;
3679 nfs4_put_stid(&stp->st_stid);
3680 }
3681 return stp;
3682}
3683
3684static struct nfs4_openowner *
3685alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
3686 struct nfsd4_compound_state *cstate)
3687{
3688 struct nfs4_client *clp = cstate->clp;
3689 struct nfs4_openowner *oo, *ret;
3690
3691 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
3692 if (!oo)
3693 return NULL;
3694 oo->oo_owner.so_ops = &openowner_ops;
3695 oo->oo_owner.so_is_open_owner = 1;
3696 oo->oo_owner.so_seqid = open->op_seqid;
3697 oo->oo_flags = 0;
3698 if (nfsd4_has_session(cstate))
3699 oo->oo_flags |= NFS4_OO_CONFIRMED;
3700 oo->oo_time = 0;
3701 oo->oo_last_closed_stid = NULL;
3702 INIT_LIST_HEAD(&oo->oo_close_lru);
3703 spin_lock(&clp->cl_lock);
3704 ret = find_openstateowner_str_locked(strhashval, open, clp);
3705 if (ret == NULL) {
3706 hash_openowner(oo, clp, strhashval);
3707 ret = oo;
3708 } else
3709 nfs4_free_stateowner(&oo->oo_owner);
3710
3711 spin_unlock(&clp->cl_lock);
3712 return ret;
3713}
3714
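/*
 * Initialize and hash the preallocated open stateid for this OPEN.  If
 * another thread raced us and hashed one for the same openowner first,
 * drop ours and return the existing stateid instead.  Either way the
 * returned stateid is locked.
 */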
3715static struct nfs4_ol_stateid *
3716init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
3717{
3718
3719 struct nfs4_openowner *oo = open->op_openowner;
3720 struct nfs4_ol_stateid *retstp = NULL;
3721 struct nfs4_ol_stateid *stp;
3722
3723 stp = open->op_stp;
3724 /* We are moving these outside of the spinlocks to avoid the warnings */
3725 mutex_init(&stp->st_mutex);
3726 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
3727
3728retry:
3729 spin_lock(&oo->oo_owner.so_client->cl_lock);
3730 spin_lock(&fp->fi_lock);
3731
3732 retstp = nfsd4_find_existing_open(fp, open);
3733 if (retstp)
3734 goto out_unlock;
3735
3736 open->op_stp = NULL;
3737 refcount_inc(&stp->st_stid.sc_count);
3738 stp->st_stid.sc_type = NFS4_OPEN_STID;
3739 INIT_LIST_HEAD(&stp->st_locks);
3740 stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
3741 get_nfs4_file(fp);
3742 stp->st_stid.sc_file = fp;
3743 stp->st_access_bmap = 0;
3744 stp->st_deny_bmap = 0;
3745 stp->st_openstp = NULL;
3746 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
3747 list_add(&stp->st_perfile, &fp->fi_stateids);
3748
3749out_unlock:
3750 spin_unlock(&fp->fi_lock);
3751 spin_unlock(&oo->oo_owner.so_client->cl_lock);
3752 if (retstp) {
3753 /* Handle races with CLOSE */
3754 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
3755 nfs4_put_stid(&retstp->st_stid);
3756 goto retry;
3757 }
3758 /* To keep mutex tracking happy */
3759 mutex_unlock(&stp->st_mutex);
3760 stp = retstp;
3761 }
3762 return stp;
3763}
3764
3765/*
3766 * In the 4.0 case we need to keep the owners around a little while to handle
3767 * CLOSE replay. We still do need to release any file access that is held by
3768 * them before returning however.
3769 */
3770static void
3771move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
3772{
3773 struct nfs4_ol_stateid *last;
3774 struct nfs4_openowner *oo = openowner(s->st_stateowner);
3775 struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
3776 nfsd_net_id);
3777
3778 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
3779
3780 /*
3781 * We know that we hold one reference via nfsd4_close, and another
3782 * "persistent" reference for the client. If the refcount is higher
3783 * than 2, then there are still calls in progress that are using this
3784 * stateid. We can't put the sc_file reference until they are finished.
3785 * Wait for the refcount to drop to 2. Since it has been unhashed,
3786 * there should be no danger of the refcount going back up again at
3787 * this point.
3788 */
3789 wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
3790
3791 release_all_access(s);
3792 if (s->st_stid.sc_file) {
3793 put_nfs4_file(s->st_stid.sc_file);
3794 s->st_stid.sc_file = NULL;
3795 }
3796
3797 spin_lock(&nn->client_lock);
3798 last = oo->oo_last_closed_stid;
3799 oo->oo_last_closed_stid = s;
3800 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
3801 oo->oo_time = get_seconds();
3802 spin_unlock(&nn->client_lock);
3803 if (last)
3804 nfs4_put_stid(&last->st_stid);
3805}
3806
3807/* search file_hashtbl[] for file */
3808static struct nfs4_file *
3809find_file_locked(struct knfsd_fh *fh, unsigned int hashval)
3810{
3811 struct nfs4_file *fp;
3812
3813 hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) {
3814 if (fh_match(&fp->fi_fhandle, fh)) {
3815 if (refcount_inc_not_zero(&fp->fi_ref))
3816 return fp;
3817 }
3818 }
3819 return NULL;
3820}
3821
3822struct nfs4_file *
3823find_file(struct knfsd_fh *fh)
3824{
3825 struct nfs4_file *fp;
3826 unsigned int hashval = file_hashval(fh);
3827
3828 rcu_read_lock();
3829 fp = find_file_locked(fh, hashval);
3830 rcu_read_unlock();
3831 return fp;
3832}
3833
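/*
 * Look up the nfs4_file for this filehandle, or insert the caller's
 * preallocated "new" entry if there isn't one yet.  The lockless lookup
 * is repeated under state_lock before inserting, to close the race with
 * a concurrent insert.
 */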
3834static struct nfs4_file *
3835find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
3836{
3837 struct nfs4_file *fp;
3838 unsigned int hashval = file_hashval(fh);
3839
3840 rcu_read_lock();
3841 fp = find_file_locked(fh, hashval);
3842 rcu_read_unlock();
3843 if (fp)
3844 return fp;
3845
3846 spin_lock(&state_lock);
3847 fp = find_file_locked(fh, hashval);
3848 if (likely(fp == NULL)) {
3849 nfsd4_init_file(fh, hashval, new);
3850 fp = new;
3851 }
3852 spin_unlock(&state_lock);
3853
3854 return fp;
3855}
3856
3857/*
3858 * Called to check deny when READ with all zero stateid or
3859 * WRITE with all zero or all one stateid
3860 */
3861static __be32
3862nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
3863{
3864 struct nfs4_file *fp;
3865 __be32 ret = nfs_ok;
3866
3867 fp = find_file(&current_fh->fh_handle);
3868 if (!fp)
3869 return ret;
3870 /* Check for conflicting share reservations */
3871 spin_lock(&fp->fi_lock);
3872 if (fp->fi_share_deny & deny_type)
3873 ret = nfserr_locked;
3874 spin_unlock(&fp->fi_lock);
3875 put_nfs4_file(fp);
3876 return ret;
3877}
3878
3879static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
3880{
3881 struct nfs4_delegation *dp = cb_to_delegation(cb);
3882 struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
3883 nfsd_net_id);
3884
3885 block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
3886
3887 /*
3888 * We can't do this in nfsd_break_deleg_cb because it is
3889 * already holding inode->i_lock.
3890 *
3891 * If the dl_time != 0, then we know that it has already been
3892 * queued for a lease break. Don't queue it again.
3893 */
3894 spin_lock(&state_lock);
3895 if (dp->dl_time == 0) {
3896 dp->dl_time = get_seconds();
3897 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
3898 }
3899 spin_unlock(&state_lock);
3900}
3901
3902static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
3903 struct rpc_task *task)
3904{
3905 struct nfs4_delegation *dp = cb_to_delegation(cb);
3906
3907 if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID)
3908 return 1;
3909
3910 switch (task->tk_status) {
3911 case 0:
3912 return 1;
3913 case -EBADHANDLE:
3914 case -NFS4ERR_BAD_STATEID:
3915 /*
3916 * Race: client probably got cb_recall before open reply
3917 * granting delegation.
3918 */
3919 if (dp->dl_retries--) {
3920 rpc_delay(task, 2 * HZ);
3921 return 0;
3922 }
3923 /*FALLTHRU*/
3924 default:
3925 return -1;
3926 }
3927}
3928
3929static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
3930{
3931 struct nfs4_delegation *dp = cb_to_delegation(cb);
3932
3933 nfs4_put_stid(&dp->dl_stid);
3934}
3935
3936static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
3937 .prepare = nfsd4_cb_recall_prepare,
3938 .done = nfsd4_cb_recall_done,
3939 .release = nfsd4_cb_recall_release,
3940};
3941
3942static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
3943{
3944 /*
3945 * We're assuming the state code never drops its reference
3946 * without first removing the lease. Since we're in this lease
3947 * callback (and since the lease code is serialized by the kernel
3948 * lock) we know the server hasn't removed the lease yet, and
3949 * we know it's safe to take a reference.
3950 */
3951 refcount_inc(&dp->dl_stid.sc_count);
3952 nfsd4_run_cb(&dp->dl_recall);
3953}
3954
3955/* Called from break_lease() with i_lock held. */
3956static bool
3957nfsd_break_deleg_cb(struct file_lock *fl)
3958{
3959 bool ret = false;
3960 struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
3961 struct nfs4_file *fp = dp->dl_stid.sc_file;
3962
3963 /*
3964 * We don't want the locks code to time out the lease for us;
3965 * we'll remove it ourselves if a delegation isn't returned
3966 * in time:
3967 */
3968 fl->fl_break_time = 0;
3969
3970 spin_lock(&fp->fi_lock);
3971 fp->fi_had_conflict = true;
3972 nfsd_break_one_deleg(dp);
3973 spin_unlock(&fp->fi_lock);
3974 return ret;
3975}
3976
3977static int
3978nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
3979 struct list_head *dispose)
3980{
3981 if (arg & F_UNLCK)
3982 return lease_modify(onlist, arg, dispose);
3983 else
3984 return -EAGAIN;
3985}
3986
3987static const struct lock_manager_operations nfsd_lease_mng_ops = {
3988 .lm_break = nfsd_break_deleg_cb,
3989 .lm_change = nfsd_change_deleg_cb,
3990};
3991
3992static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
3993{
3994 if (nfsd4_has_session(cstate))
3995 return nfs_ok;
3996 if (seqid == so->so_seqid - 1)
3997 return nfserr_replay_me;
3998 if (seqid == so->so_seqid)
3999 return nfs_ok;
4000 return nfserr_bad_seqid;
4001}
4002
4003static __be32 lookup_clientid(clientid_t *clid,
4004 struct nfsd4_compound_state *cstate,
4005 struct nfsd_net *nn)
4006{
4007 struct nfs4_client *found;
4008
4009 if (cstate->clp) {
4010 found = cstate->clp;
4011 if (!same_clid(&found->cl_clientid, clid))
4012 return nfserr_stale_clientid;
4013 return nfs_ok;
4014 }
4015
4016 if (STALE_CLIENTID(clid, nn))
4017 return nfserr_stale_clientid;
4018
4019 /*
4020 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
4021 * cached already then we know this is for v4.0 and "sessions"
4022 * will be false.
4023 */
4024 WARN_ON_ONCE(cstate->session);
4025 spin_lock(&nn->client_lock);
4026 found = find_confirmed_client(clid, false, nn);
4027 if (!found) {
4028 spin_unlock(&nn->client_lock);
4029 return nfserr_expired;
4030 }
4031 atomic_inc(&found->cl_refcount);
4032 spin_unlock(&nn->client_lock);
4033
4034 /* Cache the nfs4_client in cstate! */
4035 cstate->clp = found;
4036 return nfs_ok;
4037}
4038
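/*
 * First pass over an OPEN: look up the client, find or create the
 * openowner, and preallocate the nfs4_file, the open stateid and (for
 * pNFS exports) the client odstate, so that nfsd4_process_open2 has as
 * few failure points as possible once the file has been created or
 * looked up.
 */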
4039__be32
4040nfsd4_process_open1(struct nfsd4_compound_state *cstate,
4041 struct nfsd4_open *open, struct nfsd_net *nn)
4042{
4043 clientid_t *clientid = &open->op_clientid;
4044 struct nfs4_client *clp = NULL;
4045 unsigned int strhashval;
4046 struct nfs4_openowner *oo = NULL;
4047 __be32 status;
4048
4049 if (STALE_CLIENTID(&open->op_clientid, nn))
4050 return nfserr_stale_clientid;
4051 /*
4052 * In case we need it later, after we've already created the
4053 * file and don't want to risk a further failure:
4054 */
4055 open->op_file = nfsd4_alloc_file();
4056 if (open->op_file == NULL)
4057 return nfserr_jukebox;
4058
4059 status = lookup_clientid(clientid, cstate, nn);
4060 if (status)
4061 return status;
4062 clp = cstate->clp;
4063
4064 strhashval = ownerstr_hashval(&open->op_owner);
4065 oo = find_openstateowner_str(strhashval, open, clp);
4066 open->op_openowner = oo;
4067 if (!oo) {
4068 goto new_owner;
4069 }
4070 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4071 /* Replace unconfirmed owners without checking for replay. */
4072 release_openowner(oo);
4073 open->op_openowner = NULL;
4074 goto new_owner;
4075 }
4076 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
4077 if (status)
4078 return status;
4079 goto alloc_stateid;
4080new_owner:
4081 oo = alloc_init_open_stateowner(strhashval, open, cstate);
4082 if (oo == NULL)
4083 return nfserr_jukebox;
4084 open->op_openowner = oo;
4085alloc_stateid:
4086 open->op_stp = nfs4_alloc_open_stateid(clp);
4087 if (!open->op_stp)
4088 return nfserr_jukebox;
4089
4090 if (nfsd4_has_session(cstate) &&
4091 (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
4092 open->op_odstate = alloc_clnt_odstate(clp);
4093 if (!open->op_odstate)
4094 return nfserr_jukebox;
4095 }
4096
4097 return nfs_ok;
4098}
4099
4100static inline __be32
4101nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
4102{
4103 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
4104 return nfserr_openmode;
4105 else
4106 return nfs_ok;
4107}
4108
4109static int share_access_to_flags(u32 share_access)
4110{
4111 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
4112}
4113
4114static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
4115{
4116 struct nfs4_stid *ret;
4117
4118 ret = find_stateid_by_type(cl, s,
4119 NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
4120 if (!ret)
4121 return NULL;
4122 return delegstateid(ret);
4123}
4124
4125static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
4126{
4127 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
4128 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
4129}
4130
4131static __be32
4132nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
4133 struct nfs4_delegation **dp)
4134{
4135 int flags;
4136 __be32 status = nfserr_bad_stateid;
4137 struct nfs4_delegation *deleg;
4138
4139 deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
4140 if (deleg == NULL)
4141 goto out;
4142 if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
4143 nfs4_put_stid(&deleg->dl_stid);
4144 if (cl->cl_minorversion)
4145 status = nfserr_deleg_revoked;
4146 goto out;
4147 }
4148 flags = share_access_to_flags(open->op_share_access);
4149 status = nfs4_check_delegmode(deleg, flags);
4150 if (status) {
4151 nfs4_put_stid(&deleg->dl_stid);
4152 goto out;
4153 }
4154 *dp = deleg;
4155out:
4156 if (!nfsd4_is_deleg_cur(open))
4157 return nfs_ok;
4158 if (status)
4159 return status;
4160 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4161 return nfs_ok;
4162}
4163
4164static inline int nfs4_access_to_access(u32 nfs4_access)
4165{
4166 int flags = 0;
4167
4168 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
4169 flags |= NFSD_MAY_READ;
4170 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
4171 flags |= NFSD_MAY_WRITE;
4172 return flags;
4173}
4174
4175static inline __be32
4176nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
4177 struct nfsd4_open *open)
4178{
4179 struct iattr iattr = {
4180 .ia_valid = ATTR_SIZE,
4181 .ia_size = 0,
4182 };
4183 if (!open->op_truncate)
4184 return 0;
4185 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
4186 return nfserr_inval;
4187 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
4188}
4189
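/*
 * Grant the access and deny modes requested by this OPEN: check for
 * share reservation conflicts, record the new bits in the nfs4_file and
 * the stateid, and open a struct file of the right mode if the file
 * does not already have one cached.  On failure the access and deny
 * bitmaps are rolled back.
 */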
4190static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
4191 struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
4192 struct nfsd4_open *open)
4193{
4194 struct file *filp = NULL;
4195 __be32 status;
4196 int oflag = nfs4_access_to_omode(open->op_share_access);
4197 int access = nfs4_access_to_access(open->op_share_access);
4198 unsigned char old_access_bmap, old_deny_bmap;
4199
4200 spin_lock(&fp->fi_lock);
4201
4202 /*
4203 * Are we trying to set a deny mode that would conflict with
4204 * current access?
4205 */
4206 status = nfs4_file_check_deny(fp, open->op_share_deny);
4207 if (status != nfs_ok) {
4208 spin_unlock(&fp->fi_lock);
4209 goto out;
4210 }
4211
4212 /* set access to the file */
4213 status = nfs4_file_get_access(fp, open->op_share_access);
4214 if (status != nfs_ok) {
4215 spin_unlock(&fp->fi_lock);
4216 goto out;
4217 }
4218
4219 /* Set access bits in stateid */
4220 old_access_bmap = stp->st_access_bmap;
4221 set_access(open->op_share_access, stp);
4222
4223 /* Set new deny mask */
4224 old_deny_bmap = stp->st_deny_bmap;
4225 set_deny(open->op_share_deny, stp);
4226 fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4227
4228 if (!fp->fi_fds[oflag]) {
4229 spin_unlock(&fp->fi_lock);
4230 status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
4231 if (status)
4232 goto out_put_access;
4233 spin_lock(&fp->fi_lock);
4234 if (!fp->fi_fds[oflag]) {
4235 fp->fi_fds[oflag] = filp;
4236 filp = NULL;
4237 }
4238 }
4239 spin_unlock(&fp->fi_lock);
4240 if (filp)
4241 fput(filp);
4242
4243 status = nfsd4_truncate(rqstp, cur_fh, open);
4244 if (status)
4245 goto out_put_access;
4246out:
4247 return status;
4248out_put_access:
4249 stp->st_access_bmap = old_access_bmap;
4250 nfs4_file_put_access(fp, open->op_share_access);
4251 reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
4252 goto out;
4253}
4254
4255static __be32
4256nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
4257{
4258 __be32 status;
4259 unsigned char old_deny_bmap = stp->st_deny_bmap;
4260
4261 if (!test_access(open->op_share_access, stp))
4262 return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
4263
4264 /* test and set deny mode */
4265 spin_lock(&fp->fi_lock);
4266 status = nfs4_file_check_deny(fp, open->op_share_deny);
4267 if (status == nfs_ok) {
4268 set_deny(open->op_share_deny, stp);
4269 fp->fi_share_deny |=
4270 (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
4271 }
4272 spin_unlock(&fp->fi_lock);
4273
4274 if (status != nfs_ok)
4275 return status;
4276
4277 status = nfsd4_truncate(rqstp, cur_fh, open);
4278 if (status != nfs_ok)
4279 reset_union_bmap_deny(old_deny_bmap, stp);
4280 return status;
4281}
4282
4283/* Should we give out recallable state?: */
4284static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
4285{
4286 if (clp->cl_cb_state == NFSD4_CB_UP)
4287 return true;
4288 /*
4289 * In the sessions case, since we don't have to establish a
4290 * separate connection for callbacks, we assume it's OK
4291 * until we hear otherwise:
4292 */
4293 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
4294}
4295
4296static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
4297 int flag)
4298{
4299 struct file_lock *fl;
4300
4301 fl = locks_alloc_lock();
4302 if (!fl)
4303 return NULL;
4304 fl->fl_lmops = &nfsd_lease_mng_ops;
4305 fl->fl_flags = FL_DELEG;
4306 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
4307 fl->fl_end = OFFSET_MAX;
4308 fl->fl_owner = (fl_owner_t)dp;
4309 fl->fl_pid = current->tgid;
4310 fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file;
4311 return fl;
4312}
4313
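/*
 * Try to establish a read delegation: take a reference on the file's
 * readable struct file, install an FL_DELEG lease via vfs_setlease(),
 * and hash the delegation.  A conflicting open or an already existing
 * delegation for this client makes us bail out with -EAGAIN.
 */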
4314static struct nfs4_delegation *
4315nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
4316 struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
4317{
4318 int status = 0;
4319 struct nfs4_delegation *dp;
4320 struct file *filp;
4321 struct file_lock *fl;
4322
4323 /*
4324 * The fi_had_conflict and nfs_get_existing_delegation checks
4325 * here are just optimizations; we'll need to recheck them at
4326 * the end:
4327 */
4328 if (fp->fi_had_conflict)
4329 return ERR_PTR(-EAGAIN);
4330
4331 filp = find_readable_file(fp);
4332 if (!filp) {
4333 /* We should always have a readable file here */
4334 WARN_ON_ONCE(1);
4335 return ERR_PTR(-EBADF);
4336 }
4337 spin_lock(&state_lock);
4338 spin_lock(&fp->fi_lock);
4339 if (nfs4_delegation_exists(clp, fp))
4340 status = -EAGAIN;
4341 else if (!fp->fi_deleg_file) {
4342 fp->fi_deleg_file = filp;
4343 /* increment early to prevent fi_deleg_file from being
4344 * cleared */
4345 fp->fi_delegees = 1;
4346 filp = NULL;
4347 } else
4348 fp->fi_delegees++;
4349 spin_unlock(&fp->fi_lock);
4350 spin_unlock(&state_lock);
4351 if (filp)
4352 fput(filp);
4353 if (status)
4354 return ERR_PTR(status);
4355
4356 status = -ENOMEM;
4357 dp = alloc_init_deleg(clp, fp, fh, odstate);
4358 if (!dp)
4359 goto out_delegees;
4360
4361 fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
4362 if (!fl)
4363 goto out_stid;
4364
4365 status = vfs_setlease(fp->fi_deleg_file, fl->fl_type, &fl, NULL);
4366 if (fl)
4367 locks_free_lock(fl);
4368 if (status)
4369 goto out_clnt_odstate;
4370
4371 spin_lock(&state_lock);
4372 spin_lock(&fp->fi_lock);
4373 if (fp->fi_had_conflict)
4374 status = -EAGAIN;
4375 else
4376 status = hash_delegation_locked(dp, fp);
4377 spin_unlock(&fp->fi_lock);
4378 spin_unlock(&state_lock);
4379
4380 if (status)
4381 destroy_unhashed_deleg(dp);
4382 return dp;
4383out_clnt_odstate:
4384 put_clnt_odstate(dp->dl_clnt_odstate);
4385out_stid:
4386 nfs4_put_stid(&dp->dl_stid);
4387out_delegees:
4388 put_deleg_file(fp);
4389 return ERR_PTR(status);
4390}
4391
4392static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
4393{
4394 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4395 if (status == -EAGAIN)
4396 open->op_why_no_deleg = WND4_CONTENTION;
4397 else {
4398 open->op_why_no_deleg = WND4_RESOURCE;
4399 switch (open->op_deleg_want) {
4400 case NFS4_SHARE_WANT_READ_DELEG:
4401 case NFS4_SHARE_WANT_WRITE_DELEG:
4402 case NFS4_SHARE_WANT_ANY_DELEG:
4403 break;
4404 case NFS4_SHARE_WANT_CANCEL:
4405 open->op_why_no_deleg = WND4_CANCELLED;
4406 break;
4407 case NFS4_SHARE_WANT_NO_DELEG:
4408 WARN_ON_ONCE(1);
4409 }
4410 }
4411}
4412
4413/*
4414 * Attempt to hand out a delegation.
4415 *
4416 * Note we don't support write delegations, and won't until the vfs has
4417 * proper support for them.
4418 */
4419static void
4420nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
4421 struct nfs4_ol_stateid *stp)
4422{
4423 struct nfs4_delegation *dp;
4424 struct nfs4_openowner *oo = openowner(stp->st_stateowner);
4425 struct nfs4_client *clp = stp->st_stid.sc_client;
4426 int cb_up;
4427 int status = 0;
4428
4429 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
4430 open->op_recall = 0;
4431 switch (open->op_claim_type) {
4432 case NFS4_OPEN_CLAIM_PREVIOUS:
4433 if (!cb_up)
4434 open->op_recall = 1;
4435 if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
4436 goto out_no_deleg;
4437 break;
4438 case NFS4_OPEN_CLAIM_NULL:
4439 case NFS4_OPEN_CLAIM_FH:
4440 /*
4441 * Let's not give out any delegations till everyone's
4442 * had the chance to reclaim theirs, *and* until
4443 * NLM locks have all been reclaimed:
4444 */
4445 if (locks_in_grace(clp->net))
4446 goto out_no_deleg;
4447 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
4448 goto out_no_deleg;
4449 /*
4450 * Also, if the file was opened for write or
4451 * create, there's a good chance the client's
4452 * about to write to it, resulting in an
4453 * immediate recall (since we don't support
4454 * write delegations):
4455 */
4456 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
4457 goto out_no_deleg;
4458 if (open->op_create == NFS4_OPEN_CREATE)
4459 goto out_no_deleg;
4460 break;
4461 default:
4462 goto out_no_deleg;
4463 }
4464 dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
4465 if (IS_ERR(dp))
4466 goto out_no_deleg;
4467
4468 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
4469
4470 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
4471 STATEID_VAL(&dp->dl_stid.sc_stateid));
4472 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
4473 nfs4_put_stid(&dp->dl_stid);
4474 return;
4475out_no_deleg:
4476 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
4477 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
4478 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
4479 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
4480 open->op_recall = 1;
4481 }
4482
4483 /* 4.1 client asking for a delegation? */
4484 if (open->op_deleg_want)
4485 nfsd4_open_deleg_none_ext(open, status);
4486 return;
4487}
4488
4489static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
4490 struct nfs4_delegation *dp)
4491{
4492 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
4493 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4494 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4495 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
4496 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
4497 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
4498 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4499 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
4500 }
4501	/* Otherwise the client must be confused, wanting a delegation
4502	 * it already has; therefore we don't return
4503	 * NFS4_OPEN_DELEGATE_NONE_EXT with a reason.
4504	 */
4505}
4506
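/*
 * Complete processing of an OPEN: find or create the nfs4_file, find or
 * create the open stateid (upgrading an existing open if one is found),
 * then try to hand out a delegation and fill in the result flags.
 */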
4507__be32
4508nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
4509{
4510 struct nfsd4_compoundres *resp = rqstp->rq_resp;
4511 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
4512 struct nfs4_file *fp = NULL;
4513 struct nfs4_ol_stateid *stp = NULL;
4514 struct nfs4_delegation *dp = NULL;
4515 __be32 status;
4516 bool new_stp = false;
4517
4518	/*
4519	 * Look up the file; if found, look up the stateid, check the open
4520	 * request, and check for delegations in the process of being
4521	 * recalled. If not found, create the nfs4_file struct.
4522	 */
4523	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
4524 if (fp != open->op_file) {
4525 status = nfs4_check_deleg(cl, open, &dp);
4526 if (status)
4527 goto out;
4528 stp = nfsd4_find_and_lock_existing_open(fp, open);
4529 } else {
4530 open->op_file = NULL;
4531 status = nfserr_bad_stateid;
4532 if (nfsd4_is_deleg_cur(open))
4533 goto out;
4534 }
4535
4536 if (!stp) {
4537 stp = init_open_stateid(fp, open);
4538 if (!open->op_stp)
4539 new_stp = true;
4540 }
4541
4542 /*
4543 * OPEN the file, or upgrade an existing OPEN.
4544 * If truncate fails, the OPEN fails.
4545 *
4546 * stp is already locked.
4547 */
4548 if (!new_stp) {
4549 /* Stateid was found, this is an OPEN upgrade */
4550 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
4551 if (status) {
4552 mutex_unlock(&stp->st_mutex);
4553 goto out;
4554 }
4555 } else {
4556 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
4557 if (status) {
4558 stp->st_stid.sc_type = NFS4_CLOSED_STID;
4559 release_open_stateid(stp);
4560 mutex_unlock(&stp->st_mutex);
4561 goto out;
4562 }
4563
4564 stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
4565 open->op_odstate);
4566 if (stp->st_clnt_odstate == open->op_odstate)
4567 open->op_odstate = NULL;
4568 }
4569
4570 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
4571 mutex_unlock(&stp->st_mutex);
4572
4573 if (nfsd4_has_session(&resp->cstate)) {
4574 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
4575 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
4576 open->op_why_no_deleg = WND4_NOT_WANTED;
4577 goto nodeleg;
4578 }
4579 }
4580
4581 /*
4582 * Attempt to hand out a delegation. No error return, because the
4583 * OPEN succeeds even if we fail.
4584 */
4585 nfs4_open_delegation(current_fh, open, stp);
4586nodeleg:
4587 status = nfs_ok;
4588
4589 dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
4590 STATEID_VAL(&stp->st_stid.sc_stateid));
4591out:
4592 /* 4.1 client trying to upgrade/downgrade delegation? */
4593 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
4594 open->op_deleg_want)
4595 nfsd4_deleg_xgrade_none_ext(open, dp);
4596
4597 if (fp)
4598 put_nfs4_file(fp);
4599 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
4600 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
4601 /*
4602 * To finish the open response, we just need to set the rflags.
4603 */
4604 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
4605 if (nfsd4_has_session(&resp->cstate))
4606 open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
4607 else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
4608 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
4609
4610 if (dp)
4611 nfs4_put_stid(&dp->dl_stid);
4612 if (stp)
4613 nfs4_put_stid(&stp->st_stid);
4614
4615 return status;
4616}
4617
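/*
 * Release any state still held in the nfsd4_open argument after OPEN
 * processing (open owner, preallocated file, stateid and odstate).
 */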
4618void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
4619 struct nfsd4_open *open)
4620{
4621 if (open->op_openowner) {
4622 struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
4623
4624 nfsd4_cstate_assign_replay(cstate, so);
4625 nfs4_put_stateowner(so);
4626 }
4627 if (open->op_file)
4628 kmem_cache_free(file_slab, open->op_file);
4629 if (open->op_stp)
4630 nfs4_put_stid(&open->op_stp->st_stid);
4631 if (open->op_odstate)
4632 kmem_cache_free(odstate_slab, open->op_odstate);
4633}
4634
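/*
 * RENEW (NFSv4.0 only): look up the client for the given clientid and
 * return nfserr_cb_path_down if it holds delegations but its callback
 * channel is not up.
 */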
4635__be32
4636nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4637 union nfsd4_op_u *u)
4638{
4639 clientid_t *clid = &u->renew;
4640 struct nfs4_client *clp;
4641 __be32 status;
4642 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4643
4644 dprintk("process_renew(%08x/%08x): starting\n",
4645 clid->cl_boot, clid->cl_id);
4646 status = lookup_clientid(clid, cstate, nn);
4647 if (status)
4648 goto out;
4649 clp = cstate->clp;
4650 status = nfserr_cb_path_down;
4651 if (!list_empty(&clp->cl_delegations)
4652 && clp->cl_cb_state != NFSD4_CB_UP)
4653 goto out;
4654 status = nfs_ok;
4655out:
4656 return status;
4657}
4658
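/*
 * End the NFSv4 grace period for this net namespace: record that the
 * grace period is over and tell the locks code to stop allowing reclaims.
 */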
4659void
4660nfsd4_end_grace(struct nfsd_net *nn)
4661{
4662 /* do nothing if grace period already ended */
4663 if (nn->grace_ended)
4664 return;
4665
4666 dprintk("NFSD: end of grace period\n");
4667 nn->grace_ended = true;
4668 /*
4669 * If the server goes down again right now, an NFSv4
4670 * client will still be allowed to reclaim after it comes back up,
4671 * even if it hasn't yet had a chance to reclaim state this time.
4672 *
4673 */
4674 nfsd4_record_grace_done(nn);
4675 /*
4676 * At this point, NFSv4 clients can still reclaim. But if the
4677 * server crashes, any that have not yet reclaimed will be out
4678 * of luck on the next boot.
4679 *
4680 * (NFSv4.1+ clients are considered to have reclaimed once they
4681 * call RECLAIM_COMPLETE. NFSv4.0 clients are considered to
4682 * have reclaimed after their first OPEN.)
4683 */
4684 locks_end_grace(&nn->nfsd4_manager);
4685 /*
4686 * At this point, and once lockd and/or any other containers
4687 * exit their grace period, further reclaims will fail and
4688 * regular locking can resume.
4689 */
4690}
4691
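/*
 * The laundromat: expire idle clients, revoke timed-out delegations on
 * the del_recall_lru, release last-closed stateids of open owners, and
 * time out blocked lock requests.  Returns the number of seconds until
 * the next run is needed.
 */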
4692static time_t
4693nfs4_laundromat(struct nfsd_net *nn)
4694{
4695 struct nfs4_client *clp;
4696 struct nfs4_openowner *oo;
4697 struct nfs4_delegation *dp;
4698 struct nfs4_ol_stateid *stp;
4699 struct nfsd4_blocked_lock *nbl;
4700 struct list_head *pos, *next, reaplist;
4701 time_t cutoff = get_seconds() - nn->nfsd4_lease;
4702 time_t t, new_timeo = nn->nfsd4_lease;
4703
4704 dprintk("NFSD: laundromat service - starting\n");
4705 nfsd4_end_grace(nn);
4706 INIT_LIST_HEAD(&reaplist);
4707 spin_lock(&nn->client_lock);
4708 list_for_each_safe(pos, next, &nn->client_lru) {
4709 clp = list_entry(pos, struct nfs4_client, cl_lru);
4710 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
4711 t = clp->cl_time - cutoff;
4712 new_timeo = min(new_timeo, t);
4713 break;
4714 }
4715 if (mark_client_expired_locked(clp)) {
4716 dprintk("NFSD: client in use (clientid %08x)\n",
4717 clp->cl_clientid.cl_id);
4718 continue;
4719 }
4720 list_add(&clp->cl_lru, &reaplist);
4721 }
4722 spin_unlock(&nn->client_lock);
4723 list_for_each_safe(pos, next, &reaplist) {
4724 clp = list_entry(pos, struct nfs4_client, cl_lru);
4725 dprintk("NFSD: purging unused client (clientid %08x)\n",
4726 clp->cl_clientid.cl_id);
4727 list_del_init(&clp->cl_lru);
4728 expire_client(clp);
4729 }
4730 spin_lock(&state_lock);
4731 list_for_each_safe(pos, next, &nn->del_recall_lru) {
4732 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
4733 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
4734 t = dp->dl_time - cutoff;
4735 new_timeo = min(new_timeo, t);
4736 break;
4737 }
4738 WARN_ON(!unhash_delegation_locked(dp));
4739 list_add(&dp->dl_recall_lru, &reaplist);
4740 }
4741 spin_unlock(&state_lock);
4742 while (!list_empty(&reaplist)) {
4743 dp = list_first_entry(&reaplist, struct nfs4_delegation,
4744 dl_recall_lru);
4745 list_del_init(&dp->dl_recall_lru);
4746 revoke_delegation(dp);
4747 }
4748
4749 spin_lock(&nn->client_lock);
4750 while (!list_empty(&nn->close_lru)) {
4751 oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
4752 oo_close_lru);
4753 if (time_after((unsigned long)oo->oo_time,
4754 (unsigned long)cutoff)) {
4755 t = oo->oo_time - cutoff;
4756 new_timeo = min(new_timeo, t);
4757 break;
4758 }
4759 list_del_init(&oo->oo_close_lru);
4760 stp = oo->oo_last_closed_stid;
4761 oo->oo_last_closed_stid = NULL;
4762 spin_unlock(&nn->client_lock);
4763 nfs4_put_stid(&stp->st_stid);
4764 spin_lock(&nn->client_lock);
4765 }
4766 spin_unlock(&nn->client_lock);
4767
4768 /*
4769	 * It's possible for a client to try to acquire an already held lock
4770	 * that is being held for a long time, and then lose interest in it.
4771	 * So, we clean out any un-revisited requests after a lease period
4772 * under the assumption that the client is no longer interested.
4773 *
4774 * RFC5661, sec. 9.6 states that the client must not rely on getting
4775 * notifications and must continue to poll for locks, even when the
4776 * server supports them. Thus this shouldn't lead to clients blocking
4777 * indefinitely once the lock does become free.
4778 */
4779 BUG_ON(!list_empty(&reaplist));
4780 spin_lock(&nn->blocked_locks_lock);
4781 while (!list_empty(&nn->blocked_locks_lru)) {
4782 nbl = list_first_entry(&nn->blocked_locks_lru,
4783 struct nfsd4_blocked_lock, nbl_lru);
4784 if (time_after((unsigned long)nbl->nbl_time,
4785 (unsigned long)cutoff)) {
4786 t = nbl->nbl_time - cutoff;
4787 new_timeo = min(new_timeo, t);
4788 break;
4789 }
4790 list_move(&nbl->nbl_lru, &reaplist);
4791 list_del_init(&nbl->nbl_list);
4792 }
4793 spin_unlock(&nn->blocked_locks_lock);
4794
4795 while (!list_empty(&reaplist)) {
4796 nbl = list_first_entry(&reaplist,
4797 struct nfsd4_blocked_lock, nbl_lru);
4798 list_del_init(&nbl->nbl_lru);
4799 posix_unblock_lock(&nbl->nbl_lock);
4800 free_blocked_lock(nbl);
4801 }
4802
4803 new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
4804 return new_timeo;
4805}
4806
4807static struct workqueue_struct *laundry_wq;
4808static void laundromat_main(struct work_struct *);
4809
4810static void
4811laundromat_main(struct work_struct *laundry)
4812{
4813 time_t t;
4814 struct delayed_work *dwork = to_delayed_work(laundry);
4815 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
4816 laundromat_work);
4817
4818 t = nfs4_laundromat(nn);
4819 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
4820 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
4821}
4822
4823static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
4824{
4825 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
4826 return nfserr_bad_stateid;
4827 return nfs_ok;
4828}
4829
4830static inline int
4831access_permit_read(struct nfs4_ol_stateid *stp)
4832{
4833 return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
4834 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
4835 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
4836}
4837
4838static inline int
4839access_permit_write(struct nfs4_ol_stateid *stp)
4840{
4841 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
4842 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
4843}
4844
4845static
4846__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
4847{
4848 __be32 status = nfserr_openmode;
4849
4850	/* For lock stateids, we test the parent open, not the lock: */
4851 if (stp->st_openstp)
4852 stp = stp->st_openstp;
4853 if ((flags & WR_STATE) && !access_permit_write(stp))
4854 goto out;
4855 if ((flags & RD_STATE) && !access_permit_read(stp))
4856 goto out;
4857 status = nfs_ok;
4858out:
4859 return status;
4860}
4861
4862static inline __be32
4863check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
4864{
4865 if (ONE_STATEID(stateid) && (flags & RD_STATE))
4866 return nfs_ok;
4867 else if (opens_in_grace(net)) {
4868 /* Answer in remaining cases depends on existence of
4869 * conflicting state; so we must wait out the grace period. */
4870 return nfserr_grace;
4871 } else if (flags & WR_STATE)
4872 return nfs4_share_conflict(current_fh,
4873 NFS4_SHARE_DENY_WRITE);
4874 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
4875 return nfs4_share_conflict(current_fh,
4876 NFS4_SHARE_DENY_READ);
4877}
4878
4879/*
4880 * Allow READ/WRITE during grace period on recovered state only for files
4881 * that are not able to provide mandatory locking.
4882 */
4883static inline int
4884grace_disallows_io(struct net *net, struct inode *inode)
4885{
4886 return opens_in_grace(net) && mandatory_lock(inode);
4887}
4888
4889static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
4890{
4891 /*
4892 * When sessions are used the stateid generation number is ignored
4893 * when it is zero.
4894 */
4895 if (has_session && in->si_generation == 0)
4896 return nfs_ok;
4897
4898 if (in->si_generation == ref->si_generation)
4899 return nfs_ok;
4900
4901 /* If the client sends us a stateid from the future, it's buggy: */
4902 if (nfsd4_stateid_generation_after(in, ref))
4903 return nfserr_bad_stateid;
4904 /*
4905 * However, we could see a stateid from the past, even from a
4906 * non-buggy client. For example, if the client sends a lock
4907 * while some IO is outstanding, the lock may bump si_generation
4908 * while the IO is still in flight. The client could avoid that
4909 * situation by waiting for responses on all the IO requests,
4910 * but better performance may result in retrying IO that
4911 * receives an old_stateid error if requests are rarely
4912 * reordered in flight:
4913 */
4914 return nfserr_old_stateid;
4915}
4916
4917static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
4918{
4919 __be32 ret;
4920
4921 spin_lock(&s->sc_lock);
4922 ret = nfsd4_verify_open_stid(s);
4923 if (ret == nfs_ok)
4924 ret = check_stateid_generation(in, &s->sc_stateid, has_session);
4925 spin_unlock(&s->sc_lock);
4926 return ret;
4927}
4928
4929static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
4930{
4931 if (ols->st_stateowner->so_is_open_owner &&
4932 !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
4933 return nfserr_bad_stateid;
4934 return nfs_ok;
4935}
4936
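/*
 * Classify a single stateid for TEST_STATEID: returns nfs_ok for a valid
 * delegation, open or lock stateid belonging to this client, and an
 * appropriate error (bad, revoked, or old) otherwise.
 */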
4937static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
4938{
4939 struct nfs4_stid *s;
4940 __be32 status = nfserr_bad_stateid;
4941
4942 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
4943 CLOSE_STATEID(stateid))
4944 return status;
4945 /* Client debugging aid. */
4946 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
4947 char addr_str[INET6_ADDRSTRLEN];
4948 rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
4949 sizeof(addr_str));
4950 pr_warn_ratelimited("NFSD: client %s testing state ID "
4951 "with incorrect client ID\n", addr_str);
4952 return status;
4953 }
4954 spin_lock(&cl->cl_lock);
4955 s = find_stateid_locked(cl, stateid);
4956 if (!s)
4957 goto out_unlock;
4958 status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
4959 if (status)
4960 goto out_unlock;
4961 switch (s->sc_type) {
4962 case NFS4_DELEG_STID:
4963 status = nfs_ok;
4964 break;
4965 case NFS4_REVOKED_DELEG_STID:
4966 status = nfserr_deleg_revoked;
4967 break;
4968 case NFS4_OPEN_STID:
4969 case NFS4_LOCK_STID:
4970 status = nfsd4_check_openowner_confirmed(openlockstateid(s));
4971 break;
4972 default:
4973 printk("unknown stateid type %x\n", s->sc_type);
4974 /* Fallthrough */
4975 case NFS4_CLOSED_STID:
4976 case NFS4_CLOSED_DELEG_STID:
4977 status = nfserr_bad_stateid;
4978 }
4979out_unlock:
4980 spin_unlock(&cl->cl_lock);
4981 return status;
4982}
4983
4984__be32
4985nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
4986 stateid_t *stateid, unsigned char typemask,
4987 struct nfs4_stid **s, struct nfsd_net *nn)
4988{
4989 __be32 status;
4990 bool return_revoked = false;
4991
4992 /*
4993	 * Only return revoked delegations if explicitly asked;
4994	 * otherwise we report revoked or bad_stateid status.
4995 */
4996 if (typemask & NFS4_REVOKED_DELEG_STID)
4997 return_revoked = true;
4998 else if (typemask & NFS4_DELEG_STID)
4999 typemask |= NFS4_REVOKED_DELEG_STID;
5000
5001 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
5002 CLOSE_STATEID(stateid))
5003 return nfserr_bad_stateid;
5004 status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
5005 if (status == nfserr_stale_clientid) {
5006 if (cstate->session)
5007 return nfserr_bad_stateid;
5008 return nfserr_stale_stateid;
5009 }
5010 if (status)
5011 return status;
5012 *s = find_stateid_by_type(cstate->clp, stateid, typemask);
5013 if (!*s)
5014 return nfserr_bad_stateid;
5015 if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
5016 nfs4_put_stid(*s);
5017 if (cstate->minorversion)
5018 return nfserr_deleg_revoked;
5019 return nfserr_bad_stateid;
5020 }
5021 return nfs_ok;
5022}
5023
5024static struct file *
5025nfs4_find_file(struct nfs4_stid *s, int flags)
5026{
5027 if (!s)
5028 return NULL;
5029
5030 switch (s->sc_type) {
5031 case NFS4_DELEG_STID:
5032 if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
5033 return NULL;
5034 return get_file(s->sc_file->fi_deleg_file);
5035 case NFS4_OPEN_STID:
5036 case NFS4_LOCK_STID:
5037 if (flags & RD_STATE)
5038 return find_readable_file(s->sc_file);
5039 else
5040 return find_writeable_file(s->sc_file);
5041 break;
5042 }
5043
5044 return NULL;
5045}
5046
5047static __be32
5048nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
5049{
5050 __be32 status;
5051
5052 status = nfsd4_check_openowner_confirmed(ols);
5053 if (status)
5054 return status;
5055 return nfs4_check_openmode(ols, flags);
5056}
5057
5058static __be32
5059nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
5060 struct file **filpp, bool *tmp_file, int flags)
5061{
5062 int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
5063 struct file *file;
5064 __be32 status;
5065
5066 file = nfs4_find_file(s, flags);
5067 if (file) {
5068 status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
5069 acc | NFSD_MAY_OWNER_OVERRIDE);
5070 if (status) {
5071 fput(file);
5072 return status;
5073 }
5074
5075 *filpp = file;
5076 } else {
5077 status = nfsd_open(rqstp, fhp, S_IFREG, acc, filpp);
5078 if (status)
5079 return status;
5080
5081 if (tmp_file)
5082 *tmp_file = true;
5083 }
5084
5085 return 0;
5086}
5087
5088/*
5089 * Checks for stateid operations
5090 */
5091__be32
5092nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
5093 struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
5094 stateid_t *stateid, int flags, struct file **filpp, bool *tmp_file)
5095{
5096 struct inode *ino = d_inode(fhp->fh_dentry);
5097 struct net *net = SVC_NET(rqstp);
5098 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5099 struct nfs4_stid *s = NULL;
5100 __be32 status;
5101
5102 if (filpp)
5103 *filpp = NULL;
5104 if (tmp_file)
5105 *tmp_file = false;
5106
5107 if (grace_disallows_io(net, ino))
5108 return nfserr_grace;
5109
5110 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
5111 status = check_special_stateids(net, fhp, stateid, flags);
5112 goto done;
5113 }
5114
5115 status = nfsd4_lookup_stateid(cstate, stateid,
5116 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
5117 &s, nn);
5118 if (status)
5119 return status;
5120 status = nfsd4_stid_check_stateid_generation(stateid, s,
5121 nfsd4_has_session(cstate));
5122 if (status)
5123 goto out;
5124
5125 switch (s->sc_type) {
5126 case NFS4_DELEG_STID:
5127 status = nfs4_check_delegmode(delegstateid(s), flags);
5128 break;
5129 case NFS4_OPEN_STID:
5130 case NFS4_LOCK_STID:
5131 status = nfs4_check_olstateid(fhp, openlockstateid(s), flags);
5132 break;
5133 default:
5134 status = nfserr_bad_stateid;
5135 break;
5136 }
5137 if (status)
5138 goto out;
5139 status = nfs4_check_fh(fhp, s);
5140
5141done:
5142 if (!status && filpp)
5143 status = nfs4_check_file(rqstp, fhp, s, filpp, tmp_file, flags);
5144out:
5145 if (s)
5146 nfs4_put_stid(s);
5147 return status;
5148}
5149
5150/*
5151 * Test if the stateid is valid
5152 */
5153__be32
5154nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5155 union nfsd4_op_u *u)
5156{
5157 struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
5158 struct nfsd4_test_stateid_id *stateid;
5159 struct nfs4_client *cl = cstate->session->se_client;
5160
5161 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
5162 stateid->ts_id_status =
5163 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
5164
5165 return nfs_ok;
5166}
5167
5168static __be32
5169nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
5170{
5171 struct nfs4_ol_stateid *stp = openlockstateid(s);
5172 __be32 ret;
5173
5174 ret = nfsd4_lock_ol_stateid(stp);
5175 if (ret)
5176 goto out_put_stid;
5177
5178 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5179 if (ret)
5180 goto out;
5181
5182 ret = nfserr_locks_held;
5183 if (check_for_locks(stp->st_stid.sc_file,
5184 lockowner(stp->st_stateowner)))
5185 goto out;
5186
5187 release_lock_stateid(stp);
5188 ret = nfs_ok;
5189
5190out:
5191 mutex_unlock(&stp->st_mutex);
5192out_put_stid:
5193 nfs4_put_stid(s);
5194 return ret;
5195}
5196
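/*
 * FREE_STATEID: release a lock stateid that no longer has locks, or
 * clean up a revoked delegation; open stateids and live delegations
 * are refused with nfserr_locks_held.
 */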
5197__be32
5198nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5199 union nfsd4_op_u *u)
5200{
5201 struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
5202 stateid_t *stateid = &free_stateid->fr_stateid;
5203 struct nfs4_stid *s;
5204 struct nfs4_delegation *dp;
5205 struct nfs4_client *cl = cstate->session->se_client;
5206 __be32 ret = nfserr_bad_stateid;
5207
5208 spin_lock(&cl->cl_lock);
5209 s = find_stateid_locked(cl, stateid);
5210 if (!s)
5211 goto out_unlock;
5212 spin_lock(&s->sc_lock);
5213 switch (s->sc_type) {
5214 case NFS4_DELEG_STID:
5215 ret = nfserr_locks_held;
5216 break;
5217 case NFS4_OPEN_STID:
5218 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
5219 if (ret)
5220 break;
5221 ret = nfserr_locks_held;
5222 break;
5223 case NFS4_LOCK_STID:
5224 spin_unlock(&s->sc_lock);
5225 refcount_inc(&s->sc_count);
5226 spin_unlock(&cl->cl_lock);
5227 ret = nfsd4_free_lock_stateid(stateid, s);
5228 goto out;
5229 case NFS4_REVOKED_DELEG_STID:
5230 spin_unlock(&s->sc_lock);
5231 dp = delegstateid(s);
5232 list_del_init(&dp->dl_recall_lru);
5233 spin_unlock(&cl->cl_lock);
5234 nfs4_put_stid(s);
5235 ret = nfs_ok;
5236 goto out;
5237 /* Default falls through and returns nfserr_bad_stateid */
5238 }
5239 spin_unlock(&s->sc_lock);
5240out_unlock:
5241 spin_unlock(&cl->cl_lock);
5242out:
5243 return ret;
5244}
5245
5246static inline int
5247setlkflg (int type)
5248{
5249 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
5250 RD_STATE : WR_STATE;
5251}
5252
5253static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
5254{
5255 struct svc_fh *current_fh = &cstate->current_fh;
5256 struct nfs4_stateowner *sop = stp->st_stateowner;
5257 __be32 status;
5258
5259 status = nfsd4_check_seqid(cstate, sop, seqid);
5260 if (status)
5261 return status;
5262 status = nfsd4_lock_ol_stateid(stp);
5263 if (status != nfs_ok)
5264 return status;
5265 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
5266 if (status == nfs_ok)
5267 status = nfs4_check_fh(current_fh, &stp->st_stid);
5268 if (status != nfs_ok)
5269 mutex_unlock(&stp->st_mutex);
5270 return status;
5271}
5272
5273/*
5274 * Checks for sequence id mutating operations.
5275 */
5276static __be32
5277nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5278 stateid_t *stateid, char typemask,
5279 struct nfs4_ol_stateid **stpp,
5280 struct nfsd_net *nn)
5281{
5282 __be32 status;
5283 struct nfs4_stid *s;
5284 struct nfs4_ol_stateid *stp = NULL;
5285
5286 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
5287 seqid, STATEID_VAL(stateid));
5288
5289 *stpp = NULL;
5290 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
5291 if (status)
5292 return status;
5293 stp = openlockstateid(s);
5294 nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
5295
5296 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
5297 if (!status)
5298 *stpp = stp;
5299 else
5300 nfs4_put_stid(&stp->st_stid);
5301 return status;
5302}
5303
5304static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
5305 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
5306{
5307 __be32 status;
5308 struct nfs4_openowner *oo;
5309 struct nfs4_ol_stateid *stp;
5310
5311 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
5312 NFS4_OPEN_STID, &stp, nn);
5313 if (status)
5314 return status;
5315 oo = openowner(stp->st_stateowner);
5316 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
5317 mutex_unlock(&stp->st_mutex);
5318 nfs4_put_stid(&stp->st_stid);
5319 return nfserr_bad_stateid;
5320 }
5321 *stpp = stp;
5322 return nfs_ok;
5323}
5324
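/*
 * OPEN_CONFIRM (NFSv4.0 only): confirm the open owner for an
 * unconfirmed open stateid and bump the stateid's seqid.
 */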
5325__be32
5326nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5327 union nfsd4_op_u *u)
5328{
5329 struct nfsd4_open_confirm *oc = &u->open_confirm;
5330 __be32 status;
5331 struct nfs4_openowner *oo;
5332 struct nfs4_ol_stateid *stp;
5333 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5334
5335 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
5336 cstate->current_fh.fh_dentry);
5337
5338 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
5339 if (status)
5340 return status;
5341
5342 status = nfs4_preprocess_seqid_op(cstate,
5343 oc->oc_seqid, &oc->oc_req_stateid,
5344 NFS4_OPEN_STID, &stp, nn);
5345 if (status)
5346 goto out;
5347 oo = openowner(stp->st_stateowner);
5348 status = nfserr_bad_stateid;
5349 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
5350 mutex_unlock(&stp->st_mutex);
5351 goto put_stateid;
5352 }
5353 oo->oo_flags |= NFS4_OO_CONFIRMED;
5354 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
5355 mutex_unlock(&stp->st_mutex);
5356 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
5357 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
5358
5359 nfsd4_client_record_create(oo->oo_owner.so_client);
5360 status = nfs_ok;
5361put_stateid:
5362 nfs4_put_stid(&stp->st_stid);
5363out:
5364 nfsd4_bump_seqid(cstate, status);
5365 return status;
5366}
5367
5368static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
5369{
5370 if (!test_access(access, stp))
5371 return;
5372 nfs4_file_put_access(stp->st_stid.sc_file, access);
5373 clear_access(access, stp);
5374}
5375
5376static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
5377{
5378 switch (to_access) {
5379 case NFS4_SHARE_ACCESS_READ:
5380 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
5381 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5382 break;
5383 case NFS4_SHARE_ACCESS_WRITE:
5384 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
5385 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
5386 break;
5387 case NFS4_SHARE_ACCESS_BOTH:
5388 break;
5389 default:
5390 WARN_ON_ONCE(1);
5391 }
5392}
5393
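/*
 * OPEN_DOWNGRADE: reduce the share access and deny bits of an existing
 * open stateid; the requested bits must be a subset of what is
 * currently held.
 */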
5394__be32
5395nfsd4_open_downgrade(struct svc_rqst *rqstp,
5396 struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
5397{
5398 struct nfsd4_open_downgrade *od = &u->open_downgrade;
5399 __be32 status;
5400 struct nfs4_ol_stateid *stp;
5401 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5402
5403 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
5404 cstate->current_fh.fh_dentry);
5405
5406 /* We don't yet support WANT bits: */
5407 if (od->od_deleg_want)
5408 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
5409 od->od_deleg_want);
5410
5411 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
5412 &od->od_stateid, &stp, nn);
5413 if (status)
5414 goto out;
5415 status = nfserr_inval;
5416 if (!test_access(od->od_share_access, stp)) {
5417 dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
5418 stp->st_access_bmap, od->od_share_access);
5419 goto put_stateid;
5420 }
5421 if (!test_deny(od->od_share_deny, stp)) {
5422 dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
5423 stp->st_deny_bmap, od->od_share_deny);
5424 goto put_stateid;
5425 }
5426 nfs4_stateid_downgrade(stp, od->od_share_access);
5427 reset_union_bmap_deny(od->od_share_deny, stp);
5428 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
5429 status = nfs_ok;
5430put_stateid:
5431 mutex_unlock(&stp->st_mutex);
5432 nfs4_put_stid(&stp->st_stid);
5433out:
5434 nfsd4_bump_seqid(cstate, status);
5435 return status;
5436}
5437
5438static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
5439{
5440 struct nfs4_client *clp = s->st_stid.sc_client;
5441 bool unhashed;
5442 LIST_HEAD(reaplist);
5443
5444 spin_lock(&clp->cl_lock);
5445 unhashed = unhash_open_stateid(s, &reaplist);
5446
5447 if (clp->cl_minorversion) {
5448 if (unhashed)
5449 put_ol_stateid_locked(s, &reaplist);
5450 spin_unlock(&clp->cl_lock);
5451 free_ol_stateid_reaplist(&reaplist);
5452 } else {
5453 spin_unlock(&clp->cl_lock);
5454 free_ol_stateid_reaplist(&reaplist);
5455 if (unhashed)
5456 move_to_close_lru(s, clp->net);
5457 }
5458}
5459
5460/*
5461 * nfs4_unlock_state() called after encode
5462 */
5463__be32
5464nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5465 union nfsd4_op_u *u)
5466{
5467 struct nfsd4_close *close = &u->close;
5468 __be32 status;
5469 struct nfs4_ol_stateid *stp;
5470 struct net *net = SVC_NET(rqstp);
5471 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5472
5473 dprintk("NFSD: nfsd4_close on file %pd\n",
5474 cstate->current_fh.fh_dentry);
5475
5476 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
5477 &close->cl_stateid,
5478 NFS4_OPEN_STID|NFS4_CLOSED_STID,
5479 &stp, nn);
5480 nfsd4_bump_seqid(cstate, status);
5481 if (status)
5482 goto out;
5483
5484 stp->st_stid.sc_type = NFS4_CLOSED_STID;
5485
5486 /*
5487 * Technically we don't _really_ have to increment or copy it, since
5488 * it should just be gone after this operation and we clobber the
5489 * copied value below, but we continue to do so here just to ensure
5490 * that racing ops see that there was a state change.
5491 */
5492 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
5493
5494 nfsd4_close_open_stateid(stp);
5495 mutex_unlock(&stp->st_mutex);
5496
5497 /* v4.1+ suggests that we send a special stateid in here, since the
5498 * clients should just ignore this anyway. Since this is not useful
5499 * for v4.0 clients either, we set it to the special close_stateid
5500 * universally.
5501 *
5502 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
5503 */
5504 memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));
5505
5506 /* put reference from nfs4_preprocess_seqid_op */
5507 nfs4_put_stid(&stp->st_stid);
5508out:
5509 return status;
5510}
5511
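/*
 * DELEGRETURN: look up the delegation stateid sent by the client and
 * destroy the delegation.
 */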
5512__be32
5513nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5514 union nfsd4_op_u *u)
5515{
5516 struct nfsd4_delegreturn *dr = &u->delegreturn;
5517 struct nfs4_delegation *dp;
5518 stateid_t *stateid = &dr->dr_stateid;
5519 struct nfs4_stid *s;
5520 __be32 status;
5521 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5522
5523 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
5524 return status;
5525
5526 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
5527 if (status)
5528 goto out;
5529 dp = delegstateid(s);
5530 status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
5531 if (status)
5532 goto put_stateid;
5533
5534 destroy_delegation(dp);
5535put_stateid:
5536 nfs4_put_stid(&dp->dl_stid);
5537out:
5538 return status;
5539}
5540
5541static inline u64
5542end_offset(u64 start, u64 len)
5543{
5544 u64 end;
5545
5546 end = start + len;
5547 return end >= start ? end: NFS4_MAX_UINT64;
5548}
5549
5550/* last octet in a range */
5551static inline u64
5552last_byte_offset(u64 start, u64 len)
5553{
5554 u64 end;
5555
5556 WARN_ON_ONCE(!len);
5557 end = start + len;
5558 return end > start ? end - 1: NFS4_MAX_UINT64;
5559}
5560
5561/*
5562 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
5563 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
5564 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
5565 * locking, this prevents us from being completely protocol-compliant. The
5566 * real solution to this problem is to start using unsigned file offsets in
5567 * the VFS, but this is a very deep change!
5568 */
5569static inline void
5570nfs4_transform_lock_offset(struct file_lock *lock)
5571{
5572 if (lock->fl_start < 0)
5573 lock->fl_start = OFFSET_MAX;
5574 if (lock->fl_end < 0)
5575 lock->fl_end = OFFSET_MAX;
5576}
5577
5578static fl_owner_t
5579nfsd4_fl_get_owner(fl_owner_t owner)
5580{
5581 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
5582
5583 nfs4_get_stateowner(&lo->lo_owner);
5584 return owner;
5585}
5586
5587static void
5588nfsd4_fl_put_owner(fl_owner_t owner)
5589{
5590 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
5591
5592 if (lo)
5593 nfs4_put_stateowner(&lo->lo_owner);
5594}
5595
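/*
 * Called by the locks code when a blocked lock becomes free: remove the
 * blocked-lock entry from the lockowner's lists and queue its callback
 * to notify the client.
 */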
5596static void
5597nfsd4_lm_notify(struct file_lock *fl)
5598{
5599 struct nfs4_lockowner *lo = (struct nfs4_lockowner *)fl->fl_owner;
5600 struct net *net = lo->lo_owner.so_client->net;
5601 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5602 struct nfsd4_blocked_lock *nbl = container_of(fl,
5603 struct nfsd4_blocked_lock, nbl_lock);
5604 bool queue = false;
5605
5606 /* An empty list means that something else is going to be using it */
5607 spin_lock(&nn->blocked_locks_lock);
5608 if (!list_empty(&nbl->nbl_list)) {
5609 list_del_init(&nbl->nbl_list);
5610 list_del_init(&nbl->nbl_lru);
5611 queue = true;
5612 }
5613 spin_unlock(&nn->blocked_locks_lock);
5614
5615 if (queue)
5616 nfsd4_run_cb(&nbl->nbl_cb);
5617}
5618
5619static const struct lock_manager_operations nfsd_posix_mng_ops = {
5620 .lm_notify = nfsd4_lm_notify,
5621 .lm_get_owner = nfsd4_fl_get_owner,
5622 .lm_put_owner = nfsd4_fl_put_owner,
5623};
5624
5625static inline void
5626nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
5627{
5628 struct nfs4_lockowner *lo;
5629
5630 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
5631 lo = (struct nfs4_lockowner *) fl->fl_owner;
5632 deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
5633 lo->lo_owner.so_owner.len, GFP_KERNEL);
5634 if (!deny->ld_owner.data)
5635 /* We just don't care that much */
5636 goto nevermind;
5637 deny->ld_owner.len = lo->lo_owner.so_owner.len;
5638 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
5639 } else {
5640nevermind:
5641 deny->ld_owner.len = 0;
5642 deny->ld_owner.data = NULL;
5643 deny->ld_clientid.cl_boot = 0;
5644 deny->ld_clientid.cl_id = 0;
5645 }
5646 deny->ld_start = fl->fl_start;
5647 deny->ld_length = NFS4_MAX_UINT64;
5648 if (fl->fl_end != NFS4_MAX_UINT64)
5649 deny->ld_length = fl->fl_end - fl->fl_start + 1;
5650 deny->ld_type = NFS4_READ_LT;
5651 if (fl->fl_type != F_RDLCK)
5652 deny->ld_type = NFS4_WRITE_LT;
5653}
5654
5655static struct nfs4_lockowner *
5656find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
5657{
5658 unsigned int strhashval = ownerstr_hashval(owner);
5659 struct nfs4_stateowner *so;
5660
5661 lockdep_assert_held(&clp->cl_lock);
5662
5663 list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
5664 so_strhash) {
5665 if (so->so_is_open_owner)
5666 continue;
5667 if (same_owner_str(so, owner))
5668 return lockowner(nfs4_get_stateowner(so));
5669 }
5670 return NULL;
5671}
5672
5673static struct nfs4_lockowner *
5674find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
5675{
5676 struct nfs4_lockowner *lo;
5677
5678 spin_lock(&clp->cl_lock);
5679 lo = find_lockowner_str_locked(clp, owner);
5680 spin_unlock(&clp->cl_lock);
5681 return lo;
5682}
5683
5684static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
5685{
5686 unhash_lockowner_locked(lockowner(sop));
5687}
5688
5689static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
5690{
5691 struct nfs4_lockowner *lo = lockowner(sop);
5692
5693 kmem_cache_free(lockowner_slab, lo);
5694}
5695
5696static const struct nfs4_stateowner_operations lockowner_ops = {
5697 .so_unhash = nfs4_unhash_lockowner,
5698 .so_free = nfs4_free_lockowner,
5699};
5700
5701/*
5702 * Alloc a lock owner structure.
5703	 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
5704	 * occurred.
5705 *
5706 * strhashval = ownerstr_hashval
5707 */
5708static struct nfs4_lockowner *
5709alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
5710 struct nfs4_ol_stateid *open_stp,
5711 struct nfsd4_lock *lock)
5712{
5713 struct nfs4_lockowner *lo, *ret;
5714
5715 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
5716 if (!lo)
5717 return NULL;
5718 INIT_LIST_HEAD(&lo->lo_blocked);
5719 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
5720 lo->lo_owner.so_is_open_owner = 0;
5721 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
5722 lo->lo_owner.so_ops = &lockowner_ops;
5723 spin_lock(&clp->cl_lock);
5724 ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
5725 if (ret == NULL) {
5726 list_add(&lo->lo_owner.so_strhash,
5727 &clp->cl_ownerstr_hashtbl[strhashval]);
5728 ret = lo;
5729 } else
5730 nfs4_free_stateowner(&lo->lo_owner);
5731
5732 spin_unlock(&clp->cl_lock);
5733 return ret;
5734}
5735
5736static struct nfs4_ol_stateid *
5737find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
5738{
5739 struct nfs4_ol_stateid *lst;
5740 struct nfs4_client *clp = lo->lo_owner.so_client;
5741
5742 lockdep_assert_held(&clp->cl_lock);
5743
5744 list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
5745 if (lst->st_stid.sc_type != NFS4_LOCK_STID)
5746 continue;
5747 if (lst->st_stid.sc_file == fp) {
5748 refcount_inc(&lst->st_stid.sc_count);
5749 return lst;
5750 }
5751 }
5752 return NULL;
5753}
5754
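/*
 * Attach a new lock stateid to the open stateid and file, or return an
 * existing one for this lockowner/file if another thread beat us to it.
 */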
5755static struct nfs4_ol_stateid *
5756init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
5757 struct nfs4_file *fp, struct inode *inode,
5758 struct nfs4_ol_stateid *open_stp)
5759{
5760 struct nfs4_client *clp = lo->lo_owner.so_client;
5761 struct nfs4_ol_stateid *retstp;
5762
5763 mutex_init(&stp->st_mutex);
5764 mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
5765retry:
5766 spin_lock(&clp->cl_lock);
5767 spin_lock(&fp->fi_lock);
5768 retstp = find_lock_stateid(lo, fp);
5769 if (retstp)
5770 goto out_unlock;
5771
5772 refcount_inc(&stp->st_stid.sc_count);
5773 stp->st_stid.sc_type = NFS4_LOCK_STID;
5774 stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
5775 get_nfs4_file(fp);
5776 stp->st_stid.sc_file = fp;
5777 stp->st_access_bmap = 0;
5778 stp->st_deny_bmap = open_stp->st_deny_bmap;
5779 stp->st_openstp = open_stp;
5780 list_add(&stp->st_locks, &open_stp->st_locks);
5781 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
5782 list_add(&stp->st_perfile, &fp->fi_stateids);
5783out_unlock:
5784 spin_unlock(&fp->fi_lock);
5785 spin_unlock(&clp->cl_lock);
5786 if (retstp) {
5787 if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
5788 nfs4_put_stid(&retstp->st_stid);
5789 goto retry;
5790 }
5791 /* To keep mutex tracking happy */
5792 mutex_unlock(&stp->st_mutex);
5793 stp = retstp;
5794 }
5795 return stp;
5796}
5797
5798static struct nfs4_ol_stateid *
5799find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
5800 struct inode *inode, struct nfs4_ol_stateid *ost,
5801 bool *new)
5802{
5803 struct nfs4_stid *ns = NULL;
5804 struct nfs4_ol_stateid *lst;
5805 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5806 struct nfs4_client *clp = oo->oo_owner.so_client;
5807
5808 *new = false;
5809 spin_lock(&clp->cl_lock);
5810 lst = find_lock_stateid(lo, fi);
5811 spin_unlock(&clp->cl_lock);
5812 if (lst != NULL) {
5813 if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
5814 goto out;
5815 nfs4_put_stid(&lst->st_stid);
5816 }
5817 ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
5818 if (ns == NULL)
5819 return NULL;
5820
5821 lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
5822 if (lst == openlockstateid(ns))
5823 *new = true;
5824 else
5825 nfs4_put_stid(ns);
5826out:
5827 return lst;
5828}
5829
5830static int
5831check_lock_length(u64 offset, u64 length)
5832{
5833 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
5834 (length > ~offset)));
5835}
5836
5837static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
5838{
5839 struct nfs4_file *fp = lock_stp->st_stid.sc_file;
5840
5841 lockdep_assert_held(&fp->fi_lock);
5842
5843 if (test_access(access, lock_stp))
5844 return;
5845 __nfs4_file_get_access(fp, access);
5846 set_access(access, lock_stp);
5847}
5848
5849static __be32
5850lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
5851 struct nfs4_ol_stateid *ost,
5852 struct nfsd4_lock *lock,
5853 struct nfs4_ol_stateid **plst, bool *new)
5854{
5855 __be32 status;
5856 struct nfs4_file *fi = ost->st_stid.sc_file;
5857 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
5858 struct nfs4_client *cl = oo->oo_owner.so_client;
5859 struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
5860 struct nfs4_lockowner *lo;
5861 struct nfs4_ol_stateid *lst;
5862 unsigned int strhashval;
5863
5864 lo = find_lockowner_str(cl, &lock->lk_new_owner);
5865 if (!lo) {
5866 strhashval = ownerstr_hashval(&lock->lk_new_owner);
5867 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
5868 if (lo == NULL)
5869 return nfserr_jukebox;
5870 } else {
5871 /* with an existing lockowner, seqids must be the same */
5872 status = nfserr_bad_seqid;
5873 if (!cstate->minorversion &&
5874 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
5875 goto out;
5876 }
5877
5878 lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
5879 if (lst == NULL) {
5880 status = nfserr_jukebox;
5881 goto out;
5882 }
5883
5884 status = nfs_ok;
5885 *plst = lst;
5886out:
5887 nfs4_put_stateowner(&lo->lo_owner);
5888 return status;
5889}
5890
5891/*
5892 * LOCK operation
5893 */
5894__be32
5895nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5896 union nfsd4_op_u *u)
5897{
5898 struct nfsd4_lock *lock = &u->lock;
5899 struct nfs4_openowner *open_sop = NULL;
5900 struct nfs4_lockowner *lock_sop = NULL;
5901 struct nfs4_ol_stateid *lock_stp = NULL;
5902 struct nfs4_ol_stateid *open_stp = NULL;
5903 struct nfs4_file *fp;
5904 struct file *filp = NULL;
5905 struct nfsd4_blocked_lock *nbl = NULL;
5906 struct file_lock *file_lock = NULL;
5907 struct file_lock *conflock = NULL;
5908 __be32 status = 0;
5909 int lkflg;
5910 int err;
5911 bool new = false;
5912 unsigned char fl_type;
5913 unsigned int fl_flags = FL_POSIX;
5914 struct net *net = SVC_NET(rqstp);
5915 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5916
5917 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
5918 (long long) lock->lk_offset,
5919 (long long) lock->lk_length);
5920
5921 if (check_lock_length(lock->lk_offset, lock->lk_length))
5922 return nfserr_inval;
5923
5924 if ((status = fh_verify(rqstp, &cstate->current_fh,
5925 S_IFREG, NFSD_MAY_LOCK))) {
5926 dprintk("NFSD: nfsd4_lock: permission denied!\n");
5927 return status;
5928 }
5929
5930 if (lock->lk_is_new) {
5931 if (nfsd4_has_session(cstate))
5932 /* See rfc 5661 18.10.3: given clientid is ignored: */
5933 memcpy(&lock->lk_new_clientid,
5934 &cstate->session->se_client->cl_clientid,
5935 sizeof(clientid_t));
5936
5937 status = nfserr_stale_clientid;
5938 if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
5939 goto out;
5940
5941 /* validate and update open stateid and open seqid */
5942 status = nfs4_preprocess_confirmed_seqid_op(cstate,
5943 lock->lk_new_open_seqid,
5944 &lock->lk_new_open_stateid,
5945 &open_stp, nn);
5946 if (status)
5947 goto out;
5948 mutex_unlock(&open_stp->st_mutex);
5949 open_sop = openowner(open_stp->st_stateowner);
5950 status = nfserr_bad_stateid;
5951 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
5952 &lock->lk_new_clientid))
5953 goto out;
5954 status = lookup_or_create_lock_state(cstate, open_stp, lock,
5955 &lock_stp, &new);
5956 } else {
5957 status = nfs4_preprocess_seqid_op(cstate,
5958 lock->lk_old_lock_seqid,
5959 &lock->lk_old_lock_stateid,
5960 NFS4_LOCK_STID, &lock_stp, nn);
5961 }
5962 if (status)
5963 goto out;
5964 lock_sop = lockowner(lock_stp->st_stateowner);
5965
5966 lkflg = setlkflg(lock->lk_type);
5967 status = nfs4_check_openmode(lock_stp, lkflg);
5968 if (status)
5969 goto out;
5970
5971 status = nfserr_grace;
5972 if (locks_in_grace(net) && !lock->lk_reclaim)
5973 goto out;
5974 status = nfserr_no_grace;
5975 if (!locks_in_grace(net) && lock->lk_reclaim)
5976 goto out;
5977
5978 fp = lock_stp->st_stid.sc_file;
5979 switch (lock->lk_type) {
5980 case NFS4_READW_LT:
5981 if (nfsd4_has_session(cstate))
5982 fl_flags |= FL_SLEEP;
5983 /* Fallthrough */
5984 case NFS4_READ_LT:
5985 spin_lock(&fp->fi_lock);
5986 filp = find_readable_file_locked(fp);
5987 if (filp)
5988 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
5989 spin_unlock(&fp->fi_lock);
5990 fl_type = F_RDLCK;
5991 break;
5992 case NFS4_WRITEW_LT:
5993 if (nfsd4_has_session(cstate))
5994 fl_flags |= FL_SLEEP;
5995 /* Fallthrough */
5996 case NFS4_WRITE_LT:
5997 spin_lock(&fp->fi_lock);
5998 filp = find_writeable_file_locked(fp);
5999 if (filp)
6000 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
6001 spin_unlock(&fp->fi_lock);
6002 fl_type = F_WRLCK;
6003 break;
6004 default:
6005 status = nfserr_inval;
6006 goto out;
6007 }
6008
6009 if (!filp) {
6010 status = nfserr_openmode;
6011 goto out;
6012 }
6013
6014 nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
6015 if (!nbl) {
6016 dprintk("NFSD: %s: unable to allocate block!\n", __func__);
6017 status = nfserr_jukebox;
6018 goto out;
6019 }
6020
6021 file_lock = &nbl->nbl_lock;
6022 file_lock->fl_type = fl_type;
6023 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
6024 file_lock->fl_pid = current->tgid;
6025 file_lock->fl_file = filp;
6026 file_lock->fl_flags = fl_flags;
6027 file_lock->fl_lmops = &nfsd_posix_mng_ops;
6028 file_lock->fl_start = lock->lk_offset;
6029 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
6030 nfs4_transform_lock_offset(file_lock);
6031
6032 conflock = locks_alloc_lock();
6033 if (!conflock) {
6034 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6035 status = nfserr_jukebox;
6036 goto out;
6037 }
6038
6039 if (fl_flags & FL_SLEEP) {
6040 nbl->nbl_time = jiffies;
6041 spin_lock(&nn->blocked_locks_lock);
6042 list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
6043 list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
6044 spin_unlock(&nn->blocked_locks_lock);
6045 }
6046
6047 err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
6048 switch (err) {
6049 case 0: /* success! */
6050 nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
6051 status = 0;
6052 break;
6053 case FILE_LOCK_DEFERRED:
6054 nbl = NULL;
6055 /* Fallthrough */
6056 case -EAGAIN: /* conflock holds conflicting lock */
6057 status = nfserr_denied;
6058 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
6059 nfs4_set_lock_denied(conflock, &lock->lk_denied);
6060 break;
6061 case -EDEADLK:
6062 status = nfserr_deadlock;
6063 break;
6064 default:
6065 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
6066 status = nfserrno(err);
6067 break;
6068 }
6069out:
6070 if (nbl) {
6071 /* dequeue it if we queued it before */
6072 if (fl_flags & FL_SLEEP) {
6073 spin_lock(&nn->blocked_locks_lock);
6074 list_del_init(&nbl->nbl_list);
6075 list_del_init(&nbl->nbl_lru);
6076 spin_unlock(&nn->blocked_locks_lock);
6077 }
6078 free_blocked_lock(nbl);
6079 }
6080 if (filp)
6081 fput(filp);
6082 if (lock_stp) {
6083 /* Bump seqid manually if the 4.0 replay owner is openowner */
6084 if (cstate->replay_owner &&
6085 cstate->replay_owner != &lock_sop->lo_owner &&
6086 seqid_mutating_err(ntohl(status)))
6087 lock_sop->lo_owner.so_seqid++;
6088
6089 /*
6090 * If this is a new, never-before-used stateid, and we are
6091 * returning an error, then just go ahead and release it.
6092 */
6093 if (status && new)
6094 release_lock_stateid(lock_stp);
6095
6096 mutex_unlock(&lock_stp->st_mutex);
6097
6098 nfs4_put_stid(&lock_stp->st_stid);
6099 }
6100 if (open_stp)
6101 nfs4_put_stid(&open_stp->st_stid);
6102 nfsd4_bump_seqid(cstate, status);
6103 if (conflock)
6104 locks_free_lock(conflock);
6105 return status;
6106}
6107
6108/*
6109 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
6110 * so we do a temporary open here just to get an open file to pass to
6111	 * vfs_test_lock. (Arguably, test_lock should be done with an
6112	 * inode operation.)
6113 */
6114static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
6115{
6116 struct file *file;
6117 __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
6118 if (!err) {
6119 err = nfserrno(vfs_test_lock(file, lock));
6120 fput(file);
6121 }
6122 return err;
6123}
6124
6125/*
6126 * LOCKT operation
6127 */
6128__be32
6129nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6130 union nfsd4_op_u *u)
6131{
6132 struct nfsd4_lockt *lockt = &u->lockt;
6133 struct file_lock *file_lock = NULL;
6134 struct nfs4_lockowner *lo = NULL;
6135 __be32 status;
6136 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6137
6138 if (locks_in_grace(SVC_NET(rqstp)))
6139 return nfserr_grace;
6140
6141 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
6142 return nfserr_inval;
6143
6144 if (!nfsd4_has_session(cstate)) {
6145 status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
6146 if (status)
6147 goto out;
6148 }
6149
6150 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
6151 goto out;
6152
6153 file_lock = locks_alloc_lock();
6154 if (!file_lock) {
6155 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6156 status = nfserr_jukebox;
6157 goto out;
6158 }
6159
6160 switch (lockt->lt_type) {
6161 case NFS4_READ_LT:
6162 case NFS4_READW_LT:
6163 file_lock->fl_type = F_RDLCK;
6164 break;
6165 case NFS4_WRITE_LT:
6166 case NFS4_WRITEW_LT:
6167 file_lock->fl_type = F_WRLCK;
6168 break;
6169 default:
6170 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
6171 status = nfserr_inval;
6172 goto out;
6173 }
6174
6175 lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
6176 if (lo)
6177 file_lock->fl_owner = (fl_owner_t)lo;
6178 file_lock->fl_pid = current->tgid;
6179 file_lock->fl_flags = FL_POSIX;
6180
6181 file_lock->fl_start = lockt->lt_offset;
6182 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
6183
6184 nfs4_transform_lock_offset(file_lock);
6185
6186 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
6187 if (status)
6188 goto out;
6189
6190 if (file_lock->fl_type != F_UNLCK) {
6191 status = nfserr_denied;
6192 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
6193 }
6194out:
6195 if (lo)
6196 nfs4_put_stateowner(&lo->lo_owner);
6197 if (file_lock)
6198 locks_free_lock(file_lock);
6199 return status;
6200}
6201
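/*
 * LOCKU operation: release a byte-range lock by posting an F_UNLCK
 * request to the VFS and bumping the lock stateid.
 */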
6202__be32
6203nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6204 union nfsd4_op_u *u)
6205{
6206 struct nfsd4_locku *locku = &u->locku;
6207 struct nfs4_ol_stateid *stp;
6208 struct file *filp = NULL;
6209 struct file_lock *file_lock = NULL;
6210 __be32 status;
6211 int err;
6212 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6213
6214 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
6215 (long long) locku->lu_offset,
6216 (long long) locku->lu_length);
6217
6218 if (check_lock_length(locku->lu_offset, locku->lu_length))
6219 return nfserr_inval;
6220
6221 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
6222 &locku->lu_stateid, NFS4_LOCK_STID,
6223 &stp, nn);
6224 if (status)
6225 goto out;
6226 filp = find_any_file(stp->st_stid.sc_file);
6227 if (!filp) {
6228 status = nfserr_lock_range;
6229 goto put_stateid;
6230 }
6231 file_lock = locks_alloc_lock();
6232 if (!file_lock) {
6233 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
6234 status = nfserr_jukebox;
6235 goto fput;
6236 }
6237
6238 file_lock->fl_type = F_UNLCK;
6239 file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
6240 file_lock->fl_pid = current->tgid;
6241 file_lock->fl_file = filp;
6242 file_lock->fl_flags = FL_POSIX;
6243 file_lock->fl_lmops = &nfsd_posix_mng_ops;
6244 file_lock->fl_start = locku->lu_offset;
6245
6246 file_lock->fl_end = last_byte_offset(locku->lu_offset,
6247 locku->lu_length);
6248 nfs4_transform_lock_offset(file_lock);
6249
6250 err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
6251 if (err) {
6252 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
6253 goto out_nfserr;
6254 }
6255 nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
6256fput:
6257 fput(filp);
6258put_stateid:
6259 mutex_unlock(&stp->st_mutex);
6260 nfs4_put_stid(&stp->st_stid);
6261out:
6262 nfsd4_bump_seqid(cstate, status);
6263 if (file_lock)
6264 locks_free_lock(file_lock);
6265 return status;
6266
6267out_nfserr:
6268 status = nfserrno(err);
6269 goto fput;
6270}
6271
6272/*
6273 * returns
6274 * true: locks held by lockowner
6275 * false: no locks held by lockowner
6276 */
6277static bool
6278check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
6279{
6280 struct file_lock *fl;
6281 int status = false;
6282 struct file *filp = find_any_file(fp);
6283 struct inode *inode;
6284 struct file_lock_context *flctx;
6285
6286 if (!filp) {
6287 /* Any valid lock stateid should have some sort of access */
6288 WARN_ON_ONCE(1);
6289 return status;
6290 }
6291
6292 inode = file_inode(filp);
6293 flctx = inode->i_flctx;
6294
6295 if (flctx && !list_empty_careful(&flctx->flc_posix)) {
6296 spin_lock(&flctx->flc_lock);
6297 list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
6298 if (fl->fl_owner == (fl_owner_t)lowner) {
6299 status = true;
6300 break;
6301 }
6302 }
6303 spin_unlock(&flctx->flc_lock);
6304 }
6305 fput(filp);
6306 return status;
6307}
6308
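/*
 * RELEASE_LOCKOWNER (NFSv4.0): if the named lockowner holds no locks,
 * unhash it and free all of its lock stateids; otherwise return
 * nfserr_locks_held.
 */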
6309__be32
6310nfsd4_release_lockowner(struct svc_rqst *rqstp,
6311 struct nfsd4_compound_state *cstate,
6312 union nfsd4_op_u *u)
6313{
6314 struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
6315 clientid_t *clid = &rlockowner->rl_clientid;
6316 struct nfs4_stateowner *sop;
6317 struct nfs4_lockowner *lo = NULL;
6318 struct nfs4_ol_stateid *stp;
6319 struct xdr_netobj *owner = &rlockowner->rl_owner;
6320 unsigned int hashval = ownerstr_hashval(owner);
6321 __be32 status;
6322 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6323 struct nfs4_client *clp;
6324 LIST_HEAD (reaplist);
6325
6326 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
6327 clid->cl_boot, clid->cl_id);
6328
6329 status = lookup_clientid(clid, cstate, nn);
6330 if (status)
6331 return status;
6332
6333 clp = cstate->clp;
6334 /* Find the matching lock stateowner */
6335 spin_lock(&clp->cl_lock);
6336 list_for_each_entry(sop, &clp->cl_ownerstr_hashtbl[hashval],
6337 so_strhash) {
6338
6339 if (sop->so_is_open_owner || !same_owner_str(sop, owner))
6340 continue;
6341
6342 /* see if there are still any locks associated with it */
6343 lo = lockowner(sop);
6344 list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
6345 if (check_for_locks(stp->st_stid.sc_file, lo)) {
6346 status = nfserr_locks_held;
6347 spin_unlock(&clp->cl_lock);
6348 return status;
6349 }
6350 }
6351
6352 nfs4_get_stateowner(sop);
6353 break;
6354 }
6355 if (!lo) {
6356 spin_unlock(&clp->cl_lock);
6357 return status;
6358 }
6359
6360 unhash_lockowner_locked(lo);
6361 while (!list_empty(&lo->lo_owner.so_stateids)) {
6362 stp = list_first_entry(&lo->lo_owner.so_stateids,
6363 struct nfs4_ol_stateid,
6364 st_perstateowner);
6365 WARN_ON(!unhash_lock_stateid(stp));
6366 put_ol_stateid_locked(stp, &reaplist);
6367 }
6368 spin_unlock(&clp->cl_lock);
6369 free_ol_stateid_reaplist(&reaplist);
6370 remove_blocked_locks(lo);
6371 nfs4_put_stateowner(&lo->lo_owner);
6372
6373 return status;
6374}
6375
6376static inline struct nfs4_client_reclaim *
6377alloc_reclaim(void)
6378{
6379 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
6380}
6381
6382bool
6383nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
6384{
6385 struct nfs4_client_reclaim *crp;
6386
6387 crp = nfsd4_find_reclaim_client(name, nn);
6388 return (crp && crp->cr_clp);
6389}
6390
6391/*
6392 * Failure => no reclaim record, so all bets on reclaim after a server
6393 * reset are off; the client will get nfserr_no_grace...
 */
6394struct nfs4_client_reclaim *
6395nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
6396{
6397 unsigned int strhashval;
6398 struct nfs4_client_reclaim *crp;
6399
6400 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
6401 crp = alloc_reclaim();
6402 if (crp) {
6403 strhashval = clientstr_hashval(name);
6404 INIT_LIST_HEAD(&crp->cr_strhash);
6405 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
6406 memcpy(crp->cr_recdir, name, HEXDIR_LEN);
6407 crp->cr_clp = NULL;
6408 nn->reclaim_str_hashtbl_size++;
6409 }
6410 return crp;
6411}
6412
6413void
6414nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
6415{
6416 list_del(&crp->cr_strhash);
6417 kfree(crp);
6418 nn->reclaim_str_hashtbl_size--;
6419}
6420
6421void
6422nfs4_release_reclaim(struct nfsd_net *nn)
6423{
6424 struct nfs4_client_reclaim *crp = NULL;
6425 int i;
6426
6427 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
6428 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
6429 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
6430 struct nfs4_client_reclaim, cr_strhash);
6431 nfs4_remove_reclaim_record(crp, nn);
6432 }
6433 }
6434 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
6435}
6436
6437/* Called from OPEN, CLAIM_PREVIOUS with a new clientid. */
6439struct nfs4_client_reclaim *
6440nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
6441{
6442 unsigned int strhashval;
6443 struct nfs4_client_reclaim *crp = NULL;
6444
6445 dprintk("NFSD: nfsd4_find_reclaim_client for recdir %s\n", recdir);
6446
6447 strhashval = clientstr_hashval(recdir);
6448 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
6449 if (same_name(crp->cr_recdir, recdir)) {
6450 return crp;
6451 }
6452 }
6453 return NULL;
6454}
6455
6456/*
6457* Called from OPEN. Look for clientid in reclaim list.
6458*/
6459__be32
6460nfs4_check_open_reclaim(clientid_t *clid,
6461 struct nfsd4_compound_state *cstate,
6462 struct nfsd_net *nn)
6463{
6464 __be32 status;
6465
6466 /* find clientid in conf_id_hashtbl */
6467 status = lookup_clientid(clid, cstate, nn);
6468 if (status)
6469 return nfserr_reclaim_bad;
6470
6471 if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
6472 return nfserr_no_grace;
6473
6474 if (nfsd4_client_record_check(cstate->clp))
6475 return nfserr_reclaim_bad;
6476
6477 return nfs_ok;
6478}
6479
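/*
 * The nfsd_inject_* helpers below implement the NFSD fault injection
 * interface.  They are normally exercised from the debugfs files set up in
 * fault_inject.c (assumption: CONFIG_NFSD_FAULT_INJECTION builds that code)
 * and let an administrator print or forcibly discard clients, locks,
 * openowners and delegations for testing.
 */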
6480#ifdef CONFIG_NFSD_FAULT_INJECTION
6481static inline void
6482put_client(struct nfs4_client *clp)
6483{
6484 atomic_dec(&clp->cl_refcount);
6485}
6486
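/*
 * Find a client by bound socket address.  Walks the per-net client_lru
 * list, so the caller is expected to hold nn->client_lock; the returned
 * pointer carries no extra reference.
 */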
6487static struct nfs4_client *
6488nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
6489{
6490 struct nfs4_client *clp;
6491 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6492 nfsd_net_id);
6493
6494 if (!nfsd_netns_ready(nn))
6495 return NULL;
6496
6497 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6498 if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
6499 return clp;
6500 }
6501 return NULL;
6502}
6503
6504u64
6505nfsd_inject_print_clients(void)
6506{
6507 struct nfs4_client *clp;
6508 u64 count = 0;
6509 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6510 nfsd_net_id);
6511 char buf[INET6_ADDRSTRLEN];
6512
6513 if (!nfsd_netns_ready(nn))
6514 return 0;
6515
6516 spin_lock(&nn->client_lock);
6517 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6518 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
6519 pr_info("NFS Client: %s\n", buf);
6520 ++count;
6521 }
6522 spin_unlock(&nn->client_lock);
6523
6524 return count;
6525}
6526
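/*
 * Two-phase expiry: mark the client expired while nn->client_lock is held,
 * then call expire_client() only after dropping the lock, since tearing
 * down a client's state may block.
 */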
6527u64
6528nfsd_inject_forget_client(struct sockaddr_storage *addr, size_t addr_size)
6529{
6530 u64 count = 0;
6531 struct nfs4_client *clp;
6532 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6533 nfsd_net_id);
6534
6535 if (!nfsd_netns_ready(nn))
6536 return count;
6537
6538 spin_lock(&nn->client_lock);
6539 clp = nfsd_find_client(addr, addr_size);
6540 if (clp) {
6541 if (mark_client_expired_locked(clp) == nfs_ok)
6542 ++count;
6543 else
6544 clp = NULL;
6545 }
6546 spin_unlock(&nn->client_lock);
6547
6548 if (clp)
6549 expire_client(clp);
6550
6551 return count;
6552}
6553
6554u64
6555nfsd_inject_forget_clients(u64 max)
6556{
6557 u64 count = 0;
6558 struct nfs4_client *clp, *next;
6559 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6560 nfsd_net_id);
6561 LIST_HEAD(reaplist);
6562
6563 if (!nfsd_netns_ready(nn))
6564 return count;
6565
6566 spin_lock(&nn->client_lock);
6567 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
6568 if (mark_client_expired_locked(clp) == nfs_ok) {
6569 list_add(&clp->cl_lru, &reaplist);
6570 if (max != 0 && ++count >= max)
6571 break;
6572 }
6573 }
6574 spin_unlock(&nn->client_lock);
6575
6576 list_for_each_entry_safe(clp, next, &reaplist, cl_lru)
6577 expire_client(clp);
6578
6579 return count;
6580}
6581
6582static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
6583 const char *type)
6584{
6585 char buf[INET6_ADDRSTRLEN];
6586 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
6587 printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
6588}
6589
6590static void
6591nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst,
6592 struct list_head *collect)
6593{
6594 struct nfs4_client *clp = lst->st_stid.sc_client;
6595 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6596 nfsd_net_id);
6597
6598 if (!collect)
6599 return;
6600
6601 lockdep_assert_held(&nn->client_lock);
6602 atomic_inc(&clp->cl_refcount);
6603 list_add(&lst->st_locks, collect);
6604}
6605
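/*
 * Walk every lock stateid belonging to @clp: for each openowner, each of
 * its open stateids, and each lock stateid hanging off those, apply @func
 * (optionally collecting the stateid) and count it.  A @max of 0 means no
 * limit.
 */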
6606static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
6607 struct list_head *collect,
6608 bool (*func)(struct nfs4_ol_stateid *))
6609{
6610 struct nfs4_openowner *oop;
6611 struct nfs4_ol_stateid *stp, *st_next;
6612 struct nfs4_ol_stateid *lst, *lst_next;
6613 u64 count = 0;
6614
6615 spin_lock(&clp->cl_lock);
6616 list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
6617 list_for_each_entry_safe(stp, st_next,
6618 &oop->oo_owner.so_stateids, st_perstateowner) {
6619 list_for_each_entry_safe(lst, lst_next,
6620 &stp->st_locks, st_locks) {
6621 if (func) {
6622 if (func(lst))
6623 nfsd_inject_add_lock_to_list(lst,
6624 collect);
6625 }
6626 ++count;
6627 /*
6628 * Despite the fact that these functions deal
6629 * with 64-bit integers for "count", we must
6630 * ensure that it doesn't blow up the
6631 * clp->cl_refcount. Throw a warning if we
6632 * start to approach INT_MAX here.
6633 */
6634 WARN_ON_ONCE(count == (INT_MAX / 2));
6635 if (count == max)
6636 goto out;
6637 }
6638 }
6639 }
6640out:
6641 spin_unlock(&clp->cl_lock);
6642
6643 return count;
6644}
6645
6646static u64
6647nfsd_collect_client_locks(struct nfs4_client *clp, struct list_head *collect,
6648 u64 max)
6649{
6650 return nfsd_foreach_client_lock(clp, max, collect, unhash_lock_stateid);
6651}
6652
6653static u64
6654nfsd_print_client_locks(struct nfs4_client *clp)
6655{
6656 u64 count = nfsd_foreach_client_lock(clp, 0, NULL, NULL);
6657 nfsd_print_count(clp, count, "locked files");
6658 return count;
6659}
6660
6661u64
6662nfsd_inject_print_locks(void)
6663{
6664 struct nfs4_client *clp;
6665 u64 count = 0;
6666 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6667 nfsd_net_id);
6668
6669 if (!nfsd_netns_ready(nn))
6670 return 0;
6671
6672 spin_lock(&nn->client_lock);
6673 list_for_each_entry(clp, &nn->client_lru, cl_lru)
6674 count += nfsd_print_client_locks(clp);
6675 spin_unlock(&nn->client_lock);
6676
6677 return count;
6678}
6679
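/*
 * Dispose of the lock stateids gathered by nfsd_collect_client_locks():
 * drop each stateid's reference and the cl_refcount bump taken in
 * nfsd_inject_add_lock_to_list().
 */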
6680static void
6681nfsd_reap_locks(struct list_head *reaplist)
6682{
6683 struct nfs4_client *clp;
6684 struct nfs4_ol_stateid *stp, *next;
6685
6686 list_for_each_entry_safe(stp, next, reaplist, st_locks) {
6687 list_del_init(&stp->st_locks);
6688 clp = stp->st_stid.sc_client;
6689 nfs4_put_stid(&stp->st_stid);
6690 put_client(clp);
6691 }
6692}
6693
6694u64
6695nfsd_inject_forget_client_locks(struct sockaddr_storage *addr, size_t addr_size)
6696{
6697 unsigned int count = 0;
6698 struct nfs4_client *clp;
6699 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6700 nfsd_net_id);
6701 LIST_HEAD(reaplist);
6702
6703 if (!nfsd_netns_ready(nn))
6704 return count;
6705
6706 spin_lock(&nn->client_lock);
6707 clp = nfsd_find_client(addr, addr_size);
6708 if (clp)
6709 count = nfsd_collect_client_locks(clp, &reaplist, 0);
6710 spin_unlock(&nn->client_lock);
6711 nfsd_reap_locks(&reaplist);
6712 return count;
6713}
6714
6715u64
6716nfsd_inject_forget_locks(u64 max)
6717{
6718 u64 count = 0;
6719 struct nfs4_client *clp;
6720 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6721 nfsd_net_id);
6722 LIST_HEAD(reaplist);
6723
6724 if (!nfsd_netns_ready(nn))
6725 return count;
6726
6727 spin_lock(&nn->client_lock);
6728 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6729 count += nfsd_collect_client_locks(clp, &reaplist, max - count);
6730 if (max != 0 && count >= max)
6731 break;
6732 }
6733 spin_unlock(&nn->client_lock);
6734 nfsd_reap_locks(&reaplist);
6735 return count;
6736}
6737
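/*
 * Apply @func to each of @clp's openowners, optionally collecting them with
 * an extra cl_refcount reference; as above, a @max of 0 means no limit.
 */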
6738static u64
6739nfsd_foreach_client_openowner(struct nfs4_client *clp, u64 max,
6740 struct list_head *collect,
6741 void (*func)(struct nfs4_openowner *))
6742{
6743 struct nfs4_openowner *oop, *next;
6744 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6745 nfsd_net_id);
6746 u64 count = 0;
6747
6748 lockdep_assert_held(&nn->client_lock);
6749
6750 spin_lock(&clp->cl_lock);
6751 list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
6752 if (func) {
6753 func(oop);
6754 if (collect) {
6755 atomic_inc(&clp->cl_refcount);
6756 list_add(&oop->oo_perclient, collect);
6757 }
6758 }
6759 ++count;
6760 /*
6761 * Despite the fact that these functions deal with
6762 * 64-bit integers for "count", we must ensure that
6763 * it doesn't blow up the clp->cl_refcount. Throw a
6764 * warning if we start to approach INT_MAX here.
6765 */
6766 WARN_ON_ONCE(count == (INT_MAX / 2));
6767 if (count == max)
6768 break;
6769 }
6770 spin_unlock(&clp->cl_lock);
6771
6772 return count;
6773}
6774
6775static u64
6776nfsd_print_client_openowners(struct nfs4_client *clp)
6777{
6778 u64 count = nfsd_foreach_client_openowner(clp, 0, NULL, NULL);
6779
6780 nfsd_print_count(clp, count, "openowners");
6781 return count;
6782}
6783
6784static u64
6785nfsd_collect_client_openowners(struct nfs4_client *clp,
6786 struct list_head *collect, u64 max)
6787{
6788 return nfsd_foreach_client_openowner(clp, max, collect,
6789 unhash_openowner_locked);
6790}
6791
6792u64
6793nfsd_inject_print_openowners(void)
6794{
6795 struct nfs4_client *clp;
6796 u64 count = 0;
6797 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6798 nfsd_net_id);
6799
6800 if (!nfsd_netns_ready(nn))
6801 return 0;
6802
6803 spin_lock(&nn->client_lock);
6804 list_for_each_entry(clp, &nn->client_lru, cl_lru)
6805 count += nfsd_print_client_openowners(clp);
6806 spin_unlock(&nn->client_lock);
6807
6808 return count;
6809}
6810
6811static void
6812nfsd_reap_openowners(struct list_head *reaplist)
6813{
6814 struct nfs4_client *clp;
6815 struct nfs4_openowner *oop, *next;
6816
6817 list_for_each_entry_safe(oop, next, reaplist, oo_perclient) {
6818 list_del_init(&oop->oo_perclient);
6819 clp = oop->oo_owner.so_client;
6820 release_openowner(oop);
6821 put_client(clp);
6822 }
6823}
6824
6825u64
6826nfsd_inject_forget_client_openowners(struct sockaddr_storage *addr,
6827 size_t addr_size)
6828{
6829 unsigned int count = 0;
6830 struct nfs4_client *clp;
6831 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6832 nfsd_net_id);
6833 LIST_HEAD(reaplist);
6834
6835 if (!nfsd_netns_ready(nn))
6836 return count;
6837
6838 spin_lock(&nn->client_lock);
6839 clp = nfsd_find_client(addr, addr_size);
6840 if (clp)
6841 count = nfsd_collect_client_openowners(clp, &reaplist, 0);
6842 spin_unlock(&nn->client_lock);
6843 nfsd_reap_openowners(&reaplist);
6844 return count;
6845}
6846
6847u64
6848nfsd_inject_forget_openowners(u64 max)
6849{
6850 u64 count = 0;
6851 struct nfs4_client *clp;
6852 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6853 nfsd_net_id);
6854 LIST_HEAD(reaplist);
6855
6856 if (!nfsd_netns_ready(nn))
6857 return count;
6858
6859 spin_lock(&nn->client_lock);
6860 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6861 count += nfsd_collect_client_openowners(clp, &reaplist,
6862 max - count);
6863 if (max != 0 && count >= max)
6864 break;
6865 }
6866 spin_unlock(&nn->client_lock);
6867 nfsd_reap_openowners(&reaplist);
6868 return count;
6869}
6870
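/*
 * Walk @clp's delegations under the state_lock.  When @victims is given,
 * delegations that are already being recalled (non-zero dl_time) are left
 * alone; the rest are unhashed and moved onto @victims along with a
 * cl_refcount reference.
 */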
6871static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
6872 struct list_head *victims)
6873{
6874 struct nfs4_delegation *dp, *next;
6875 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6876 nfsd_net_id);
6877 u64 count = 0;
6878
6879 lockdep_assert_held(&nn->client_lock);
6880
6881 spin_lock(&state_lock);
6882 list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
6883 if (victims) {
6884 /*
6885 * It's not safe to mess with delegations that have a
6886 * non-zero dl_time. They might have already been broken
6887 * and could be processed by the laundromat outside of
6888 * the state_lock. Just leave them be.
6889 */
6890 if (dp->dl_time != 0)
6891 continue;
6892
6893 atomic_inc(&clp->cl_refcount);
6894 WARN_ON(!unhash_delegation_locked(dp));
6895 list_add(&dp->dl_recall_lru, victims);
6896 }
6897 ++count;
6898 /*
6899 * Despite the fact that these functions deal with
6900 * 64-bit integers for "count", we must ensure that
6901 * it doesn't blow up the clp->cl_refcount. Throw a
6902 * warning if we start to approach INT_MAX here.
6903 */
6904 WARN_ON_ONCE(count == (INT_MAX / 2));
6905 if (count == max)
6906 break;
6907 }
6908 spin_unlock(&state_lock);
6909 return count;
6910}
6911
6912static u64
6913nfsd_print_client_delegations(struct nfs4_client *clp)
6914{
6915 u64 count = nfsd_find_all_delegations(clp, 0, NULL);
6916
6917 nfsd_print_count(clp, count, "delegations");
6918 return count;
6919}
6920
6921u64
6922nfsd_inject_print_delegations(void)
6923{
6924 struct nfs4_client *clp;
6925 u64 count = 0;
6926 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6927 nfsd_net_id);
6928
6929 if (!nfsd_netns_ready(nn))
6930 return 0;
6931
6932 spin_lock(&nn->client_lock);
6933 list_for_each_entry(clp, &nn->client_lru, cl_lru)
6934 count += nfsd_print_client_delegations(clp);
6935 spin_unlock(&nn->client_lock);
6936
6937 return count;
6938}
6939
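/*
 * "Forget" revokes each collected delegation outright, while the recall
 * variant further down issues a CB_RECALL via nfsd_break_one_deleg().
 */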
6940static void
6941nfsd_forget_delegations(struct list_head *reaplist)
6942{
6943 struct nfs4_client *clp;
6944 struct nfs4_delegation *dp, *next;
6945
6946 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
6947 list_del_init(&dp->dl_recall_lru);
6948 clp = dp->dl_stid.sc_client;
6949 revoke_delegation(dp);
6950 put_client(clp);
6951 }
6952}
6953
6954u64
6955nfsd_inject_forget_client_delegations(struct sockaddr_storage *addr,
6956 size_t addr_size)
6957{
6958 u64 count = 0;
6959 struct nfs4_client *clp;
6960 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6961 nfsd_net_id);
6962 LIST_HEAD(reaplist);
6963
6964 if (!nfsd_netns_ready(nn))
6965 return count;
6966
6967 spin_lock(&nn->client_lock);
6968 clp = nfsd_find_client(addr, addr_size);
6969 if (clp)
6970 count = nfsd_find_all_delegations(clp, 0, &reaplist);
6971 spin_unlock(&nn->client_lock);
6972
6973 nfsd_forget_delegations(&reaplist);
6974 return count;
6975}
6976
6977u64
6978nfsd_inject_forget_delegations(u64 max)
6979{
6980 u64 count = 0;
6981 struct nfs4_client *clp;
6982 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
6983 nfsd_net_id);
6984 LIST_HEAD(reaplist);
6985
6986 if (!nfsd_netns_ready(nn))
6987 return count;
6988
6989 spin_lock(&nn->client_lock);
6990 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
6991 count += nfsd_find_all_delegations(clp, max - count, &reaplist);
6992 if (max != 0 && count >= max)
6993 break;
6994 }
6995 spin_unlock(&nn->client_lock);
6996 nfsd_forget_delegations(&reaplist);
6997 return count;
6998}
6999
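/*
 * Recall (rather than revoke) the collected delegations by kicking off the
 * normal delegation-break machinery for each one.
 */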
7000static void
7001nfsd_recall_delegations(struct list_head *reaplist)
7002{
7003 struct nfs4_client *clp;
7004 struct nfs4_delegation *dp, *next;
7005
7006 list_for_each_entry_safe(dp, next, reaplist, dl_recall_lru) {
7007 list_del_init(&dp->dl_recall_lru);
7008 clp = dp->dl_stid.sc_client;
7009 /*
7010 * nfsd_find_all_delegations() skipped all entries with a non-zero
7011 * dl_time, so it is safe to reset dl_time to 0 here. If a delegation
7012 * break comes in now, then it won't make any difference since
7013 * we're recalling it either way.
7014 */
7015 spin_lock(&state_lock);
7016 dp->dl_time = 0;
7017 spin_unlock(&state_lock);
7018 nfsd_break_one_deleg(dp);
7019 put_client(clp);
7020 }
7021}
7022
7023u64
7024nfsd_inject_recall_client_delegations(struct sockaddr_storage *addr,
7025 size_t addr_size)
7026{
7027 u64 count = 0;
7028 struct nfs4_client *clp;
7029 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7030 nfsd_net_id);
7031 LIST_HEAD(reaplist);
7032
7033 if (!nfsd_netns_ready(nn))
7034 return count;
7035
7036 spin_lock(&nn->client_lock);
7037 clp = nfsd_find_client(addr, addr_size);
7038 if (clp)
7039 count = nfsd_find_all_delegations(clp, 0, &reaplist);
7040 spin_unlock(&nn->client_lock);
7041
7042 nfsd_recall_delegations(&reaplist);
7043 return count;
7044}
7045
7046u64
7047nfsd_inject_recall_delegations(u64 max)
7048{
7049 u64 count = 0;
7050 struct nfs4_client *clp, *next;
7051 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns,
7052 nfsd_net_id);
7053 LIST_HEAD(reaplist);
7054
7055 if (!nfsd_netns_ready(nn))
7056 return count;
7057
7058 spin_lock(&nn->client_lock);
7059 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
7060 count += nfsd_find_all_delegations(clp, max - count, &reaplist);
7061 if (max != 0 && count >= max)
7062 break;
7063 }
7064 spin_unlock(&nn->client_lock);
7065 nfsd_recall_delegations(&reaplist);
7066 return count;
7067}
7068#endif /* CONFIG_NFSD_FAULT_INJECTION */
7069
7070/*
7071 * Since the lifetime of a delegation isn't limited to that of an open, a
7072 * client may quite reasonably hang on to a delegation as long as it has
7073 * the inode cached. This becomes an obvious problem the first time a
7074 * client's inode cache approaches the size of the server's total memory.
7075 *
7076 * For now we avoid this problem by imposing a hard limit on the number
7077 * of delegations, which varies according to the server's memory size.
7078 */
7079static void
7080set_max_delegations(void)
7081{
7082 /*
7083 * Allow at most 4 delegations per megabyte of RAM. Quick
7084 * estimates suggest that in the worst case (where every delegation
7085 * is for a different inode), a delegation could take about 1.5K,
7086 * giving a worst case usage of about 6% of memory.
7087 */
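	/*
	 * For example, with 4K pages (PAGE_SHIFT == 12) the shift below is
	 * 20 - 2 - 12 = 6: one delegation per 64 free buffer pages (256K),
	 * i.e. 4 per megabyte.
	 */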
7088 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
7089}
7090
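/*
 * Allocate and initialize the per-net nfs4 state: the confirmed and
 * unconfirmed client id hash tables, the session hash table, the client
 * name rbtrees, the LRU lists and their locks, and the laundromat work.
 * Takes a reference on @net that nfs4_state_destroy_net() drops.
 */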
7091static int nfs4_state_create_net(struct net *net)
7092{
7093 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7094 int i;
7095
7096 nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
7097 CLIENT_HASH_SIZE, GFP_KERNEL);
7098 if (!nn->conf_id_hashtbl)
7099 goto err;
7100 nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
7101 CLIENT_HASH_SIZE, GFP_KERNEL);
7102 if (!nn->unconf_id_hashtbl)
7103 goto err_unconf_id;
7104 nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
7105 SESSION_HASH_SIZE, GFP_KERNEL);
7106 if (!nn->sessionid_hashtbl)
7107 goto err_sessionid;
7108
7109 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7110 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
7111 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
7112 }
7113 for (i = 0; i < SESSION_HASH_SIZE; i++)
7114 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
7115 nn->conf_name_tree = RB_ROOT;
7116 nn->unconf_name_tree = RB_ROOT;
7117 nn->boot_time = get_seconds();
7118 nn->grace_ended = false;
7119 nn->nfsd4_manager.block_opens = true;
7120 INIT_LIST_HEAD(&nn->nfsd4_manager.list);
7121 INIT_LIST_HEAD(&nn->client_lru);
7122 INIT_LIST_HEAD(&nn->close_lru);
7123 INIT_LIST_HEAD(&nn->del_recall_lru);
7124 spin_lock_init(&nn->client_lock);
7125
7126 spin_lock_init(&nn->blocked_locks_lock);
7127 INIT_LIST_HEAD(&nn->blocked_locks_lru);
7128
7129 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
7130 get_net(net);
7131
7132 return 0;
7133
7134err_sessionid:
7135 kfree(nn->unconf_id_hashtbl);
7136err_unconf_id:
7137 kfree(nn->conf_id_hashtbl);
7138err:
7139 return -ENOMEM;
7140}
7141
7142static void
7143nfs4_state_destroy_net(struct net *net)
7144{
7145 int i;
7146 struct nfs4_client *clp = NULL;
7147 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7148
7149 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7150 while (!list_empty(&nn->conf_id_hashtbl[i])) {
7151 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7152 destroy_client(clp);
7153 }
7154 }
7155
7156 WARN_ON(!list_empty(&nn->blocked_locks_lru));
7157
7158 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
7159 while (!list_empty(&nn->unconf_id_hashtbl[i])) {
7160 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
7161 destroy_client(clp);
7162 }
7163 }
7164
7165 kfree(nn->sessionid_hashtbl);
7166 kfree(nn->unconf_id_hashtbl);
7167 kfree(nn->conf_id_hashtbl);
7168 put_net(net);
7169}
7170
7171int
7172nfs4_state_start_net(struct net *net)
7173{
7174 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7175 int ret;
7176
7177 ret = nfs4_state_create_net(net);
7178 if (ret)
7179 return ret;
7180 locks_start_grace(net, &nn->nfsd4_manager);
7181 nfsd4_client_tracking_init(net);
7182 printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n",
7183 nn->nfsd4_grace, net->ns.inum);
7184 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
7185 return 0;
7186}
7187
7188/* initialization to perform when the nfsd service is started: */
7189
7190int
7191nfs4_state_start(void)
7192{
7193 int ret;
7194
7195 ret = set_callback_cred();
7196 if (ret)
7197 return ret;
7198
7199 laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
7200 if (laundry_wq == NULL) {
7201 ret = -ENOMEM;
7202 goto out_cleanup_cred;
7203 }
7204 ret = nfsd4_create_callback_queue();
7205 if (ret)
7206 goto out_free_laundry;
7207
7208 set_max_delegations();
7209 return 0;
7210
7211out_free_laundry:
7212 destroy_workqueue(laundry_wq);
7213out_cleanup_cred:
7214 cleanup_callback_cred();
7215 return ret;
7216}
7217
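/*
 * Per-net shutdown: stop the laundromat, end the grace period, unhash and
 * destroy every delegation still waiting on the del_recall_lru, then tear
 * down client tracking and the per-net state allocated above.
 */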
7218void
7219nfs4_state_shutdown_net(struct net *net)
7220{
7221 struct nfs4_delegation *dp = NULL;
7222 struct list_head *pos, *next, reaplist;
7223 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7224
7225 cancel_delayed_work_sync(&nn->laundromat_work);
7226 locks_end_grace(&nn->nfsd4_manager);
7227
7228 INIT_LIST_HEAD(&reaplist);
7229 spin_lock(&state_lock);
7230 list_for_each_safe(pos, next, &nn->del_recall_lru) {
7231 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
7232 WARN_ON(!unhash_delegation_locked(dp));
7233 list_add(&dp->dl_recall_lru, &reaplist);
7234 }
7235 spin_unlock(&state_lock);
7236 list_for_each_safe(pos, next, &reaplist) {
7237 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
7238 list_del_init(&dp->dl_recall_lru);
7239 destroy_unhashed_deleg(dp);
7240 }
7241
7242 nfsd4_client_tracking_exit(net);
7243 nfs4_state_destroy_net(net);
7244}
7245
7246void
7247nfs4_state_shutdown(void)
7248{
7249 destroy_workqueue(laundry_wq);
7250 nfsd4_destroy_callback_queue();
7251 cleanup_callback_cred();
7252}
7253
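/*
 * Current stateid plumbing: put_stateid() saves an operation's result
 * stateid as the compound's current stateid (minor version 1 and above
 * only), and get_stateid() substitutes that saved value when a later
 * operation in the same compound passes the reserved "current" stateid
 * (see CURRENT_STATEID()).
 */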
7254static void
7255get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7256{
7257 if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
7258 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
7259}
7260
7261static void
7262put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
7263{
7264 if (cstate->minorversion) {
7265 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
7266 SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
7267 }
7268}
7269
7270void
7271clear_current_stateid(struct nfsd4_compound_state *cstate)
7272{
7273 CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
7274}
7275
7276/*
7277 * functions to set current state id
7278 */
7279void
7280nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
7281 union nfsd4_op_u *u)
7282{
7283 put_stateid(cstate, &u->open_downgrade.od_stateid);
7284}
7285
7286void
7287nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
7288 union nfsd4_op_u *u)
7289{
7290 put_stateid(cstate, &u->open.op_stateid);
7291}
7292
7293void
7294nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
7295 union nfsd4_op_u *u)
7296{
7297 put_stateid(cstate, &u->close.cl_stateid);
7298}
7299
7300void
7301nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
7302 union nfsd4_op_u *u)
7303{
7304 put_stateid(cstate, &u->lock.lk_resp_stateid);
7305}
7306
7307/*
7308 * functions to consume current state id
7309 */
7310
7311void
7312nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
7313 union nfsd4_op_u *u)
7314{
7315 get_stateid(cstate, &u->open_downgrade.od_stateid);
7316}
7317
7318void
7319nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
7320 union nfsd4_op_u *u)
7321{
7322 get_stateid(cstate, &u->delegreturn.dr_stateid);
7323}
7324
7325void
7326nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
7327 union nfsd4_op_u *u)
7328{
7329 get_stateid(cstate, &u->free_stateid.fr_stateid);
7330}
7331
7332void
7333nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
7334 union nfsd4_op_u *u)
7335{
7336 get_stateid(cstate, &u->setattr.sa_stateid);
7337}
7338
7339void
7340nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
7341 union nfsd4_op_u *u)
7342{
7343 get_stateid(cstate, &u->close.cl_stateid);
7344}
7345
7346void
7347nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
7348 union nfsd4_op_u *u)
7349{
7350 get_stateid(cstate, &u->locku.lu_stateid);
7351}
7352
7353void
7354nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
7355 union nfsd4_op_u *u)
7356{
7357 get_stateid(cstate, &u->read.rd_stateid);
7358}
7359
7360void
7361nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
7362 union nfsd4_op_u *u)
7363{
7364 get_stateid(cstate, &u->write.wr_stateid);
7365}