   1/*
   2*  Copyright (c) 2001 The Regents of the University of Michigan.
   3*  All rights reserved.
   4*
   5*  Kendrick Smith <kmsmith@umich.edu>
   6*  Andy Adamson <kandros@umich.edu>
   7*
   8*  Redistribution and use in source and binary forms, with or without
   9*  modification, are permitted provided that the following conditions
  10*  are met:
  11*
  12*  1. Redistributions of source code must retain the above copyright
  13*     notice, this list of conditions and the following disclaimer.
  14*  2. Redistributions in binary form must reproduce the above copyright
  15*     notice, this list of conditions and the following disclaimer in the
  16*     documentation and/or other materials provided with the distribution.
  17*  3. Neither the name of the University nor the names of its
  18*     contributors may be used to endorse or promote products derived
  19*     from this software without specific prior written permission.
  20*
  21*  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
  22*  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
  23*  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  24*  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
  25*  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  26*  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  27*  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
  28*  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  29*  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
  30*  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  31*  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  32*
  33*/
  34
  35#include <linux/file.h>
  36#include <linux/fs.h>
  37#include <linux/slab.h>
  38#include <linux/namei.h>
  39#include <linux/swap.h>
  40#include <linux/pagemap.h>
 
  41#include <linux/sunrpc/svcauth_gss.h>
  42#include <linux/sunrpc/clnt.h>
  43#include "xdr4.h"
 
  44#include "vfs.h"
  45#include "current_stateid.h"
  46
  47#define NFSDDBG_FACILITY                NFSDDBG_PROC
  48
  49/* Globals */
  50time_t nfsd4_lease = 90;     /* default lease time */
  51time_t nfsd4_grace = 90;
  52static time_t boot_time;
  53
  54#define all_ones {{~0,~0},~0}
  55static const stateid_t one_stateid = {
  56	.si_generation = ~0,
  57	.si_opaque = all_ones,
  58};
  59static const stateid_t zero_stateid = {
  60	/* all fields zero */
  61};
  62static const stateid_t currentstateid = {
  63	.si_generation = 1,
  64};
  65
  66static u64 current_sessionid = 1;
  67
  68#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
  69#define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
  70#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
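/*
 * The all-zeros stateid is the protocol's "anonymous" stateid and the
 * all-ones stateid is the READ-bypass stateid; "currentstateid"
 * (generation 1, opaque part all zero) is the NFSv4.1 special value
 * meaning "use the stateid saved as current earlier in this compound".
 */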
 
  71
  72/* forward declarations */
  73static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner);
  74
  75/* Locking: */
  76
  77/* Currently used for almost all code touching nfsv4 state: */
  78static DEFINE_MUTEX(client_mutex);
  79
  80/*
  81 * Currently used for the del_recall_lru and file hash table.  In an
  82 * effort to decrease the scope of the client_mutex, this spinlock may
  83 * eventually cover more:
  84 */
  85static DEFINE_SPINLOCK(recall_lock);
  86
  87static struct kmem_cache *openowner_slab = NULL;
  88static struct kmem_cache *lockowner_slab = NULL;
  89static struct kmem_cache *file_slab = NULL;
  90static struct kmem_cache *stateid_slab = NULL;
  91static struct kmem_cache *deleg_slab = NULL;
  92
  93void
  94nfs4_lock_state(void)
  95{
  96	mutex_lock(&client_mutex);
  97}
  98
  99static void free_session(struct kref *);
 100
 101/* Must be called under the client_lock */
 102static void nfsd4_put_session_locked(struct nfsd4_session *ses)
 103{
 104	kref_put(&ses->se_ref, free_session);
 105}
 106
 107static void nfsd4_get_session(struct nfsd4_session *ses)
 108{
 109	kref_get(&ses->se_ref);
 110}
 111
 112void
 113nfs4_unlock_state(void)
 114{
 115	mutex_unlock(&client_mutex);
 116}
 117
 118static inline u32
 119opaque_hashval(const void *ptr, int nbytes)
 120{
 121	unsigned char *cptr = (unsigned char *) ptr;
 122
 123	u32 x = 0;
 124	while (nbytes--) {
 125		x *= 37;
 126		x += *cptr++;
 127	}
 128	return x;
 129}
 130
 131static struct list_head del_recall_lru;
 132
 133static void nfsd4_free_file(struct nfs4_file *f)
 134{
 135	kmem_cache_free(file_slab, f);
 
 
 136}
 137
 138static inline void
 139put_nfs4_file(struct nfs4_file *fi)
 140{
 141	if (atomic_dec_and_lock(&fi->fi_ref, &recall_lock)) {
 142		list_del(&fi->fi_hash);
 143		spin_unlock(&recall_lock);
 144		iput(fi->fi_inode);
 145		nfsd4_free_file(fi);
 146	}
 147}
 148
 149static inline void
 150get_nfs4_file(struct nfs4_file *fi)
 151{
 152	atomic_inc(&fi->fi_ref);
 153}
 154
 155static int num_delegations;
 156unsigned int max_delegations;
 157
 158/*
 159 * Open owner state (share locks)
 160 */
 161
 162/* hash tables for lock and open owners */
 163#define OWNER_HASH_BITS              8
 164#define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
 165#define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)
 166
 167static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
 168{
 169	unsigned int ret;
 170
 171	ret = opaque_hashval(ownername->data, ownername->len);
 172	ret += clientid;
 173	return ret & OWNER_HASH_MASK;
 174}
 175
 176static struct list_head	ownerstr_hashtbl[OWNER_HASH_SIZE];
 177
 178/* hash table for nfs4_file */
 179#define FILE_HASH_BITS                   8
 180#define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)
 
 181
 182static unsigned int file_hashval(struct inode *ino)
 183{
 184	/* XXX: why are we hashing on inode pointer, anyway? */
 185	return hash_ptr(ino, FILE_HASH_BITS);
 186}
 
 
 187
 188static struct list_head file_hashtbl[FILE_HASH_SIZE];
 189
 190static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag)
 
 191{
 192	BUG_ON(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR]));
 193	atomic_inc(&fp->fi_access[oflag]);
 194}
 195
 196static void nfs4_file_get_access(struct nfs4_file *fp, int oflag)
 
 197{
 198	if (oflag == O_RDWR) {
 199		__nfs4_file_get_access(fp, O_RDONLY);
 200		__nfs4_file_get_access(fp, O_WRONLY);
 201	} else
 202		__nfs4_file_get_access(fp, oflag);
 203}
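/*
 * Note that an O_RDWR request is counted above as one read reference plus
 * one write reference, so fi_access[O_RDONLY] and fi_access[O_WRONLY]
 * track the two kinds of access independently even when they are backed
 * by a single O_RDWR struct file.
 */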
 204
 205static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag)
 206{
 207	if (fp->fi_fds[oflag]) {
 208		fput(fp->fi_fds[oflag]);
 209		fp->fi_fds[oflag] = NULL;
 210	}
 
 211}
 212
 213static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
 214{
 215	if (atomic_dec_and_test(&fp->fi_access[oflag])) {
 216		nfs4_file_put_fd(fp, oflag);
 217		/*
 218		 * It's also safe to get rid of the RDWR open *if*
 219		 * we no longer have need of the other kind of access
 220		 * or if we already have the other kind of open:
 221		 */
 222		if (fp->fi_fds[1-oflag]
 223			|| atomic_read(&fp->fi_access[1 - oflag]) == 0)
 224			nfs4_file_put_fd(fp, O_RDWR);
 225	}
 226}
 227
 228static void nfs4_file_put_access(struct nfs4_file *fp, int oflag)
 229{
 230	if (oflag == O_RDWR) {
 231		__nfs4_file_put_access(fp, O_RDONLY);
 
 232		__nfs4_file_put_access(fp, O_WRONLY);
 233	} else
 234		__nfs4_file_put_access(fp, oflag);
 235}
 236
 237static inline int get_new_stid(struct nfs4_stid *stid)
 238{
 239	static int min_stateid = 0;
 240	struct idr *stateids = &stid->sc_client->cl_stateids;
 241	int new_stid;
 242	int error;
 243
 244	error = idr_get_new_above(stateids, stid, min_stateid, &new_stid);
 245	/*
 246	 * Note: the necessary preallocation was done in
 247	 * nfs4_alloc_stateid().  The idr code caps the number of
 248	 * preallocations that can exist at a time, but the state lock
 249	 * prevents anyone from using ours before we get here:
 250	 */
 251	BUG_ON(error);
 252	/*
 253	 * It shouldn't be a problem to reuse an opaque stateid value.
 254	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
 255	 * example, a stray write retransmission could be accepted by
 256	 * the server when it should have been rejected.  Therefore,
 257	 * adopt a trick from the sctp code to attempt to maximize the
 258	 * amount of time until an id is reused, by ensuring they always
 259	 * "increase" (mod INT_MAX):
 260	 */
 261
 262	min_stateid = new_stid+1;
 263	if (min_stateid == INT_MAX)
 264		min_stateid = 0;
 265	return new_stid;
 266}
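/*
 * For example: if the previous call handed out id 41, min_stateid is now
 * 42, so even if id 41 is freed immediately the next allocation will not
 * hand it out again until the counter has wrapped past INT_MAX.
 */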
 267
 268static void init_stid(struct nfs4_stid *stid, struct nfs4_client *cl, unsigned char type)
 269{
 270	stateid_t *s = &stid->sc_stateid;
 271	int new_id;
 272
 273	stid->sc_type = type;
 274	stid->sc_client = cl;
 275	s->si_opaque.so_clid = cl->cl_clientid;
 276	new_id = get_new_stid(stid);
 277	s->si_opaque.so_id = (u32)new_id;
 278	/* Will be incremented before return to client: */
 279	s->si_generation = 0;
 280}
 281
 282static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab)
 283{
 284	struct idr *stateids = &cl->cl_stateids;
 
 285
 286	if (!idr_pre_get(stateids, GFP_KERNEL))
 287		return NULL;
 288	/*
 289	 * Note: if we fail here (or any time between now and the time
 290	 * we actually get the new idr), we won't need to undo the idr
 291	 * preallocation, since the idr code caps the number of
 292	 * preallocated entries.
 293	 */
 294	return kmem_cache_alloc(slab, GFP_KERNEL);
 295}
 296
 297static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp)
 298{
 299	return openlockstateid(nfs4_alloc_stid(clp, stateid_slab));
 300}
 301
 302static struct nfs4_delegation *
 303alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh, u32 type)
 
 304{
 305	struct nfs4_delegation *dp;
 306	struct nfs4_file *fp = stp->st_file;
 
 307
 308	dprintk("NFSD alloc_init_deleg\n");
 309	/*
 310	 * Major work on the lease subsystem (for example, to support
  311	 * callbacks on stat) will be required before we can support
 312	 * write delegations properly.
 313	 */
 314	if (type != NFS4_OPEN_DELEGATE_READ)
 315		return NULL;
 316	if (fp->fi_had_conflict)
 317		return NULL;
 318	if (num_delegations > max_delegations)
 319		return NULL;
 320	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
 321	if (dp == NULL)
 322		return dp;
 323	init_stid(&dp->dl_stid, clp, NFS4_DELEG_STID);
 324	/*
 325	 * delegation seqid's are never incremented.  The 4.1 special
 326	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
 327	 * 0 anyway just for consistency and use 1:
 328	 */
 329	dp->dl_stid.sc_stateid.si_generation = 1;
 330	num_delegations++;
 331	INIT_LIST_HEAD(&dp->dl_perfile);
 332	INIT_LIST_HEAD(&dp->dl_perclnt);
 333	INIT_LIST_HEAD(&dp->dl_recall_lru);
 334	get_nfs4_file(fp);
 335	dp->dl_file = fp;
 336	dp->dl_type = type;
 337	fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
 338	dp->dl_time = 0;
 339	atomic_set(&dp->dl_count, 1);
 340	INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc);
 341	return dp;
 342}
 343
 344void
 345nfs4_put_delegation(struct nfs4_delegation *dp)
 346{
 347	if (atomic_dec_and_test(&dp->dl_count)) {
 348		dprintk("NFSD: freeing dp %p\n",dp);
 349		put_nfs4_file(dp->dl_file);
 350		kmem_cache_free(deleg_slab, dp);
 351		num_delegations--;
 352	}
 353}
 354
 355static void nfs4_put_deleg_lease(struct nfs4_file *fp)
 356{
 357	if (atomic_dec_and_test(&fp->fi_delegees)) {
 358		vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
 359		fp->fi_lease = NULL;
 360		fput(fp->fi_deleg_file);
 361		fp->fi_deleg_file = NULL;
 362	}
 363}
 364
 365static void unhash_stid(struct nfs4_stid *s)
 
 366{
 367	struct idr *stateids = &s->sc_client->cl_stateids;
 368
 369	idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
 370}
 371
 372/* Called under the state lock. */
 373static void
 374unhash_delegation(struct nfs4_delegation *dp)
 375{
 376	unhash_stid(&dp->dl_stid);
 377	list_del_init(&dp->dl_perclnt);
 378	spin_lock(&recall_lock);
 379	list_del_init(&dp->dl_perfile);
 380	list_del_init(&dp->dl_recall_lru);
 381	spin_unlock(&recall_lock);
 382	nfs4_put_deleg_lease(dp->dl_file);
 383	nfs4_put_delegation(dp);
 
 384}
 385
 386/* 
 387 * SETCLIENTID state 
 388 */
 
 389
 390/* client_lock protects the client lru list and session hash table */
 391static DEFINE_SPINLOCK(client_lock);
 392
 393/* Hash tables for nfs4_clientid state */
 394#define CLIENT_HASH_BITS                 4
 395#define CLIENT_HASH_SIZE                (1 << CLIENT_HASH_BITS)
 396#define CLIENT_HASH_MASK                (CLIENT_HASH_SIZE - 1)
 397
 398static unsigned int clientid_hashval(u32 id)
 399{
 400	return id & CLIENT_HASH_MASK;
 
 
 401}
 402
 403static unsigned int clientstr_hashval(const char *name)
 404{
 405	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
 406}
 407
 408/*
 409 * reclaim_str_hashtbl[] holds known client info from previous reset/reboot
 410 * used in reboot/reset lease grace period processing
 411 *
 412 * conf_id_hashtbl[], and conf_str_hashtbl[] hold confirmed
 413 * setclientid_confirmed info. 
 414 *
  415 * unconf_str_hashtbl[] and unconf_id_hashtbl[] hold unconfirmed 
 416 * setclientid info.
 417 *
 418 * client_lru holds client queue ordered by nfs4_client.cl_time
 419 * for lease renewal.
 420 *
 421 * close_lru holds (open) stateowner queue ordered by nfs4_stateowner.so_time
 422 * for last close replay.
 423 */
 424static struct list_head	reclaim_str_hashtbl[CLIENT_HASH_SIZE];
 425static int reclaim_str_hashtbl_size = 0;
 426static struct list_head	conf_id_hashtbl[CLIENT_HASH_SIZE];
 427static struct list_head	conf_str_hashtbl[CLIENT_HASH_SIZE];
 428static struct list_head	unconf_str_hashtbl[CLIENT_HASH_SIZE];
 429static struct list_head	unconf_id_hashtbl[CLIENT_HASH_SIZE];
 430static struct list_head client_lru;
 431static struct list_head close_lru;
 432
 433/*
 434 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 435 * st_{access,deny}_bmap field of the stateid, in order to track not
 436 * only what share bits are currently in force, but also what
 437 * combinations of share bits previous opens have used.  This allows us
 438 * to enforce the recommendation of rfc 3530 14.2.19 that the server
  439 * return an error if the client attempts to downgrade to a combination
 440 * of share bits not explicable by closing some of its previous opens.
 441 *
 442 * XXX: This enforcement is actually incomplete, since we don't keep
 443 * track of access/deny bit combinations; so, e.g., we allow:
 444 *
 445 *	OPEN allow read, deny write
 446 *	OPEN allow both, deny none
 447 *	DOWNGRADE allow read, deny none
 448 *
 449 * which we should reject.
 450 */
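/*
 * For example: a stateid whose st_access_bmap has both the READ (1) and
 * WRITE (2) bits set yields bmap_to_share_mode() == NFS4_SHARE_ACCESS_BOTH,
 * exactly as if the BOTH (3) bit had been set by a single combined open.
 */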
 451static unsigned int
 452bmap_to_share_mode(unsigned long bmap) {
 453	int i;
 454	unsigned int access = 0;
 455
 456	for (i = 1; i < 4; i++) {
 457		if (test_bit(i, &bmap))
 458			access |= i;
 459	}
 460	return access;
 462
 463static bool
 464test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) {
 465	unsigned int access, deny;
 466
 467	access = bmap_to_share_mode(stp->st_access_bmap);
 468	deny = bmap_to_share_mode(stp->st_deny_bmap);
 469	if ((access & open->op_share_deny) || (deny & open->op_share_access))
 470		return false;
 471	return true;
 472}
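/*
 * In other words, the new OPEN conflicts if it requests an access mode
 * this stateid denies, or denies an access mode this stateid already
 * holds; e.g. an existing open that denies WRITE fails a new OPEN that
 * requests WRITE access.
 */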
 473
 474/* set share access for a given stateid */
 475static inline void
 476set_access(u32 access, struct nfs4_ol_stateid *stp)
 477{
 478	__set_bit(access, &stp->st_access_bmap);
 479}
 480
 481/* clear share access for a given stateid */
 482static inline void
 483clear_access(u32 access, struct nfs4_ol_stateid *stp)
 484{
 485	__clear_bit(access, &stp->st_access_bmap);
 486}
 487
 488/* test whether a given stateid has access */
 489static inline bool
 490test_access(u32 access, struct nfs4_ol_stateid *stp)
 491{
 492	return test_bit(access, &stp->st_access_bmap);
 493}
 494
 495/* set share deny for a given stateid */
 496static inline void
 497set_deny(u32 access, struct nfs4_ol_stateid *stp)
 
 
 498{
 499	__set_bit(access, &stp->st_deny_bmap);
 500}
 501
 502/* clear share deny for a given stateid */
 503static inline void
 504clear_deny(u32 access, struct nfs4_ol_stateid *stp)
 505{
 506	__clear_bit(access, &stp->st_deny_bmap);
 507}
 508
 509/* test whether a given stateid is denying specific access */
 510static inline bool
 511test_deny(u32 access, struct nfs4_ol_stateid *stp)
 512{
 513	return test_bit(access, &stp->st_deny_bmap);
 514}
 515
 516static int nfs4_access_to_omode(u32 access)
 
 517{
 518	switch (access & NFS4_SHARE_ACCESS_BOTH) {
 519	case NFS4_SHARE_ACCESS_READ:
 520		return O_RDONLY;
 521	case NFS4_SHARE_ACCESS_WRITE:
 522		return O_WRONLY;
 523	case NFS4_SHARE_ACCESS_BOTH:
 524		return O_RDWR;
 
 525	}
 526	BUG();
 527}
 528
 529/* release all access and file references for a given stateid */
 530static void
 531release_all_access(struct nfs4_ol_stateid *stp)
 532{
 533	int i;
 534
 535	for (i = 1; i < 4; i++) {
 536		if (test_access(i, stp))
 537			nfs4_file_put_access(stp->st_file,
 538					     nfs4_access_to_omode(i));
 539		clear_access(i, stp);
 540	}
 541}
 542
 543static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
 544{
 545	list_del(&stp->st_perfile);
 546	list_del(&stp->st_perstateowner);
 
 547}
 548
 549static void close_generic_stateid(struct nfs4_ol_stateid *stp)
 550{
 551	release_all_access(stp);
 552	put_nfs4_file(stp->st_file);
 553	stp->st_file = NULL;
 
 
 554}
 555
 556static void free_generic_stateid(struct nfs4_ol_stateid *stp)
 557{
 558	kmem_cache_free(stateid_slab, stp);
 559}
 560
 561static void release_lock_stateid(struct nfs4_ol_stateid *stp)
 562{
 563	struct file *file;
 564
 565	unhash_generic_stateid(stp);
 566	unhash_stid(&stp->st_stid);
 567	file = find_any_file(stp->st_file);
 568	if (file)
 569		locks_remove_posix(file, (fl_owner_t)lockowner(stp->st_stateowner));
 570	close_generic_stateid(stp);
 571	free_generic_stateid(stp);
 572}
 573
 574static void unhash_lockowner(struct nfs4_lockowner *lo)
 575{
 576	struct nfs4_ol_stateid *stp;
 577
 578	list_del(&lo->lo_owner.so_strhash);
 579	list_del(&lo->lo_perstateid);
 580	list_del(&lo->lo_owner_ino_hash);
 581	while (!list_empty(&lo->lo_owner.so_stateids)) {
 582		stp = list_first_entry(&lo->lo_owner.so_stateids,
 583				struct nfs4_ol_stateid, st_perstateowner);
 584		release_lock_stateid(stp);
 585	}
 586}
 587
 588static void release_lockowner(struct nfs4_lockowner *lo)
 589{
 590	unhash_lockowner(lo);
 591	nfs4_free_lockowner(lo);
 592}
 593
 594static void
 595release_stateid_lockowners(struct nfs4_ol_stateid *open_stp)
 596{
 597	struct nfs4_lockowner *lo;
 
 598
 599	while (!list_empty(&open_stp->st_lockowners)) {
 600		lo = list_entry(open_stp->st_lockowners.next,
 601				struct nfs4_lockowner, lo_perstateid);
 602		release_lockowner(lo);
 603	}
 604}
 605
 606static void unhash_open_stateid(struct nfs4_ol_stateid *stp)
 
 607{
 608	unhash_generic_stateid(stp);
 609	release_stateid_lockowners(stp);
 610	close_generic_stateid(stp);
 611}
 612
 613static void release_open_stateid(struct nfs4_ol_stateid *stp)
 614{
 615	unhash_open_stateid(stp);
 616	unhash_stid(&stp->st_stid);
 617	free_generic_stateid(stp);
 618}
 619
 620static void unhash_openowner(struct nfs4_openowner *oo)
 621{
 622	struct nfs4_ol_stateid *stp;
 623
 624	list_del(&oo->oo_owner.so_strhash);
 625	list_del(&oo->oo_perclient);
 626	while (!list_empty(&oo->oo_owner.so_stateids)) {
 627		stp = list_first_entry(&oo->oo_owner.so_stateids,
 628				struct nfs4_ol_stateid, st_perstateowner);
 629		release_open_stateid(stp);
 630	}
 631}
 632
 633static void release_last_closed_stateid(struct nfs4_openowner *oo)
 634{
 635	struct nfs4_ol_stateid *s = oo->oo_last_closed_stid;
 
 
 636
 
 
 637	if (s) {
 638		unhash_stid(&s->st_stid);
 639		free_generic_stateid(s);
 640		oo->oo_last_closed_stid = NULL;
 641	}
 642}
 643
 644static void release_openowner(struct nfs4_openowner *oo)
 645{
 646	unhash_openowner(oo);
 647	list_del(&oo->oo_close_lru);
 648	release_last_closed_stateid(oo);
 649	nfs4_free_openowner(oo);
 650}
 651
 652#define SESSION_HASH_SIZE	512
 653static struct list_head sessionid_hashtbl[SESSION_HASH_SIZE];
 654
 655static inline int
 656hash_sessionid(struct nfs4_sessionid *sessionid)
 657{
 658	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
 659
 660	return sid->sequence % SESSION_HASH_SIZE;
 661}
 662
 663#ifdef NFSD_DEBUG
 664static inline void
 665dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
 666{
 667	u32 *ptr = (u32 *)(&sessionid->data[0]);
 668	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
 669}
 670#else
 671static inline void
 672dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
 673{
 674}
 675#endif
 676
 677
 678static void
 679gen_sessionid(struct nfsd4_session *ses)
 680{
 681	struct nfs4_client *clp = ses->se_client;
 682	struct nfsd4_sessionid *sid;
 683
 684	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
 685	sid->clientid = clp->cl_clientid;
 686	sid->sequence = current_sessionid++;
 687	sid->reserved = 0;
 688}
 689
 690/*
  691 * The protocol defines ca_maxresponsesize_cached to include the size of
 692 * the rpc header, but all we need to cache is the data starting after
 693 * the end of the initial SEQUENCE operation--the rest we regenerate
  694 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 695 * value that is the number of bytes in our cache plus a few additional
 696 * bytes.  In order to stay on the safe side, and not promise more than
 697 * we can cache, those additional bytes must be the minimum possible: 24
 698 * bytes of rpc header (xid through accept state, with AUTH_NULL
 699 * verifier), 12 for the compound header (with zero-length tag), and 44
 700 * for the SEQUENCE op response:
 701 */
 702#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
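/*
 * That comes to 80 bytes of header that we regenerate rather than cache,
 * but must still account for when advertising ca_maxresponsesize_cached.
 */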
 703
 704static void
 705free_session_slots(struct nfsd4_session *ses)
 706{
 707	int i;
 708
 709	for (i = 0; i < ses->se_fchannel.maxreqs; i++)
 
 710		kfree(ses->se_slots[i]);
 
 711}
 712
 713/*
 714 * We don't actually need to cache the rpc and session headers, so we
 715 * can allocate a little less for each slot:
 716 */
 717static inline int slot_bytes(struct nfsd4_channel_attrs *ca)
 718{
 719	return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
 720}
 721
 722static int nfsd4_sanitize_slot_size(u32 size)
 723{
 724	size -= NFSD_MIN_HDR_SEQ_SZ; /* We don't cache the rpc header */
 725	size = min_t(u32, size, NFSD_SLOT_CACHE_SIZE);
 726
 727	return size;
 728}
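/*
 * For example, a client asking for a 4096-byte cached response ends up
 * with a per-slot cache of min(4096 - 80, NFSD_SLOT_CACHE_SIZE) bytes.
 */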
 729
 730/*
 731 * XXX: If we run out of reserved DRC memory we could (up to a point)
 732 * re-negotiate active sessions and reduce their slot usage to make
 733 * room for new connections. For now we just fail the create session.
 734 */
 735static int nfsd4_get_drc_mem(int slotsize, u32 num)
 736{
 737	int avail;
 738
 739	num = min_t(u32, num, NFSD_MAX_SLOTS_PER_SESSION);
 
 740
 741	spin_lock(&nfsd_drc_lock);
 742	avail = min_t(int, NFSD_MAX_MEM_PER_SESSION,
 743			nfsd_drc_max_mem - nfsd_drc_mem_used);
 744	num = min_t(int, num, avail / slotsize);
 
 745	nfsd_drc_mem_used += num * slotsize;
 746	spin_unlock(&nfsd_drc_lock);
 747
 748	return num;
 749}
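/*
 * So a session gets at most NFSD_MAX_SLOTS_PER_SESSION slots, further
 * limited by how much reserved DRC memory remains; under memory pressure
 * the returned count can drop to zero, in which case session creation
 * fails rather than shrinking the slot size.
 */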
 750
 751static void nfsd4_put_drc_mem(int slotsize, int num)
 752{
 
 
 753	spin_lock(&nfsd_drc_lock);
 754	nfsd_drc_mem_used -= slotsize * num;
 755	spin_unlock(&nfsd_drc_lock);
 756}
 757
 758static struct nfsd4_session *alloc_session(int slotsize, int numslots)
 
 759{
 
 
 760	struct nfsd4_session *new;
 761	int mem, i;
 762
 763	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
 764			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
 765	mem = numslots * sizeof(struct nfsd4_slot *);
 766
 767	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
 768	if (!new)
 769		return NULL;
 770	/* allocate each struct nfsd4_slot and data cache in one piece */
 771	for (i = 0; i < numslots; i++) {
 772		mem = sizeof(struct nfsd4_slot) + slotsize;
 773		new->se_slots[i] = kzalloc(mem, GFP_KERNEL);
 774		if (!new->se_slots[i])
 775			goto out_free;
 776	}
 777	return new;
 778out_free:
 779	while (i--)
 780		kfree(new->se_slots[i]);
 781	kfree(new);
 782	return NULL;
 783}
 784
 785static void init_forechannel_attrs(struct nfsd4_channel_attrs *new, struct nfsd4_channel_attrs *req, int numslots, int slotsize)
 786{
 787	u32 maxrpc = nfsd_serv->sv_max_mesg;
 788
 789	new->maxreqs = numslots;
 790	new->maxresp_cached = min_t(u32, req->maxresp_cached,
 791					slotsize + NFSD_MIN_HDR_SEQ_SZ);
 792	new->maxreq_sz = min_t(u32, req->maxreq_sz, maxrpc);
 793	new->maxresp_sz = min_t(u32, req->maxresp_sz, maxrpc);
 794	new->maxops = min_t(u32, req->maxops, NFSD_MAX_OPS_PER_COMPOUND);
 795}
 796
 797static void free_conn(struct nfsd4_conn *c)
 798{
 799	svc_xprt_put(c->cn_xprt);
 800	kfree(c);
 801}
 802
 803static void nfsd4_conn_lost(struct svc_xpt_user *u)
 804{
 805	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
 806	struct nfs4_client *clp = c->cn_session->se_client;
 807
 
 
 808	spin_lock(&clp->cl_lock);
 809	if (!list_empty(&c->cn_persession)) {
 810		list_del(&c->cn_persession);
 811		free_conn(c);
 812	}
 813	spin_unlock(&clp->cl_lock);
 814	nfsd4_probe_callback(clp);
 
 815}
 816
 817static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
 818{
 819	struct nfsd4_conn *conn;
 820
 821	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
 822	if (!conn)
 823		return NULL;
 824	svc_xprt_get(rqstp->rq_xprt);
 825	conn->cn_xprt = rqstp->rq_xprt;
 826	conn->cn_flags = flags;
 827	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
 828	return conn;
 829}
 830
 831static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
 832{
 833	conn->cn_session = ses;
 834	list_add(&conn->cn_persession, &ses->se_conns);
 835}
 836
 837static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
 838{
 839	struct nfs4_client *clp = ses->se_client;
 840
 841	spin_lock(&clp->cl_lock);
 842	__nfsd4_hash_conn(conn, ses);
 843	spin_unlock(&clp->cl_lock);
 844}
 845
 846static int nfsd4_register_conn(struct nfsd4_conn *conn)
 847{
 848	conn->cn_xpt_user.callback = nfsd4_conn_lost;
 849	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
 850}
 851
 852static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses, u32 dir)
 853{
 854	struct nfsd4_conn *conn;
 855	int ret;
 856
 857	conn = alloc_conn(rqstp, dir);
 858	if (!conn)
 859		return nfserr_jukebox;
 860	nfsd4_hash_conn(conn, ses);
 861	ret = nfsd4_register_conn(conn);
 862	if (ret)
 863		/* oops; xprt is already down: */
 864		nfsd4_conn_lost(&conn->cn_xpt_user);
 865	return nfs_ok;
 
 866}
 867
 868static __be32 nfsd4_new_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_session *ses)
 869{
 870	u32 dir = NFS4_CDFC4_FORE;
 871
 872	if (ses->se_flags & SESSION4_BACK_CHAN)
 873		dir |= NFS4_CDFC4_BACK;
 874
 875	return nfsd4_new_conn(rqstp, ses, dir);
 876}
 877
 878/* must be called under client_lock */
 879static void nfsd4_del_conns(struct nfsd4_session *s)
 880{
 881	struct nfs4_client *clp = s->se_client;
 882	struct nfsd4_conn *c;
 883
 884	spin_lock(&clp->cl_lock);
 885	while (!list_empty(&s->se_conns)) {
 886		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
 887		list_del_init(&c->cn_persession);
 888		spin_unlock(&clp->cl_lock);
 889
 890		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
 891		free_conn(c);
 892
 893		spin_lock(&clp->cl_lock);
 894	}
 895	spin_unlock(&clp->cl_lock);
 896}
 897
 898static void free_session(struct kref *kref)
 899{
 900	struct nfsd4_session *ses;
 901	int mem;
 902
 903	lockdep_assert_held(&client_lock);
 904	ses = container_of(kref, struct nfsd4_session, se_ref);
 905	nfsd4_del_conns(ses);
 906	spin_lock(&nfsd_drc_lock);
 907	mem = ses->se_fchannel.maxreqs * slot_bytes(&ses->se_fchannel);
 908	nfsd_drc_mem_used -= mem;
 909	spin_unlock(&nfsd_drc_lock);
 910	free_session_slots(ses);
 911	kfree(ses);
 912}
 913
 914void nfsd4_put_session(struct nfsd4_session *ses)
 915{
 916	spin_lock(&client_lock);
 917	nfsd4_put_session_locked(ses);
 918	spin_unlock(&client_lock);
 919}
 920
 921static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct nfs4_client *clp, struct nfsd4_create_session *cses)
 922{
 923	struct nfsd4_session *new;
 924	struct nfsd4_channel_attrs *fchan = &cses->fore_channel;
 925	int numslots, slotsize;
 926	__be32 status;
 927	int idx;
 928
 929	/*
 930	 * Note decreasing slot size below client's request may
 931	 * make it difficult for client to function correctly, whereas
 932	 * decreasing the number of slots will (just?) affect
 933	 * performance.  When short on memory we therefore prefer to
 934	 * decrease number of slots instead of their size.
 935	 */
 936	slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached);
 937	numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs);
 938	if (numslots < 1)
 939		return NULL;
 940
 941	new = alloc_session(slotsize, numslots);
 942	if (!new) {
 943		nfsd4_put_drc_mem(slotsize, fchan->maxreqs);
 944		return NULL;
 945	}
 946	init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize);
 947
 948	new->se_client = clp;
 949	gen_sessionid(new);
 950
 951	INIT_LIST_HEAD(&new->se_conns);
 952
 953	new->se_cb_seq_nr = 1;
 954	new->se_flags = cses->flags;
 955	new->se_cb_prog = cses->callback_prog;
 956	kref_init(&new->se_ref);
 
 957	idx = hash_sessionid(&new->se_sessionid);
 958	spin_lock(&client_lock);
 959	list_add(&new->se_hash, &sessionid_hashtbl[idx]);
 960	spin_lock(&clp->cl_lock);
 961	list_add(&new->se_perclnt, &clp->cl_sessions);
 962	spin_unlock(&clp->cl_lock);
 963	spin_unlock(&client_lock);
 964
 965	status = nfsd4_new_conn_from_crses(rqstp, new);
 966	/* whoops: benny points out, status is ignored! (err, or bogus) */
 967	if (status) {
 968		spin_lock(&client_lock);
 969		free_session(&new->se_ref);
 970		spin_unlock(&client_lock);
 971		return NULL;
 972	}
 973	if (cses->flags & SESSION4_BACK_CHAN) {
 974		struct sockaddr *sa = svc_addr(rqstp);
 975		/*
 976		 * This is a little silly; with sessions there's no real
 977		 * use for the callback address.  Use the peer address
 978		 * as a reasonable default for now, but consider fixing
 979		 * the rpc client not to require an address in the
 980		 * future:
 981		 */
 982		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
 983		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
 984	}
 985	nfsd4_probe_callback(clp);
 986	return new;
 987}
 988
 989/* caller must hold client_lock */
 990static struct nfsd4_session *
 991find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid)
 992{
 993	struct nfsd4_session *elem;
 994	int idx;
 995
 996	dump_sessionid(__func__, sessionid);
 997	idx = hash_sessionid(sessionid);
 998	/* Search in the appropriate list */
 999	list_for_each_entry(elem, &sessionid_hashtbl[idx], se_hash) {
1000		if (!memcmp(elem->se_sessionid.data, sessionid->data,
1001			    NFS4_MAX_SESSIONID_LEN)) {
1002			return elem;
1003		}
1004	}
1005
1006	dprintk("%s: session not found\n", __func__);
1007	return NULL;
1008}
1009
1010/* caller must hold client_lock */
1011static void
1012unhash_session(struct nfsd4_session *ses)
1013{
1014	list_del(&ses->se_hash);
1015	spin_lock(&ses->se_client->cl_lock);
1016	list_del(&ses->se_perclnt);
1017	spin_unlock(&ses->se_client->cl_lock);
1018}
1019
1020/* must be called under the client_lock */
1021static inline void
1022renew_client_locked(struct nfs4_client *clp)
1023{
1024	if (is_client_expired(clp)) {
1025		WARN_ON(1);
1026		printk("%s: client (clientid %08x/%08x) already expired\n",
1027			__func__,
1028			clp->cl_clientid.cl_boot,
1029			clp->cl_clientid.cl_id);
1030		return;
1031	}
1032
1033	dprintk("renewing client (clientid %08x/%08x)\n", 
1034			clp->cl_clientid.cl_boot, 
1035			clp->cl_clientid.cl_id);
1036	list_move_tail(&clp->cl_lru, &client_lru);
1037	clp->cl_time = get_seconds();
1038}
1039
1040static inline void
1041renew_client(struct nfs4_client *clp)
1042{
1043	spin_lock(&client_lock);
1044	renew_client_locked(clp);
1045	spin_unlock(&client_lock);
1046}
1047
1048/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1049static int
1050STALE_CLIENTID(clientid_t *clid)
1051{
1052	if (clid->cl_boot == boot_time)
1053		return 0;
1054	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
1055		clid->cl_boot, clid->cl_id, boot_time);
1056	return 1;
1057}
1058
1059/* 
1060 * XXX Should we use a slab cache ?
1061 * This type of memory management is somewhat inefficient, but we use it
1062 * anyway since SETCLIENTID is not a common operation.
1063 */
1064static struct nfs4_client *alloc_client(struct xdr_netobj name)
 
1065{
1066	struct nfs4_client *clp;
 
1067
1068	clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
1069	if (clp == NULL)
1070		return NULL;
1071	clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
1072	if (clp->cl_name.data == NULL) {
1073		kfree(clp);
1074		return NULL;
1075	}
1076	clp->cl_name.len = name.len;
1077	return clp;
1078}
1079
1080static inline void
1081free_client(struct nfs4_client *clp)
1082{
1083	lockdep_assert_held(&client_lock);
1084	while (!list_empty(&clp->cl_sessions)) {
1085		struct nfsd4_session *ses;
1086		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
1087				se_perclnt);
1088		list_del(&ses->se_perclnt);
1089		nfsd4_put_session_locked(ses);
 
1090	}
1091	free_svc_cred(&clp->cl_cred);
1092	kfree(clp->cl_name.data);
1093	kfree(clp);
1094}
1095
1096void
1097release_session_client(struct nfsd4_session *session)
1098{
1099	struct nfs4_client *clp = session->se_client;
1100
1101	if (!atomic_dec_and_lock(&clp->cl_refcount, &client_lock))
1102		return;
1103	if (is_client_expired(clp)) {
1104		free_client(clp);
1105		session->se_client = NULL;
1106	} else
1107		renew_client_locked(clp);
1108	spin_unlock(&client_lock);
1109}
1110
1111/* must be called under the client_lock */
1112static inline void
1113unhash_client_locked(struct nfs4_client *clp)
1114{
 
1115	struct nfsd4_session *ses;
1116
1117	mark_client_expired(clp);
1118	list_del(&clp->cl_lru);
1119	spin_lock(&clp->cl_lock);
1120	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
1121		list_del_init(&ses->se_hash);
1122	spin_unlock(&clp->cl_lock);
1123}
1124
1125static void
1126expire_client(struct nfs4_client *clp)
1127{
1128	struct nfs4_openowner *oo;
1129	struct nfs4_delegation *dp;
1130	struct list_head reaplist;
1131
1132	INIT_LIST_HEAD(&reaplist);
1133	spin_lock(&recall_lock);
1134	while (!list_empty(&clp->cl_delegations)) {
1135		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
1136		list_del_init(&dp->dl_perclnt);
1137		list_move(&dp->dl_recall_lru, &reaplist);
1138	}
1139	spin_unlock(&recall_lock);
1140	while (!list_empty(&reaplist)) {
1141		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
1142		unhash_delegation(dp);
1143	}
1144	while (!list_empty(&clp->cl_openowners)) {
1145		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
 
1146		release_openowner(oo);
1147	}
1148	nfsd4_shutdown_callback(clp);
1149	if (clp->cl_cb_conn.cb_xprt)
1150		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
1151	list_del(&clp->cl_idhash);
1152	list_del(&clp->cl_strhash);
1153	spin_lock(&client_lock);
1154	unhash_client_locked(clp);
1155	if (atomic_read(&clp->cl_refcount) == 0)
1156		free_client(clp);
1157	spin_unlock(&client_lock);
1158}
1159
1160static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
1161{
1162	memcpy(target->cl_verifier.data, source->data,
1163			sizeof(target->cl_verifier.data));
1164}
1165
1166static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
1167{
1168	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; 
1169	target->cl_clientid.cl_id = source->cl_clientid.cl_id; 
1170}
1171
1172static int copy_cred(struct svc_cred *target, struct svc_cred *source)
1173{
1174	if (source->cr_principal) {
1175		target->cr_principal =
1176				kstrdup(source->cr_principal, GFP_KERNEL);
1177		if (target->cr_principal == NULL)
1178			return -ENOMEM;
1179	} else
1180		target->cr_principal = NULL;
 
 
1181	target->cr_flavor = source->cr_flavor;
1182	target->cr_uid = source->cr_uid;
1183	target->cr_gid = source->cr_gid;
1184	target->cr_group_info = source->cr_group_info;
1185	get_group_info(target->cr_group_info);
1186	return 0;
1187}
1188
1189static int same_name(const char *n1, const char *n2)
 
1190{
1191	return 0 == memcmp(n1, n2, HEXDIR_LEN);
1192}
1193
1194static int
1195same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
1196{
1197	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
1198}
1199
1200static int
1201same_clid(clientid_t *cl1, clientid_t *cl2)
1202{
1203	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
1204}
1205
1206static bool groups_equal(struct group_info *g1, struct group_info *g2)
1207{
1208	int i;
1209
1210	if (g1->ngroups != g2->ngroups)
1211		return false;
1212	for (i=0; i<g1->ngroups; i++)
1213		if (GROUP_AT(g1, i) != GROUP_AT(g2, i))
1214			return false;
1215	return true;
1216}
1217
1218static bool
1219same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
1220{
1221	if ((cr1->cr_flavor != cr2->cr_flavor)
1222		|| (cr1->cr_uid != cr2->cr_uid)
1223		|| (cr1->cr_gid != cr2->cr_gid)
1224		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
1225		return false;
 
1226	if (cr1->cr_principal == cr2->cr_principal)
1227		return true;
1228	if (!cr1->cr_principal || !cr2->cr_principal)
1229		return false;
1230	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
1231}
1232
1233static void gen_clid(struct nfs4_client *clp)
1234{
1235	static u32 current_clientid = 1;
1236
1237	clp->cl_clientid.cl_boot = boot_time;
1238	clp->cl_clientid.cl_id = current_clientid++; 
1239}
1240
1241static void gen_confirm(struct nfs4_client *clp)
1242{
1243	__be32 verf[2];
1244	static u32 i;
1245
1246	verf[0] = (__be32)get_seconds();
1247	verf[1] = (__be32)i++;
1248	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
1249}
1250
1251static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
1252{
1253	return idr_find(&cl->cl_stateids, t->si_opaque.so_id);
 
 
1254}
1255
1256static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
1257{
1258	struct nfs4_stid *s;
1259
1260	s = find_stateid(cl, t);
1261	if (!s)
1262		return NULL;
1263	if (typemask & s->sc_type)
1264		return s;
1265	return NULL;
1266}
1267
1268static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
1269		struct svc_rqst *rqstp, nfs4_verifier *verf)
1270{
1271	struct nfs4_client *clp;
1272	struct sockaddr *sa = svc_addr(rqstp);
1273	int ret;
1274
1275	clp = alloc_client(name);
1276	if (clp == NULL)
1277		return NULL;
1278
1279	INIT_LIST_HEAD(&clp->cl_sessions);
1280	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
1281	if (ret) {
1282		spin_lock(&client_lock);
1283		free_client(clp);
1284		spin_unlock(&client_lock);
1285		return NULL;
1286	}
1287	idr_init(&clp->cl_stateids);
1288	memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
1289	atomic_set(&clp->cl_refcount, 0);
1290	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1291	INIT_LIST_HEAD(&clp->cl_idhash);
1292	INIT_LIST_HEAD(&clp->cl_strhash);
1293	INIT_LIST_HEAD(&clp->cl_openowners);
1294	INIT_LIST_HEAD(&clp->cl_delegations);
1295	INIT_LIST_HEAD(&clp->cl_lru);
1296	INIT_LIST_HEAD(&clp->cl_callbacks);
1297	spin_lock_init(&clp->cl_lock);
1298	INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_do_callback_rpc);
1299	clp->cl_time = get_seconds();
1300	clear_bit(0, &clp->cl_cb_slot_busy);
1301	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1302	copy_verf(clp, verf);
1303	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
1304	gen_confirm(clp);
1305	clp->cl_cb_session = NULL;
1306	return clp;
1307}
1308
1309static void
1310add_to_unconfirmed(struct nfs4_client *clp, unsigned int strhashval)
1311{
1312	unsigned int idhashval;
 
1313
1314	list_add(&clp->cl_strhash, &unconf_str_hashtbl[strhashval]);
1315	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1316	list_add(&clp->cl_idhash, &unconf_id_hashtbl[idhashval]);
1317	renew_client(clp);
1318}
1319
1320static void
1321move_to_confirmed(struct nfs4_client *clp)
1322{
1323	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1324	unsigned int strhashval;
 
 
1325
1326	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
1327	list_move(&clp->cl_idhash, &conf_id_hashtbl[idhashval]);
1328	strhashval = clientstr_hashval(clp->cl_recdir);
1329	list_move(&clp->cl_strhash, &conf_str_hashtbl[strhashval]);
1330	renew_client(clp);
 
1331}
1332
1333static struct nfs4_client *
1334find_confirmed_client(clientid_t *clid)
1335{
1336	struct nfs4_client *clp;
1337	unsigned int idhashval = clientid_hashval(clid->cl_id);
1338
1339	list_for_each_entry(clp, &conf_id_hashtbl[idhashval], cl_idhash) {
1340		if (same_clid(&clp->cl_clientid, clid)) {
1341			renew_client(clp);
 
 
1342			return clp;
1343		}
1344	}
1345	return NULL;
1346}
1347
1348static struct nfs4_client *
1349find_unconfirmed_client(clientid_t *clid)
1350{
1351	struct nfs4_client *clp;
1352	unsigned int idhashval = clientid_hashval(clid->cl_id);
1353
1354	list_for_each_entry(clp, &unconf_id_hashtbl[idhashval], cl_idhash) {
1355		if (same_clid(&clp->cl_clientid, clid))
1356			return clp;
1357	}
1358	return NULL;
1359}
1360
1361static bool clp_used_exchangeid(struct nfs4_client *clp)
1362{
1363	return clp->cl_exchange_flags != 0;
1364} 
1365
1366static struct nfs4_client *
1367find_confirmed_client_by_str(const char *dname, unsigned int hashval)
1368{
1369	struct nfs4_client *clp;
1370
1371	list_for_each_entry(clp, &conf_str_hashtbl[hashval], cl_strhash) {
1372		if (same_name(clp->cl_recdir, dname))
1373			return clp;
1374	}
1375	return NULL;
1376}
1377
1378static struct nfs4_client *
1379find_unconfirmed_client_by_str(const char *dname, unsigned int hashval)
1380{
1381	struct nfs4_client *clp;
1382
1383	list_for_each_entry(clp, &unconf_str_hashtbl[hashval], cl_strhash) {
1384		if (same_name(clp->cl_recdir, dname))
1385			return clp;
1386	}
1387	return NULL;
1388}
1389
1390static void
1391gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
1392{
1393	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
1394	struct sockaddr	*sa = svc_addr(rqstp);
1395	u32 scopeid = rpc_get_scope_id(sa);
1396	unsigned short expected_family;
1397
1398	/* Currently, we only support tcp and tcp6 for the callback channel */
1399	if (se->se_callback_netid_len == 3 &&
1400	    !memcmp(se->se_callback_netid_val, "tcp", 3))
1401		expected_family = AF_INET;
1402	else if (se->se_callback_netid_len == 4 &&
1403		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
1404		expected_family = AF_INET6;
1405	else
1406		goto out_err;
1407
1408	conn->cb_addrlen = rpc_uaddr2sockaddr(&init_net, se->se_callback_addr_val,
1409					    se->se_callback_addr_len,
1410					    (struct sockaddr *)&conn->cb_addr,
1411					    sizeof(conn->cb_addr));
1412
1413	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
1414		goto out_err;
1415
1416	if (conn->cb_addr.ss_family == AF_INET6)
1417		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
1418
1419	conn->cb_prog = se->se_callback_prog;
1420	conn->cb_ident = se->se_callback_ident;
1421	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
 
1422	return;
1423out_err:
1424	conn->cb_addr.ss_family = AF_UNSPEC;
1425	conn->cb_addrlen = 0;
1426	dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
1427		"will not receive delegations\n",
1428		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
1429
1430	return;
1431}
1432
1433/*
1434 * Cache a reply. nfsd4_check_drc_limit() has bounded the cache size.
1435 */
1436void
1437nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
1438{
 
1439	struct nfsd4_slot *slot = resp->cstate.slot;
1440	unsigned int base;
1441
1442	dprintk("--> %s slot %p\n", __func__, slot);
1443
 
1444	slot->sl_opcnt = resp->opcnt;
1445	slot->sl_status = resp->cstate.status;
 
 
1446
1447	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
1448	if (nfsd4_not_cached(resp)) {
1449		slot->sl_datalen = 0;
1450		return;
1451	}
1452	slot->sl_datalen = (char *)resp->p - (char *)resp->cstate.datap;
1453	base = (char *)resp->cstate.datap -
1454					(char *)resp->xbuf->head[0].iov_base;
1455	if (read_bytes_from_xdr_buf(resp->xbuf, base, slot->sl_data,
1456				    slot->sl_datalen))
1457		WARN("%s: sessions DRC could not cache compound\n", __func__);
 
1458	return;
1459}
1460
1461/*
1462 * Encode the replay sequence operation from the slot values.
1463 * If cachethis is FALSE encode the uncached rep error on the next
1464 * operation which sets resp->p and increments resp->opcnt for
1465 * nfs4svc_encode_compoundres.
1466 *
1467 */
1468static __be32
1469nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
1470			  struct nfsd4_compoundres *resp)
1471{
1472	struct nfsd4_op *op;
1473	struct nfsd4_slot *slot = resp->cstate.slot;
1474
1475	/* Encode the replayed sequence operation */
1476	op = &args->ops[resp->opcnt - 1];
1477	nfsd4_encode_operation(resp, op);
1478
1479	/* Return nfserr_retry_uncached_rep in next operation. */
1480	if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
1481		op = &args->ops[resp->opcnt++];
1482		op->status = nfserr_retry_uncached_rep;
1483		nfsd4_encode_operation(resp, op);
1484	}
1485	return op->status;
1486}
1487
1488/*
1489 * The sequence operation is not cached because we can use the slot and
1490 * session values.
1491 */
1492__be32
1493nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
1494			 struct nfsd4_sequence *seq)
1495{
1496	struct nfsd4_slot *slot = resp->cstate.slot;
 
 
1497	__be32 status;
1498
1499	dprintk("--> %s slot %p\n", __func__, slot);
1500
1501	/* Either returns 0 or nfserr_retry_uncached */
1502	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
1503	if (status == nfserr_retry_uncached_rep)
1504		return status;
1505
1506	/* The sequence operation has been encoded, cstate->datap set. */
1507	memcpy(resp->cstate.datap, slot->sl_data, slot->sl_datalen);
1508
1509	resp->opcnt = slot->sl_opcnt;
1510	resp->p = resp->cstate.datap + XDR_QUADLEN(slot->sl_datalen);
1511	status = slot->sl_status;
1512
1513	return status;
1514}
1515
1516/*
1517 * Set the exchange_id flags returned by the server.
1518 */
1519static void
1520nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
1521{
1522	/* pNFS is not supported */
 
 
1523	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
 
1524
1525	/* Referrals are supported, Migration is not. */
1526	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
1527
1528	/* set the wire flags to return to client. */
1529	clid->flags = new->cl_exchange_flags;
1530}
1531
1532static bool client_has_state(struct nfs4_client *clp)
1533{
1534	/*
1535	 * Note clp->cl_openowners check isn't quite right: there's no
1536	 * need to count owners without stateid's.
1537	 *
1538	 * Also note we should probably be using this in 4.0 case too.
1539	 */
1540	return !list_empty(&clp->cl_openowners)
1541		|| !list_empty(&clp->cl_delegations)
1542		|| !list_empty(&clp->cl_sessions);
1543}
1544
1545__be32
1546nfsd4_exchange_id(struct svc_rqst *rqstp,
1547		  struct nfsd4_compound_state *cstate,
1548		  struct nfsd4_exchange_id *exid)
1549{
1550	struct nfs4_client *unconf, *conf, *new;
 
 
1551	__be32 status;
1552	unsigned int		strhashval;
1553	char			dname[HEXDIR_LEN];
1554	char			addr_str[INET6_ADDRSTRLEN];
1555	nfs4_verifier		verf = exid->verifier;
1556	struct sockaddr		*sa = svc_addr(rqstp);
1557	bool	update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
 
1558
1559	rpc_ntop(sa, addr_str, sizeof(addr_str));
1560	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
1561		"ip_addr=%s flags %x, spa_how %d\n",
1562		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
1563		addr_str, exid->flags, exid->spa_how);
1564
1565	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
1566		return nfserr_inval;
1567
1568	/* Currently only support SP4_NONE */
1569	switch (exid->spa_how) {
1570	case SP4_NONE:
1571		break;
1572	case SP4_SSV:
1573		return nfserr_serverfault;
1574	default:
1575		BUG();				/* checked by xdr code */
1576	case SP4_MACH_CRED:
1577		return nfserr_serverfault;	/* no excuse :-/ */
1578	}
1579
1580	status = nfs4_make_rec_clidname(dname, &exid->clname);
1581
1582	if (status)
1583		return status;
1584
1585	strhashval = clientstr_hashval(dname);
1586
1587	/* Cases below refer to rfc 5661 section 18.35.4: */
1588	nfs4_lock_state();
1589	conf = find_confirmed_client_by_str(dname, strhashval);
1590	if (conf) {
1591		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
1592		bool verfs_match = same_verf(&verf, &conf->cl_verifier);
1593
1594		if (update) {
1595			if (!clp_used_exchangeid(conf)) { /* buggy client */
1596				status = nfserr_inval;
1597				goto out;
1598			}
1599			if (!creds_match) { /* case 9 */
1600				status = nfserr_perm;
1601				goto out;
1602			}
1603			if (!verfs_match) { /* case 8 */
1604				status = nfserr_not_same;
1605				goto out;
1606			}
1607			/* case 6 */
1608			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
1609			new = conf;
1610			goto out_copy;
1611		}
1612		if (!creds_match) { /* case 3 */
1613			if (client_has_state(conf)) {
1614				status = nfserr_clid_inuse;
 
1615				goto out;
1616			}
1617			expire_client(conf);
1618			goto out_new;
1619		}
1620		if (verfs_match) { /* case 2 */
1621			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
1622			new = conf;
1623			goto out_copy;
1624		}
1625		/* case 5, client reboot */
 
 
1626		goto out_new;
1627	}
1628
1629	if (update) { /* case 7 */
1630		status = nfserr_noent;
1631		goto out;
1632	}
1633
1634	unconf  = find_unconfirmed_client_by_str(dname, strhashval);
1635	if (unconf) /* case 4, possible retry or client restart */
1636		expire_client(unconf);
1637
1638	/* case 1 (normal case) */
1639out_new:
1640	new = create_client(exid->clname, dname, rqstp, &verf);
1641	if (new == NULL) {
1642		status = nfserr_jukebox;
1643		goto out;
 
1644	}
1645
1646	gen_clid(new);
1647	add_to_unconfirmed(new, strhashval);
1648out_copy:
1649	exid->clientid.cl_boot = new->cl_clientid.cl_boot;
1650	exid->clientid.cl_id = new->cl_clientid.cl_id;
1651
1652	exid->seqid = new->cl_cs_slot.sl_seqid + 1;
1653	nfsd4_set_ex_flags(new, exid);
1654
1655	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
1656		new->cl_cs_slot.sl_seqid, new->cl_exchange_flags);
1657	status = nfs_ok;
1658
1659out:
1660	nfs4_unlock_state();
1661	return status;
1662}
1663
1664static __be32
1665check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
1666{
1667	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
1668		slot_seqid);
1669
1670	/* The slot is in use, and no response has been sent. */
1671	if (slot_inuse) {
1672		if (seqid == slot_seqid)
1673			return nfserr_jukebox;
1674		else
1675			return nfserr_seq_misordered;
1676	}
1677	/* Note unsigned 32-bit arithmetic handles wraparound: */
1678	if (likely(seqid == slot_seqid + 1))
1679		return nfs_ok;
1680	if (seqid == slot_seqid)
1681		return nfserr_replay_cache;
1682	return nfserr_seq_misordered;
1683}
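/*
 * Because the comparison is done in unsigned 32-bit arithmetic, a request
 * carrying seqid 0 when slot_seqid is 0xffffffff is still recognized as
 * "slot_seqid + 1" and accepted as the next request in the sequence.
 */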
1684
1685/*
1686 * Cache the create session result into the create session single DRC
1687 * slot cache by saving the xdr structure. sl_seqid has been set.
1688 * Do this for solo or embedded create session operations.
1689 */
1690static void
1691nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
1692			   struct nfsd4_clid_slot *slot, __be32 nfserr)
1693{
1694	slot->sl_status = nfserr;
1695	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
1696}
1697
1698static __be32
1699nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
1700			    struct nfsd4_clid_slot *slot)
1701{
1702	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
1703	return slot->sl_status;
1704}
1705
1706#define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
1707			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
1708			1 +	/* MIN tag is length with zero, only length */ \
1709			3 +	/* version, opcount, opcode */ \
1710			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
1711				/* seqid, slotID, slotID, cache */ \
1712			4 ) * sizeof(__be32))
1713
1714#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
1715			2 +	/* verifier: AUTH_NULL, length 0 */\
1716			1 +	/* status */ \
1717			1 +	/* MIN tag is length with zero, only length */ \
1718			3 +	/* opcount, opcode, opstatus*/ \
1719			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
1720				/* seqid, slotID, slotID, slotID, status */ \
1721			5 ) * sizeof(__be32))
1722
1723static bool check_forechannel_attrs(struct nfsd4_channel_attrs fchannel)
1724{
1725	return fchannel.maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ
1726		|| fchannel.maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ;
1727}
1728
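/*
 * CREATE_SESSION is replayable through the single slot in cl_cs_slot: a
 * confirmed client may be answered from that cache, an unconfirmed client
 * with matching creds and address is confirmed once its session is created,
 * and an unknown clientid gets nfserr_stale_clientid.  The result is cached
 * under the state lock for possible replay.
 */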
1729__be32
1730nfsd4_create_session(struct svc_rqst *rqstp,
1731		     struct nfsd4_compound_state *cstate,
1732		     struct nfsd4_create_session *cr_ses)
1733{
 
1734	struct sockaddr *sa = svc_addr(rqstp);
1735	struct nfs4_client *conf, *unconf;
 
1736	struct nfsd4_session *new;
 
1737	struct nfsd4_clid_slot *cs_slot = NULL;
1738	bool confirm_me = false;
1739	__be32 status = 0;
 
1740
1741	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
1742		return nfserr_inval;
1743
1744	nfs4_lock_state();
1745	unconf = find_unconfirmed_client(&cr_ses->clientid);
1746	conf = find_confirmed_client(&cr_ses->clientid);
 
1747
1748	if (conf) {
1749		cs_slot = &conf->cl_cs_slot;
1750		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
1751		if (status == nfserr_replay_cache) {
1752			status = nfsd4_replay_create_session(cr_ses, cs_slot);
1753			goto out;
1754		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
1755			status = nfserr_seq_misordered;
1756			goto out;
1757		}
1758	} else if (unconf) {
 
1759		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
1760		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
1761			status = nfserr_clid_inuse;
1762			goto out;
1763		}
1764		cs_slot = &unconf->cl_cs_slot;
1765		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
1766		if (status) {
1767			/* an unconfirmed replay returns misordered */
1768			status = nfserr_seq_misordered;
1769			goto out;
1770		}
1771		confirm_me = true;
1772		conf = unconf;
1773	} else {
1774		status = nfserr_stale_clientid;
1775		goto out;
1776	}
1777
1778	/*
1779	 * XXX: we should probably set this at creation time, and check
1780	 * for consistent minorversion use throughout:
1781	 */
1782	conf->cl_minorversion = 1;
1783	/*
1784	 * We do not support RDMA or persistent sessions
1785	 */
1786	cr_ses->flags &= ~SESSION4_PERSIST;
 
1787	cr_ses->flags &= ~SESSION4_RDMA;
1788
1789	status = nfserr_toosmall;
1790	if (check_forechannel_attrs(cr_ses->fore_channel))
1791		goto out;
1792
1793	status = nfserr_jukebox;
1794	new = alloc_init_session(rqstp, conf, cr_ses);
1795	if (!new)
1796		goto out;
1797	status = nfs_ok;
1798	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
1799	       NFS4_MAX_SESSIONID_LEN);
1800	memcpy(&cr_ses->fore_channel, &new->se_fchannel,
1801		sizeof(struct nfsd4_channel_attrs));
1802	cs_slot->sl_seqid++;
1803	cr_ses->seqid = cs_slot->sl_seqid;
1804
1805	/* cache solo and embedded create sessions under the state lock */
1806	nfsd4_cache_create_session(cr_ses, cs_slot, status);
1807	if (confirm_me) {
1808		unsigned int hash = clientstr_hashval(unconf->cl_recdir);
1809		struct nfs4_client *old =
1810			find_confirmed_client_by_str(conf->cl_recdir, hash);
1811		if (old)
1812			expire_client(old);
1813		move_to_confirmed(conf);
1814	}
1815out:
1816	nfs4_unlock_state();
1817	dprintk("%s returns %d\n", __func__, ntohl(status));
1818	return status;
1819}
1820
1821static bool nfsd4_last_compound_op(struct svc_rqst *rqstp)
1822{
1823	struct nfsd4_compoundres *resp = rqstp->rq_resp;
1824	struct nfsd4_compoundargs *argp = rqstp->rq_argp;
1825
1826	return argp->opcnt == resp->opcnt;
1827}
1828
1829static __be32 nfsd4_map_bcts_dir(u32 *dir)
1830{
1831	switch (*dir) {
1832	case NFS4_CDFC4_FORE:
1833	case NFS4_CDFC4_BACK:
1834		return nfs_ok;
1835	case NFS4_CDFC4_FORE_OR_BOTH:
1836	case NFS4_CDFC4_BACK_OR_BOTH:
1837		*dir = NFS4_CDFC4_BOTH;
1838		return nfs_ok;
1839	}
1840	return nfserr_inval;
1841}
1842
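/*
 * BIND_CONN_TO_SESSION must be the only op in its compound.  The session is
 * looked up under client_lock, referenced for the duration of the call, and
 * the current connection is then attached in the direction the client asked
 * for (after nfsd4_map_bcts_dir() has normalized it).
 */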
1843__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
1844		     struct nfsd4_compound_state *cstate,
1845		     struct nfsd4_bind_conn_to_session *bcts)
1846{
 
1847	__be32 status;
1848
1849	if (!nfsd4_last_compound_op(rqstp))
1850		return nfserr_not_only_op;
1851	spin_lock(&client_lock);
1852	cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid);
1853	/* Sorta weird: we only need the refcnt'ing because new_conn acquires
1854	 * client_lock itself: */
1855	if (cstate->session) {
1856		nfsd4_get_session(cstate->session);
1857		atomic_inc(&cstate->session->se_client->cl_refcount);
1858	}
1859	spin_unlock(&client_lock);
1860	if (!cstate->session)
1861		return nfserr_badsession;
1862
1863	status = nfsd4_map_bcts_dir(&bcts->dir);
1864	if (!status)
1865		nfsd4_new_conn(rqstp, cstate->session, bcts->dir);
1866	return status;
1867}
1868
1869static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
1870{
1871	if (!session)
1872		return false;
1873	return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
1874}
1875
1876__be32
1877nfsd4_destroy_session(struct svc_rqst *r,
1878		      struct nfsd4_compound_state *cstate,
1879		      struct nfsd4_destroy_session *sessionid)
1880{
 
1881	struct nfsd4_session *ses;
1882	__be32 status = nfserr_badsession;
1883
1884	/* Notes:
1885	 * - The confirmed nfs4_client->cl_sessionid holds the sessionid being destroyed
1886	 * - Should we return nfserr_back_chan_busy if waiting for
1887	 *   callbacks on to-be-destroyed session?
1888	 * - Do we need to clear any callback info from previous session?
1889	 */
1890
1891	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
 
1892		if (!nfsd4_last_compound_op(r))
1893			return nfserr_not_only_op;
1894	}
1895	dump_sessionid(__func__, &sessionid->sessionid);
1896	spin_lock(&client_lock);
1897	ses = find_in_sessionid_hashtbl(&sessionid->sessionid);
1898	if (!ses) {
1899		spin_unlock(&client_lock);
1900		goto out;
1901	}
1902
1903	unhash_session(ses);
1904	spin_unlock(&client_lock);
1905
1906	nfs4_lock_state();
1907	nfsd4_probe_callback_sync(ses->se_client);
1908	nfs4_unlock_state();
1909
1910	spin_lock(&client_lock);
1911	nfsd4_del_conns(ses);
1912	nfsd4_put_session_locked(ses);
1913	spin_unlock(&client_lock);
1914	status = nfs_ok;
1915out:
1916	dprintk("%s returns %d\n", __func__, ntohl(status));
1917	return status;
1918}
1919
1920static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
1921{
1922	struct nfsd4_conn *c;
1923
1924	list_for_each_entry(c, &s->se_conns, cn_persession) {
1925		if (c->cn_xprt == xpt) {
1926			return c;
1927		}
1928	}
1929	return NULL;
1930}
1931
1932static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
1933{
1934	struct nfs4_client *clp = ses->se_client;
1935	struct nfsd4_conn *c;
 
1936	int ret;
1937
1938	spin_lock(&clp->cl_lock);
1939	c = __nfsd4_find_conn(new->cn_xprt, ses);
1940	if (c) {
1941		spin_unlock(&clp->cl_lock);
1942		free_conn(new);
1943		return;
1944	}
1945	__nfsd4_hash_conn(new, ses);
1946	spin_unlock(&clp->cl_lock);
1947	ret = nfsd4_register_conn(new);
1948	if (ret)
1949		/* oops; xprt is already down: */
1950		nfsd4_conn_lost(&new->cn_xpt_user);
1951	return;
1952}
1953
1954static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
1955{
1956	struct nfsd4_compoundargs *args = rqstp->rq_argp;
1957
1958	return args->opcnt > session->se_fchannel.maxops;
1959}
1960
1961static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
1962				  struct nfsd4_session *session)
1963{
1964	struct xdr_buf *xb = &rqstp->rq_arg;
1965
1966	return xb->len > session->se_fchannel.maxreq_sz;
1967}
1968
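/*
 * SEQUENCE must be the first op of its compound.  After the session, slot
 * and seqid checks, a replay is answered from the slot's cached reply;
 * otherwise the slot seqid is bumped, the slot is marked in use (and
 * CACHETHIS noted) and a reference to the session is held until the rest
 * of the compound has been processed.
 */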
1969__be32
1970nfsd4_sequence(struct svc_rqst *rqstp,
1971	       struct nfsd4_compound_state *cstate,
1972	       struct nfsd4_sequence *seq)
1973{
 
1974	struct nfsd4_compoundres *resp = rqstp->rq_resp;
 
1975	struct nfsd4_session *session;
 
1976	struct nfsd4_slot *slot;
1977	struct nfsd4_conn *conn;
1978	__be32 status;
1979
1980	if (resp->opcnt != 1)
1981		return nfserr_sequence_pos;
1982
1983	/*
1984	 * Will be either used or freed by nfsd4_sequence_check_conn
1985	 * below.
1986	 */
1987	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
1988	if (!conn)
1989		return nfserr_jukebox;
1990
1991	spin_lock(&client_lock);
1992	status = nfserr_badsession;
1993	session = find_in_sessionid_hashtbl(&seq->sessionid);
1994	if (!session)
1995		goto out;
 
1996
1997	status = nfserr_too_many_ops;
1998	if (nfsd4_session_too_many_ops(rqstp, session))
1999		goto out;
2000
2001	status = nfserr_req_too_big;
2002	if (nfsd4_request_too_big(rqstp, session))
2003		goto out;
2004
2005	status = nfserr_badslot;
2006	if (seq->slotid >= session->se_fchannel.maxreqs)
2007		goto out;
2008
2009	slot = session->se_slots[seq->slotid];
2010	dprintk("%s: slotid %d\n", __func__, seq->slotid);
2011
2012	/* We do not negotiate the number of slots yet, so set the
2013	 * maxslots to the session maxreqs, which is used to encode
2014	 * sr_highest_slotid and the sr_target_slotid to maxslots */
2015	seq->maxslots = session->se_fchannel.maxreqs;
2016
2017	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
2018					slot->sl_flags & NFSD4_SLOT_INUSE);
2019	if (status == nfserr_replay_cache) {
2020		status = nfserr_seq_misordered;
2021		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
2022			goto out;
2023		cstate->slot = slot;
2024		cstate->session = session;
 
2025		/* Return the cached reply status and set cstate->status
2026		 * for nfsd4_proc_compound processing */
2027		status = nfsd4_replay_cache_entry(resp, seq);
2028		cstate->status = nfserr_replay_cache;
2029		goto out;
2030	}
2031	if (status)
2032		goto out;
2033
2034	nfsd4_sequence_check_conn(conn, session);
2035	conn = NULL;
 
 
2036
2037	/* Success! bump slot seqid */
2038	slot->sl_seqid = seq->seqid;
2039	slot->sl_flags |= NFSD4_SLOT_INUSE;
2040	if (seq->cachethis)
2041		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
2042	else
2043		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
2044
2045	cstate->slot = slot;
2046	cstate->session = session;
 
2047
2048out:
2049	/* Hold a session reference until done processing the compound. */
2050	if (cstate->session) {
2051		struct nfs4_client *clp = session->se_client;
2052
2053		nfsd4_get_session(cstate->session);
2054		atomic_inc(&clp->cl_refcount);
2055		switch (clp->cl_cb_state) {
2056		case NFSD4_CB_DOWN:
2057			seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
2058			break;
2059		case NFSD4_CB_FAULT:
2060			seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
2061			break;
2062		default:
2063			seq->status_flags = 0;
2064		}
2065	}
2066	kfree(conn);
2067	spin_unlock(&client_lock);
2068	dprintk("%s: return %d\n", __func__, ntohl(status));
2069	return status;
2070}
2071
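/*
 * DESTROY_CLIENTID (see the rfc5661 18.50.3 reference below): refuse with
 * nfserr_clientid_busy if the confirmed client still holds state or backs
 * the session this compound arrived on; otherwise expire whichever record,
 * confirmed or unconfirmed, matches the clientid.
 */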
2072__be32
2073nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
 
 
2074{
2075	struct nfs4_client *conf, *unconf, *clp;
 
 
2076	__be32 status = 0;
 
2077
2078	nfs4_lock_state();
2079	unconf = find_unconfirmed_client(&dc->clientid);
2080	conf = find_confirmed_client(&dc->clientid);
 
2081
2082	if (conf) {
2083		clp = conf;
2084
2085		if (!is_client_expired(conf) && client_has_state(conf)) {
2086			status = nfserr_clientid_busy;
2087			goto out;
2088		}
2089
2090		/* rfc5661 18.50.3 */
2091		if (cstate->session && conf == cstate->session->se_client) {
2092			status = nfserr_clientid_busy;
2093			goto out;
2094		}
2095	} else if (unconf)
2096		clp = unconf;
2097	else {
2098		status = nfserr_stale_clientid;
2099		goto out;
2100	}
2101
2102	expire_client(clp);
2103out:
2104	nfs4_unlock_state();
2105	dprintk("%s return %d\n", __func__, ntohl(status));
 
2106	return status;
2107}
2108
2109__be32
2110nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
 
2111{
 
 
2112	__be32 status = 0;
2113
2114	if (rc->rca_one_fs) {
2115		if (!cstate->current_fh.fh_dentry)
2116			return nfserr_nofilehandle;
2117		/*
2118		 * We don't take advantage of the rca_one_fs case.
2119		 * That's OK, it's optional, we can safely ignore it.
2120		 */
2121		 return nfs_ok;
2122	}
2123
2124	nfs4_lock_state();
2125	status = nfserr_complete_already;
2126	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
2127			     &cstate->session->se_client->cl_flags))
2128		goto out;
2129
2130	status = nfserr_stale_clientid;
2131	if (is_client_expired(cstate->session->se_client))
2132		/*
2133		 * The following error isn't really legal.
2134		 * But we only get here if the client has just explicitly
2135		 * destroyed itself.  Surely it no longer cares what
2136		 * error it gets back on an operation for the dead
2137		 * client.
2138		 */
2139		goto out;
2140
2141	status = nfs_ok;
2142	nfsd4_client_record_create(cstate->session->se_client);
 
 
2143out:
2144	nfs4_unlock_state();
2145	return status;
2146}
2147
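/*
 * SETCLIENTID: hash the client name and walk the rfc 3530 section 14.2.33
 * cases labelled inline below -- a name already confirmed under different
 * creds fails with nfserr_clid_inuse, a probable callback update keeps the
 * existing clientid, and everything else gets a fresh clientid on the
 * unconfirmed table.
 */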
2148__be32
2149nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2150		  struct nfsd4_setclientid *setclid)
2151{
 
2152	struct xdr_netobj 	clname = setclid->se_name;
2153	nfs4_verifier		clverifier = setclid->se_verf;
2154	unsigned int 		strhashval;
2155	struct nfs4_client	*conf, *unconf, *new;
2156	__be32 			status;
2157	char                    dname[HEXDIR_LEN];
2158	
2159	status = nfs4_make_rec_clidname(dname, &clname);
2160	if (status)
2161		return status;
2162
2163	strhashval = clientstr_hashval(dname);
2164
2165	/* Cases below refer to rfc 3530 section 14.2.33: */
2166	nfs4_lock_state();
2167	conf = find_confirmed_client_by_str(dname, strhashval);
2168	if (conf) {
2169		/* case 0: */
2170		status = nfserr_clid_inuse;
2171		if (clp_used_exchangeid(conf))
2172			goto out;
2173		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
2174			char addr_str[INET6_ADDRSTRLEN];
2175			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
2176				 sizeof(addr_str));
2177			dprintk("NFSD: setclientid: string in use by client "
2178				"at %s\n", addr_str);
2179			goto out;
2180		}
2181	}
2182	unconf = find_unconfirmed_client_by_str(dname, strhashval);
2183	if (unconf)
2184		expire_client(unconf);
2185	status = nfserr_jukebox;
2186	new = create_client(clname, dname, rqstp, &clverifier);
2187	if (new == NULL)
2188		goto out;
2189	if (conf && same_verf(&conf->cl_verifier, &clverifier))
2190		/* case 1: probable callback update */
2191		copy_clid(new, conf);
2192	else /* case 4 (new client) or cases 2, 3 (client reboot): */
2193		gen_clid(new);
2194	/*
2195	 * XXX: we should probably set this at creation time, and check
2196	 * for consistent minorversion use throughout:
2197	 */
2198	new->cl_minorversion = 0;
2199	gen_callback(new, setclid, rqstp);
2200	add_to_unconfirmed(new, strhashval);
2201	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
2202	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
2203	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
 
2204	status = nfs_ok;
2205out:
2206	nfs4_unlock_state();
2207	return status;
2208}
2209
2210
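/*
 * SETCLIENTID_CONFIRM, cases from rfc 3530 section 14.2.34 (labelled
 * inline): a matching unconfirmed record either updates the callback of an
 * already-confirmed client (case 1) or is moved to the confirmed table
 * (case 3); without one, a confirmed-only match is a probable retransmit
 * (case 2) and anything else is treated as a stale clientid (case 4).
 */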
2211__be32
2212nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
2213			 struct nfsd4_compound_state *cstate,
2214			 struct nfsd4_setclientid_confirm *setclientid_confirm)
2215{
 
 
2216	struct nfs4_client *conf, *unconf;
 
2217	nfs4_verifier confirm = setclientid_confirm->sc_confirm; 
2218	clientid_t * clid = &setclientid_confirm->sc_clientid;
2219	__be32 status;
 
2220
2221	if (STALE_CLIENTID(clid))
2222		return nfserr_stale_clientid;
2223	nfs4_lock_state();
2224
2225	conf = find_confirmed_client(clid);
2226	unconf = find_unconfirmed_client(clid);
 
2227	/*
2228	 * We try hard to give out unique clientids, so if we get an
2229	 * attempt to confirm the same clientid with a different cred,
2230	 * there's a bug somewhere.  Let's charitably assume it's our
2231	 * bug.
2232	 */
2233	status = nfserr_serverfault;
2234	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
 
2235		goto out;
2236	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
 
 
2237		goto out;
2238	/* cases below refer to rfc 3530 section 14.2.34: */
2239	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
2240		if (conf && !unconf) /* case 2: probable retransmit */
2241			status = nfs_ok;
2242		else /* case 4: client hasn't noticed we rebooted yet? */
2243			status = nfserr_stale_clientid;
2244		goto out;
2245	}
2246	status = nfs_ok;
2247	if (conf) { /* case 1: callback update */
 
 
2248		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
2249		nfsd4_probe_callback(conf);
2250		expire_client(unconf);
2251	} else { /* case 3: normal case; new or rebooted client */
2252		unsigned int hash = clientstr_hashval(unconf->cl_recdir);
2253
2254		conf = find_confirmed_client_by_str(unconf->cl_recdir, hash);
2255		if (conf) {
2256			nfsd4_client_record_remove(conf);
2257			expire_client(conf);
2258		}
2259		move_to_confirmed(unconf);
2260		nfsd4_probe_callback(unconf);
2261	}
2262out:
2263	nfs4_unlock_state();
 
 
2264	return status;
2265}
2266
2267static struct nfs4_file *nfsd4_alloc_file(void)
2268{
2269	return kmem_cache_alloc(file_slab, GFP_KERNEL);
2270}
2271
2272/* OPEN Share state helper functions */
2273static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino)
2274{
2275	unsigned int hashval = file_hashval(ino);
2276
2277	atomic_set(&fp->fi_ref, 1);
2278	INIT_LIST_HEAD(&fp->fi_hash);
 
 
2279	INIT_LIST_HEAD(&fp->fi_stateids);
2280	INIT_LIST_HEAD(&fp->fi_delegations);
2281	fp->fi_inode = igrab(ino);
 
 
2282	fp->fi_had_conflict = false;
2283	fp->fi_lease = NULL;
2284	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
2285	memset(fp->fi_access, 0, sizeof(fp->fi_access));
2286	spin_lock(&recall_lock);
2287	list_add(&fp->fi_hash, &file_hashtbl[hashval]);
2288	spin_unlock(&recall_lock);
2289}
2290
2291static void
2292nfsd4_free_slab(struct kmem_cache **slab)
2293{
2294	if (*slab == NULL)
2295		return;
2296	kmem_cache_destroy(*slab);
2297	*slab = NULL;
2298}
2299
2300void
2301nfsd4_free_slabs(void)
2302{
2303	nfsd4_free_slab(&openowner_slab);
2304	nfsd4_free_slab(&lockowner_slab);
2305	nfsd4_free_slab(&file_slab);
2306	nfsd4_free_slab(&stateid_slab);
2307	nfsd4_free_slab(&deleg_slab);
 
 
2308}
2309
2310int
2311nfsd4_init_slabs(void)
2312{
2313	openowner_slab = kmem_cache_create("nfsd4_openowners",
2314			sizeof(struct nfs4_openowner), 0, 0, NULL);
2315	if (openowner_slab == NULL)
2316		goto out_nomem;
2317	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
2318			sizeof(struct nfs4_lockowner), 0, 0, NULL);
2319	if (lockowner_slab == NULL)
2320		goto out_nomem;
2321	file_slab = kmem_cache_create("nfsd4_files",
2322			sizeof(struct nfs4_file), 0, 0, NULL);
2323	if (file_slab == NULL)
2324		goto out_nomem;
2325	stateid_slab = kmem_cache_create("nfsd4_stateids",
2326			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
2327	if (stateid_slab == NULL)
2328		goto out_nomem;
2329	deleg_slab = kmem_cache_create("nfsd4_delegations",
2330			sizeof(struct nfs4_delegation), 0, 0, NULL);
2331	if (deleg_slab == NULL)
2332		goto out_nomem;
2333	return 0;
2334out_nomem:
2335	nfsd4_free_slabs();
2336	dprintk("nfsd4: out of memory while initializing nfsv4\n");
2337	return -ENOMEM;
2338}
2339
2340void nfs4_free_openowner(struct nfs4_openowner *oo)
 
2341{
2342	kfree(oo->oo_owner.so_owner.data);
2343	kmem_cache_free(openowner_slab, oo);
2344}
2345
2346void nfs4_free_lockowner(struct nfs4_lockowner *lo)
 
2347{
2348	kfree(lo->lo_owner.so_owner.data);
2349	kmem_cache_free(lockowner_slab, lo);
2350}
2351
2352static void init_nfs4_replay(struct nfs4_replay *rp)
2353{
2354	rp->rp_status = nfserr_serverfault;
2355	rp->rp_buflen = 0;
2356	rp->rp_buf = rp->rp_ibuf;
2357}
2358
2359static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
2360{
2361	struct nfs4_stateowner *sop;
2362
2363	sop = kmem_cache_alloc(slab, GFP_KERNEL);
2364	if (!sop)
2365		return NULL;
2366
2367	sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
2368	if (!sop->so_owner.data) {
2369		kmem_cache_free(slab, sop);
2370		return NULL;
2371	}
2372	sop->so_owner.len = owner->len;
2373
2374	INIT_LIST_HEAD(&sop->so_stateids);
2375	sop->so_client = clp;
2376	init_nfs4_replay(&sop->so_replay);
 
2377	return sop;
2378}
2379
2380static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
2381{
2382	list_add(&oo->oo_owner.so_strhash, &ownerstr_hashtbl[strhashval]);
2383	list_add(&oo->oo_perclient, &clp->cl_openowners);
2384}
2385
2386static struct nfs4_openowner *
2387alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) {
2388	struct nfs4_openowner *oo;
2389
2390	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
2391	if (!oo)
2392		return NULL;
 
2393	oo->oo_owner.so_is_open_owner = 1;
2394	oo->oo_owner.so_seqid = open->op_seqid;
2395	oo->oo_flags = NFS4_OO_NEW;
 
 
2396	oo->oo_time = 0;
2397	oo->oo_last_closed_stid = NULL;
2398	INIT_LIST_HEAD(&oo->oo_close_lru);
2399	hash_openowner(oo, clp, strhashval);
2400	return oo;
2401}
2402
2403static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
2404	struct nfs4_openowner *oo = open->op_openowner;
2405	struct nfs4_client *clp = oo->oo_owner.so_client;
 
2406
2407	init_stid(&stp->st_stid, clp, NFS4_OPEN_STID);
2408	INIT_LIST_HEAD(&stp->st_lockowners);
2409	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
2410	list_add(&stp->st_perfile, &fp->fi_stateids);
2411	stp->st_stateowner = &oo->oo_owner;
2412	get_nfs4_file(fp);
2413	stp->st_file = fp;
2414	stp->st_access_bmap = 0;
2415	stp->st_deny_bmap = 0;
2416	set_access(open->op_share_access, stp);
2417	set_deny(open->op_share_deny, stp);
2418	stp->st_openstp = NULL;
2419}
2420
2421static void
2422move_to_close_lru(struct nfs4_openowner *oo)
2423{
2424	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
2425
2426	list_move_tail(&oo->oo_close_lru, &close_lru);
2427	oo->oo_time = get_seconds();
2428}
2429
2430static int
2431same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner,
2432							clientid_t *clid)
2433{
2434	return (sop->so_owner.len == owner->len) &&
2435		0 == memcmp(sop->so_owner.data, owner->data, owner->len) &&
2436		(sop->so_client->cl_clientid.cl_id == clid->cl_id);
2437}
2438
2439static struct nfs4_openowner *
2440find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open)
2441{
2442	struct nfs4_stateowner *so;
2443	struct nfs4_openowner *oo;
2444
2445	list_for_each_entry(so, &ownerstr_hashtbl[hashval], so_strhash) {
2446		if (!so->so_is_open_owner)
2447			continue;
2448		if (same_owner_str(so, &open->op_owner, &open->op_clientid)) {
2449			oo = openowner(so);
2450			renew_client(oo->oo_owner.so_client);
2451			return oo;
2452		}
2453	}
 
2454	return NULL;
2455}
2456
2457/* search file_hashtbl[] for file */
2458static struct nfs4_file *
2459find_file(struct inode *ino)
2460{
2461	unsigned int hashval = file_hashval(ino);
2462	struct nfs4_file *fp;
2463
2464	spin_lock(&recall_lock);
2465	list_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
2466		if (fp->fi_inode == ino) {
2467			get_nfs4_file(fp);
2468			spin_unlock(&recall_lock);
2469			return fp;
2470		}
2471	}
2472	spin_unlock(&recall_lock);
2473	return NULL;
2474}
2475
2476/*
2477 * Called to check deny when READ with all zero stateid or
2478 * WRITE with all zero or all one stateid
2479 */
2480static __be32
2481nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
2482{
2483	struct inode *ino = current_fh->fh_dentry->d_inode;
2484	struct nfs4_file *fp;
2485	struct nfs4_ol_stateid *stp;
2486	__be32 ret;
2487
2488	dprintk("NFSD: nfs4_share_conflict\n");
2489
2490	fp = find_file(ino);
2491	if (!fp)
2492		return nfs_ok;
2493	ret = nfserr_locked;
2494	/* Search for conflicting share reservations */
2495	list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
2496		if (test_deny(deny_type, stp) ||
2497		    test_deny(NFS4_SHARE_DENY_BOTH, stp))
2498			goto out;
2499	}
2500	ret = nfs_ok;
2501out:
2502	put_nfs4_file(fp);
2503	return ret;
2504}
2505
2506static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
2507{
2508	/* We're assuming the state code never drops its reference
2509	 * without first removing the lease.  Since we're in this lease
2510	 * callback (and since the lease code is serialized by the kernel
2511	 * lock) we know the server hasn't removed the lease yet, so we
2512	 * know it's safe to take a reference: */
2513	atomic_inc(&dp->dl_count);
2514
2515	list_add_tail(&dp->dl_recall_lru, &del_recall_lru);
 
2516
2517	/* Only place dl_time is set; protected by lock_flocks() */
2518	dp->dl_time = get_seconds();
2519
2520	nfsd4_cb_recall(dp);
2521}
2522
2523/* Called from break_lease() with lock_flocks() held. */
2524static void nfsd_break_deleg_cb(struct file_lock *fl)
2525{
2526	struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
2527	struct nfs4_delegation *dp;
2528
2529	BUG_ON(!fp);
2530	/* We assume break_lease is only called once per lease: */
2531	BUG_ON(fp->fi_had_conflict);
2532	/*
2533	 * We don't want the locks code to time out the lease for us;
2534	 * we'll remove it ourselves if a delegation isn't returned
2535	 * in time:
2536	 */
2537	fl->fl_break_time = 0;
2538
2539	spin_lock(&recall_lock);
2540	fp->fi_had_conflict = true;
2541	list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
2542		nfsd_break_one_deleg(dp);
2543	spin_unlock(&recall_lock);
2544}
2545
2546static
2547int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
2548{
2549	if (arg & F_UNLCK)
2550		return lease_modify(onlist, arg);
2551	else
2552		return -EAGAIN;
2553}
2554
2555static const struct lock_manager_operations nfsd_lease_mng_ops = {
 
2556	.lm_break = nfsd_break_deleg_cb,
2557	.lm_change = nfsd_change_deleg_cb,
2558};
2559
2560static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
2561{
2562	if (nfsd4_has_session(cstate))
2563		return nfs_ok;
2564	if (seqid == so->so_seqid - 1)
2565		return nfserr_replay_me;
2566	if (seqid == so->so_seqid)
2567		return nfs_ok;
2568	return nfserr_bad_seqid;
2569}
2570
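/*
 * First half of OPEN processing: look up (or allocate) the open owner named
 * in the request, run the open-owner seqid check (a no-op on sessions), and
 * pre-allocate the nfs4_file and stateid that nfsd4_process_open2() may
 * need, so later failures are easier to unwind.
 */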
2571__be32
2572nfsd4_process_open1(struct nfsd4_compound_state *cstate,
2573		    struct nfsd4_open *open)
2574{
2575	clientid_t *clientid = &open->op_clientid;
2576	struct nfs4_client *clp = NULL;
2577	unsigned int strhashval;
2578	struct nfs4_openowner *oo = NULL;
2579	__be32 status;
2580
2581	if (STALE_CLIENTID(&open->op_clientid))
2582		return nfserr_stale_clientid;
2583	/*
2584	 * In case we need it later, after we've already created the
2585	 * file and don't want to risk a further failure:
2586	 */
2587	open->op_file = nfsd4_alloc_file();
2588	if (open->op_file == NULL)
2589		return nfserr_jukebox;
2590
2591	strhashval = ownerstr_hashval(clientid->cl_id, &open->op_owner);
2592	oo = find_openstateowner_str(strhashval, open);
2593	open->op_openowner = oo;
2594	if (!oo) {
2595		clp = find_confirmed_client(clientid);
2596		if (clp == NULL)
2597			return nfserr_expired;
2598		goto new_owner;
2599	}
2600	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
2601		/* Replace unconfirmed owners without checking for replay. */
2602		clp = oo->oo_owner.so_client;
2603		release_openowner(oo);
2604		open->op_openowner = NULL;
2605		goto new_owner;
2606	}
2607	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
2608	if (status)
2609		return status;
2610	clp = oo->oo_owner.so_client;
2611	goto alloc_stateid;
2612new_owner:
2613	oo = alloc_init_open_stateowner(strhashval, clp, open);
2614	if (oo == NULL)
2615		return nfserr_jukebox;
2616	open->op_openowner = oo;
2617alloc_stateid:
2618	open->op_stp = nfs4_alloc_stateid(clp);
2619	if (!open->op_stp)
2620		return nfserr_jukebox;
2621	return nfs_ok;
2622}
2623
2624static inline __be32
2625nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
2626{
2627	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
2628		return nfserr_openmode;
2629	else
2630		return nfs_ok;
2631}
2632
2633static int share_access_to_flags(u32 share_access)
2634{
2635	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
2636}
2637
2638static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
2639{
2640	struct nfs4_stid *ret;
2641
2642	ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
 
2643	if (!ret)
2644		return NULL;
2645	return delegstateid(ret);
2646}
2647
2648static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
2649{
2650	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
2651	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
2652}
2653
2654static __be32
2655nfs4_check_deleg(struct nfs4_client *cl, struct nfs4_file *fp, struct nfsd4_open *open,
2656		struct nfs4_delegation **dp)
2657{
2658	int flags;
2659	__be32 status = nfserr_bad_stateid;
 
2660
2661	*dp = find_deleg_stateid(cl, &open->op_delegate_stateid);
2662	if (*dp == NULL)
2663		goto out;
 
2664	flags = share_access_to_flags(open->op_share_access);
2665	status = nfs4_check_delegmode(*dp, flags);
2666	if (status)
2667		*dp = NULL;
2668out:
2669	if (!nfsd4_is_deleg_cur(open))
2670		return nfs_ok;
2671	if (status)
2672		return status;
2673	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
2674	return nfs_ok;
2675}
2676
2677static __be32
2678nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp)
2679{
2680	struct nfs4_ol_stateid *local;
2681	struct nfs4_openowner *oo = open->op_openowner;
2682
2683	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
2684		/* ignore lock owners */
2685		if (local->st_stateowner->so_is_open_owner == 0)
2686			continue;
2687		/* remember if we have seen this open owner */
2688		if (local->st_stateowner == &oo->oo_owner)
2689			*stpp = local;
2690		/* check for conflicting share reservations */
2691		if (!test_share(local, open))
2692			return nfserr_share_denied;
2693	}
2694	return nfs_ok;
2695}
2696
2697static void nfs4_free_stateid(struct nfs4_ol_stateid *s)
2698{
2699	kmem_cache_free(stateid_slab, s);
2700}
2701
2702static inline int nfs4_access_to_access(u32 nfs4_access)
2703{
2704	int flags = 0;
2705
2706	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
2707		flags |= NFSD_MAY_READ;
2708	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
2709		flags |= NFSD_MAY_WRITE;
2710	return flags;
2711}
2712
2713static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
2714		struct svc_fh *cur_fh, struct nfsd4_open *open)
2715{
2716	__be32 status;
2717	int oflag = nfs4_access_to_omode(open->op_share_access);
2718	int access = nfs4_access_to_access(open->op_share_access);
2719
2720	if (!fp->fi_fds[oflag]) {
2721		status = nfsd_open(rqstp, cur_fh, S_IFREG, access,
2722			&fp->fi_fds[oflag]);
2723		if (status)
2724			return status;
2725	}
2726	nfs4_file_get_access(fp, oflag);
2727
2728	return nfs_ok;
2729}
2730
2731static inline __be32
2732nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
2733		struct nfsd4_open *open)
2734{
2735	struct iattr iattr = {
2736		.ia_valid = ATTR_SIZE,
2737		.ia_size = 0,
2738	};
2739	if (!open->op_truncate)
2740		return 0;
2741	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
2742		return nfserr_inval;
2743	return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
2744}
2745
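/*
 * OPEN upgrade of an existing stateid: take any newly requested file access
 * first, and drop it again if the truncate fails, so the stateid's access
 * and deny bitmaps are only updated once the whole upgrade has succeeded.
 */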
2746static __be32
2747nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
 
2748{
2749	u32 op_share_access = open->op_share_access;
2750	bool new_access;
2751	__be32 status;
2752
2753	new_access = !test_access(op_share_access, stp);
2754	if (new_access) {
2755		status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
2756		if (status)
2757			return status;
2758	}
2759	status = nfsd4_truncate(rqstp, cur_fh, open);
2760	if (status) {
2761		if (new_access) {
2762			int oflag = nfs4_access_to_omode(op_share_access);
2763			nfs4_file_put_access(fp, oflag);
 
 
2764		}
2765		return status;
2766	}
2767	/* remember the open */
2768	set_access(op_share_access, stp);
2769	set_deny(open->op_share_deny, stp);
 
2770
2771	return nfs_ok;
2772}
2773
2774
2775static void
2776nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
2777{
2778	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
2779}
2780
2781/* Should we give out recallable state?: */
2782static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
2783{
2784	if (clp->cl_cb_state == NFSD4_CB_UP)
2785		return true;
2786	/*
2787	 * In the sessions case, since we don't have to establish a
2788	 * separate connection for callbacks, we assume it's OK
2789	 * until we hear otherwise:
2790	 */
2791	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
2792}
2793
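/*
 * A delegation is backed by an FL_LEASE file lock owned by the nfs4_file
 * and managed through nfsd_lease_mng_ops: a read delegation maps to
 * F_RDLCK, a write delegation to F_WRLCK.
 */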
2794static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag)
 
2795{
2796	struct file_lock *fl;
2797
2798	fl = locks_alloc_lock();
2799	if (!fl)
2800		return NULL;
2801	locks_init_lock(fl);
2802	fl->fl_lmops = &nfsd_lease_mng_ops;
2803	fl->fl_flags = FL_LEASE;
2804	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
2805	fl->fl_end = OFFSET_MAX;
2806	fl->fl_owner = (fl_owner_t)(dp->dl_file);
2807	fl->fl_pid = current->tgid;
 
2808	return fl;
2809}
2810
2811static int nfs4_setlease(struct nfs4_delegation *dp, int flag)
 
2812{
2813	struct nfs4_file *fp = dp->dl_file;
2814	struct file_lock *fl;
2815	int status;
 
2816
2817	fl = nfs4_alloc_init_lease(dp, flag);
2818	if (!fl)
2819		return -ENOMEM;
2820	fl->fl_file = find_readable_file(fp);
2821	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
2822	status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
2823	if (status) {
2824		list_del_init(&dp->dl_perclnt);
2825		locks_free_lock(fl);
2826		return -ENOMEM;
2827	}
2828	fp->fi_lease = fl;
2829	fp->fi_deleg_file = fl->fl_file;
2830	get_file(fp->fi_deleg_file);
2831	atomic_set(&fp->fi_delegees, 1);
2832	list_add(&dp->dl_perfile, &fp->fi_delegations);
 
 
2833	return 0;
2834}
2835
2836static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag)
2837{
2838	struct nfs4_file *fp = dp->dl_file;
2839
2840	if (!fp->fi_lease)
2841		return nfs4_setlease(dp, flag);
2842	spin_lock(&recall_lock);
2843	if (fp->fi_had_conflict) {
2844		spin_unlock(&recall_lock);
2845		return -EAGAIN;
2846	}
2847	atomic_inc(&fp->fi_delegees);
2848	list_add(&dp->dl_perfile, &fp->fi_delegations);
2849	spin_unlock(&recall_lock);
2850	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
2851	return 0;
2852}
2853
2854static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
2855{
2856	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
2857	if (status == -EAGAIN)
2858		open->op_why_no_deleg = WND4_CONTENTION;
2859	else {
2860		open->op_why_no_deleg = WND4_RESOURCE;
2861		switch (open->op_deleg_want) {
2862		case NFS4_SHARE_WANT_READ_DELEG:
2863		case NFS4_SHARE_WANT_WRITE_DELEG:
2864		case NFS4_SHARE_WANT_ANY_DELEG:
2865			break;
2866		case NFS4_SHARE_WANT_CANCEL:
2867			open->op_why_no_deleg = WND4_CANCELLED;
2868			break;
2869		case NFS4_SHARE_WANT_NO_DELEG:
2870			BUG();	/* not supposed to get here */
2871		}
2872	}
2873}
2874
2875/*
2876 * Attempt to hand out a delegation.
2877 */
2878static void
2879nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_ol_stateid *stp)
 
2880{
2881	struct nfs4_delegation *dp;
2882	struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner);
 
 
2883	int cb_up;
2884	int status = 0, flag = 0;
2885
2886	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
2887	flag = NFS4_OPEN_DELEGATE_NONE;
2888	open->op_recall = 0;
2889	switch (open->op_claim_type) {
2890		case NFS4_OPEN_CLAIM_PREVIOUS:
2891			if (!cb_up)
2892				open->op_recall = 1;
2893			flag = open->op_delegate_type;
2894			if (flag == NFS4_OPEN_DELEGATE_NONE)
2895				goto out;
2896			break;
2897		case NFS4_OPEN_CLAIM_NULL:
2898			/* Let's not give out any delegations till everyone's
2899			 * had the chance to reclaim theirs.... */
2900			if (locks_in_grace())
2901				goto out;
2902			if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
2903				goto out;
2904			if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
2905				flag = NFS4_OPEN_DELEGATE_WRITE;
2906			else
2907				flag = NFS4_OPEN_DELEGATE_READ;
2908			break;
2909		default:
2910			goto out;
2911	}
2912
2913	dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh, flag);
2914	if (dp == NULL)
2915		goto out_no_deleg;
2916	status = nfs4_set_delegation(dp, flag);
2917	if (status)
2918		goto out_free;
2919
2920	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
2921
2922	dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
2923		STATEID_VAL(&dp->dl_stid.sc_stateid));
2924out:
2925	open->op_delegate_type = flag;
2926	if (flag == NFS4_OPEN_DELEGATE_NONE) {
2927		if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
2928		    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE)
2929			dprintk("NFSD: WARNING: refusing delegation reclaim\n");
2930
2931		/* 4.1 client asking for a delegation? */
2932		if (open->op_deleg_want)
2933			nfsd4_open_deleg_none_ext(open, status);
2934	}
 
2935	return;
2936out_free:
2937	nfs4_put_delegation(dp);
2938out_no_deleg:
2939	flag = NFS4_OPEN_DELEGATE_NONE;
2940	goto out;
2941}
2942
2943static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
2944					struct nfs4_delegation *dp)
2945{
2946	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
2947	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
2948		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
2949		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
2950	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
2951		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
2952		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
2953		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
2954	}
2955	/* Otherwise the client must be confused wanting a delegation
2956	 * it already has, therefore we don't return
2957	 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
2958	 */
2959}
2960
2961/*
2962 * called with nfs4_lock_state() held.
2963 */
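/*
 * Second half of OPEN processing: find or create the nfs4_file for the
 * current filehandle, check share reservations and any delegation stateid
 * presented by the client, open or upgrade the file, and finally try to
 * hand out a delegation before setting the open rflags.
 */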
2964__be32
2965nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
2966{
2967	struct nfsd4_compoundres *resp = rqstp->rq_resp;
2968	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
2969	struct nfs4_file *fp = NULL;
2970	struct inode *ino = current_fh->fh_dentry->d_inode;
2971	struct nfs4_ol_stateid *stp = NULL;
2972	struct nfs4_delegation *dp = NULL;
2973	__be32 status;
 
2974
2975	/*
2976	 * Lookup file; if found, lookup stateid and check open request,
2977	 * and check for delegations in the process of being recalled.
2978	 * If not found, create the nfs4_file struct
2979	 */
2980	fp = find_file(ino);
2981	if (fp) {
2982		if ((status = nfs4_check_open(fp, open, &stp)))
2983			goto out;
2984		status = nfs4_check_deleg(cl, fp, open, &dp);
2985		if (status)
2986			goto out;
 
2987	} else {
 
2988		status = nfserr_bad_stateid;
2989		if (nfsd4_is_deleg_cur(open))
2990			goto out;
2991		status = nfserr_jukebox;
2992		fp = open->op_file;
2993		open->op_file = NULL;
2994		nfsd4_init_file(fp, ino);
 
 
2995	}
2996
2997	/*
2998	 * OPEN the file, or upgrade an existing OPEN.
2999	 * If truncate fails, the OPEN fails.
 
 
3000	 */
3001	if (stp) {
3002		/* Stateid was found, this is an OPEN upgrade */
3003		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
3004		if (status)
 
3005			goto out;
 
3006	} else {
3007		status = nfs4_get_vfs_file(rqstp, fp, current_fh, open);
3008		if (status)
3009			goto out;
3010		stp = open->op_stp;
3011		open->op_stp = NULL;
3012		init_open_stateid(stp, fp, open);
3013		status = nfsd4_truncate(rqstp, current_fh, open);
3014		if (status) {
 
3015			release_open_stateid(stp);
 
3016			goto out;
3017		}
3018	}
3019	update_stateid(&stp->st_stid.sc_stateid);
3020	memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3021
3022	if (nfsd4_has_session(&resp->cstate)) {
3023		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
3024
 
3025		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
3026			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3027			open->op_why_no_deleg = WND4_NOT_WANTED;
3028			goto nodeleg;
3029		}
3030	}
3031
3032	/*
3033	 * Attempt to hand out a delegation. No error return, because the
3034	 * OPEN succeeds even if we fail.
3035	 */
3036	nfs4_open_delegation(current_fh, open, stp);
3037nodeleg:
3038	status = nfs_ok;
3039
3040	dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
3041		STATEID_VAL(&stp->st_stid.sc_stateid));
3042out:
3043	/* 4.1 client trying to upgrade/downgrade delegation? */
3044	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
3045	    open->op_deleg_want)
3046		nfsd4_deleg_xgrade_none_ext(open, dp);
3047
3048	if (fp)
3049		put_nfs4_file(fp);
3050	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
3051		nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate));
3052	/*
3053	 * To finish the open response, we just need to set the rflags.
3054	 */
3055	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
3056	if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
3057	    !nfsd4_has_session(&resp->cstate))
 
3058		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
3059
3060	return status;
3061}
3062
3063void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status)
 
3064{
3065	if (open->op_openowner) {
3066		struct nfs4_openowner *oo = open->op_openowner;
3067
3068		if (!list_empty(&oo->oo_owner.so_stateids))
3069			list_del_init(&oo->oo_close_lru);
3070		if (oo->oo_flags & NFS4_OO_NEW) {
3071			if (status) {
3072				release_openowner(oo);
3073				open->op_openowner = NULL;
3074			} else
3075				oo->oo_flags &= ~NFS4_OO_NEW;
3076		}
3077	}
3078	if (open->op_file)
3079		nfsd4_free_file(open->op_file);
3080	if (open->op_stp)
3081		nfs4_free_stateid(open->op_stp);
 
 
3082}
3083
3084__be32
3085nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3086	    clientid_t *clid)
3087{
 
3088	struct nfs4_client *clp;
3089	__be32 status;
 
3090
3091	nfs4_lock_state();
3092	dprintk("process_renew(%08x/%08x): starting\n", 
3093			clid->cl_boot, clid->cl_id);
3094	status = nfserr_stale_clientid;
3095	if (STALE_CLIENTID(clid))
3096		goto out;
3097	clp = find_confirmed_client(clid);
3098	status = nfserr_expired;
3099	if (clp == NULL) {
3100		/* We assume the client took too long to RENEW. */
3101		dprintk("nfsd4_renew: clientid not found!\n");
3102		goto out;
3103	}
3104	status = nfserr_cb_path_down;
3105	if (!list_empty(&clp->cl_delegations)
3106			&& clp->cl_cb_state != NFSD4_CB_UP)
3107		goto out;
3108	status = nfs_ok;
3109out:
3110	nfs4_unlock_state();
3111	return status;
3112}
3113
3114static struct lock_manager nfsd4_manager = {
3115};
3116
3117static bool grace_ended;
3118
3119static void
3120nfsd4_end_grace(void)
3121{
3122	/* do nothing if grace period already ended */
3123	if (grace_ended)
3124		return;
3125
3126	dprintk("NFSD: end of grace period\n");
3127	grace_ended = true;
3128	nfsd4_record_grace_done(&init_net, boot_time);
3129	locks_end_grace(&nfsd4_manager);
3130	/*
3131	 * Now that every NFSv4 client has had the chance to recover and
3132	 * to see the (possibly new, possibly shorter) lease time, we
3133	 * can safely set the next grace time to the current lease time:
3134	 */
3135	nfsd4_grace = nfsd4_lease;
 
 
3136}
3137
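/*
 * The laundromat walks the client, delegation-recall and close LRU lists,
 * expiring anything that has been idle for longer than the lease time, and
 * returns the delay in seconds before laundromat_main() should run it again
 * (never less than NFSD_LAUNDROMAT_MINTIMEOUT).
 */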
3138static time_t
3139nfs4_laundromat(void)
3140{
3141	struct nfs4_client *clp;
3142	struct nfs4_openowner *oo;
3143	struct nfs4_delegation *dp;
3144	struct list_head *pos, *next, reaplist;
3145	time_t cutoff = get_seconds() - nfsd4_lease;
3146	time_t t, clientid_val = nfsd4_lease;
3147	time_t u, test_val = nfsd4_lease;
3148
3149	nfs4_lock_state();
3150
3151	dprintk("NFSD: laundromat service - starting\n");
3152	nfsd4_end_grace();
3153	INIT_LIST_HEAD(&reaplist);
3154	spin_lock(&client_lock);
3155	list_for_each_safe(pos, next, &client_lru) {
3156		clp = list_entry(pos, struct nfs4_client, cl_lru);
3157		if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
3158			t = clp->cl_time - cutoff;
3159			if (clientid_val > t)
3160				clientid_val = t;
3161			break;
3162		}
3163		if (atomic_read(&clp->cl_refcount)) {
3164			dprintk("NFSD: client in use (clientid %08x)\n",
3165				clp->cl_clientid.cl_id);
3166			continue;
3167		}
3168		unhash_client_locked(clp);
3169		list_add(&clp->cl_lru, &reaplist);
3170	}
3171	spin_unlock(&client_lock);
3172	list_for_each_safe(pos, next, &reaplist) {
3173		clp = list_entry(pos, struct nfs4_client, cl_lru);
3174		dprintk("NFSD: purging unused client (clientid %08x)\n",
3175			clp->cl_clientid.cl_id);
3176		nfsd4_client_record_remove(clp);
3177		expire_client(clp);
3178	}
3179	spin_lock(&recall_lock);
3180	list_for_each_safe(pos, next, &del_recall_lru) {
3181		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
3182		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
3183			u = dp->dl_time - cutoff;
3184			if (test_val > u)
3185				test_val = u;
3186			break;
3187		}
3188		list_move(&dp->dl_recall_lru, &reaplist);
3189	}
3190	spin_unlock(&recall_lock);
3191	list_for_each_safe(pos, next, &reaplist) {
3192		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
3193		unhash_delegation(dp);
 
 
3194	}
3195	test_val = nfsd4_lease;
3196	list_for_each_safe(pos, next, &close_lru) {
3197		oo = container_of(pos, struct nfs4_openowner, oo_close_lru);
3198		if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) {
3199			u = oo->oo_time - cutoff;
3200			if (test_val > u)
3201				test_val = u;
3202			break;
3203		}
3204		release_openowner(oo);
3205	}
3206	if (clientid_val < NFSD_LAUNDROMAT_MINTIMEOUT)
3207		clientid_val = NFSD_LAUNDROMAT_MINTIMEOUT;
3208	nfs4_unlock_state();
3209	return clientid_val;
3210}
3211
3212static struct workqueue_struct *laundry_wq;
3213static void laundromat_main(struct work_struct *);
3214static DECLARE_DELAYED_WORK(laundromat_work, laundromat_main);
3215
3216static void
3217laundromat_main(struct work_struct *not_used)
3218{
3219	time_t t;
3220
3221	t = nfs4_laundromat();
3222	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
3223	queue_delayed_work(laundry_wq, &laundromat_work, t*HZ);
3224}
3225
3226static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
 
3227{
3228	if (fhp->fh_dentry->d_inode != stp->st_file->fi_inode)
3229		return nfserr_bad_stateid;
3230	return nfs_ok;
 
3231}
3232
3233static int
3234STALE_STATEID(stateid_t *stateid)
3235{
3236	if (stateid->si_opaque.so_clid.cl_boot == boot_time)
3237		return 0;
3238	dprintk("NFSD: stale stateid " STATEID_FMT "!\n",
3239		STATEID_VAL(stateid));
3240	return 1;
3241}
3242
3243static inline int
3244access_permit_read(struct nfs4_ol_stateid *stp)
3245{
3246	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
3247		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
3248		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
 
 
3249}
3250
3251static inline int
3252access_permit_write(struct nfs4_ol_stateid *stp)
3253{
3254	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
3255		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
 
3256}
3257
3258static
3259__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
3260{
3261        __be32 status = nfserr_openmode;
3262
3263	/* For lock stateids, we test the parent open, not the lock: */
3264	if (stp->st_openstp)
3265		stp = stp->st_openstp;
3266	if ((flags & WR_STATE) && !access_permit_write(stp))
3267                goto out;
3268	if ((flags & RD_STATE) && !access_permit_read(stp))
3269                goto out;
3270	status = nfs_ok;
3271out:
3272	return status;
3273}
3274
3275static inline __be32
3276check_special_stateids(svc_fh *current_fh, stateid_t *stateid, int flags)
3277{
3278	if (ONE_STATEID(stateid) && (flags & RD_STATE))
3279		return nfs_ok;
3280	else if (locks_in_grace()) {
3281		/* Answer in remaining cases depends on existence of
3282		 * conflicting state; so we must wait out the grace period. */
3283		return nfserr_grace;
3284	} else if (flags & WR_STATE)
3285		return nfs4_share_conflict(current_fh,
3286				NFS4_SHARE_DENY_WRITE);
3287	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
3288		return nfs4_share_conflict(current_fh,
3289				NFS4_SHARE_DENY_READ);
3290}
3291
3292/*
3293 * Allow READ/WRITE during grace period on recovered state only for files
3294 * that are not able to provide mandatory locking.
3295 */
3296static inline int
3297grace_disallows_io(struct inode *inode)
3298{
3299	return locks_in_grace() && mandatory_lock(inode);
3300}
3301
3302/* Returns true iff a is later than b: */
3303static bool stateid_generation_after(stateid_t *a, stateid_t *b)
3304{
3305	return (s32)a->si_generation - (s32)b->si_generation > 0;
3306}
3307
3308static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
3309{
3310	/*
3311	 * When sessions are used the stateid generation number is ignored
3312	 * when it is zero.
3313	 */
3314	if (has_session && in->si_generation == 0)
3315		return nfs_ok;
3316
3317	if (in->si_generation == ref->si_generation)
3318		return nfs_ok;
3319
3320	/* If the client sends us a stateid from the future, it's buggy: */
3321	if (stateid_generation_after(in, ref))
3322		return nfserr_bad_stateid;
3323	/*
3324	 * However, we could see a stateid from the past, even from a
3325	 * non-buggy client.  For example, if the client sends a lock
3326	 * while some IO is outstanding, the lock may bump si_generation
3327	 * while the IO is still in flight.  The client could avoid that
3328	 * situation by waiting for responses on all the IO requests,
3329	 * but better performance may result in retrying IO that
3330	 * receives an old_stateid error if requests are rarely
3331	 * reordered in flight:
3332	 */
3333	return nfserr_old_stateid;
3334}
3335
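/*
 * Used by TEST_STATEID (nfsd4_test_stateid() below) to report the status of
 * each stateid in the request without otherwise acting on it.
 */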
3336__be32 nfs4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
3337{
3338	struct nfs4_stid *s;
3339	struct nfs4_ol_stateid *ols;
3340	__be32 status;
3341
3342	if (STALE_STATEID(stateid))
3343		return nfserr_stale_stateid;
3344
3345	s = find_stateid(cl, stateid);
3346	if (!s)
3347		 return nfserr_stale_stateid;
3348	status = check_stateid_generation(stateid, &s->sc_stateid, 1);
3349	if (status)
3350		return status;
3351	if (!(s->sc_type & (NFS4_OPEN_STID | NFS4_LOCK_STID)))
3352		return nfs_ok;
3353	ols = openlockstateid(s);
3354	if (ols->st_stateowner->so_is_open_owner
3355	    && !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
3356		return nfserr_bad_stateid;
3357	return nfs_ok;
3358}
3359
3360static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, struct nfs4_stid **s)
3361{
3362	struct nfs4_client *cl;
3363
3364	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3365		return nfserr_bad_stateid;
3366	if (STALE_STATEID(stateid))
3367		return nfserr_stale_stateid;
3368	cl = find_confirmed_client(&stateid->si_opaque.so_clid);
3369	if (!cl)
3370		return nfserr_expired;
3371	*s = find_stateid_by_type(cl, stateid, typemask);
3372	if (!*s)
3373		return nfserr_bad_stateid;
 
 
3374	return nfs_ok;
 
3375
3376}
3377
3378/*
3379	 * Checks for stateid operations
3380	 */
3381__be32
3382nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
3383			   stateid_t *stateid, int flags, struct file **filpp)
3384{
3385	struct nfs4_stid *s;
3386	struct nfs4_ol_stateid *stp = NULL;
3387	struct nfs4_delegation *dp = NULL;
3388	struct svc_fh *current_fh = &cstate->current_fh;
3389	struct inode *ino = current_fh->fh_dentry->d_inode;
3390	__be32 status;
 
 
3391
3392	if (filpp)
3393		*filpp = NULL;
 
3394
3395	if (grace_disallows_io(ino))
3396		return nfserr_grace;
3397
3398	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3399		return check_special_stateids(current_fh, stateid, flags);
 
 
3400
3401	status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID, &s);
3402	if (status)
3403		return status;
3404	status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
 
3405	if (status)
3406		goto out;
 
3407	switch (s->sc_type) {
3408	case NFS4_DELEG_STID:
3409		dp = delegstateid(s);
3410		status = nfs4_check_delegmode(dp, flags);
3411		if (status)
3412			goto out;
3413		if (filpp) {
3414			*filpp = dp->dl_file->fi_deleg_file;
3415			BUG_ON(!*filpp);
3416		}
3417		break;
3418	case NFS4_OPEN_STID:
3419	case NFS4_LOCK_STID:
3420		stp = openlockstateid(s);
3421		status = nfs4_check_fh(current_fh, stp);
3422		if (status)
3423			goto out;
3424		if (stp->st_stateowner->so_is_open_owner
3425		    && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
3426			goto out;
3427		status = nfs4_check_openmode(stp, flags);
3428		if (status)
3429			goto out;
3430		if (filpp) {
3431			if (flags & RD_STATE)
3432				*filpp = find_readable_file(stp->st_file);
3433			else
3434				*filpp = find_writeable_file(stp->st_file);
3435		}
3436		break;
3437	default:
3438		return nfserr_bad_stateid;
 
3439	}
3440	status = nfs_ok;
3441out:
3442	return status;
3443}
3444
3445static __be32
3446nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
3447{
3448	if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
3449		return nfserr_locks_held;
3450	release_lock_stateid(stp);
3451	return nfs_ok;
3452}
3453
3454/*
3455 * Test if the stateid is valid
3456 */
3457__be32
3458nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3459		   struct nfsd4_test_stateid *test_stateid)
3460{
 
3461	struct nfsd4_test_stateid_id *stateid;
3462	struct nfs4_client *cl = cstate->session->se_client;
3463
3464	nfs4_lock_state();
3465	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
3466		stateid->ts_id_status = nfs4_validate_stateid(cl, &stateid->ts_id_stateid);
3467	nfs4_unlock_state();
3468
3469	return nfs_ok;
3470}
3471
3472__be32
3473nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3474		   struct nfsd4_free_stateid *free_stateid)
3475{
 
3476	stateid_t *stateid = &free_stateid->fr_stateid;
3477	struct nfs4_stid *s;
3478	struct nfs4_client *cl = cstate->session->se_client;
 
3479	__be32 ret = nfserr_bad_stateid;
3480
3481	nfs4_lock_state();
3482	s = find_stateid(cl, stateid);
3483	if (!s)
3484		goto out;
 
3485	switch (s->sc_type) {
3486	case NFS4_DELEG_STID:
3487		ret = nfserr_locks_held;
3488		goto out;
3489	case NFS4_OPEN_STID:
3490	case NFS4_LOCK_STID:
3491		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
3492		if (ret)
3493			goto out;
3494		if (s->sc_type == NFS4_LOCK_STID)
3495			ret = nfsd4_free_lock_stateid(openlockstateid(s));
3496		else
3497			ret = nfserr_locks_held;
3498		break;
3499	default:
3500		ret = nfserr_bad_stateid;
3501	}
3502out:
3503	nfs4_unlock_state();
3504	return ret;
3505}
3506
3507static inline int
3508setlkflg (int type)
3509{
3510	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
3511		RD_STATE : WR_STATE;
3512}
3513
3514static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
3515{
3516	struct svc_fh *current_fh = &cstate->current_fh;
3517	struct nfs4_stateowner *sop = stp->st_stateowner;
3518	__be32 status;
3519
3520	status = nfsd4_check_seqid(cstate, sop, seqid);
3521	if (status)
3522		return status;
3523	if (stp->st_stid.sc_type == NFS4_CLOSED_STID)
3524		/*
3525		 * "Closed" stateid's exist *only* to return
3526		 * nfserr_replay_me from the previous step.
3527		 */
3528		return nfserr_bad_stateid;
3529	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
3530	if (status)
3531		return status;
3532	return nfs4_check_fh(current_fh, stp);
3533}
3534
3535/* 
3536 * Checks for sequence id mutating operations. 
3537 */
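/*
 * Looks up the open/lock stateid named by the client, records its
 * owner in cstate->replay_owner for seqid replay handling, and checks
 * the seqid, stateid generation and current filehandle.
 */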
3538static __be32
3539nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
3540			 stateid_t *stateid, char typemask,
3541			 struct nfs4_ol_stateid **stpp)
 
3542{
3543	__be32 status;
3544	struct nfs4_stid *s;
 
3545
3546	dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
3547		seqid, STATEID_VAL(stateid));
3548
3549	*stpp = NULL;
3550	status = nfsd4_lookup_stateid(stateid, typemask, &s);
3551	if (status)
3552		return status;
3553	*stpp = openlockstateid(s);
3554	cstate->replay_owner = (*stpp)->st_stateowner;
3555
3556	return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp);
3557}
3558
3559static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, stateid_t *stateid, struct nfs4_ol_stateid **stpp)
 
3560{
3561	__be32 status;
3562	struct nfs4_openowner *oo;
 
3563
3564	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
3565						NFS4_OPEN_STID, stpp);
3566	if (status)
3567		return status;
3568	oo = openowner((*stpp)->st_stateowner);
3569	if (!(oo->oo_flags & NFS4_OO_CONFIRMED))
3570		return nfserr_bad_stateid;
3571	return nfs_ok;
3572}
3573
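/*
 * OPEN_CONFIRM (NFSv4.0 only): marks the openowner as confirmed and
 * bumps the open stateid; confirming an already-confirmed owner fails
 * with nfserr_bad_stateid.
 */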
3574__be32
3575nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3576		   struct nfsd4_open_confirm *oc)
3577{
 
3578	__be32 status;
3579	struct nfs4_openowner *oo;
3580	struct nfs4_ol_stateid *stp;
 
3581
3582	dprintk("NFSD: nfsd4_open_confirm on file %.*s\n",
3583			(int)cstate->current_fh.fh_dentry->d_name.len,
3584			cstate->current_fh.fh_dentry->d_name.name);
3585
3586	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
3587	if (status)
3588		return status;
3589
3590	nfs4_lock_state();
3591
3592	status = nfs4_preprocess_seqid_op(cstate,
3593					oc->oc_seqid, &oc->oc_req_stateid,
3594					NFS4_OPEN_STID, &stp);
3595	if (status)
3596		goto out;
3597	oo = openowner(stp->st_stateowner);
3598	status = nfserr_bad_stateid;
3599	if (oo->oo_flags & NFS4_OO_CONFIRMED)
3600		goto out;
3601	oo->oo_flags |= NFS4_OO_CONFIRMED;
3602	update_stateid(&stp->st_stid.sc_stateid);
3603	memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3604	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
3605		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
3606
3607	nfsd4_client_record_create(oo->oo_owner.so_client);
3608	status = nfs_ok;
3609out:
3610	if (!cstate->replay_owner)
3611		nfs4_unlock_state();
3612	return status;
3613}
3614
3615static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
3616{
3617	if (!test_access(access, stp))
3618		return;
3619	nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(access));
3620	clear_access(access, stp);
3621}
3622
3623static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
3624{
3625	switch (to_access) {
3626	case NFS4_SHARE_ACCESS_READ:
3627		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
3628		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
3629		break;
3630	case NFS4_SHARE_ACCESS_WRITE:
3631		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
3632		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
3633		break;
3634	case NFS4_SHARE_ACCESS_BOTH:
3635		break;
3636	default:
3637		BUG();
3638	}
3639}
3640
3641static void
3642reset_union_bmap_deny(unsigned long deny, struct nfs4_ol_stateid *stp)
3643{
3644	int i;
3645	for (i = 0; i < 4; i++) {
3646		if ((i & deny) != i)
3647			clear_deny(i, stp);
3648	}
3649}
3650
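/*
 * OPEN_DOWNGRADE: the requested access and deny bits must be a subset
 * of those already associated with the stateid; the excess access is
 * dropped and any deny bits outside the request are cleared.
 */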
3651__be32
3652nfsd4_open_downgrade(struct svc_rqst *rqstp,
3653		     struct nfsd4_compound_state *cstate,
3654		     struct nfsd4_open_downgrade *od)
3655{
 
3656	__be32 status;
3657	struct nfs4_ol_stateid *stp;
 
3658
3659	dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n", 
3660			(int)cstate->current_fh.fh_dentry->d_name.len,
3661			cstate->current_fh.fh_dentry->d_name.name);
3662
3663	/* We don't yet support WANT bits: */
3664	if (od->od_deleg_want)
3665		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
3666			od->od_deleg_want);
3667
3668	nfs4_lock_state();
3669	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
3670					&od->od_stateid, &stp);
3671	if (status)
3672		goto out; 
3673	status = nfserr_inval;
3674	if (!test_access(od->od_share_access, stp)) {
3675		dprintk("NFSD: access not a subset current bitmap: 0x%lx, input access=%08x\n",
3676			stp->st_access_bmap, od->od_share_access);
3677		goto out;
3678	}
3679	if (!test_deny(od->od_share_deny, stp)) {
3680		dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n",
3681			stp->st_deny_bmap, od->od_share_deny);
3682		goto out;
3683	}
3684	nfs4_stateid_downgrade(stp, od->od_share_access);
3685
3686	reset_union_bmap_deny(od->od_share_deny, stp);
3687
3688	update_stateid(&stp->st_stid.sc_stateid);
3689	memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3690	status = nfs_ok;
3691out:
3692	if (!cstate->replay_owner)
3693		nfs4_unlock_state();
3694	return status;
3695}
3696
3697void nfsd4_purge_closed_stateid(struct nfs4_stateowner *so)
3698{
3699	struct nfs4_openowner *oo;
3700	struct nfs4_ol_stateid *s;
3701
3702	if (!so->so_is_open_owner)
3703		return;
3704	oo = openowner(so);
3705	s = oo->oo_last_closed_stid;
3706	if (!s)
3707		return;
3708	if (!(oo->oo_flags & NFS4_OO_PURGE_CLOSE)) {
3709		/* Release the last_closed_stid on the next seqid bump: */
3710		oo->oo_flags |= NFS4_OO_PURGE_CLOSE;
3711		return;
3712	}
3713	oo->oo_flags &= ~NFS4_OO_PURGE_CLOSE;
3714	release_last_closed_stateid(oo);
3715}
3716
3717static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
3718{
3719	unhash_open_stateid(s);
3720	s->st_stid.sc_type = NFS4_CLOSED_STID;
3721}
3722
3723/*
3724 * nfs4_unlock_state() called after encode
3725 */
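/*
 * CLOSE: the stateid is unhashed but kept (as oo_last_closed_stid,
 * type NFS4_CLOSED_STID) so that a retransmitted CLOSE can be
 * replayed; openowners with no remaining stateids go on the close LRU.
 */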
3726__be32
3727nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3728	    struct nfsd4_close *close)
3729{
 
3730	__be32 status;
3731	struct nfs4_openowner *oo;
3732	struct nfs4_ol_stateid *stp;
3733
3734	dprintk("NFSD: nfsd4_close on file %.*s\n", 
3735			(int)cstate->current_fh.fh_dentry->d_name.len,
3736			cstate->current_fh.fh_dentry->d_name.name);
3737
3738	nfs4_lock_state();
3739	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
3740					&close->cl_stateid,
3741					NFS4_OPEN_STID|NFS4_CLOSED_STID,
3742					&stp);
 
3743	if (status)
3744		goto out; 
3745	oo = openowner(stp->st_stateowner);
3746	status = nfs_ok;
3747	update_stateid(&stp->st_stid.sc_stateid);
3748	memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3749
3750	nfsd4_close_open_stateid(stp);
3751	oo->oo_last_closed_stid = stp;
3752
3753	/* place unused nfs4_stateowners on so_close_lru list to be
3754	 * released by the laundromat service after the lease period
3755	 * to enable us to handle CLOSE replay
3756	 */
3757	if (list_empty(&oo->oo_owner.so_stateids))
3758		move_to_close_lru(oo);
3759out:
3760	if (!cstate->replay_owner)
3761		nfs4_unlock_state();
3762	return status;
3763}
3764
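/*
 * DELEGRETURN: look up the delegation stateid, verify its generation,
 * and unhash the delegation.
 */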
3765__be32
3766nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3767		  struct nfsd4_delegreturn *dr)
3768{
 
3769	struct nfs4_delegation *dp;
3770	stateid_t *stateid = &dr->dr_stateid;
3771	struct nfs4_stid *s;
3772	struct inode *inode;
3773	__be32 status;
 
3774
3775	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
3776		return status;
3777	inode = cstate->current_fh.fh_dentry->d_inode;
3778
3779	nfs4_lock_state();
3780	status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID, &s);
3781	if (status)
3782		goto out;
3783	dp = delegstateid(s);
3784	status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
3785	if (status)
3786		goto out;
3787
3788	unhash_delegation(dp);
3789out:
3790	nfs4_unlock_state();
3791
3792	return status;
3793}
3794
3795
3796#define LOFF_OVERFLOW(start, len)      ((u64)(len) > ~(u64)(start))
3797
3798#define LOCKOWNER_INO_HASH_BITS 8
3799#define LOCKOWNER_INO_HASH_SIZE (1 << LOCKOWNER_INO_HASH_BITS)
3800#define LOCKOWNER_INO_HASH_MASK (LOCKOWNER_INO_HASH_SIZE - 1)
3801
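/*
 * Byte-range helpers: a length of NFS4_MAX_UINT64 means "to end of
 * file", and any offset+length that would wrap is clamped to
 * NFS4_MAX_UINT64.
 */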
3802static inline u64
3803end_offset(u64 start, u64 len)
3804{
3805	u64 end;
3806
3807	end = start + len;
3808	return end >= start ? end: NFS4_MAX_UINT64;
3809}
3810
3811/* last octet in a range */
3812static inline u64
3813last_byte_offset(u64 start, u64 len)
3814{
3815	u64 end;
3816
3817	BUG_ON(!len);
3818	end = start + len;
3819	return end > start ? end - 1: NFS4_MAX_UINT64;
3820}
3821
3822static unsigned int lockowner_ino_hashval(struct inode *inode, u32 cl_id, struct xdr_netobj *ownername)
3823{
3824	return (file_hashval(inode) + cl_id
3825			+ opaque_hashval(ownername->data, ownername->len))
3826		& LOCKOWNER_INO_HASH_MASK;
3827}
3828
3829static struct list_head lockowner_ino_hashtbl[LOCKOWNER_INO_HASH_SIZE];
3830
3831/*
3832 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
3833 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
3834 * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
3835 * locking, this prevents us from being completely protocol-compliant.  The
3836 * real solution to this problem is to start using unsigned file offsets in
3837 * the VFS, but this is a very deep change!
3838 */
3839static inline void
3840nfs4_transform_lock_offset(struct file_lock *lock)
3841{
3842	if (lock->fl_start < 0)
3843		lock->fl_start = OFFSET_MAX;
3844	if (lock->fl_end < 0)
3845		lock->fl_end = OFFSET_MAX;
3846}
3847
3848/* Hack!: For now, we're defining this just so we can use a pointer to it
3849 * as a unique cookie to identify our (NFSv4's) posix locks. */
3850static const struct lock_manager_operations nfsd_posix_mng_ops  = {
3851};
3852
3853static inline void
3854nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
3855{
3856	struct nfs4_lockowner *lo;
3857
3858	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
3859		lo = (struct nfs4_lockowner *) fl->fl_owner;
3860		deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
3861					lo->lo_owner.so_owner.len, GFP_KERNEL);
3862		if (!deny->ld_owner.data)
3863			/* We just don't care that much */
3864			goto nevermind;
3865		deny->ld_owner.len = lo->lo_owner.so_owner.len;
3866		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
3867	} else {
3868nevermind:
3869		deny->ld_owner.len = 0;
3870		deny->ld_owner.data = NULL;
3871		deny->ld_clientid.cl_boot = 0;
3872		deny->ld_clientid.cl_id = 0;
3873	}
3874	deny->ld_start = fl->fl_start;
3875	deny->ld_length = NFS4_MAX_UINT64;
3876	if (fl->fl_end != NFS4_MAX_UINT64)
3877		deny->ld_length = fl->fl_end - fl->fl_start + 1;        
3878	deny->ld_type = NFS4_READ_LT;
3879	if (fl->fl_type != F_RDLCK)
3880		deny->ld_type = NFS4_WRITE_LT;
3881}
3882
3883static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, clientid_t *clid, struct xdr_netobj *owner)
 
3884{
3885	struct nfs4_ol_stateid *lst;
 
3886
3887	if (!same_owner_str(&lo->lo_owner, owner, clid))
3888		return false;
3889	lst = list_first_entry(&lo->lo_owner.so_stateids,
3890			       struct nfs4_ol_stateid, st_perstateowner);
3891	return lst->st_file->fi_inode == inode;
3892}
3893
3894static struct nfs4_lockowner *
3895find_lockowner_str(struct inode *inode, clientid_t *clid,
3896		struct xdr_netobj *owner)
3897{
3898	unsigned int hashval = lockowner_ino_hashval(inode, clid->cl_id, owner);
3899	struct nfs4_lockowner *lo;
3900
3901	list_for_each_entry(lo, &lockowner_ino_hashtbl[hashval], lo_owner_ino_hash) {
3902		if (same_lockowner_ino(lo, inode, clid, owner))
3903			return lo;
3904	}
3905	return NULL;
3906}
3907
3908static void hash_lockowner(struct nfs4_lockowner *lo, unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp)
3909{
3910	struct inode *inode = open_stp->st_file->fi_inode;
3911	unsigned int inohash = lockowner_ino_hashval(inode,
3912			clp->cl_clientid.cl_id, &lo->lo_owner.so_owner);
3913
3914	list_add(&lo->lo_owner.so_strhash, &ownerstr_hashtbl[strhashval]);
3915	list_add(&lo->lo_owner_ino_hash, &lockowner_ino_hashtbl[inohash]);
3916	list_add(&lo->lo_perstateid, &open_stp->st_lockowners);
 
3917}
3918
3919/*
3920 * Alloc a lock owner structure.
3921 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has 
3922 * occurred. 
3923 *
3924 * strhashval = ownerstr_hashval
3925 */
3926
3927static struct nfs4_lockowner *
3928alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock) {
3929	struct nfs4_lockowner *lo;
3930
3931	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
3932	if (!lo)
3933		return NULL;
 
3934	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
3935	lo->lo_owner.so_is_open_owner = 0;
3936	/* It is the openowner seqid that will be incremented in encode in the
3937	 * case of new lockowners; so increment the lock seqid manually: */
3938	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1;
3939	hash_lockowner(lo, strhashval, clp, open_stp);
3940	return lo;
3941}
3942
3943static struct nfs4_ol_stateid *
3944alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct nfs4_ol_stateid *open_stp)
3945{
3946	struct nfs4_ol_stateid *stp;
3947	struct nfs4_client *clp = lo->lo_owner.so_client;
 
3948
3949	stp = nfs4_alloc_stateid(clp);
3950	if (stp == NULL)
3951		return NULL;
3952	init_stid(&stp->st_stid, clp, NFS4_LOCK_STID);
3953	list_add(&stp->st_perfile, &fp->fi_stateids);
3954	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
3955	stp->st_stateowner = &lo->lo_owner;
3956	get_nfs4_file(fp);
3957	stp->st_file = fp;
3958	stp->st_access_bmap = 0;
3959	stp->st_deny_bmap = open_stp->st_deny_bmap;
3960	stp->st_openstp = open_stp;
3961	return stp;
3962}
3963
3964static int
3965check_lock_length(u64 offset, u64 length)
3966{
3967	return ((length == 0)  || ((length != NFS4_MAX_UINT64) &&
3968	     LOFF_OVERFLOW(offset, length)));
3969}
3970
3971static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
3972{
3973	struct nfs4_file *fp = lock_stp->st_file;
3974	int oflag = nfs4_access_to_omode(access);
 
3975
3976	if (test_access(access, lock_stp))
3977		return;
3978	nfs4_file_get_access(fp, oflag);
3979	set_access(access, lock_stp);
3980}
3981
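/*
 * Find an existing lockowner for this (client, owner, inode) triple
 * and reuse its single lock stateid, or allocate a new lockowner and
 * lock stateid.  For NFSv4.0, a "new lockowner" LOCK naming an owner
 * that already exists is a seqid error.
 */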
3982static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new)
3983{
3984	struct nfs4_file *fi = ost->st_file;
 
3985	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
3986	struct nfs4_client *cl = oo->oo_owner.so_client;
 
3987	struct nfs4_lockowner *lo;
 
3988	unsigned int strhashval;
3989
3990	lo = find_lockowner_str(fi->fi_inode, &cl->cl_clientid, &lock->v.new.owner);
3991	if (lo) {
3992		if (!cstate->minorversion)
3993			return nfserr_bad_seqid;
3994		/* XXX: a lockowner always has exactly one stateid: */
3995		*lst = list_first_entry(&lo->lo_owner.so_stateids,
3996				struct nfs4_ol_stateid, st_perstateowner);
3997		return nfs_ok;
3998	}
3999	strhashval = ownerstr_hashval(cl->cl_clientid.cl_id,
4000			&lock->v.new.owner);
4001	lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
4002	if (lo == NULL)
4003		return nfserr_jukebox;
4004	*lst = alloc_init_lock_stateid(lo, fi, ost);
4005	if (*lst == NULL) {
4006		release_lockowner(lo);
4007		return nfserr_jukebox;
4008	}
4009	*new = true;
4010	return nfs_ok;
4011}
4012
4013/*
4014 *  LOCK operation 
4015 */
4016__be32
4017nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4018	   struct nfsd4_lock *lock)
4019{
 
4020	struct nfs4_openowner *open_sop = NULL;
4021	struct nfs4_lockowner *lock_sop = NULL;
4022	struct nfs4_ol_stateid *lock_stp;
4023	struct file *filp = NULL;
4024	struct file_lock file_lock;
4025	struct file_lock conflock;
4026	__be32 status = 0;
4027	bool new_state = false;
4028	int lkflg;
4029	int err;
4030
4031	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
4032		(long long) lock->lk_offset,
4033		(long long) lock->lk_length);
4034
4035	if (check_lock_length(lock->lk_offset, lock->lk_length))
4036		 return nfserr_inval;
4037
4038	if ((status = fh_verify(rqstp, &cstate->current_fh,
4039				S_IFREG, NFSD_MAY_LOCK))) {
4040		dprintk("NFSD: nfsd4_lock: permission denied!\n");
4041		return status;
4042	}
4043
4044	nfs4_lock_state();
4045
4046	if (lock->lk_is_new) {
4047		/*
4048		 * Client indicates that this is a new lockowner.
4049		 * Use open owner and open stateid to create lock owner and
4050		 * lock stateid.
4051		 */
4052		struct nfs4_ol_stateid *open_stp = NULL;
4053
4054		if (nfsd4_has_session(cstate))
4055			/* See rfc 5661 18.10.3: given clientid is ignored: */
4056			memcpy(&lock->v.new.clientid,
4057				&cstate->session->se_client->cl_clientid,
4058				sizeof(clientid_t));
4059
4060		status = nfserr_stale_clientid;
4061		if (STALE_CLIENTID(&lock->lk_new_clientid))
4062			goto out;
4063
4064		/* validate and update open stateid and open seqid */
4065		status = nfs4_preprocess_confirmed_seqid_op(cstate,
4066				        lock->lk_new_open_seqid,
4067		                        &lock->lk_new_open_stateid,
4068					&open_stp);
4069		if (status)
4070			goto out;
 
4071		open_sop = openowner(open_stp->st_stateowner);
4072		status = nfserr_bad_stateid;
4073		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
4074						&lock->v.new.clientid))
4075			goto out;
4076		status = lookup_or_create_lock_state(cstate, open_stp, lock,
4077							&lock_stp, &new_state);
4078		if (status)
4079			goto out;
4080	} else {
4081		/* lock (lock owner + lock stateid) already exists */
4082		status = nfs4_preprocess_seqid_op(cstate,
4083				       lock->lk_old_lock_seqid,
4084				       &lock->lk_old_lock_stateid,
4085				       NFS4_LOCK_STID, &lock_stp);
4086		if (status)
4087			goto out;
4088	}
4089	lock_sop = lockowner(lock_stp->st_stateowner);
4090
4091	lkflg = setlkflg(lock->lk_type);
4092	status = nfs4_check_openmode(lock_stp, lkflg);
4093	if (status)
4094		goto out;
4095
4096	status = nfserr_grace;
4097	if (locks_in_grace() && !lock->lk_reclaim)
4098		goto out;
4099	status = nfserr_no_grace;
4100	if (!locks_in_grace() && lock->lk_reclaim)
4101		goto out;
4102
4103	locks_init_lock(&file_lock);
4104	switch (lock->lk_type) {
4105		case NFS4_READ_LT:
4106		case NFS4_READW_LT:
4107			filp = find_readable_file(lock_stp->st_file);
4108			if (filp)
4109				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
4110			file_lock.fl_type = F_RDLCK;
 
4111			break;
4112		case NFS4_WRITE_LT:
4113		case NFS4_WRITEW_LT:
4114			filp = find_writeable_file(lock_stp->st_file);
4115			if (filp)
4116				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
4117			file_lock.fl_type = F_WRLCK;
 
4118			break;
4119		default:
4120			status = nfserr_inval;
4121		goto out;
4122	}
4123	if (!filp) {
 
4124		status = nfserr_openmode;
4125		goto out;
4126	}
4127	file_lock.fl_owner = (fl_owner_t)lock_sop;
4128	file_lock.fl_pid = current->tgid;
4129	file_lock.fl_file = filp;
4130	file_lock.fl_flags = FL_POSIX;
4131	file_lock.fl_lmops = &nfsd_posix_mng_ops;
4132
4133	file_lock.fl_start = lock->lk_offset;
4134	file_lock.fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
4135	nfs4_transform_lock_offset(&file_lock);
4136
4137	/*
4138	* Try to lock the file in the VFS.
4139	* Note: locks.c now serializes the inode's lock list with lock_flocks(), not the BKL.
4140	*/
4141
4142	err = vfs_lock_file(filp, F_SETLK, &file_lock, &conflock);
4143	switch (-err) {
4144	case 0: /* success! */
4145		update_stateid(&lock_stp->st_stid.sc_stateid);
4146		memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid, 
4147				sizeof(stateid_t));
4148		status = 0;
4149		break;
4150	case (EAGAIN):		/* conflock holds conflicting lock */
4151		status = nfserr_denied;
4152		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
4153		nfs4_set_lock_denied(&conflock, &lock->lk_denied);
4154		break;
4155	case (EDEADLK):
4156		status = nfserr_deadlock;
4157		break;
4158	default:
4159		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
4160		status = nfserrno(err);
4161		break;
4162	}
4163out:
4164	if (status && new_state)
4165		release_lockowner(lock_sop);
4166	if (!cstate->replay_owner)
4167		nfs4_unlock_state();
4168	return status;
4169}
4170
4171/*
4172 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
4173 * so we do a temporary open here just to get an open file to pass to
4174 * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
4175 * inode operation.)
4176 */
4177static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
4178{
4179	struct file *file;
4180	__be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
4181	if (!err) {
4182		err = nfserrno(vfs_test_lock(file, lock));
4183		nfsd_close(file);
4184	}
4185	return err;
4186}
4187
4188/*
4189 * LOCKT operation
4190 */
4191__be32
4192nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4193	    struct nfsd4_lockt *lockt)
4194{
4195	struct inode *inode;
4196	struct file_lock file_lock;
4197	struct nfs4_lockowner *lo;
4198	__be32 status;
 
4199
4200	if (locks_in_grace())
4201		return nfserr_grace;
4202
4203	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
4204		 return nfserr_inval;
4205
4206	nfs4_lock_state();
4207
4208	status = nfserr_stale_clientid;
4209	if (!nfsd4_has_session(cstate) && STALE_CLIENTID(&lockt->lt_clientid))
4210		goto out;
4211
4212	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
4213		goto out;
 
4214
4215	inode = cstate->current_fh.fh_dentry->d_inode;
4216	locks_init_lock(&file_lock);
4217	switch (lockt->lt_type) {
4218		case NFS4_READ_LT:
4219		case NFS4_READW_LT:
4220			file_lock.fl_type = F_RDLCK;
4221		break;
4222		case NFS4_WRITE_LT:
4223		case NFS4_WRITEW_LT:
4224			file_lock.fl_type = F_WRLCK;
4225		break;
4226		default:
4227			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
4228			status = nfserr_inval;
4229		goto out;
4230	}
4231
4232	lo = find_lockowner_str(inode, &lockt->lt_clientid, &lockt->lt_owner);
4233	if (lo)
4234		file_lock.fl_owner = (fl_owner_t)lo;
4235	file_lock.fl_pid = current->tgid;
4236	file_lock.fl_flags = FL_POSIX;
4237
4238	file_lock.fl_start = lockt->lt_offset;
4239	file_lock.fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
4240
4241	nfs4_transform_lock_offset(&file_lock);
4242
4243	status = nfsd_test_lock(rqstp, &cstate->current_fh, &file_lock);
4244	if (status)
4245		goto out;
4246
4247	if (file_lock.fl_type != F_UNLCK) {
4248		status = nfserr_denied;
4249		nfs4_set_lock_denied(&file_lock, &lockt->lt_denied);
4250	}
4251out:
4252	nfs4_unlock_state();
4253	return status;
4254}
4255
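/*
 * LOCKU: validate the lock stateid and seqid, then drop the byte range
 * via vfs_lock_file(F_SETLK, F_UNLCK) and bump the stateid on success.
 */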
4256__be32
4257nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4258	    struct nfsd4_locku *locku)
4259{
 
4260	struct nfs4_ol_stateid *stp;
4261	struct file *filp = NULL;
4262	struct file_lock file_lock;
4263	__be32 status;
4264	int err;
4265
4266	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
4267		(long long) locku->lu_offset,
4268		(long long) locku->lu_length);
4269
4270	if (check_lock_length(locku->lu_offset, locku->lu_length))
4271		 return nfserr_inval;
4272
4273	nfs4_lock_state();
4274
4275	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
4276					&locku->lu_stateid, NFS4_LOCK_STID, &stp);
 
4277	if (status)
4278		goto out;
4279	filp = find_any_file(stp->st_file);
4280	if (!filp) {
4281		status = nfserr_lock_range;
4282		goto out;
4283	}
4284	BUG_ON(!filp);
4285	locks_init_lock(&file_lock);
4286	file_lock.fl_type = F_UNLCK;
4287	file_lock.fl_owner = (fl_owner_t)lockowner(stp->st_stateowner);
4288	file_lock.fl_pid = current->tgid;
4289	file_lock.fl_file = filp;
4290	file_lock.fl_flags = FL_POSIX; 
4291	file_lock.fl_lmops = &nfsd_posix_mng_ops;
4292	file_lock.fl_start = locku->lu_offset;
4293
4294	file_lock.fl_end = last_byte_offset(locku->lu_offset, locku->lu_length);
4295	nfs4_transform_lock_offset(&file_lock);
4296
4297	/*
4298	*  Try to unlock the file in the VFS.
4299	*/
4300	err = vfs_lock_file(filp, F_SETLK, &file_lock, NULL);
4301	if (err) {
4302		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
4303		goto out_nfserr;
4304	}
4305	/*
4306	* OK, unlock succeeded; the only thing left to do is update the stateid.
4307	*/
4308	update_stateid(&stp->st_stid.sc_stateid);
4309	memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4310
4311out:
4312	if (!cstate->replay_owner)
4313		nfs4_unlock_state();
 
4314	return status;
4315
4316out_nfserr:
4317	status = nfserrno(err);
4318	goto out;
4319}
4320
4321/*
4322 * returns
4323 * 	1: locks held by lockowner
4324 * 	0: no locks held by lockowner
4325 */
4326static int
4327check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner)
4328{
4329	struct file_lock **flpp;
4330	struct inode *inode = filp->fi_inode;
4331	int status = 0;
4332
4333	lock_flocks();
4334	for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
4335		if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
4336			status = 1;
4337			goto out;
4338		}
 
4339	}
4340out:
4341	unlock_flocks();
4342	return status;
4343}
4344
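/*
 * RELEASE_LOCKOWNER: fails with nfserr_locks_held if any matching
 * lockowner still holds locks; otherwise every matching lockowner and
 * its state is released.
 */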
4345__be32
4346nfsd4_release_lockowner(struct svc_rqst *rqstp,
4347			struct nfsd4_compound_state *cstate,
4348			struct nfsd4_release_lockowner *rlockowner)
4349{
4350	clientid_t *clid = &rlockowner->rl_clientid;
4351	struct nfs4_stateowner *sop;
4352	struct nfs4_lockowner *lo;
4353	struct nfs4_ol_stateid *stp;
4354	struct xdr_netobj *owner = &rlockowner->rl_owner;
4355	struct list_head matches;
4356	unsigned int hashval = ownerstr_hashval(clid->cl_id, owner);
4357	__be32 status;
4358
4359	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
4360		clid->cl_boot, clid->cl_id);
4361
4362	/* XXX check for lease expiration */
4363
4364	status = nfserr_stale_clientid;
4365	if (STALE_CLIENTID(clid))
4366		return status;
 
4367
4368	nfs4_lock_state();
4369
4370	status = nfserr_locks_held;
4371	INIT_LIST_HEAD(&matches);
4372
4373	list_for_each_entry(sop, &ownerstr_hashtbl[hashval], so_strhash) {
4374		if (sop->so_is_open_owner)
4375			continue;
4376		if (!same_owner_str(sop, owner, clid))
4377			continue;
4378		list_for_each_entry(stp, &sop->so_stateids,
4379				st_perstateowner) {
4380			lo = lockowner(sop);
4381			if (check_for_locks(stp->st_file, lo))
4382				goto out;
4383			list_add(&lo->lo_list, &matches);
4384		}
4385	}
4386	/* Clients probably won't expect us to return with some (but not all)
4387	 * of the lockowner state released; so don't release any until all
4388	 * have been checked. */
4389	status = nfs_ok;
4390	while (!list_empty(&matches)) {
4391		lo = list_entry(matches.next, struct nfs4_lockowner,
4392								lo_list);
4393		/* unhash_stateowner deletes so_perclient only
4394		 * for openowners. */
4395		list_del(&lo->lo_list);
4396		release_lockowner(lo);
4397	}
4398out:
4399	nfs4_unlock_state();
4400	return status;
4401}
4402
4403static inline struct nfs4_client_reclaim *
4404alloc_reclaim(void)
4405{
4406	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
4407}
4408
4409int
4410nfs4_has_reclaimed_state(const char *name, bool use_exchange_id)
4411{
4412	unsigned int strhashval = clientstr_hashval(name);
4413	struct nfs4_client *clp;
4414
4415	clp = find_confirmed_client_by_str(name, strhashval);
4416	if (!clp)
4417		return 0;
4418	return test_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags);
4419}
4420
4421/*
4422 * failure => all reset bets are off, nfserr_no_grace...
4423 */
4424int
4425nfs4_client_to_reclaim(const char *name)
 
4426{
4427	unsigned int strhashval;
4428	struct nfs4_client_reclaim *crp = NULL;
4429
4430	dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
4431	crp = alloc_reclaim();
4432	if (!crp)
4433		return 0;
4434	strhashval = clientstr_hashval(name);
4435	INIT_LIST_HEAD(&crp->cr_strhash);
4436	list_add(&crp->cr_strhash, &reclaim_str_hashtbl[strhashval]);
4437	memcpy(crp->cr_recdir, name, HEXDIR_LEN);
4438	reclaim_str_hashtbl_size++;
4439	return 1;
4440}
4441
4442void
4443nfs4_release_reclaim(void)
4444{
4445	struct nfs4_client_reclaim *crp = NULL;
4446	int i;
4447
4448	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4449		while (!list_empty(&reclaim_str_hashtbl[i])) {
4450			crp = list_entry(reclaim_str_hashtbl[i].next,
4451			                struct nfs4_client_reclaim, cr_strhash);
4452			list_del(&crp->cr_strhash);
4453			kfree(crp);
4454			reclaim_str_hashtbl_size--;
4455		}
4456	}
4457	BUG_ON(reclaim_str_hashtbl_size);
4458}
4459
4460/*
4461 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
4462struct nfs4_client_reclaim *
4463nfsd4_find_reclaim_client(struct nfs4_client *clp)
4464{
4465	unsigned int strhashval;
4466	struct nfs4_client_reclaim *crp = NULL;
4467
4468	dprintk("NFSD: nfs4_find_reclaim_client for %.*s with recdir %s\n",
4469		            clp->cl_name.len, clp->cl_name.data,
4470			    clp->cl_recdir);
4471
4472	/* find clp->cl_name in reclaim_str_hashtbl */
4473	strhashval = clientstr_hashval(clp->cl_recdir);
4474	list_for_each_entry(crp, &reclaim_str_hashtbl[strhashval], cr_strhash) {
4475		if (same_name(crp->cr_recdir, clp->cl_recdir)) {
4476			return crp;
4477		}
4478	}
4479	return NULL;
4480}
4481
4482/*
4483* Called from OPEN. Look for clientid in reclaim list.
4484*/
4485__be32
4486nfs4_check_open_reclaim(clientid_t *clid)
4487{
4488	struct nfs4_client *clp;
 
4489
4490	/* find clientid in conf_id_hashtbl */
4491	clp = find_confirmed_client(clid);
4492	if (clp == NULL)
4493		return nfserr_reclaim_bad;
4494
4495	return nfsd4_client_record_check(clp) ? nfserr_reclaim_bad : nfs_ok;
4496}
4497
4498#ifdef CONFIG_NFSD_FAULT_INJECTION
4499
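/*
 * Fault-injection helpers: forcibly expire up to 'num' clients,
 * owners or delegations so that client recovery paths can be tested.
 */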
4500void nfsd_forget_clients(u64 num)
4501{
4502	struct nfs4_client *clp, *next;
4503	int count = 0;
4504
4505	nfs4_lock_state();
4506	list_for_each_entry_safe(clp, next, &client_lru, cl_lru) {
4507		nfsd4_client_record_remove(clp);
4508		expire_client(clp);
4509		if (++count == num)
4510			break;
4511	}
4512	nfs4_unlock_state();
4513
4514	printk(KERN_INFO "NFSD: Forgot %d clients", count);
4515}
4516
4517static void release_lockowner_sop(struct nfs4_stateowner *sop)
4518{
4519	release_lockowner(lockowner(sop));
4520}
4521
4522static void release_openowner_sop(struct nfs4_stateowner *sop)
4523{
4524	release_openowner(openowner(sop));
4525}
4526
4527static int nfsd_release_n_owners(u64 num, bool is_open_owner,
4528				void (*release_sop)(struct nfs4_stateowner *))
4529{
4530	int i, count = 0;
4531	struct nfs4_stateowner *sop, *next;
4532
4533	for (i = 0; i < OWNER_HASH_SIZE; i++) {
4534		list_for_each_entry_safe(sop, next, &ownerstr_hashtbl[i], so_strhash) {
4535			if (sop->so_is_open_owner != is_open_owner)
4536				continue;
4537			release_sop(sop);
4538			if (++count == num)
4539				return count;
4540		}
4541	}
4542	return count;
4543}
4544
4545void nfsd_forget_locks(u64 num)
4546{
4547	int count;
4548
4549	nfs4_lock_state();
4550	count = nfsd_release_n_owners(num, false, release_lockowner_sop);
4551	nfs4_unlock_state();
4552
4553	printk(KERN_INFO "NFSD: Forgot %d locks", count);
4554}
4555
4556void nfsd_forget_openowners(u64 num)
4557{
4558	int count;
4559
4560	nfs4_lock_state();
4561	count = nfsd_release_n_owners(num, true, release_openowner_sop);
4562	nfs4_unlock_state();
4563
4564	printk(KERN_INFO "NFSD: Forgot %d open owners", count);
4565}
4566
4567int nfsd_process_n_delegations(u64 num, void (*deleg_func)(struct nfs4_delegation *))
 
4568{
4569	int i, count = 0;
4570	struct nfs4_file *fp, *fnext;
4571	struct nfs4_delegation *dp, *dnext;
4572
4573	for (i = 0; i < FILE_HASH_SIZE; i++) {
4574		list_for_each_entry_safe(fp, fnext, &file_hashtbl[i], fi_hash) {
4575			list_for_each_entry_safe(dp, dnext, &fp->fi_delegations, dl_perfile) {
4576				deleg_func(dp);
4577				if (++count == num)
4578					return count;
4579			}
4580		}
4581	}
4582
4583	return count;
4584}
4585
4586void nfsd_forget_delegations(u64 num)
4587{
4588	unsigned int count;
4589
4590	nfs4_lock_state();
4591	count = nfsd_process_n_delegations(num, unhash_delegation);
4592	nfs4_unlock_state();
4593
4594	printk(KERN_INFO "NFSD: Forgot %d delegations", count);
4595}
4596
4597void nfsd_recall_delegations(u64 num)
4598{
4599	unsigned int count;
4600
4601	nfs4_lock_state();
4602	spin_lock(&recall_lock);
4603	count = nfsd_process_n_delegations(num, nfsd_break_one_deleg);
4604	spin_unlock(&recall_lock);
4605	nfs4_unlock_state();
 
4606
4607	printk(KERN_INFO "NFSD: Recalled %d delegations", count);
4608}
4609
4610#endif /* CONFIG_NFSD_FAULT_INJECTION */
4611
4612/* initialization to perform at module load time: */
4613
4614void
4615nfs4_state_init(void)
4616{
4617	int i;
 
4618
4619	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4620		INIT_LIST_HEAD(&conf_id_hashtbl[i]);
4621		INIT_LIST_HEAD(&conf_str_hashtbl[i]);
4622		INIT_LIST_HEAD(&unconf_str_hashtbl[i]);
4623		INIT_LIST_HEAD(&unconf_id_hashtbl[i]);
4624		INIT_LIST_HEAD(&reclaim_str_hashtbl[i]);
4625	}
4626	for (i = 0; i < SESSION_HASH_SIZE; i++)
4627		INIT_LIST_HEAD(&sessionid_hashtbl[i]);
4628	for (i = 0; i < FILE_HASH_SIZE; i++) {
4629		INIT_LIST_HEAD(&file_hashtbl[i]);
4630	}
4631	for (i = 0; i < OWNER_HASH_SIZE; i++) {
4632		INIT_LIST_HEAD(&ownerstr_hashtbl[i]);
4633	}
4634	for (i = 0; i < LOCKOWNER_INO_HASH_SIZE; i++)
4635		INIT_LIST_HEAD(&lockowner_ino_hashtbl[i]);
4636	INIT_LIST_HEAD(&close_lru);
4637	INIT_LIST_HEAD(&client_lru);
4638	INIT_LIST_HEAD(&del_recall_lru);
4639	reclaim_str_hashtbl_size = 0;
4640}
4641
4642/*
4643 * Since the lifetime of a delegation isn't limited to that of an open, a
4644 * client may quite reasonably hang on to a delegation as long as it has
4645 * the inode cached.  This becomes an obvious problem the first time a
4646 * client's inode cache approaches the size of the server's total memory.
4647 *
4648 * For now we avoid this problem by imposing a hard limit on the number
4649 * of delegations, which varies according to the server's memory size.
4650 */
4651static void
4652set_max_delegations(void)
4653{
4654	/*
4655	 * Allow at most 4 delegations per megabyte of RAM.  Quick
4656	 * estimates suggest that in the worst case (where every delegation
4657	 * is for a different inode), a delegation could take about 1.5K,
4658	 * giving a worst case usage of about 6% of memory.
4659	 */
4660	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
4661}
4662
4663/* initialization to perform when the nfsd service is started: */
4664
4665int
4666nfs4_state_start(void)
4667{
4668	int ret;
4669
4670	/*
4671	 * FIXME: For now, we hang most of the pernet global stuff off of
4672	 * init_net until nfsd is fully containerized. Eventually, we'll
4673	 * need to pass a net pointer into this function, take a reference
4674	 * to that instead and then do most of the rest of this on a per-net
4675	 * basis.
4676	 */
4677	get_net(&init_net);
4678	nfsd4_client_tracking_init(&init_net);
4679	boot_time = get_seconds();
4680	locks_start_grace(&nfsd4_manager);
4681	grace_ended = false;
4682	printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
4683	       nfsd4_grace);
4684	ret = set_callback_cred();
4685	if (ret) {
4686		ret = -ENOMEM;
4687		goto out_recovery;
4688	}
4689	laundry_wq = create_singlethread_workqueue("nfsd4");
4690	if (laundry_wq == NULL) {
4691		ret = -ENOMEM;
4692		goto out_recovery;
4693	}
4694	ret = nfsd4_create_callback_queue();
4695	if (ret)
4696		goto out_free_laundry;
4697	queue_delayed_work(laundry_wq, &laundromat_work, nfsd4_grace * HZ);
4698	set_max_delegations();
4699	return 0;
4700out_free_laundry:
4701	destroy_workqueue(laundry_wq);
4702out_recovery:
4703	nfsd4_client_tracking_exit(&init_net);
4704	put_net(&init_net);
4705	return ret;
4706}
4707
4708static void
4709__nfs4_state_shutdown(void)
4710{
4711	int i;
4712	struct nfs4_client *clp = NULL;
4713	struct nfs4_delegation *dp = NULL;
4714	struct list_head *pos, *next, reaplist;
4715
4716	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4717		while (!list_empty(&conf_id_hashtbl[i])) {
4718			clp = list_entry(conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
4719			expire_client(clp);
4720		}
4721		while (!list_empty(&unconf_str_hashtbl[i])) {
4722			clp = list_entry(unconf_str_hashtbl[i].next, struct nfs4_client, cl_strhash);
4723			expire_client(clp);
4724		}
4725	}
4726	INIT_LIST_HEAD(&reaplist);
4727	spin_lock(&recall_lock);
4728	list_for_each_safe(pos, next, &del_recall_lru) {
4729		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
4730		list_move(&dp->dl_recall_lru, &reaplist);
 
4731	}
4732	spin_unlock(&recall_lock);
4733	list_for_each_safe(pos, next, &reaplist) {
4734		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
4735		unhash_delegation(dp);
 
4736	}
4737
4738	nfsd4_client_tracking_exit(&init_net);
4739	put_net(&init_net);
4740}
4741
4742void
4743nfs4_state_shutdown(void)
4744{
4745	cancel_delayed_work_sync(&laundromat_work);
4746	destroy_workqueue(laundry_wq);
4747	locks_end_grace(&nfsd4_manager);
4748	nfs4_lock_state();
4749	__nfs4_state_shutdown();
4750	nfs4_unlock_state();
4751	nfsd4_destroy_callback_queue();
 
4752}
4753
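/*
 * Current-stateid support (NFSv4.1): put_stateid saves an operation's
 * result stateid in the compound state, and get_stateid substitutes it
 * when the client passes the special "current stateid" value.
 */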
4754static void
4755get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
4756{
4757	if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
 
4758		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
4759}
4760
4761static void
4762put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
4763{
4764	if (cstate->minorversion) {
4765		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
4766		SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
4767	}
4768}
4769
4770void
4771clear_current_stateid(struct nfsd4_compound_state *cstate)
4772{
4773	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
4774}
4775
4776/*
4777 * functions to set current state id
4778 */
4779void
4780nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
 
4781{
4782	put_stateid(cstate, &odp->od_stateid);
4783}
4784
4785void
4786nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
 
4787{
4788	put_stateid(cstate, &open->op_stateid);
4789}
4790
4791void
4792nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
 
4793{
4794	put_stateid(cstate, &close->cl_stateid);
4795}
4796
4797void
4798nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
 
4799{
4800	put_stateid(cstate, &lock->lk_resp_stateid);
4801}
4802
4803/*
4804 * functions to consume current state id
4805 */
4806
4807void
4808nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
 
4809{
4810	get_stateid(cstate, &odp->od_stateid);
4811}
4812
4813void
4814nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
 
4815{
4816	get_stateid(cstate, &drp->dr_stateid);
4817}
4818
4819void
4820nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
 
4821{
4822	get_stateid(cstate, &fsp->fr_stateid);
4823}
4824
4825void
4826nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
 
4827{
4828	get_stateid(cstate, &setattr->sa_stateid);
4829}
4830
4831void
4832nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
 
4833{
4834	get_stateid(cstate, &close->cl_stateid);
4835}
4836
4837void
4838nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
 
4839{
4840	get_stateid(cstate, &locku->lu_stateid);
4841}
4842
4843void
4844nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
 
4845{
4846	get_stateid(cstate, &read->rd_stateid);
4847}
4848
4849void
4850nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
 
4851{
4852	get_stateid(cstate, &write->wr_stateid);
4853}
v6.8
  34
  35#include <linux/file.h>
  36#include <linux/fs.h>
  37#include <linux/slab.h>
  38#include <linux/namei.h>
  39#include <linux/swap.h>
  40#include <linux/pagemap.h>
  41#include <linux/ratelimit.h>
  42#include <linux/sunrpc/svcauth_gss.h>
  43#include <linux/sunrpc/addr.h>
  44#include <linux/jhash.h>
  45#include <linux/string_helpers.h>
  46#include <linux/fsnotify.h>
  47#include <linux/rhashtable.h>
  48#include <linux/nfs_ssc.h>
  49
  50#include "xdr4.h"
  51#include "xdr4cb.h"
  52#include "vfs.h"
  53#include "current_stateid.h"
  54
  55#include "netns.h"
  56#include "pnfs.h"
  57#include "filecache.h"
  58#include "trace.h"
  59
  60#define NFSDDBG_FACILITY                NFSDDBG_PROC
  61
  62#define all_ones {{ ~0, ~0}, ~0}
  63static const stateid_t one_stateid = {
  64	.si_generation = ~0,
  65	.si_opaque = all_ones,
  66};
  67static const stateid_t zero_stateid = {
  68	/* all fields zero */
  69};
  70static const stateid_t currentstateid = {
  71	.si_generation = 1,
  72};
  73static const stateid_t close_stateid = {
  74	.si_generation = 0xffffffffU,
  75};
  76
  77static u64 current_sessionid = 1;
  78
  79#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
  80#define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
  81#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
  82#define CLOSE_STATEID(stateid)  (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
  83
  84/* forward declarations */
  85static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
  86static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
  87void nfsd4_end_grace(struct nfsd_net *nn);
  88static void _free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps);
  89static void nfsd4_file_hash_remove(struct nfs4_file *fi);
  90
  91/* Locking: */
  92
  93/*
  94 * Currently used for the del_recall_lru and file hash table.  In an
  95 * effort to decrease the scope of the client_mutex, this spinlock may
  96 * eventually cover more:
  97 */
  98static DEFINE_SPINLOCK(state_lock);
  99
 100enum nfsd4_st_mutex_lock_subclass {
 101	OPEN_STATEID_MUTEX = 0,
 102	LOCK_STATEID_MUTEX = 1,
 103};
 
 104
 105/*
 106 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 107 * the refcount on the open stateid to drop.
 108 */
 109static DECLARE_WAIT_QUEUE_HEAD(close_wq);
 110
 111/*
 112 * A waitqueue where a writer to clients/#/ctl destroying a client can
 113 * wait for cl_rpc_users to drop to 0 and then for the client to be
 114 * unhashed.
 115 */
 116static DECLARE_WAIT_QUEUE_HEAD(expiry_wq);
 117
 118static struct kmem_cache *client_slab;
 119static struct kmem_cache *openowner_slab;
 120static struct kmem_cache *lockowner_slab;
 121static struct kmem_cache *file_slab;
 122static struct kmem_cache *stateid_slab;
 123static struct kmem_cache *deleg_slab;
 124static struct kmem_cache *odstate_slab;
 125
 126static void free_session(struct nfsd4_session *);
 127
 128static const struct nfsd4_callback_ops nfsd4_cb_recall_ops;
 129static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops;
 130
 131static struct workqueue_struct *laundry_wq;
 132
 133int nfsd4_create_laundry_wq(void)
 134{
 135	int rc = 0;
 136
 137	laundry_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, "nfsd4");
 138	if (laundry_wq == NULL)
 139		rc = -ENOMEM;
 140	return rc;
 141}
 142
 143void nfsd4_destroy_laundry_wq(void)
 144{
 145	destroy_workqueue(laundry_wq);
 146}
 147
 148static bool is_session_dead(struct nfsd4_session *ses)
 149{
 150	return ses->se_flags & NFS4_SESSION_DEAD;
 151}
 152
 153static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
 154{
 155	if (atomic_read(&ses->se_ref) > ref_held_by_me)
 156		return nfserr_jukebox;
 157	ses->se_flags |= NFS4_SESSION_DEAD;
 158	return nfs_ok;
 159}
 160
 161static bool is_client_expired(struct nfs4_client *clp)
 162{
 163	return clp->cl_time == 0;
 164}
 165
 166static void nfsd4_dec_courtesy_client_count(struct nfsd_net *nn,
 167					struct nfs4_client *clp)
 168{
 169	if (clp->cl_state != NFSD4_ACTIVE)
 170		atomic_add_unless(&nn->nfsd_courtesy_clients, -1, 0);
 171}
 172
 173static __be32 get_client_locked(struct nfs4_client *clp)
 174{
 175	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
 176
 177	lockdep_assert_held(&nn->client_lock);
 178
 179	if (is_client_expired(clp))
 180		return nfserr_expired;
 181	atomic_inc(&clp->cl_rpc_users);
 182	nfsd4_dec_courtesy_client_count(nn, clp);
 183	clp->cl_state = NFSD4_ACTIVE;
 184	return nfs_ok;
 185}
 186
 187/* must be called under the client_lock */
 188static inline void
 189renew_client_locked(struct nfs4_client *clp)
 190{
 191	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
 192
 193	if (is_client_expired(clp)) {
 194		WARN_ON(1);
 195		printk("%s: client (clientid %08x/%08x) already expired\n",
 196			__func__,
 197			clp->cl_clientid.cl_boot,
 198			clp->cl_clientid.cl_id);
 199		return;
 200	}
 201
 202	list_move_tail(&clp->cl_lru, &nn->client_lru);
 203	clp->cl_time = ktime_get_boottime_seconds();
 204	nfsd4_dec_courtesy_client_count(nn, clp);
 205	clp->cl_state = NFSD4_ACTIVE;
 206}
 207
 208static void put_client_renew_locked(struct nfs4_client *clp)
 209{
 210	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
 211
 212	lockdep_assert_held(&nn->client_lock);
 213
 214	if (!atomic_dec_and_test(&clp->cl_rpc_users))
 215		return;
 216	if (!is_client_expired(clp))
 217		renew_client_locked(clp);
 218	else
 219		wake_up_all(&expiry_wq);
 220}
 221
 222static void put_client_renew(struct nfs4_client *clp)
 223{
 224	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
 225
 226	if (!atomic_dec_and_lock(&clp->cl_rpc_users, &nn->client_lock))
 227		return;
 228	if (!is_client_expired(clp))
 229		renew_client_locked(clp);
 230	else
 231		wake_up_all(&expiry_wq);
 232	spin_unlock(&nn->client_lock);
 233}
 234
 235static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
 236{
 237	__be32 status;
 238
 239	if (is_session_dead(ses))
 240		return nfserr_badsession;
 241	status = get_client_locked(ses->se_client);
 242	if (status)
 243		return status;
 244	atomic_inc(&ses->se_ref);
 245	return nfs_ok;
 246}
 247
 
 248static void nfsd4_put_session_locked(struct nfsd4_session *ses)
 249{
 250	struct nfs4_client *clp = ses->se_client;
 251	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
 252
 253	lockdep_assert_held(&nn->client_lock);
 254
 255	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
 256		free_session(ses);
 257	put_client_renew_locked(clp);
 258}
 259
 260static void nfsd4_put_session(struct nfsd4_session *ses)
 261{
 262	struct nfs4_client *clp = ses->se_client;
 263	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
 264
 265	spin_lock(&nn->client_lock);
 266	nfsd4_put_session_locked(ses);
 267	spin_unlock(&nn->client_lock);
 268}
 269
 270static struct nfsd4_blocked_lock *
 271find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
 272			struct nfsd_net *nn)
 273{
 274	struct nfsd4_blocked_lock *cur, *found = NULL;
 275
 276	spin_lock(&nn->blocked_locks_lock);
 277	list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
 278		if (fh_match(fh, &cur->nbl_fh)) {
 279			list_del_init(&cur->nbl_list);
 280			WARN_ON(list_empty(&cur->nbl_lru));
 281			list_del_init(&cur->nbl_lru);
 282			found = cur;
 283			break;
 284		}
 285	}
 286	spin_unlock(&nn->blocked_locks_lock);
 287	if (found)
 288		locks_delete_block(&found->nbl_lock);
 289	return found;
 290}
 291
 292static struct nfsd4_blocked_lock *
 293find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
 294			struct nfsd_net *nn)
 295{
 296	struct nfsd4_blocked_lock *nbl;
 297
 298	nbl = find_blocked_lock(lo, fh, nn);
 299	if (!nbl) {
 300		nbl = kmalloc(sizeof(*nbl), GFP_KERNEL);
 301		if (nbl) {
 302			INIT_LIST_HEAD(&nbl->nbl_list);
 303			INIT_LIST_HEAD(&nbl->nbl_lru);
 304			fh_copy_shallow(&nbl->nbl_fh, fh);
 305			locks_init_lock(&nbl->nbl_lock);
 306			kref_init(&nbl->nbl_kref);
 307			nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
 308					&nfsd4_cb_notify_lock_ops,
 309					NFSPROC4_CLNT_CB_NOTIFY_LOCK);
 310		}
 311	}
 312	return nbl;
 313}
 314
 315static void
 316free_nbl(struct kref *kref)
 317{
 318	struct nfsd4_blocked_lock *nbl;
 319
 320	nbl = container_of(kref, struct nfsd4_blocked_lock, nbl_kref);
 321	kfree(nbl);
 322}
 323
 324static void
 325free_blocked_lock(struct nfsd4_blocked_lock *nbl)
 326{
 327	locks_delete_block(&nbl->nbl_lock);
 328	locks_release_private(&nbl->nbl_lock);
 329	kref_put(&nbl->nbl_kref, free_nbl);
 330}
 331
 332static void
 333remove_blocked_locks(struct nfs4_lockowner *lo)
 334{
 335	struct nfs4_client *clp = lo->lo_owner.so_client;
 336	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
 337	struct nfsd4_blocked_lock *nbl;
 338	LIST_HEAD(reaplist);
 339
 340	/* Dequeue all blocked locks */
 341	spin_lock(&nn->blocked_locks_lock);
 342	while (!list_empty(&lo->lo_blocked)) {
 343		nbl = list_first_entry(&lo->lo_blocked,
 344					struct nfsd4_blocked_lock,
 345					nbl_list);
 346		list_del_init(&nbl->nbl_list);
 347		WARN_ON(list_empty(&nbl->nbl_lru));
 348		list_move(&nbl->nbl_lru, &reaplist);
 349	}
 350	spin_unlock(&nn->blocked_locks_lock);
 351
 352	/* Now free them */
 353	while (!list_empty(&reaplist)) {
 354		nbl = list_first_entry(&reaplist, struct nfsd4_blocked_lock,
 355					nbl_lru);
 356		list_del_init(&nbl->nbl_lru);
 357		free_blocked_lock(nbl);
 358	}
 359}
 360
 361static void
 362nfsd4_cb_notify_lock_prepare(struct nfsd4_callback *cb)
 363{
 364	struct nfsd4_blocked_lock	*nbl = container_of(cb,
 365						struct nfsd4_blocked_lock, nbl_cb);
 366	locks_delete_block(&nbl->nbl_lock);
 367}
 368
 369static int
 370nfsd4_cb_notify_lock_done(struct nfsd4_callback *cb, struct rpc_task *task)
 371{
 372	trace_nfsd_cb_notify_lock_done(&zero_stateid, task);
 373
 374	/*
 375	 * Since this is just an optimization, we don't try very hard if it
 376	 * turns out not to succeed. We'll requeue it on NFS4ERR_DELAY, and
 377	 * just quit trying on anything else.
 378	 */
 379	switch (task->tk_status) {
 380	case -NFS4ERR_DELAY:
 381		rpc_delay(task, 1 * HZ);
 382		return 0;
 383	default:
 384		return 1;
 385	}
 386}
 387
 388static void
 389nfsd4_cb_notify_lock_release(struct nfsd4_callback *cb)
 390{
 391	struct nfsd4_blocked_lock	*nbl = container_of(cb,
 392						struct nfsd4_blocked_lock, nbl_cb);
 393
 394	free_blocked_lock(nbl);
 395}
 396
 397static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
 398	.prepare	= nfsd4_cb_notify_lock_prepare,
 399	.done		= nfsd4_cb_notify_lock_done,
 400	.release	= nfsd4_cb_notify_lock_release,
 401};
 402
 403/*
 404 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 405 * st_{access,deny}_bmap field of the stateid, in order to track not
 406 * only what share bits are currently in force, but also what
 407 * combinations of share bits previous opens have used.  This allows us
 408 * to enforce the recommendation in
 409 * https://datatracker.ietf.org/doc/html/rfc7530#section-16.19.4 that
 410 * the server return an error if the client attempt to downgrade to a
 411 * combination of share bits not explicable by closing some of its
 412 * previous opens.
 413 *
 414 * This enforcement is arguably incomplete, since we don't keep
 415 * track of access/deny bit combinations; so, e.g., we allow:
 416 *
 417 *	OPEN allow read, deny write
 418 *	OPEN allow both, deny none
 419 *	DOWNGRADE allow read, deny none
 420 *
 421 * which we should reject.
 422 *
 423 * But you could also argue that our current code is already overkill,
 424 * since it only exists to return NFS4ERR_INVAL on incorrect client
 425 * behavior.
 426 */
 427static unsigned int
 428bmap_to_share_mode(unsigned long bmap)
 429{
 430	int i;
 431	unsigned int access = 0;
 432
 433	for (i = 1; i < 4; i++) {
 434		if (test_bit(i, &bmap))
 435			access |= i;
 436	}
 437	return access;
 438}
 439
 440/* set share access for a given stateid */
 441static inline void
 442set_access(u32 access, struct nfs4_ol_stateid *stp)
 443{
 444	unsigned char mask = 1 << access;
 445
 446	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
 447	stp->st_access_bmap |= mask;
 448}
 449
 450/* clear share access for a given stateid */
 451static inline void
 452clear_access(u32 access, struct nfs4_ol_stateid *stp)
 453{
 454	unsigned char mask = 1 << access;
 455
 456	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
 457	stp->st_access_bmap &= ~mask;
 458}
 459
 460/* test whether a given stateid has access */
 461static inline bool
 462test_access(u32 access, struct nfs4_ol_stateid *stp)
 463{
 464	unsigned char mask = 1 << access;
 465
 466	return (bool)(stp->st_access_bmap & mask);
 467}
 468
 469/* set share deny for a given stateid */
 470static inline void
 471set_deny(u32 deny, struct nfs4_ol_stateid *stp)
 472{
 473	unsigned char mask = 1 << deny;
 474
 475	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
 476	stp->st_deny_bmap |= mask;
 477}
 478
 479/* clear share deny for a given stateid */
 480static inline void
 481clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
 482{
 483	unsigned char mask = 1 << deny;
 484
 485	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
 486	stp->st_deny_bmap &= ~mask;
 487}
 488
 489/* test whether a given stateid is denying specific access */
 490static inline bool
 491test_deny(u32 deny, struct nfs4_ol_stateid *stp)
 492{
 493	unsigned char mask = 1 << deny;
 494
 495	return (bool)(stp->st_deny_bmap & mask);
 496}
 497
 498static int nfs4_access_to_omode(u32 access)
 499{
 500	switch (access & NFS4_SHARE_ACCESS_BOTH) {
 501	case NFS4_SHARE_ACCESS_READ:
 502		return O_RDONLY;
 503	case NFS4_SHARE_ACCESS_WRITE:
 504		return O_WRONLY;
 505	case NFS4_SHARE_ACCESS_BOTH:
 506		return O_RDWR;
 507	}
 508	WARN_ON_ONCE(1);
 509	return O_RDONLY;
 510}
 511
 512static inline int
 513access_permit_read(struct nfs4_ol_stateid *stp)
 514{
 515	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
 516		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
 517		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
 518}
 519
 520static inline int
 521access_permit_write(struct nfs4_ol_stateid *stp)
 522{
 523	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
 524		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
 525}
 526
 527static inline struct nfs4_stateowner *
 528nfs4_get_stateowner(struct nfs4_stateowner *sop)
 529{
 530	atomic_inc(&sop->so_count);
 531	return sop;
 532}
 533
 534static int
 535same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner)
 536{
 537	return (sop->so_owner.len == owner->len) &&
 538		0 == memcmp(sop->so_owner.data, owner->data, owner->len);
 539}
 540
 541static struct nfs4_openowner *
 542find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
 543			struct nfs4_client *clp)
 544{
 545	struct nfs4_stateowner *so;
 546
 547	lockdep_assert_held(&clp->cl_lock);
 548
 549	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[hashval],
 550			    so_strhash) {
 551		if (!so->so_is_open_owner)
 552			continue;
 553		if (same_owner_str(so, &open->op_owner))
 554			return openowner(nfs4_get_stateowner(so));
 555	}
 556	return NULL;
 557}
 558
 559static struct nfs4_openowner *
 560find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
 561			struct nfs4_client *clp)
 562{
 563	struct nfs4_openowner *oo;
 564
 565	spin_lock(&clp->cl_lock);
 566	oo = find_openstateowner_str_locked(hashval, open, clp);
 567	spin_unlock(&clp->cl_lock);
 568	return oo;
 569}
 570
 571static inline u32
 572opaque_hashval(const void *ptr, int nbytes)
 573{
 574	unsigned char *cptr = (unsigned char *) ptr;
 575
 576	u32 x = 0;
 577	while (nbytes--) {
 578		x *= 37;
 579		x += *cptr++;
 580	}
 581	return x;
 582}
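/*
 * For example, opaque_hashval("ab", 2) computes (0 * 37 + 'a') * 37 + 'b'
 * == 97 * 37 + 98 == 3687; ownerstr_hashval() below then masks the result
 * down to an OWNER_HASH_SIZE-sized bucket index.
 */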
 583
 584static void nfsd4_free_file_rcu(struct rcu_head *rcu)
 585{
 586	struct nfs4_file *fp = container_of(rcu, struct nfs4_file, fi_rcu);
 587
 588	kmem_cache_free(file_slab, fp);
 589}
 590
 591void
 592put_nfs4_file(struct nfs4_file *fi)
 593{
 594	if (refcount_dec_and_test(&fi->fi_ref)) {
 595		nfsd4_file_hash_remove(fi);
 596		WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
 597		WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
 598		call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
 599	}
 600}
 601
 602static struct nfsd_file *
 603find_writeable_file_locked(struct nfs4_file *f)
 604{
 605	struct nfsd_file *ret;
 606
 607	lockdep_assert_held(&f->fi_lock);
 608
 609	ret = nfsd_file_get(f->fi_fds[O_WRONLY]);
 610	if (!ret)
 611		ret = nfsd_file_get(f->fi_fds[O_RDWR]);
 612	return ret;
 613}
 614
 615static struct nfsd_file *
 616find_writeable_file(struct nfs4_file *f)
 617{
 618	struct nfsd_file *ret;
 619
 620	spin_lock(&f->fi_lock);
 621	ret = find_writeable_file_locked(f);
 622	spin_unlock(&f->fi_lock);
 623
 624	return ret;
 625}
 626
 627static struct nfsd_file *
 628find_readable_file_locked(struct nfs4_file *f)
 629{
 630	struct nfsd_file *ret;
 631
 632	lockdep_assert_held(&f->fi_lock);
 633
 634	ret = nfsd_file_get(f->fi_fds[O_RDONLY]);
 635	if (!ret)
 636		ret = nfsd_file_get(f->fi_fds[O_RDWR]);
 637	return ret;
 638}
 639
 640static struct nfsd_file *
 641find_readable_file(struct nfs4_file *f)
 642{
 643	struct nfsd_file *ret;
 644
 645	spin_lock(&f->fi_lock);
 646	ret = find_readable_file_locked(f);
 647	spin_unlock(&f->fi_lock);
 648
 649	return ret;
 650}
 651
 652static struct nfsd_file *
 653find_rw_file(struct nfs4_file *f)
 654{
 655	struct nfsd_file *ret;
 656
 657	spin_lock(&f->fi_lock);
 658	ret = nfsd_file_get(f->fi_fds[O_RDWR]);
 659	spin_unlock(&f->fi_lock);
 660
 661	return ret;
 662}
 663
 664struct nfsd_file *
 665find_any_file(struct nfs4_file *f)
 666{
 667	struct nfsd_file *ret;
 668
 669	if (!f)
 670		return NULL;
 671	spin_lock(&f->fi_lock);
 672	ret = nfsd_file_get(f->fi_fds[O_RDWR]);
 673	if (!ret) {
 674		ret = nfsd_file_get(f->fi_fds[O_WRONLY]);
 675		if (!ret)
 676			ret = nfsd_file_get(f->fi_fds[O_RDONLY]);
 677	}
 678	spin_unlock(&f->fi_lock);
 679	return ret;
 680}
 681
 682static struct nfsd_file *find_any_file_locked(struct nfs4_file *f)
 683{
 684	lockdep_assert_held(&f->fi_lock);
 685
 686	if (f->fi_fds[O_RDWR])
 687		return f->fi_fds[O_RDWR];
 688	if (f->fi_fds[O_WRONLY])
 689		return f->fi_fds[O_WRONLY];
 690	if (f->fi_fds[O_RDONLY])
 691		return f->fi_fds[O_RDONLY];
 692	return NULL;
 693}
 694
 695static atomic_long_t num_delegations;
 696unsigned long max_delegations;
 697
 698/*
 699 * Open owner state (share locks)
 700 */
 701
 702/* hash tables for lock and open owners */
 703#define OWNER_HASH_BITS              8
 704#define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
 705#define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)
 706
 707static unsigned int ownerstr_hashval(struct xdr_netobj *ownername)
 708{
 709	unsigned int ret;
 710
 711	ret = opaque_hashval(ownername->data, ownername->len);
 712	return ret & OWNER_HASH_MASK;
 713}
 714
 715static struct rhltable nfs4_file_rhltable ____cacheline_aligned_in_smp;
 716
 717static const struct rhashtable_params nfs4_file_rhash_params = {
 718	.key_len		= sizeof_field(struct nfs4_file, fi_inode),
 719	.key_offset		= offsetof(struct nfs4_file, fi_inode),
 720	.head_offset		= offsetof(struct nfs4_file, fi_rlist),
 721
 722	/*
 723	 * Start with a single page hash table to reduce resizing churn
 724	 * on light workloads.
 725	 */
 726	.min_size		= 256,
 727	.automatic_shrinking	= true,
 728};
 729
 730/*
 731 * Check if courtesy clients have conflicting access and resolve it if possible
 732 *
 733 * access:  is op_share_access if share_access is true.
 734 *	    Check if access mode, op_share_access, would conflict with
 735 *	    the current deny mode of the file 'fp'.
 736 * access:  is op_share_deny if share_access is false.
 737 *	    Check if the deny mode, op_share_deny, would conflict with
 738 *	    current access of the file 'fp'.
 739 * stp:     skip checking this entry.
 740 * new_stp: normal open, not open upgrade.
 741 *
 742 * Function returns:
 743 *	false - access/deny mode conflict with normal client.
 744 *	true  - no conflict or conflict with courtesy client(s) is resolved.
 745 */
 746static bool
 747nfs4_resolve_deny_conflicts_locked(struct nfs4_file *fp, bool new_stp,
 748		struct nfs4_ol_stateid *stp, u32 access, bool share_access)
 749{
 750	struct nfs4_ol_stateid *st;
 751	bool resolvable = true;
 752	unsigned char bmap;
 753	struct nfsd_net *nn;
 754	struct nfs4_client *clp;
 755
 756	lockdep_assert_held(&fp->fi_lock);
 757	list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
 758		/* ignore lock stateid */
 759		if (st->st_openstp)
 760			continue;
 761		if (st == stp && new_stp)
 762			continue;
 763		/* check file access against deny mode or vice versa */
 764		bmap = share_access ? st->st_deny_bmap : st->st_access_bmap;
 765		if (!(access & bmap_to_share_mode(bmap)))
 766			continue;
 767		clp = st->st_stid.sc_client;
 768		if (try_to_expire_client(clp))
 769			continue;
 770		resolvable = false;
 771		break;
 772	}
 773	if (resolvable) {
 774		clp = stp->st_stid.sc_client;
 775		nn = net_generic(clp->net, nfsd_net_id);
 776		mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
 777	}
 778	return resolvable;
 779}
 780
 781static void
 782__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
 783{
 784	lockdep_assert_held(&fp->fi_lock);
 785
 786	if (access & NFS4_SHARE_ACCESS_WRITE)
 787		atomic_inc(&fp->fi_access[O_WRONLY]);
 788	if (access & NFS4_SHARE_ACCESS_READ)
 789		atomic_inc(&fp->fi_access[O_RDONLY]);
 790}
 791
 792static __be32
 793nfs4_file_get_access(struct nfs4_file *fp, u32 access)
 794{
 795	lockdep_assert_held(&fp->fi_lock);
 796
 797	/* Does this access mode make sense? */
 798	if (access & ~NFS4_SHARE_ACCESS_BOTH)
 799		return nfserr_inval;
 800
 801	/* Does it conflict with a deny mode already set? */
 802	if ((access & fp->fi_share_deny) != 0)
 803		return nfserr_share_denied;
 804
 805	__nfs4_file_get_access(fp, access);
 806	return nfs_ok;
 807}
 808
 809static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
 810{
 811	/* Common case is that there is no deny mode. */
 812	if (deny) {
 813		/* Does this deny mode make sense? */
 814		if (deny & ~NFS4_SHARE_DENY_BOTH)
 815			return nfserr_inval;
 816
 817		if ((deny & NFS4_SHARE_DENY_READ) &&
 818		    atomic_read(&fp->fi_access[O_RDONLY]))
 819			return nfserr_share_denied;
 820
 821		if ((deny & NFS4_SHARE_DENY_WRITE) &&
 822		    atomic_read(&fp->fi_access[O_WRONLY]))
 823			return nfserr_share_denied;
 824	}
 825	return nfs_ok;
 826}
 827
 828static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
 829{
 830	might_lock(&fp->fi_lock);
 831
 832	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
 833		struct nfsd_file *f1 = NULL;
 834		struct nfsd_file *f2 = NULL;
 835
 836		swap(f1, fp->fi_fds[oflag]);
 837		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
 838			swap(f2, fp->fi_fds[O_RDWR]);
 839		spin_unlock(&fp->fi_lock);
 840		if (f1)
 841			nfsd_file_put(f1);
 842		if (f2)
 843			nfsd_file_put(f2);
 844	}
 845}
 846
 847static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
 848{
 849	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);
 850
 851	if (access & NFS4_SHARE_ACCESS_WRITE)
 852		__nfs4_file_put_access(fp, O_WRONLY);
 853	if (access & NFS4_SHARE_ACCESS_READ)
 854		__nfs4_file_put_access(fp, O_RDONLY);
 855}
 856
 857/*
 858 * Allocate a new open/delegation state counter. This is needed for
 859 * pNFS for proper return on close semantics.
 860 *
 861 * Note that we only allocate it for pNFS-enabled exports, otherwise
 862 * all pointers to struct nfs4_clnt_odstate are always NULL.
 863 */
 864static struct nfs4_clnt_odstate *
 865alloc_clnt_odstate(struct nfs4_client *clp)
 866{
 867	struct nfs4_clnt_odstate *co;
 868
 869	co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
 870	if (co) {
 871		co->co_client = clp;
 872		refcount_set(&co->co_odcount, 1);
 873	}
 874	return co;
 875}
 876
 877static void
 878hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
 879{
 880	struct nfs4_file *fp = co->co_file;
 881
 882	lockdep_assert_held(&fp->fi_lock);
 883	list_add(&co->co_perfile, &fp->fi_clnt_odstate);
 884}
 885
 886static inline void
 887get_clnt_odstate(struct nfs4_clnt_odstate *co)
 888{
 889	if (co)
 890		refcount_inc(&co->co_odcount);
 891}
 892
 893static void
 894put_clnt_odstate(struct nfs4_clnt_odstate *co)
 895{
 896	struct nfs4_file *fp;
 897
 898	if (!co)
 899		return;
 900
 901	fp = co->co_file;
 902	if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
 903		list_del(&co->co_perfile);
 904		spin_unlock(&fp->fi_lock);
 905
 906		nfsd4_return_all_file_layouts(co->co_client, fp);
 907		kmem_cache_free(odstate_slab, co);
 908	}
 909}
 910
 911static struct nfs4_clnt_odstate *
 912find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
 913{
 914	struct nfs4_clnt_odstate *co;
 915	struct nfs4_client *cl;
 916
 917	if (!new)
 918		return NULL;
 919
 920	cl = new->co_client;
 921
 922	spin_lock(&fp->fi_lock);
 923	list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
 924		if (co->co_client == cl) {
 925			get_clnt_odstate(co);
 926			goto out;
 927		}
 928	}
 929	co = new;
 930	co->co_file = fp;
 931	hash_clnt_odstate_locked(new);
 932out:
 933	spin_unlock(&fp->fi_lock);
 934	return co;
 935}
 936
 937struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
 938				  void (*sc_free)(struct nfs4_stid *))
 939{
 940	struct nfs4_stid *stid;
 941	int new_id;
 942
 943	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
 944	if (!stid)
 945		return NULL;
 946
 947	idr_preload(GFP_KERNEL);
 948	spin_lock(&cl->cl_lock);
 949	/* Reserving 0 for start of file in nfsdfs "states" file: */
 950	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 1, 0, GFP_NOWAIT);
 951	spin_unlock(&cl->cl_lock);
 952	idr_preload_end();
 953	if (new_id < 0)
 954		goto out_free;
 955
 956	stid->sc_free = sc_free;
 957	stid->sc_client = cl;
 958	stid->sc_stateid.si_opaque.so_id = new_id;
 959	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
 960	/* Will be incremented before return to client: */
 961	refcount_set(&stid->sc_count, 1);
 962	spin_lock_init(&stid->sc_lock);
 963	INIT_LIST_HEAD(&stid->sc_cp_list);
 964
 965	/*
 966	 * It shouldn't be a problem to reuse an opaque stateid value.
 967	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
 968	 * example, a stray write retransmission could be accepted by
 969	 * the server when it should have been rejected.  Therefore,
 970	 * adopt a trick from the sctp code to attempt to maximize the
 971	 * amount of time until an id is reused, by ensuring they always
 972	 * "increase" (mod INT_MAX):
 973	 */
 974	return stid;
 975out_free:
 976	kmem_cache_free(slab, stid);
 977	return NULL;
 978}
 979
 980/*
 981 * Create a unique stateid_t to represent each COPY.
 982 */
 983static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
 984			      unsigned char cs_type)
 985{
 986	int new_id;
 987
 988	stid->cs_stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
 989	stid->cs_stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
 990
 991	idr_preload(GFP_KERNEL);
 992	spin_lock(&nn->s2s_cp_lock);
 993	new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, stid, 0, 0, GFP_NOWAIT);
 994	stid->cs_stid.si_opaque.so_id = new_id;
 995	stid->cs_stid.si_generation = 1;
 996	spin_unlock(&nn->s2s_cp_lock);
 997	idr_preload_end();
 998	if (new_id < 0)
 999		return 0;
1000	stid->cs_type = cs_type;
1001	return 1;
1002}
1003
1004int nfs4_init_copy_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
1005{
1006	return nfs4_init_cp_state(nn, &copy->cp_stateid, NFS4_COPY_STID);
1007}
1008
1009struct nfs4_cpntf_state *nfs4_alloc_init_cpntf_state(struct nfsd_net *nn,
1010						     struct nfs4_stid *p_stid)
1011{
1012	struct nfs4_cpntf_state *cps;
1013
1014	cps = kzalloc(sizeof(struct nfs4_cpntf_state), GFP_KERNEL);
1015	if (!cps)
1016		return NULL;
1017	cps->cpntf_time = ktime_get_boottime_seconds();
1018	refcount_set(&cps->cp_stateid.cs_count, 1);
1019	if (!nfs4_init_cp_state(nn, &cps->cp_stateid, NFS4_COPYNOTIFY_STID))
1020		goto out_free;
1021	spin_lock(&nn->s2s_cp_lock);
1022	list_add(&cps->cp_list, &p_stid->sc_cp_list);
1023	spin_unlock(&nn->s2s_cp_lock);
1024	return cps;
1025out_free:
1026	kfree(cps);
1027	return NULL;
1028}
1029
1030void nfs4_free_copy_state(struct nfsd4_copy *copy)
1031{
1032	struct nfsd_net *nn;
1033
1034	if (copy->cp_stateid.cs_type != NFS4_COPY_STID)
1035		return;
1036	nn = net_generic(copy->cp_clp->net, nfsd_net_id);
1037	spin_lock(&nn->s2s_cp_lock);
1038	idr_remove(&nn->s2s_cp_stateids,
1039		   copy->cp_stateid.cs_stid.si_opaque.so_id);
1040	spin_unlock(&nn->s2s_cp_lock);
1041}
1042
1043static void nfs4_free_cpntf_statelist(struct net *net, struct nfs4_stid *stid)
1044{
1045	struct nfs4_cpntf_state *cps;
1046	struct nfsd_net *nn;
1047
1048	nn = net_generic(net, nfsd_net_id);
1049	spin_lock(&nn->s2s_cp_lock);
1050	while (!list_empty(&stid->sc_cp_list)) {
1051		cps = list_first_entry(&stid->sc_cp_list,
1052				       struct nfs4_cpntf_state, cp_list);
1053		_free_cpntf_state_locked(nn, cps);
1054	}
1055	spin_unlock(&nn->s2s_cp_lock);
1056}
1057
1058static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
1059{
1060	struct nfs4_stid *stid;
1061
1062	stid = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_ol_stateid);
1063	if (!stid)
1064		return NULL;
1065
1066	return openlockstateid(stid);
1067}
1068
1069static void nfs4_free_deleg(struct nfs4_stid *stid)
1070{
1071	struct nfs4_delegation *dp = delegstateid(stid);
1072
1073	WARN_ON_ONCE(!list_empty(&stid->sc_cp_list));
1074	WARN_ON_ONCE(!list_empty(&dp->dl_perfile));
1075	WARN_ON_ONCE(!list_empty(&dp->dl_perclnt));
1076	WARN_ON_ONCE(!list_empty(&dp->dl_recall_lru));
1077	kmem_cache_free(deleg_slab, stid);
1078	atomic_long_dec(&num_delegations);
1079}
1080
1081/*
1082 * When we recall a delegation, we should be careful not to hand it
1083 * out again straight away.
1084 * To ensure this we keep a pair of bloom filters ('new' and 'old')
1085 * in which the filehandles of recalled delegations are "stored".
1086 * If a filehandle appears in either filter, a delegation is blocked.
1087 * When a delegation is recalled, the filehandle is stored in the "new"
1088 * filter.
1089 * Every 30 seconds we swap the filters and clear the "new" one,
1090 * unless both are empty of course.
1091 *
1092 * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
1093 * low 3 bytes as hash-table indices.
1094 *
1095 * 'blocked_delegations_lock', which is always taken in block_delegations(),
1096 * is used to manage concurrent access.  Testing does not need the lock
1097 * except when swapping the two filters.
1098 */
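/*
 * Illustrative sketch of the indexing scheme (the hash value below is
 * made up):
 *
 *	hash = jhash(&fh->fh_raw, fh->fh_size, 0);         e.g. 0x00a1b2c3
 *	__set_bit(hash & 255, bd->set[bd->new]);           bit 0xc3
 *	__set_bit((hash >> 8) & 255, bd->set[bd->new]);    bit 0xb2
 *	__set_bit((hash >> 16) & 255, bd->set[bd->new]);   bit 0xa1
 *
 * delegation_blocked() reports a hit only when all three bits are set
 * in the same filter, so false positives are possible but false
 * negatives are not (until the old filter ages out).
 */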
1099static DEFINE_SPINLOCK(blocked_delegations_lock);
1100static struct bloom_pair {
1101	int	entries, old_entries;
1102	time64_t swap_time;
1103	int	new; /* index into 'set' */
1104	DECLARE_BITMAP(set[2], 256);
1105} blocked_delegations;
1106
1107static int delegation_blocked(struct knfsd_fh *fh)
1108{
1109	u32 hash;
1110	struct bloom_pair *bd = &blocked_delegations;
1111
1112	if (bd->entries == 0)
1113		return 0;
1114	if (ktime_get_seconds() - bd->swap_time > 30) {
1115		spin_lock(&blocked_delegations_lock);
1116		if (ktime_get_seconds() - bd->swap_time > 30) {
1117			bd->entries -= bd->old_entries;
1118			bd->old_entries = bd->entries;
1119			memset(bd->set[bd->new], 0,
1120			       sizeof(bd->set[0]));
1121			bd->new = 1-bd->new;
1122			bd->swap_time = ktime_get_seconds();
1123		}
1124		spin_unlock(&blocked_delegations_lock);
1125	}
1126	hash = jhash(&fh->fh_raw, fh->fh_size, 0);
1127	if (test_bit(hash&255, bd->set[0]) &&
1128	    test_bit((hash>>8)&255, bd->set[0]) &&
1129	    test_bit((hash>>16)&255, bd->set[0]))
1130		return 1;
1131
1132	if (test_bit(hash&255, bd->set[1]) &&
1133	    test_bit((hash>>8)&255, bd->set[1]) &&
1134	    test_bit((hash>>16)&255, bd->set[1]))
1135		return 1;
1136
1137	return 0;
1138}
1139
1140static void block_delegations(struct knfsd_fh *fh)
1141{
1142	u32 hash;
1143	struct bloom_pair *bd = &blocked_delegations;
1144
1145	hash = jhash(&fh->fh_raw, fh->fh_size, 0);
1146
1147	spin_lock(&blocked_delegations_lock);
1148	__set_bit(hash&255, bd->set[bd->new]);
1149	__set_bit((hash>>8)&255, bd->set[bd->new]);
1150	__set_bit((hash>>16)&255, bd->set[bd->new]);
1151	if (bd->entries == 0)
1152		bd->swap_time = ktime_get_seconds();
1153	bd->entries += 1;
1154	spin_unlock(&blocked_delegations_lock);
1155}
1156
1157static struct nfs4_delegation *
1158alloc_init_deleg(struct nfs4_client *clp, struct nfs4_file *fp,
1159		 struct nfs4_clnt_odstate *odstate, u32 dl_type)
1160{
1161	struct nfs4_delegation *dp;
1162	struct nfs4_stid *stid;
1163	long n;
1164
1165	dprintk("NFSD alloc_init_deleg\n");
1166	n = atomic_long_inc_return(&num_delegations);
1167	if (n < 0 || n > max_delegations)
1168		goto out_dec;
1169	if (delegation_blocked(&fp->fi_fhandle))
1170		goto out_dec;
1171	stid = nfs4_alloc_stid(clp, deleg_slab, nfs4_free_deleg);
1172	if (stid == NULL)
1173		goto out_dec;
1174	dp = delegstateid(stid);
1175
1176	/*
1177	 * delegation seqids are never incremented.  The 4.1 special
1178	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
1179	 * 0 anyway just for consistency and use 1:
1180	 */
1181	dp->dl_stid.sc_stateid.si_generation = 1;
1182	INIT_LIST_HEAD(&dp->dl_perfile);
1183	INIT_LIST_HEAD(&dp->dl_perclnt);
1184	INIT_LIST_HEAD(&dp->dl_recall_lru);
1185	dp->dl_clnt_odstate = odstate;
1186	get_clnt_odstate(odstate);
1187	dp->dl_type = dl_type;
1188	dp->dl_retries = 1;
1189	dp->dl_recalled = false;
1190	nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
1191		      &nfsd4_cb_recall_ops, NFSPROC4_CLNT_CB_RECALL);
1192	get_nfs4_file(fp);
1193	dp->dl_stid.sc_file = fp;
1194	return dp;
1195out_dec:
1196	atomic_long_dec(&num_delegations);
1197	return NULL;
1198}
1199
1200void
1201nfs4_put_stid(struct nfs4_stid *s)
1202{
1203	struct nfs4_file *fp = s->sc_file;
1204	struct nfs4_client *clp = s->sc_client;
1205
1206	might_lock(&clp->cl_lock);
1207
1208	if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
1209		wake_up_all(&close_wq);
1210		return;
1211	}
1212	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1213	nfs4_free_cpntf_statelist(clp->net, s);
1214	spin_unlock(&clp->cl_lock);
1215	s->sc_free(s);
1216	if (fp)
1217		put_nfs4_file(fp);
1218}
1219
1220void
1221nfs4_inc_and_copy_stateid(stateid_t *dst, struct nfs4_stid *stid)
1222{
1223	stateid_t *src = &stid->sc_stateid;
1224
1225	spin_lock(&stid->sc_lock);
1226	if (unlikely(++src->si_generation == 0))
1227		src->si_generation = 1;
1228	memcpy(dst, src, sizeof(*dst));
1229	spin_unlock(&stid->sc_lock);
1230}
1231
1232static void put_deleg_file(struct nfs4_file *fp)
1233{
1234	struct nfsd_file *nf = NULL;
1235
1236	spin_lock(&fp->fi_lock);
1237	if (--fp->fi_delegees == 0)
1238		swap(nf, fp->fi_deleg_file);
1239	spin_unlock(&fp->fi_lock);
1240
1241	if (nf)
1242		nfsd_file_put(nf);
1243}
1244
1245static void nfs4_unlock_deleg_lease(struct nfs4_delegation *dp)
1246{
1247	struct nfs4_file *fp = dp->dl_stid.sc_file;
1248	struct nfsd_file *nf = fp->fi_deleg_file;
1249
1250	WARN_ON_ONCE(!fp->fi_delegees);
1251
1252	vfs_setlease(nf->nf_file, F_UNLCK, NULL, (void **)&dp);
1253	put_deleg_file(fp);
1254}
1255
1256static void destroy_unhashed_deleg(struct nfs4_delegation *dp)
1257{
1258	put_clnt_odstate(dp->dl_clnt_odstate);
1259	nfs4_unlock_deleg_lease(dp);
1260	nfs4_put_stid(&dp->dl_stid);
1261}
1262
1263void nfs4_unhash_stid(struct nfs4_stid *s)
1264{
1265	s->sc_type = 0;
1266}
1267
1268/**
1269 * nfs4_delegation_exists - Discover if this delegation already exists
1270 * @clp:     a pointer to the nfs4_client we're granting a delegation to
1271 * @fp:      a pointer to the nfs4_file we're granting a delegation on
1272 *
1273 * Return:
1274 *      On success: true iff an existing delegation is found
1275 */
1276
1277static bool
1278nfs4_delegation_exists(struct nfs4_client *clp, struct nfs4_file *fp)
1279{
1280	struct nfs4_delegation *searchdp = NULL;
1281	struct nfs4_client *searchclp = NULL;
1282
1283	lockdep_assert_held(&state_lock);
1284	lockdep_assert_held(&fp->fi_lock);
1285
1286	list_for_each_entry(searchdp, &fp->fi_delegations, dl_perfile) {
1287		searchclp = searchdp->dl_stid.sc_client;
1288		if (clp == searchclp) {
1289			return true;
1290		}
1291	}
1292	return false;
1293}
1294
1295/**
1296 * hash_delegation_locked - Add a delegation to the appropriate lists
1297 * @dp:     a pointer to the nfs4_delegation we are adding.
1298 * @fp:     a pointer to the nfs4_file we're granting a delegation on
1299 *
1300 * Return:
1301 *      On success: NULL if the delegation was successfully hashed.
1302 *
1303 *      On error: -EAGAIN if one was previously granted to this
1304 *                 nfs4_client for this nfs4_file. Delegation is not hashed.
1305 *
1306 */
1307
1308static int
1309hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
1310{
1311	struct nfs4_client *clp = dp->dl_stid.sc_client;
1312
1313	lockdep_assert_held(&state_lock);
1314	lockdep_assert_held(&fp->fi_lock);
1315
1316	if (nfs4_delegation_exists(clp, fp))
1317		return -EAGAIN;
1318	refcount_inc(&dp->dl_stid.sc_count);
1319	dp->dl_stid.sc_type = NFS4_DELEG_STID;
1320	list_add(&dp->dl_perfile, &fp->fi_delegations);
1321	list_add(&dp->dl_perclnt, &clp->cl_delegations);
1322	return 0;
1323}
1324
1325static bool delegation_hashed(struct nfs4_delegation *dp)
1326{
1327	return !(list_empty(&dp->dl_perfile));
1328}
1329
1330static bool
1331unhash_delegation_locked(struct nfs4_delegation *dp)
1332{
1333	struct nfs4_file *fp = dp->dl_stid.sc_file;
1334
1335	lockdep_assert_held(&state_lock);
1336
1337	if (!delegation_hashed(dp))
1338		return false;
1339
1340	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
1341	/* Ensure that deleg break won't try to requeue it */
1342	++dp->dl_time;
1343	spin_lock(&fp->fi_lock);
1344	list_del_init(&dp->dl_perclnt);
1345	list_del_init(&dp->dl_recall_lru);
1346	list_del_init(&dp->dl_perfile);
1347	spin_unlock(&fp->fi_lock);
1348	return true;
1349}
1350
1351static void destroy_delegation(struct nfs4_delegation *dp)
1352{
1353	bool unhashed;
1354
1355	spin_lock(&state_lock);
1356	unhashed = unhash_delegation_locked(dp);
1357	spin_unlock(&state_lock);
1358	if (unhashed)
1359		destroy_unhashed_deleg(dp);
1360}
1361
1362static void revoke_delegation(struct nfs4_delegation *dp)
1363{
1364	struct nfs4_client *clp = dp->dl_stid.sc_client;
1365
1366	WARN_ON(!list_empty(&dp->dl_recall_lru));
1367
1368	trace_nfsd_stid_revoke(&dp->dl_stid);
1369
1370	if (clp->cl_minorversion) {
1371		spin_lock(&clp->cl_lock);
1372		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
1373		refcount_inc(&dp->dl_stid.sc_count);
1374		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
1375		spin_unlock(&clp->cl_lock);
1376	}
1377	destroy_unhashed_deleg(dp);
1378}
1379
1380/* 
1381 * SETCLIENTID state 
1382 */
1383
1384static unsigned int clientid_hashval(u32 id)
1385{
1386	return id & CLIENT_HASH_MASK;
1387}
1388
1389static unsigned int clientstr_hashval(struct xdr_netobj name)
1390{
1391	return opaque_hashval(name.data, 8) & CLIENT_HASH_MASK;
1392}
1393
1394/*
1395 * A stateid that had a deny mode associated with it is being released
1396 * or downgraded. Recalculate the deny mode on the file.
1397 */
1398static void
1399recalculate_deny_mode(struct nfs4_file *fp)
1400{
1401	struct nfs4_ol_stateid *stp;
1402
1403	spin_lock(&fp->fi_lock);
1404	fp->fi_share_deny = 0;
1405	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
1406		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
1407	spin_unlock(&fp->fi_lock);
1408}
1409
1410static void
1411reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
1412{
1413	int i;
1414	bool change = false;
1415
1416	for (i = 1; i < 4; i++) {
1417		if ((i & deny) != i) {
1418			change = true;
1419			clear_deny(i, stp);
1420		}
1421	}
1422
1423	/* Recalculate per-file deny mode if there was a change */
1424	if (change)
1425		recalculate_deny_mode(stp->st_stid.sc_file);
1426}
1427
1428/* release all access and file references for a given stateid */
1429static void
1430release_all_access(struct nfs4_ol_stateid *stp)
1431{
1432	int i;
1433	struct nfs4_file *fp = stp->st_stid.sc_file;
1434
1435	if (fp && stp->st_deny_bmap != 0)
1436		recalculate_deny_mode(fp);
1437
1438	for (i = 1; i < 4; i++) {
1439		if (test_access(i, stp))
1440			nfs4_file_put_access(stp->st_stid.sc_file, i);
1441		clear_access(i, stp);
1442	}
1443}
1444
1445static inline void nfs4_free_stateowner(struct nfs4_stateowner *sop)
1446{
1447	kfree(sop->so_owner.data);
1448	sop->so_ops->so_free(sop);
1449}
1450
1451static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
1452{
1453	struct nfs4_client *clp = sop->so_client;
1454
1455	might_lock(&clp->cl_lock);
1456
1457	if (!atomic_dec_and_lock(&sop->so_count, &clp->cl_lock))
1458		return;
1459	sop->so_ops->so_unhash(sop);
1460	spin_unlock(&clp->cl_lock);
1461	nfs4_free_stateowner(sop);
1462}
1463
1464static bool
1465nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
1466{
1467	return list_empty(&stp->st_perfile);
1468}
1469
1470static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
1471{
1472	struct nfs4_file *fp = stp->st_stid.sc_file;
1473
1474	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);
1475
1476	if (list_empty(&stp->st_perfile))
1477		return false;
1478
1479	spin_lock(&fp->fi_lock);
1480	list_del_init(&stp->st_perfile);
1481	spin_unlock(&fp->fi_lock);
1482	list_del(&stp->st_perstateowner);
1483	return true;
1484}
1485
1486static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
1487{
1488	struct nfs4_ol_stateid *stp = openlockstateid(stid);
1489
1490	put_clnt_odstate(stp->st_clnt_odstate);
1491	release_all_access(stp);
1492	if (stp->st_stateowner)
1493		nfs4_put_stateowner(stp->st_stateowner);
1494	WARN_ON(!list_empty(&stid->sc_cp_list));
1495	kmem_cache_free(stateid_slab, stid);
1496}
1497
1498static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
1499{
1500	struct nfs4_ol_stateid *stp = openlockstateid(stid);
1501	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
1502	struct nfsd_file *nf;
1503
1504	nf = find_any_file(stp->st_stid.sc_file);
1505	if (nf) {
1506		get_file(nf->nf_file);
1507		filp_close(nf->nf_file, (fl_owner_t)lo);
1508		nfsd_file_put(nf);
1509	}
1510	nfs4_free_ol_stateid(stid);
1511}
1512
1513/*
1514 * Put the persistent reference to an already unhashed generic stateid, while
1515 * holding the cl_lock. If it's the last reference, then put it onto the
1516 * reaplist for later destruction.
1517 */
1518static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp,
1519				       struct list_head *reaplist)
1520{
1521	struct nfs4_stid *s = &stp->st_stid;
1522	struct nfs4_client *clp = s->sc_client;
1523
1524	lockdep_assert_held(&clp->cl_lock);
1525
1526	WARN_ON_ONCE(!list_empty(&stp->st_locks));
1527
1528	if (!refcount_dec_and_test(&s->sc_count)) {
1529		wake_up_all(&close_wq);
1530		return;
1531	}
1532
1533	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
1534	list_add(&stp->st_locks, reaplist);
1535}
1536
1537static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
1538{
1539	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1540
1541	if (!unhash_ol_stateid(stp))
1542		return false;
1543	list_del_init(&stp->st_locks);
1544	nfs4_unhash_stid(&stp->st_stid);
1545	return true;
1546}
1547
1548static void release_lock_stateid(struct nfs4_ol_stateid *stp)
1549{
1550	struct nfs4_client *clp = stp->st_stid.sc_client;
1551	bool unhashed;
1552
1553	spin_lock(&clp->cl_lock);
1554	unhashed = unhash_lock_stateid(stp);
1555	spin_unlock(&clp->cl_lock);
1556	if (unhashed)
1557		nfs4_put_stid(&stp->st_stid);
1558}
1559
1560static void unhash_lockowner_locked(struct nfs4_lockowner *lo)
1561{
1562	struct nfs4_client *clp = lo->lo_owner.so_client;
1563
1564	lockdep_assert_held(&clp->cl_lock);
1565
1566	list_del_init(&lo->lo_owner.so_strhash);
1567}
1568
1569/*
1570 * Free a list of generic stateids that were collected earlier after being
1571 * fully unhashed.
1572 */
1573static void
1574free_ol_stateid_reaplist(struct list_head *reaplist)
1575{
1576	struct nfs4_ol_stateid *stp;
1577	struct nfs4_file *fp;
1578
1579	might_sleep();
1580
1581	while (!list_empty(reaplist)) {
1582		stp = list_first_entry(reaplist, struct nfs4_ol_stateid,
1583				       st_locks);
1584		list_del(&stp->st_locks);
1585		fp = stp->st_stid.sc_file;
1586		stp->st_stid.sc_free(&stp->st_stid);
1587		if (fp)
1588			put_nfs4_file(fp);
1589	}
1590}
1591
1592static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
1593				       struct list_head *reaplist)
1594{
1595	struct nfs4_ol_stateid *stp;
1596
1597	lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock);
1598
1599	while (!list_empty(&open_stp->st_locks)) {
1600		stp = list_entry(open_stp->st_locks.next,
1601				struct nfs4_ol_stateid, st_locks);
1602		WARN_ON(!unhash_lock_stateid(stp));
1603		put_ol_stateid_locked(stp, reaplist);
1604	}
1605}
1606
1607static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
1608				struct list_head *reaplist)
1609{
1610	lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
1611
1612	if (!unhash_ol_stateid(stp))
1613		return false;
1614	release_open_stateid_locks(stp, reaplist);
1615	return true;
1616}
1617
1618static void release_open_stateid(struct nfs4_ol_stateid *stp)
1619{
1620	LIST_HEAD(reaplist);
1621
1622	spin_lock(&stp->st_stid.sc_client->cl_lock);
1623	if (unhash_open_stateid(stp, &reaplist))
1624		put_ol_stateid_locked(stp, &reaplist);
1625	spin_unlock(&stp->st_stid.sc_client->cl_lock);
1626	free_ol_stateid_reaplist(&reaplist);
1627}
1628
1629static void unhash_openowner_locked(struct nfs4_openowner *oo)
1630{
1631	struct nfs4_client *clp = oo->oo_owner.so_client;
1632
1633	lockdep_assert_held(&clp->cl_lock);
1634
1635	list_del_init(&oo->oo_owner.so_strhash);
1636	list_del_init(&oo->oo_perclient);
1637}
1638
1639static void release_last_closed_stateid(struct nfs4_openowner *oo)
1640{
1641	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
1642					  nfsd_net_id);
1643	struct nfs4_ol_stateid *s;
1644
1645	spin_lock(&nn->client_lock);
1646	s = oo->oo_last_closed_stid;
1647	if (s) {
1648		list_del_init(&oo->oo_close_lru);
1649		oo->oo_last_closed_stid = NULL;
1650	}
1651	spin_unlock(&nn->client_lock);
1652	if (s)
1653		nfs4_put_stid(&s->st_stid);
1654}
1655
1656static void release_openowner(struct nfs4_openowner *oo)
1657{
1658	struct nfs4_ol_stateid *stp;
1659	struct nfs4_client *clp = oo->oo_owner.so_client;
1660	struct list_head reaplist;
1661
1662	INIT_LIST_HEAD(&reaplist);
1663
1664	spin_lock(&clp->cl_lock);
1665	unhash_openowner_locked(oo);
1666	while (!list_empty(&oo->oo_owner.so_stateids)) {
1667		stp = list_first_entry(&oo->oo_owner.so_stateids,
1668				struct nfs4_ol_stateid, st_perstateowner);
1669		if (unhash_open_stateid(stp, &reaplist))
1670			put_ol_stateid_locked(stp, &reaplist);
1671	}
1672	spin_unlock(&clp->cl_lock);
1673	free_ol_stateid_reaplist(&reaplist);
1674	release_last_closed_stateid(oo);
1675	nfs4_put_stateowner(&oo->oo_owner);
1676}
1677
1678static inline int
1679hash_sessionid(struct nfs4_sessionid *sessionid)
1680{
1681	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
1682
1683	return sid->sequence % SESSION_HASH_SIZE;
1684}
1685
1686#ifdef CONFIG_SUNRPC_DEBUG
1687static inline void
1688dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1689{
1690	u32 *ptr = (u32 *)(&sessionid->data[0]);
1691	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
1692}
1693#else
1694static inline void
1695dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
1696{
1697}
1698#endif
1699
1700/*
1701 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
1702 * won't be used for replay.
1703 */
1704void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
1705{
1706	struct nfs4_stateowner *so = cstate->replay_owner;
1707
1708	if (nfserr == nfserr_replay_me)
1709		return;
1710
1711	if (!seqid_mutating_err(ntohl(nfserr))) {
1712		nfsd4_cstate_clear_replay(cstate);
1713		return;
1714	}
1715	if (!so)
1716		return;
1717	if (so->so_is_open_owner)
1718		release_last_closed_stateid(openowner(so));
1719	so->so_seqid++;
1720	return;
1721}
1722
1723static void
1724gen_sessionid(struct nfsd4_session *ses)
1725{
1726	struct nfs4_client *clp = ses->se_client;
1727	struct nfsd4_sessionid *sid;
1728
1729	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
1730	sid->clientid = clp->cl_clientid;
1731	sid->sequence = current_sessionid++;
1732	sid->reserved = 0;
1733}
1734
1735/*
1736 * The protocol defines ca_maxresponsesize_cached to include the size of
1737 * the rpc header, but all we need to cache is the data starting after
1738 * the end of the initial SEQUENCE operation--the rest we regenerate
1739 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
1740 * value that is the number of bytes in our cache plus a few additional
1741 * bytes.  In order to stay on the safe side, and not promise more than
1742 * we can cache, those additional bytes must be the minimum possible: 24
1743 * bytes of rpc header (xid through accept state, with AUTH_NULL
1744 * verifier), 12 for the compound header (with zero-length tag), and 44
1745 * for the SEQUENCE op response:
1746 */
1747#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)
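/*
 * Worked example (the sizes are only illustrative): with
 * NFSD_MIN_HDR_SEQ_SZ == 24 + 12 + 44 == 80, a client that negotiates
 * a ca_maxresponsesize_cached of 2128 bytes gets 2128 - 80 == 2048
 * bytes of cacheable reply data per slot in slot_bytes() below, plus
 * the fixed sizeof(struct nfsd4_slot) bookkeeping overhead.
 */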
1748
1749static void
1750free_session_slots(struct nfsd4_session *ses)
1751{
1752	int i;
1753
1754	for (i = 0; i < ses->se_fchannel.maxreqs; i++) {
1755		free_svc_cred(&ses->se_slots[i]->sl_cred);
1756		kfree(ses->se_slots[i]);
1757	}
1758}
1759
1760/*
1761 * We don't actually need to cache the rpc and session headers, so we
1762 * can allocate a little less for each slot:
1763 */
1764static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1765{
1766	u32 size;
1767
1768	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
1769		size = 0;
1770	else
1771		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
1772	return size + sizeof(struct nfsd4_slot);
1773}
1774
1775/*
1776 * XXX: If we run out of reserved DRC memory we could (up to a point)
1777 * re-negotiate active sessions and reduce their slot usage to make
1778 * room for new connections. For now we just fail the create session.
1779 */
1780static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
1781{
1782	u32 slotsize = slot_bytes(ca);
1783	u32 num = ca->maxreqs;
1784	unsigned long avail, total_avail;
1785	unsigned int scale_factor;
1786
1787	spin_lock(&nfsd_drc_lock);
1788	if (nfsd_drc_max_mem > nfsd_drc_mem_used)
1789		total_avail = nfsd_drc_max_mem - nfsd_drc_mem_used;
1790	else
1791		/* We have handed out more space than we chose in
1792		 * set_max_drc() to allow.  That isn't really a
1793		 * problem as long as that doesn't make us think we
1794		 * have lots more due to integer overflow.
1795		 */
1796		total_avail = 0;
1797	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail);
1798	/*
1799	 * Never use more than a fraction of the remaining memory,
1800	 * unless it's the only way to give this client a slot.
1801	 * The chosen fraction is either 1/8 or 1/number of threads,
1802	 * whichever is smaller.  This ensures there are adequate
1803	 * slots to support multiple clients per thread.
1804	 * Give the client one slot even if that would require
1805	 * over-allocation--it is better than failure.
1806	 */
1807	scale_factor = max_t(unsigned int, 8, nn->nfsd_serv->sv_nrthreads);
1808
1809	avail = clamp_t(unsigned long, avail, slotsize,
1810			total_avail/scale_factor);
1811	num = min_t(int, num, avail / slotsize);
1812	num = max_t(int, num, 1);
1813	nfsd_drc_mem_used += num * slotsize;
1814	spin_unlock(&nfsd_drc_lock);
1815
1816	return num;
1817}
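/*
 * Worked example of the calculation above (all numbers invented for
 * illustration, and assuming NFSD_MAX_MEM_PER_SESSION does not cap the
 * figure first): with total_avail = 8 MB, 32 nfsd threads and a
 * slotsize of 2 KB, scale_factor = 32, avail is clamped to at most
 * 8 MB / 32 = 256 KB, and the client is offered min(ca->maxreqs, 128)
 * slots, but never fewer than one.
 */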
1818
1819static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1820{
1821	int slotsize = slot_bytes(ca);
1822
1823	spin_lock(&nfsd_drc_lock);
1824	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1825	spin_unlock(&nfsd_drc_lock);
1826}
1827
1828static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
1829					   struct nfsd4_channel_attrs *battrs)
1830{
1831	int numslots = fattrs->maxreqs;
1832	int slotsize = slot_bytes(fattrs);
1833	struct nfsd4_session *new;
1834	int i;
1835
1836	BUILD_BUG_ON(struct_size(new, se_slots, NFSD_MAX_SLOTS_PER_SESSION)
1837		     > PAGE_SIZE);
1838
1839	new = kzalloc(struct_size(new, se_slots, numslots), GFP_KERNEL);
1840	if (!new)
1841		return NULL;
1842	/* allocate each struct nfsd4_slot and data cache in one piece */
1843	for (i = 0; i < numslots; i++) {
1844		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1845		if (!new->se_slots[i])
1846			goto out_free;
1847	}
1848
1849	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
1850	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
1851
1852	return new;
1853out_free:
1854	while (i--)
1855		kfree(new->se_slots[i]);
1856	kfree(new);
1857	return NULL;
1858}
1859
1860static void free_conn(struct nfsd4_conn *c)
1861{
1862	svc_xprt_put(c->cn_xprt);
1863	kfree(c);
1864}
1865
1866static void nfsd4_conn_lost(struct svc_xpt_user *u)
1867{
1868	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1869	struct nfs4_client *clp = c->cn_session->se_client;
1870
1871	trace_nfsd_cb_lost(clp);
1872
1873	spin_lock(&clp->cl_lock);
1874	if (!list_empty(&c->cn_persession)) {
1875		list_del(&c->cn_persession);
1876		free_conn(c);
1877	}
1878	nfsd4_probe_callback(clp);
1879	spin_unlock(&clp->cl_lock);
1880}
1881
1882static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1883{
1884	struct nfsd4_conn *conn;
1885
1886	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1887	if (!conn)
1888		return NULL;
1889	svc_xprt_get(rqstp->rq_xprt);
1890	conn->cn_xprt = rqstp->rq_xprt;
1891	conn->cn_flags = flags;
1892	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1893	return conn;
1894}
1895
1896static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1897{
1898	conn->cn_session = ses;
1899	list_add(&conn->cn_persession, &ses->se_conns);
1900}
1901
1902static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1903{
1904	struct nfs4_client *clp = ses->se_client;
1905
1906	spin_lock(&clp->cl_lock);
1907	__nfsd4_hash_conn(conn, ses);
1908	spin_unlock(&clp->cl_lock);
1909}
1910
1911static int nfsd4_register_conn(struct nfsd4_conn *conn)
1912{
1913	conn->cn_xpt_user.callback = nfsd4_conn_lost;
1914	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1915}
1916
1917static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
1918{
1919	int ret;
1920
1921	nfsd4_hash_conn(conn, ses);
1922	ret = nfsd4_register_conn(conn);
1923	if (ret)
1924		/* oops; xprt is already down: */
1925		nfsd4_conn_lost(&conn->cn_xpt_user);
1926	/* We may have gained or lost a callback channel: */
1927	nfsd4_probe_callback_sync(ses->se_client);
1928}
1929
1930static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1931{
1932	u32 dir = NFS4_CDFC4_FORE;
1933
1934	if (cses->flags & SESSION4_BACK_CHAN)
1935		dir |= NFS4_CDFC4_BACK;
1936	return alloc_conn(rqstp, dir);
1937}
1938
1939/* must be called under client_lock */
1940static void nfsd4_del_conns(struct nfsd4_session *s)
1941{
1942	struct nfs4_client *clp = s->se_client;
1943	struct nfsd4_conn *c;
1944
1945	spin_lock(&clp->cl_lock);
1946	while (!list_empty(&s->se_conns)) {
1947		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1948		list_del_init(&c->cn_persession);
1949		spin_unlock(&clp->cl_lock);
1950
1951		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1952		free_conn(c);
1953
1954		spin_lock(&clp->cl_lock);
1955	}
1956	spin_unlock(&clp->cl_lock);
1957}
1958
1959static void __free_session(struct nfsd4_session *ses)
1960{
1961	free_session_slots(ses);
1962	kfree(ses);
1963}
1964
1965static void free_session(struct nfsd4_session *ses)
1966{
1967	nfsd4_del_conns(ses);
1968	nfsd4_put_drc_mem(&ses->se_fchannel);
1969	__free_session(ses);
1970}
1971
1972static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1973{
1974	int idx;
1975	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1976
1977	new->se_client = clp;
1978	gen_sessionid(new);
1979
1980	INIT_LIST_HEAD(&new->se_conns);
1981
1982	new->se_cb_seq_nr = 1;
1983	new->se_flags = cses->flags;
1984	new->se_cb_prog = cses->callback_prog;
1985	new->se_cb_sec = cses->cb_sec;
1986	atomic_set(&new->se_ref, 0);
1987	idx = hash_sessionid(&new->se_sessionid);
1988	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1989	spin_lock(&clp->cl_lock);
1990	list_add(&new->se_perclnt, &clp->cl_sessions);
1991	spin_unlock(&clp->cl_lock);
1992
1993	{
1994		struct sockaddr *sa = svc_addr(rqstp);
1995		/*
1996		 * This is a little silly; with sessions there's no real
1997		 * use for the callback address.  Use the peer address
1998		 * as a reasonable default for now, but consider fixing
1999		 * the rpc client not to require an address in the
2000		 * future:
2001		 */
2002		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
2003		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
2004	}
2005}
2006
2007/* caller must hold client_lock */
2008static struct nfsd4_session *
2009__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
2010{
2011	struct nfsd4_session *elem;
2012	int idx;
2013	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2014
2015	lockdep_assert_held(&nn->client_lock);
2016
2017	dump_sessionid(__func__, sessionid);
2018	idx = hash_sessionid(sessionid);
2019	/* Search in the appropriate list */
2020	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
2021		if (!memcmp(elem->se_sessionid.data, sessionid->data,
2022			    NFS4_MAX_SESSIONID_LEN)) {
2023			return elem;
2024		}
2025	}
2026
2027	dprintk("%s: session not found\n", __func__);
2028	return NULL;
2029}
2030
2031static struct nfsd4_session *
2032find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
2033		__be32 *ret)
2034{
2035	struct nfsd4_session *session;
2036	__be32 status = nfserr_badsession;
2037
2038	session = __find_in_sessionid_hashtbl(sessionid, net);
2039	if (!session)
2040		goto out;
2041	status = nfsd4_get_session_locked(session);
2042	if (status)
2043		session = NULL;
2044out:
2045	*ret = status;
2046	return session;
2047}
2048
2049/* caller must hold client_lock */
2050static void
2051unhash_session(struct nfsd4_session *ses)
2052{
2053	struct nfs4_client *clp = ses->se_client;
2054	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2055
2056	lockdep_assert_held(&nn->client_lock);
2057
2058	list_del(&ses->se_hash);
2059	spin_lock(&ses->se_client->cl_lock);
2060	list_del(&ses->se_perclnt);
2061	spin_unlock(&ses->se_client->cl_lock);
2062}
2063
2064/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
2065static int
2066STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
2067{
2068	/*
2069	 * We're assuming the clid was not given out from a boot
2070	 * precisely 2^32 (about 136 years) before this one.  That seems
2071	 * a safe assumption:
2072	 */
2073	if (clid->cl_boot == (u32)nn->boot_time)
2074		return 0;
2075	trace_nfsd_clid_stale(clid);
2076	return 1;
2077}
2078
2079/* 
2080 * XXX Should we use a slab cache ?
2081 * This type of memory management is somewhat inefficient, but we use it
2082 * anyway since SETCLIENTID is not a common operation.
2083 */
2084static struct nfs4_client *alloc_client(struct xdr_netobj name,
2085				struct nfsd_net *nn)
2086{
2087	struct nfs4_client *clp;
2088	int i;
2089
2090	if (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) {
2091		mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
2092		return NULL;
2093	}
2094	clp = kmem_cache_zalloc(client_slab, GFP_KERNEL);
2095	if (clp == NULL)
2096		return NULL;
2097	xdr_netobj_dup(&clp->cl_name, &name, GFP_KERNEL);
2098	if (clp->cl_name.data == NULL)
2099		goto err_no_name;
2100	clp->cl_ownerstr_hashtbl = kmalloc_array(OWNER_HASH_SIZE,
2101						 sizeof(struct list_head),
2102						 GFP_KERNEL);
2103	if (!clp->cl_ownerstr_hashtbl)
2104		goto err_no_hashtbl;
2105	for (i = 0; i < OWNER_HASH_SIZE; i++)
2106		INIT_LIST_HEAD(&clp->cl_ownerstr_hashtbl[i]);
2107	INIT_LIST_HEAD(&clp->cl_sessions);
2108	idr_init(&clp->cl_stateids);
2109	atomic_set(&clp->cl_rpc_users, 0);
2110	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
2111	clp->cl_state = NFSD4_ACTIVE;
2112	atomic_inc(&nn->nfs4_client_count);
2113	atomic_set(&clp->cl_delegs_in_recall, 0);
2114	INIT_LIST_HEAD(&clp->cl_idhash);
2115	INIT_LIST_HEAD(&clp->cl_openowners);
2116	INIT_LIST_HEAD(&clp->cl_delegations);
2117	INIT_LIST_HEAD(&clp->cl_lru);
2118	INIT_LIST_HEAD(&clp->cl_revoked);
2119#ifdef CONFIG_NFSD_PNFS
2120	INIT_LIST_HEAD(&clp->cl_lo_states);
2121#endif
2122	INIT_LIST_HEAD(&clp->async_copies);
2123	spin_lock_init(&clp->async_lock);
2124	spin_lock_init(&clp->cl_lock);
2125	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
2126	return clp;
2127err_no_hashtbl:
2128	kfree(clp->cl_name.data);
2129err_no_name:
2130	kmem_cache_free(client_slab, clp);
2131	return NULL;
2132}
2133
2134static void __free_client(struct kref *k)
2135{
2136	struct nfsdfs_client *c = container_of(k, struct nfsdfs_client, cl_ref);
2137	struct nfs4_client *clp = container_of(c, struct nfs4_client, cl_nfsdfs);
2138
2139	free_svc_cred(&clp->cl_cred);
2140	kfree(clp->cl_ownerstr_hashtbl);
2141	kfree(clp->cl_name.data);
2142	kfree(clp->cl_nii_domain.data);
2143	kfree(clp->cl_nii_name.data);
2144	idr_destroy(&clp->cl_stateids);
2145	kfree(clp->cl_ra);
2146	kmem_cache_free(client_slab, clp);
2147}
2148
2149static void drop_client(struct nfs4_client *clp)
2150{
2151	kref_put(&clp->cl_nfsdfs.cl_ref, __free_client);
2152}
2153
2154static void
2155free_client(struct nfs4_client *clp)
2156{
2157	while (!list_empty(&clp->cl_sessions)) {
2158		struct nfsd4_session *ses;
2159		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
2160				se_perclnt);
2161		list_del(&ses->se_perclnt);
2162		WARN_ON_ONCE(atomic_read(&ses->se_ref));
2163		free_session(ses);
2164	}
2165	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
2166	if (clp->cl_nfsd_dentry) {
2167		nfsd_client_rmdir(clp->cl_nfsd_dentry);
2168		clp->cl_nfsd_dentry = NULL;
2169		wake_up_all(&expiry_wq);
2170	}
2171	drop_client(clp);
2172}
2173
2174/* must be called under the client_lock */
2175static void
2176unhash_client_locked(struct nfs4_client *clp)
2177{
2178	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2179	struct nfsd4_session *ses;
2180
2181	lockdep_assert_held(&nn->client_lock);
2182
2183	/* Mark the client as expired! */
2184	clp->cl_time = 0;
2185	/* Make it invisible */
2186	if (!list_empty(&clp->cl_idhash)) {
2187		list_del_init(&clp->cl_idhash);
2188		if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2189			rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
2190		else
2191			rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
2192	}
2193	list_del_init(&clp->cl_lru);
2194	spin_lock(&clp->cl_lock);
2195	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
2196		list_del_init(&ses->se_hash);
2197	spin_unlock(&clp->cl_lock);
2198}
2199
2200static void
2201unhash_client(struct nfs4_client *clp)
2202{
2203	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2204
2205	spin_lock(&nn->client_lock);
2206	unhash_client_locked(clp);
2207	spin_unlock(&nn->client_lock);
2208}
2209
2210static __be32 mark_client_expired_locked(struct nfs4_client *clp)
2211{
2212	if (atomic_read(&clp->cl_rpc_users))
2213		return nfserr_jukebox;
2214	unhash_client_locked(clp);
2215	return nfs_ok;
2216}
2217
2218static void
2219__destroy_client(struct nfs4_client *clp)
2220{
2221	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2222	int i;
2223	struct nfs4_openowner *oo;
2224	struct nfs4_delegation *dp;
2225	struct list_head reaplist;
2226
2227	INIT_LIST_HEAD(&reaplist);
2228	spin_lock(&state_lock);
2229	while (!list_empty(&clp->cl_delegations)) {
2230		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
2231		WARN_ON(!unhash_delegation_locked(dp));
2232		list_add(&dp->dl_recall_lru, &reaplist);
2233	}
2234	spin_unlock(&state_lock);
2235	while (!list_empty(&reaplist)) {
2236		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
2237		list_del_init(&dp->dl_recall_lru);
2238		destroy_unhashed_deleg(dp);
2239	}
2240	while (!list_empty(&clp->cl_revoked)) {
2241		dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru);
2242		list_del_init(&dp->dl_recall_lru);
2243		nfs4_put_stid(&dp->dl_stid);
2244	}
2245	while (!list_empty(&clp->cl_openowners)) {
2246		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
2247		nfs4_get_stateowner(&oo->oo_owner);
2248		release_openowner(oo);
2249	}
2250	for (i = 0; i < OWNER_HASH_SIZE; i++) {
2251		struct nfs4_stateowner *so, *tmp;
2252
2253		list_for_each_entry_safe(so, tmp, &clp->cl_ownerstr_hashtbl[i],
2254					 so_strhash) {
2255			/* Should be no openowners at this point */
2256			WARN_ON_ONCE(so->so_is_open_owner);
2257			remove_blocked_locks(lockowner(so));
2258		}
2259	}
2260	nfsd4_return_all_client_layouts(clp);
2261	nfsd4_shutdown_copy(clp);
2262	nfsd4_shutdown_callback(clp);
2263	if (clp->cl_cb_conn.cb_xprt)
2264		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
2265	atomic_add_unless(&nn->nfs4_client_count, -1, 0);
2266	nfsd4_dec_courtesy_client_count(nn, clp);
2267	free_client(clp);
2268	wake_up_all(&expiry_wq);
2269}
2270
2271static void
2272destroy_client(struct nfs4_client *clp)
2273{
2274	unhash_client(clp);
2275	__destroy_client(clp);
2276}
2277
2278static void inc_reclaim_complete(struct nfs4_client *clp)
2279{
2280	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2281
2282	if (!nn->track_reclaim_completes)
2283		return;
2284	if (!nfsd4_find_reclaim_client(clp->cl_name, nn))
2285		return;
2286	if (atomic_inc_return(&nn->nr_reclaim_complete) ==
2287			nn->reclaim_str_hashtbl_size) {
2288		printk(KERN_INFO "NFSD: all clients done reclaiming, ending NFSv4 grace period (net %x)\n",
2289				clp->net->ns.inum);
2290		nfsd4_end_grace(nn);
2291	}
2292}
2293
2294static void expire_client(struct nfs4_client *clp)
2295{
2296	unhash_client(clp);
2297	nfsd4_client_record_remove(clp);
2298	__destroy_client(clp);
2299}
2300
2301static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
2302{
2303	memcpy(target->cl_verifier.data, source->data,
2304			sizeof(target->cl_verifier.data));
2305}
2306
2307static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
2308{
2309	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; 
2310	target->cl_clientid.cl_id = source->cl_clientid.cl_id; 
2311}
2312
2313static int copy_cred(struct svc_cred *target, struct svc_cred *source)
2314{
2315	target->cr_principal = kstrdup(source->cr_principal, GFP_KERNEL);
2316	target->cr_raw_principal = kstrdup(source->cr_raw_principal,
2317								GFP_KERNEL);
2318	target->cr_targ_princ = kstrdup(source->cr_targ_princ, GFP_KERNEL);
2319	if ((source->cr_principal && !target->cr_principal) ||
2320	    (source->cr_raw_principal && !target->cr_raw_principal) ||
2321	    (source->cr_targ_princ && !target->cr_targ_princ))
2322		return -ENOMEM;
2323
2324	target->cr_flavor = source->cr_flavor;
2325	target->cr_uid = source->cr_uid;
2326	target->cr_gid = source->cr_gid;
2327	target->cr_group_info = source->cr_group_info;
2328	get_group_info(target->cr_group_info);
2329	target->cr_gss_mech = source->cr_gss_mech;
2330	if (source->cr_gss_mech)
2331		gss_mech_get(source->cr_gss_mech);
2332	return 0;
2333}
2334
2335static int
2336compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
2337{
2338	if (o1->len < o2->len)
2339		return -1;
2340	if (o1->len > o2->len)
2341		return 1;
2342	return memcmp(o1->data, o2->data, o1->len);
2343}
2344
2345static int
2346same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
2347{
2348	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
2349}
2350
2351static int
2352same_clid(clientid_t *cl1, clientid_t *cl2)
2353{
2354	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
2355}
2356
2357static bool groups_equal(struct group_info *g1, struct group_info *g2)
2358{
2359	int i;
2360
2361	if (g1->ngroups != g2->ngroups)
2362		return false;
2363	for (i=0; i<g1->ngroups; i++)
2364		if (!gid_eq(g1->gid[i], g2->gid[i]))
2365			return false;
2366	return true;
2367}
2368
2369/*
2370 * RFC 3530 language requires clid_inuse be returned when the
2371 * "principal" associated with a request differs from that previously
2372 * used.  We use the uid, gids, and gss principal string as our best
2373 * approximation.  We also don't want to allow non-gss use of a client
2374 * established using gss: in theory cr_principal should catch that
2375 * change, but in practice cr_principal can be null even in the gss case
2376 * since gssd doesn't always pass down a principal string.
2377 */
2378static bool is_gss_cred(struct svc_cred *cr)
2379{
2380	/* Is cr_flavor one of the gss "pseudoflavors"?: */
2381	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
2382}
2383
2384
2385static bool
2386same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
2387{
2388	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
2389		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
2390		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
2391		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
2392		return false;
2393	/* XXX: check that cr_targ_princ fields match ? */
2394	if (cr1->cr_principal == cr2->cr_principal)
2395		return true;
2396	if (!cr1->cr_principal || !cr2->cr_principal)
2397		return false;
2398	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
2399}
2400
2401static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
2402{
2403	struct svc_cred *cr = &rqstp->rq_cred;
2404	u32 service;
2405
2406	if (!cr->cr_gss_mech)
2407		return false;
2408	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
2409	return service == RPC_GSS_SVC_INTEGRITY ||
2410	       service == RPC_GSS_SVC_PRIVACY;
2411}
2412
2413bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
2414{
2415	struct svc_cred *cr = &rqstp->rq_cred;
2416
2417	if (!cl->cl_mach_cred)
2418		return true;
2419	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
2420		return false;
2421	if (!svc_rqst_integrity_protected(rqstp))
2422		return false;
2423	if (cl->cl_cred.cr_raw_principal)
2424		return 0 == strcmp(cl->cl_cred.cr_raw_principal,
2425						cr->cr_raw_principal);
2426	if (!cr->cr_principal)
2427		return false;
2428	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
2429}
2430
2431static void gen_confirm(struct nfs4_client *clp, struct nfsd_net *nn)
2432{
2433	__be32 verf[2];
2434
2435	/*
2436	 * This is opaque to client, so no need to byte-swap. Use
2437	 * __force to keep sparse happy
2438	 */
2439	verf[0] = (__force __be32)(u32)ktime_get_real_seconds();
2440	verf[1] = (__force __be32)nn->clverifier_counter++;
2441	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
2442}
2443
2444static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
2445{
2446	clp->cl_clientid.cl_boot = (u32)nn->boot_time;
2447	clp->cl_clientid.cl_id = nn->clientid_counter++;
2448	gen_confirm(clp, nn);
2449}
2450
2451static struct nfs4_stid *
2452find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
2453{
2454	struct nfs4_stid *ret;
2455
2456	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
2457	if (!ret || !ret->sc_type)
2458		return NULL;
2459	return ret;
2460}
2461
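/*
 * Look up a stateid by its opaque id and accept it only if its sc_type is
 * in the given mask.  On success a reference is taken on the returned
 * stid; the caller is responsible for dropping it.
 */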
2462static struct nfs4_stid *
2463find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
2464{
2465	struct nfs4_stid *s;
2466
2467	spin_lock(&cl->cl_lock);
2468	s = find_stateid_locked(cl, t);
2469	if (s != NULL) {
2470		if (typemask & s->sc_type)
2471			refcount_inc(&s->sc_count);
2472		else
2473			s = NULL;
2474	}
2475	spin_unlock(&cl->cl_lock);
2476	return s;
2477}
2478
2479static struct nfs4_client *get_nfsdfs_clp(struct inode *inode)
2480{
2481	struct nfsdfs_client *nc;
2482	nc = get_nfsdfs_client(inode);
2483	if (!nc)
2484		return NULL;
2485	return container_of(nc, struct nfs4_client, cl_nfsdfs);
2486}
2487
2488static void seq_quote_mem(struct seq_file *m, char *data, int len)
2489{
2490	seq_printf(m, "\"");
2491	seq_escape_mem(m, data, len, ESCAPE_HEX | ESCAPE_NAP | ESCAPE_APPEND, "\"\\");
2492	seq_printf(m, "\"");
2493}
2494
2495static const char *cb_state2str(int state)
2496{
2497	switch (state) {
2498	case NFSD4_CB_UP:
2499		return "UP";
2500	case NFSD4_CB_UNKNOWN:
2501		return "UNKNOWN";
2502	case NFSD4_CB_DOWN:
2503		return "DOWN";
2504	case NFSD4_CB_FAULT:
2505		return "FAULT";
2506	}
2507	return "UNDEFINED";
2508}
2509
2510static int client_info_show(struct seq_file *m, void *v)
2511{
2512	struct inode *inode = file_inode(m->file);
2513	struct nfs4_client *clp;
2514	u64 clid;
2515
2516	clp = get_nfsdfs_clp(inode);
2517	if (!clp)
2518		return -ENXIO;
2519	memcpy(&clid, &clp->cl_clientid, sizeof(clid));
2520	seq_printf(m, "clientid: 0x%llx\n", clid);
2521	seq_printf(m, "address: \"%pISpc\"\n", (struct sockaddr *)&clp->cl_addr);
2522
2523	if (clp->cl_state == NFSD4_COURTESY)
2524		seq_puts(m, "status: courtesy\n");
2525	else if (clp->cl_state == NFSD4_EXPIRABLE)
2526		seq_puts(m, "status: expirable\n");
2527	else if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
2528		seq_puts(m, "status: confirmed\n");
2529	else
2530		seq_puts(m, "status: unconfirmed\n");
2531	seq_printf(m, "seconds from last renew: %lld\n",
2532		ktime_get_boottime_seconds() - clp->cl_time);
2533	seq_printf(m, "name: ");
2534	seq_quote_mem(m, clp->cl_name.data, clp->cl_name.len);
2535	seq_printf(m, "\nminor version: %d\n", clp->cl_minorversion);
2536	if (clp->cl_nii_domain.data) {
2537		seq_printf(m, "Implementation domain: ");
2538		seq_quote_mem(m, clp->cl_nii_domain.data,
2539					clp->cl_nii_domain.len);
2540		seq_printf(m, "\nImplementation name: ");
2541		seq_quote_mem(m, clp->cl_nii_name.data, clp->cl_nii_name.len);
2542		seq_printf(m, "\nImplementation time: [%lld, %ld]\n",
2543			clp->cl_nii_time.tv_sec, clp->cl_nii_time.tv_nsec);
2544	}
2545	seq_printf(m, "callback state: %s\n", cb_state2str(clp->cl_cb_state));
2546	seq_printf(m, "callback address: %pISpc\n", &clp->cl_cb_conn.cb_addr);
2547	drop_client(clp);
2548
2549	return 0;
2550}
2551
2552DEFINE_SHOW_ATTRIBUTE(client_info);
2553
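/*
 * seq_file iterator for the per-client "states" file: walk the client's
 * stateid idr under cl_lock, using *pos as the id of the next stateid to
 * visit.
 */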
2554static void *states_start(struct seq_file *s, loff_t *pos)
2555	__acquires(&clp->cl_lock)
2556{
2557	struct nfs4_client *clp = s->private;
2558	unsigned long id = *pos;
2559	void *ret;
2560
2561	spin_lock(&clp->cl_lock);
2562	ret = idr_get_next_ul(&clp->cl_stateids, &id);
2563	*pos = id;
2564	return ret;
2565}
2566
2567static void *states_next(struct seq_file *s, void *v, loff_t *pos)
2568{
2569	struct nfs4_client *clp = s->private;
2570	unsigned long id = *pos;
2571	void *ret;
2572
2574	id++;
2575	ret = idr_get_next_ul(&clp->cl_stateids, &id);
2576	*pos = id;
2577	return ret;
2578}
2579
2580static void states_stop(struct seq_file *s, void *v)
2581	__releases(&clp->cl_lock)
2582{
2583	struct nfs4_client *clp = s->private;
2584
2585	spin_unlock(&clp->cl_lock);
2586}
2587
2588static void nfs4_show_fname(struct seq_file *s, struct nfsd_file *f)
2589{
2590	seq_printf(s, "filename: \"%pD2\"", f->nf_file);
2591}
2592
2593static void nfs4_show_superblock(struct seq_file *s, struct nfsd_file *f)
2594{
2595	struct inode *inode = file_inode(f->nf_file);
2596
2597	seq_printf(s, "superblock: \"%02x:%02x:%ld\"",
2598					MAJOR(inode->i_sb->s_dev),
2599					 MINOR(inode->i_sb->s_dev),
2600					 inode->i_ino);
2601}
2602
2603static void nfs4_show_owner(struct seq_file *s, struct nfs4_stateowner *oo)
2604{
2605	seq_printf(s, "owner: ");
2606	seq_quote_mem(s, oo->so_owner.data, oo->so_owner.len);
2607}
2608
2609static void nfs4_show_stateid(struct seq_file *s, stateid_t *stid)
2610{
2611	seq_printf(s, "0x%.8x", stid->si_generation);
2612	seq_printf(s, "%12phN", &stid->si_opaque);
2613}
2614
2615static int nfs4_show_open(struct seq_file *s, struct nfs4_stid *st)
2616{
2617	struct nfs4_ol_stateid *ols;
2618	struct nfs4_file *nf;
2619	struct nfsd_file *file;
2620	struct nfs4_stateowner *oo;
2621	unsigned int access, deny;
2622
2623	if (st->sc_type != NFS4_OPEN_STID && st->sc_type != NFS4_LOCK_STID)
2624		return 0; /* XXX: or SEQ_SKIP? */
2625	ols = openlockstateid(st);
2626	oo = ols->st_stateowner;
2627	nf = st->sc_file;
2628
2629	spin_lock(&nf->fi_lock);
2630	file = find_any_file_locked(nf);
2631	if (!file)
2632		goto out;
2633
2634	seq_printf(s, "- ");
2635	nfs4_show_stateid(s, &st->sc_stateid);
2636	seq_printf(s, ": { type: open, ");
2637
2638	access = bmap_to_share_mode(ols->st_access_bmap);
2639	deny   = bmap_to_share_mode(ols->st_deny_bmap);
2640
2641	seq_printf(s, "access: %s%s, ",
2642		access & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2643		access & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2644	seq_printf(s, "deny: %s%s, ",
2645		deny & NFS4_SHARE_ACCESS_READ ? "r" : "-",
2646		deny & NFS4_SHARE_ACCESS_WRITE ? "w" : "-");
2647
2648	nfs4_show_superblock(s, file);
2649	seq_printf(s, ", ");
2650	nfs4_show_fname(s, file);
2651	seq_printf(s, ", ");
2652	nfs4_show_owner(s, oo);
2653	seq_printf(s, " }\n");
2654out:
2655	spin_unlock(&nf->fi_lock);
2656	return 0;
2657}
2658
2659static int nfs4_show_lock(struct seq_file *s, struct nfs4_stid *st)
2660{
2661	struct nfs4_ol_stateid *ols;
2662	struct nfs4_file *nf;
2663	struct nfsd_file *file;
2664	struct nfs4_stateowner *oo;
2665
2666	ols = openlockstateid(st);
2667	oo = ols->st_stateowner;
2668	nf = st->sc_file;
2669	spin_lock(&nf->fi_lock);
2670	file = find_any_file_locked(nf);
2671	if (!file)
2672		goto out;
2673
2674	seq_printf(s, "- ");
2675	nfs4_show_stateid(s, &st->sc_stateid);
2676	seq_printf(s, ": { type: lock, ");
2677
2678	/*
2679	 * Note: a lock stateid isn't really the same thing as a lock,
2680	 * it's the locking state held by one owner on a file, and there
2681	 * may be multiple (or no) lock ranges associated with it.
2682	 * (The same is true of open stateids.)
2683	 */
2684
2685	nfs4_show_superblock(s, file);
2686	/* XXX: open stateid? */
2687	seq_printf(s, ", ");
2688	nfs4_show_fname(s, file);
2689	seq_printf(s, ", ");
2690	nfs4_show_owner(s, oo);
2691	seq_printf(s, " }\n");
2692out:
2693	spin_unlock(&nf->fi_lock);
2694	return 0;
2695}
2696
2697static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
2698{
2699	struct nfs4_delegation *ds;
2700	struct nfs4_file *nf;
2701	struct nfsd_file *file;
2702
2703	ds = delegstateid(st);
2704	nf = st->sc_file;
2705	spin_lock(&nf->fi_lock);
2706	file = nf->fi_deleg_file;
2707	if (!file)
2708		goto out;
2709
2710	seq_printf(s, "- ");
2711	nfs4_show_stateid(s, &st->sc_stateid);
2712	seq_printf(s, ": { type: deleg, ");
2713
2714	/* Kinda dead code as long as we only support read delegs: */
2715	seq_printf(s, "access: %s, ",
2716		ds->dl_type == NFS4_OPEN_DELEGATE_READ ? "r" : "w");
2717
2718	/* XXX: lease time, whether it's being recalled. */
2719
2720	nfs4_show_superblock(s, file);
2721	seq_printf(s, ", ");
2722	nfs4_show_fname(s, file);
2723	seq_printf(s, " }\n");
2724out:
2725	spin_unlock(&nf->fi_lock);
2726	return 0;
2727}
2728
2729static int nfs4_show_layout(struct seq_file *s, struct nfs4_stid *st)
2730{
2731	struct nfs4_layout_stateid *ls;
2732	struct nfsd_file *file;
2733
2734	ls = container_of(st, struct nfs4_layout_stateid, ls_stid);
2735	file = ls->ls_file;
2736
2737	seq_printf(s, "- ");
2738	nfs4_show_stateid(s, &st->sc_stateid);
2739	seq_printf(s, ": { type: layout, ");
2740
2741	/* XXX: What else would be useful? */
2742
2743	nfs4_show_superblock(s, file);
2744	seq_printf(s, ", ");
2745	nfs4_show_fname(s, file);
2746	seq_printf(s, " }\n");
2747
2748	return 0;
2749}
2750
2751static int states_show(struct seq_file *s, void *v)
2752{
2753	struct nfs4_stid *st = v;
2754
2755	switch (st->sc_type) {
2756	case NFS4_OPEN_STID:
2757		return nfs4_show_open(s, st);
2758	case NFS4_LOCK_STID:
2759		return nfs4_show_lock(s, st);
2760	case NFS4_DELEG_STID:
2761		return nfs4_show_deleg(s, st);
2762	case NFS4_LAYOUT_STID:
2763		return nfs4_show_layout(s, st);
2764	default:
2765		return 0; /* XXX: or SEQ_SKIP? */
2766	}
2767	/* XXX: copy stateids? */
2768}
2769
2770static struct seq_operations states_seq_ops = {
2771	.start = states_start,
2772	.next = states_next,
2773	.stop = states_stop,
2774	.show = states_show
2775};
2776
2777static int client_states_open(struct inode *inode, struct file *file)
2778{
2779	struct seq_file *s;
2780	struct nfs4_client *clp;
2781	int ret;
2782
2783	clp = get_nfsdfs_clp(inode);
2784	if (!clp)
2785		return -ENXIO;
2786
2787	ret = seq_open(file, &states_seq_ops);
2788	if (ret)
2789		return ret;
2790	s = file->private_data;
2791	s->private = clp;
2792	return 0;
2793}
2794
2795static int client_opens_release(struct inode *inode, struct file *file)
2796{
2797	struct seq_file *m = file->private_data;
2798	struct nfs4_client *clp = m->private;
2799
2800	/* XXX: alternatively, we could get/drop in seq start/stop */
2801	drop_client(clp);
2802	return seq_release(inode, file);
2803}
2804
2805static const struct file_operations client_states_fops = {
2806	.open		= client_states_open,
2807	.read		= seq_read,
2808	.llseek		= seq_lseek,
2809	.release	= client_opens_release,
2810};
2811
2812/*
2813 * Normally we refuse to destroy clients that are in use, but here the
2814 * administrator is telling us to just do it.  We also want to wait
2815 * so the caller has a guarantee that the client's locks are gone by
2816 * the time the write returns:
2817 */
2818static void force_expire_client(struct nfs4_client *clp)
2819{
2820	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2821	bool already_expired;
2822
2823	trace_nfsd_clid_admin_expired(&clp->cl_clientid);
2824
2825	spin_lock(&nn->client_lock);
2826	clp->cl_time = 0;
2827	spin_unlock(&nn->client_lock);
2828
2829	wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
2830	spin_lock(&nn->client_lock);
2831	already_expired = list_empty(&clp->cl_lru);
2832	if (!already_expired)
2833		unhash_client_locked(clp);
2834	spin_unlock(&nn->client_lock);
2835
2836	if (!already_expired)
2837		expire_client(clp);
2838	else
2839		wait_event(expiry_wq, clp->cl_nfsd_dentry == NULL);
2840}
2841
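/*
 * Handle a write to the per-client "ctl" file.  The only supported
 * transaction is the 7-byte string "expire\n", which forcibly expires the
 * client and waits for its state to be torn down.
 */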
2842static ssize_t client_ctl_write(struct file *file, const char __user *buf,
2843				   size_t size, loff_t *pos)
2844{
2845	char *data;
2846	struct nfs4_client *clp;
2847
2848	data = simple_transaction_get(file, buf, size);
2849	if (IS_ERR(data))
2850		return PTR_ERR(data);
2851	if (size != 7 || 0 != memcmp(data, "expire\n", 7))
2852		return -EINVAL;
2853	clp = get_nfsdfs_clp(file_inode(file));
2854	if (!clp)
2855		return -ENXIO;
2856	force_expire_client(clp);
2857	drop_client(clp);
2858	return 7;
2859}
2860
2861static const struct file_operations client_ctl_fops = {
2862	.write		= client_ctl_write,
2863	.release	= simple_transaction_release,
2864};
2865
2866static const struct tree_descr client_files[] = {
2867	[0] = {"info", &client_info_fops, S_IRUSR},
2868	[1] = {"states", &client_states_fops, S_IRUSR},
2869	[2] = {"ctl", &client_ctl_fops, S_IWUSR},
2870	[3] = {""},
2871};
2872
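/*
 * Completion callback for CB_RECALL_ANY: if the client answered with
 * NFS4ERR_DELAY, retry after a short delay; otherwise the call is done.
 */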
2873static int
2874nfsd4_cb_recall_any_done(struct nfsd4_callback *cb,
2875				struct rpc_task *task)
2876{
2877	trace_nfsd_cb_recall_any_done(cb, task);
2878	switch (task->tk_status) {
2879	case -NFS4ERR_DELAY:
2880		rpc_delay(task, 2 * HZ);
2881		return 0;
2882	default:
2883		return 1;
2884	}
2885}
2886
2887static void
2888nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
2889{
2890	struct nfs4_client *clp = cb->cb_clp;
2891	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2892
2893	spin_lock(&nn->client_lock);
2894	clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
2895	put_client_renew_locked(clp);
2896	spin_unlock(&nn->client_lock);
2897}
2898
2899static const struct nfsd4_callback_ops nfsd4_cb_recall_any_ops = {
2900	.done		= nfsd4_cb_recall_any_done,
2901	.release	= nfsd4_cb_recall_any_release,
2902};
2903
2904static struct nfs4_client *create_client(struct xdr_netobj name,
2905		struct svc_rqst *rqstp, nfs4_verifier *verf)
2906{
2907	struct nfs4_client *clp;
2908	struct sockaddr *sa = svc_addr(rqstp);
2909	int ret;
2910	struct net *net = SVC_NET(rqstp);
2911	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2912	struct dentry *dentries[ARRAY_SIZE(client_files)];
2913
2914	clp = alloc_client(name, nn);
2915	if (clp == NULL)
2916		return NULL;
2917
2918	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
2919	if (ret) {
2920		free_client(clp);
2921		return NULL;
2922	}
2923	gen_clid(clp, nn);
2924	kref_init(&clp->cl_nfsdfs.cl_ref);
2925	nfsd4_init_cb(&clp->cl_cb_null, clp, NULL, NFSPROC4_CLNT_CB_NULL);
2926	clp->cl_time = ktime_get_boottime_seconds();
2927	clear_bit(0, &clp->cl_cb_slot_busy);
2928	copy_verf(clp, verf);
2929	memcpy(&clp->cl_addr, sa, sizeof(struct sockaddr_storage));
2930	clp->cl_cb_session = NULL;
2931	clp->net = net;
2932	clp->cl_nfsd_dentry = nfsd_client_mkdir(
2933		nn, &clp->cl_nfsdfs,
2934		clp->cl_clientid.cl_id - nn->clientid_base,
2935		client_files, dentries);
2936	clp->cl_nfsd_info_dentry = dentries[0];
2937	if (!clp->cl_nfsd_dentry) {
2938		free_client(clp);
2939		return NULL;
2940	}
2941	clp->cl_ra = kzalloc(sizeof(*clp->cl_ra), GFP_KERNEL);
2942	if (!clp->cl_ra) {
2943		free_client(clp);
2944		return NULL;
2945	}
2946	clp->cl_ra_time = 0;
2947	nfsd4_init_cb(&clp->cl_ra->ra_cb, clp, &nfsd4_cb_recall_any_ops,
2948			NFSPROC4_CLNT_CB_RECALL_ANY);
2949	return clp;
2950}
2951
2952static void
2953add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
2954{
2955	struct rb_node **new = &(root->rb_node), *parent = NULL;
2956	struct nfs4_client *clp;
2957
2958	while (*new) {
2959		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
2960		parent = *new;
2961
2962		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
2963			new = &((*new)->rb_left);
2964		else
2965			new = &((*new)->rb_right);
2966	}
2967
2968	rb_link_node(&new_clp->cl_namenode, parent, new);
2969	rb_insert_color(&new_clp->cl_namenode, root);
2970}
2971
2972static struct nfs4_client *
2973find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
2974{
2975	int cmp;
2976	struct rb_node *node = root->rb_node;
2977	struct nfs4_client *clp;
2978
2979	while (node) {
2980		clp = rb_entry(node, struct nfs4_client, cl_namenode);
2981		cmp = compare_blob(&clp->cl_name, name);
2982		if (cmp > 0)
2983			node = node->rb_left;
2984		else if (cmp < 0)
2985			node = node->rb_right;
2986		else
2987			return clp;
2988	}
2989	return NULL;
2990}
2991
2992static void
2993add_to_unconfirmed(struct nfs4_client *clp)
2994{
2995	unsigned int idhashval;
2996	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2997
2998	lockdep_assert_held(&nn->client_lock);
2999
3000	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
3001	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
3002	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
3003	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
3004	renew_client_locked(clp);
3005}
3006
3007static void
3008move_to_confirmed(struct nfs4_client *clp)
3009{
3010	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
3011	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
3012
3013	lockdep_assert_held(&nn->client_lock);
3014
3015	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
3016	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
3017	add_clp_to_name_tree(clp, &nn->conf_name_tree);
3018	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
3019	trace_nfsd_clid_confirmed(&clp->cl_clientid);
3020	renew_client_locked(clp);
3021}
3022
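/*
 * Search a clientid hash table for a match.  The "sessions" flag selects
 * between NFSv4.1+ clients (minorversion != 0) and NFSv4.0 clients, so a
 * clientid established by the other protocol variant is not returned.
 * A successful lookup also renews the client's lease.
 */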
3023static struct nfs4_client *
3024find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
3025{
3026	struct nfs4_client *clp;
3027	unsigned int idhashval = clientid_hashval(clid->cl_id);
3028
3029	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
3030		if (same_clid(&clp->cl_clientid, clid)) {
3031			if ((bool)clp->cl_minorversion != sessions)
3032				return NULL;
3033			renew_client_locked(clp);
3034			return clp;
3035		}
3036	}
3037	return NULL;
3038}
3039
3040static struct nfs4_client *
3041find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
3042{
3043	struct list_head *tbl = nn->conf_id_hashtbl;
3044
3045	lockdep_assert_held(&nn->client_lock);
3046	return find_client_in_id_table(tbl, clid, sessions);
3047}
3048
3049static struct nfs4_client *
3050find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
3051{
3052	struct list_head *tbl = nn->unconf_id_hashtbl;
3053
3054	lockdep_assert_held(&nn->client_lock);
3055	return find_client_in_id_table(tbl, clid, sessions);
3056}
3057
3058static bool clp_used_exchangeid(struct nfs4_client *clp)
3059{
3060	return clp->cl_exchange_flags != 0;
3061} 
3062
3063static struct nfs4_client *
3064find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
3065{
3066	lockdep_assert_held(&nn->client_lock);
3067	return find_clp_in_name_tree(name, &nn->conf_name_tree);
3068}
3069
3070static struct nfs4_client *
3071find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
3072{
3073	lockdep_assert_held(&nn->client_lock);
3074	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
3075}
3076
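/*
 * Record the callback channel address supplied in SETCLIENTID.  On any
 * parse failure the recorded address is cleared, leaving the client
 * without a usable backchannel (and therefore without delegations).
 */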
3077static void
3078gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
3079{
3080	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
3081	struct sockaddr	*sa = svc_addr(rqstp);
3082	u32 scopeid = rpc_get_scope_id(sa);
3083	unsigned short expected_family;
3084
3085	/* Currently, we only support tcp and tcp6 for the callback channel */
3086	if (se->se_callback_netid_len == 3 &&
3087	    !memcmp(se->se_callback_netid_val, "tcp", 3))
3088		expected_family = AF_INET;
3089	else if (se->se_callback_netid_len == 4 &&
3090		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
3091		expected_family = AF_INET6;
3092	else
3093		goto out_err;
3094
3095	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
3096					    se->se_callback_addr_len,
3097					    (struct sockaddr *)&conn->cb_addr,
3098					    sizeof(conn->cb_addr));
3099
3100	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
3101		goto out_err;
3102
3103	if (conn->cb_addr.ss_family == AF_INET6)
3104		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
3105
3106	conn->cb_prog = se->se_callback_prog;
3107	conn->cb_ident = se->se_callback_ident;
3108	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
3109	trace_nfsd_cb_args(clp, conn);
3110	return;
3111out_err:
3112	conn->cb_addr.ss_family = AF_UNSPEC;
3113	conn->cb_addrlen = 0;
3114	trace_nfsd_cb_nodelegs(clp);
3115	return;
3116}
3117
3118/*
3119 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
3120 */
3121static void
3122nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
3123{
3124	struct xdr_buf *buf = resp->xdr->buf;
3125	struct nfsd4_slot *slot = resp->cstate.slot;
3126	unsigned int base;
3127
3128	dprintk("--> %s slot %p\n", __func__, slot);
3129
3130	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
3131	slot->sl_opcnt = resp->opcnt;
3132	slot->sl_status = resp->cstate.status;
3133	free_svc_cred(&slot->sl_cred);
3134	copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred);
3135
3136	if (!nfsd4_cache_this(resp)) {
3137		slot->sl_flags &= ~NFSD4_SLOT_CACHED;
 
3138		return;
3139	}
3140	slot->sl_flags |= NFSD4_SLOT_CACHED;
3141
3142	base = resp->cstate.data_offset;
3143	slot->sl_datalen = buf->len - base;
3144	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
3145		WARN(1, "%s: sessions DRC could not cache compound\n",
3146		     __func__);
3147	return;
3148}
3149
3150/*
3151 * Encode the replay sequence operation from the slot values.
3152 * If cachethis is FALSE, encode the uncached-reply error on the next
3153 * operation, which sets resp->p and increments resp->opcnt for
3154 * nfs4svc_encode_compoundres.
3155 *
3156 */
3157static __be32
3158nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
3159			  struct nfsd4_compoundres *resp)
3160{
3161	struct nfsd4_op *op;
3162	struct nfsd4_slot *slot = resp->cstate.slot;
3163
3164	/* Encode the replayed sequence operation */
3165	op = &args->ops[resp->opcnt - 1];
3166	nfsd4_encode_operation(resp, op);
3167
3168	if (slot->sl_flags & NFSD4_SLOT_CACHED)
3169		return op->status;
3170	if (args->opcnt == 1) {
3171		/*
3172		 * The original operation wasn't a solo sequence--we
3173		 * always cache those--so this retry must not match the
3174		 * original:
3175		 */
3176		op->status = nfserr_seq_false_retry;
3177	} else {
3178		op = &args->ops[resp->opcnt++];
3179		op->status = nfserr_retry_uncached_rep;
3180		nfsd4_encode_operation(resp, op);
3181	}
3182	return op->status;
3183}
3184
3185/*
3186 * The sequence operation is not cached because we can use the slot and
3187 * session values.
3188 */
3189static __be32
3190nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
3191			 struct nfsd4_sequence *seq)
3192{
3193	struct nfsd4_slot *slot = resp->cstate.slot;
3194	struct xdr_stream *xdr = resp->xdr;
3195	__be32 *p;
3196	__be32 status;
3197
3198	dprintk("--> %s slot %p\n", __func__, slot);
3199
3200	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
3201	if (status)
3202		return status;
3203
3204	p = xdr_reserve_space(xdr, slot->sl_datalen);
3205	if (!p) {
3206		WARN_ON_ONCE(1);
3207		return nfserr_serverfault;
3208	}
3209	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
3210	xdr_commit_encode(xdr);
3211
3212	resp->opcnt = slot->sl_opcnt;
3213	return slot->sl_status;
3214}
3215
3216/*
3217 * Set the exchange_id flags returned by the server.
3218 */
3219static void
3220nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
3221{
3222#ifdef CONFIG_NFSD_PNFS
3223	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_PNFS_MDS;
3224#else
3225	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
3226#endif
3227
3228	/* Referrals are supported, Migration is not. */
3229	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
3230
3231	/* set the wire flags to return to client. */
3232	clid->flags = new->cl_exchange_flags;
3233}
3234
3235static bool client_has_openowners(struct nfs4_client *clp)
3236{
3237	struct nfs4_openowner *oo;
3238
3239	list_for_each_entry(oo, &clp->cl_openowners, oo_perclient) {
3240		if (!list_empty(&oo->oo_owner.so_stateids))
3241			return true;
3242	}
3243	return false;
3244}
3245
3246static bool client_has_state(struct nfs4_client *clp)
3247{
3248	return client_has_openowners(clp)
3249#ifdef CONFIG_NFSD_PNFS
3250		|| !list_empty(&clp->cl_lo_states)
3251#endif
3252		|| !list_empty(&clp->cl_delegations)
3253		|| !list_empty(&clp->cl_sessions)
3254		|| !list_empty(&clp->async_copies);
3255}
3256
3257static __be32 copy_impl_id(struct nfs4_client *clp,
3258				struct nfsd4_exchange_id *exid)
3259{
3260	if (!exid->nii_domain.data)
3261		return 0;
3262	xdr_netobj_dup(&clp->cl_nii_domain, &exid->nii_domain, GFP_KERNEL);
3263	if (!clp->cl_nii_domain.data)
3264		return nfserr_jukebox;
3265	xdr_netobj_dup(&clp->cl_nii_name, &exid->nii_name, GFP_KERNEL);
3266	if (!clp->cl_nii_name.data)
3267		return nfserr_jukebox;
3268	clp->cl_nii_time = exid->nii_time;
3269	return 0;
3270}
3271
3272__be32
3273nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3274		union nfsd4_op_u *u)
3275{
3276	struct nfsd4_exchange_id *exid = &u->exchange_id;
3277	struct nfs4_client *conf, *new;
3278	struct nfs4_client *unconf = NULL;
3279	__be32 status;
3280	char			addr_str[INET6_ADDRSTRLEN];
3281	nfs4_verifier		verf = exid->verifier;
3282	struct sockaddr		*sa = svc_addr(rqstp);
3283	bool	update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
3284	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3285
3286	rpc_ntop(sa, addr_str, sizeof(addr_str));
3287	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
3288		"ip_addr=%s flags %x, spa_how %u\n",
3289		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
3290		addr_str, exid->flags, exid->spa_how);
3291
3292	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
3293		return nfserr_inval;
3294
3295	new = create_client(exid->clname, rqstp, &verf);
3296	if (new == NULL)
3297		return nfserr_jukebox;
3298	status = copy_impl_id(new, exid);
3299	if (status)
3300		goto out_nolock;
3301
3302	switch (exid->spa_how) {
3303	case SP4_MACH_CRED:
3304		exid->spo_must_enforce[0] = 0;
3305		exid->spo_must_enforce[1] = (
3306			1 << (OP_BIND_CONN_TO_SESSION - 32) |
3307			1 << (OP_EXCHANGE_ID - 32) |
3308			1 << (OP_CREATE_SESSION - 32) |
3309			1 << (OP_DESTROY_SESSION - 32) |
3310			1 << (OP_DESTROY_CLIENTID - 32));
3311
3312		exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
3313					1 << (OP_OPEN_DOWNGRADE) |
3314					1 << (OP_LOCKU) |
3315					1 << (OP_DELEGRETURN));
3316
3317		exid->spo_must_allow[1] &= (
3318					1 << (OP_TEST_STATEID - 32) |
3319					1 << (OP_FREE_STATEID - 32));
3320		if (!svc_rqst_integrity_protected(rqstp)) {
3321			status = nfserr_inval;
3322			goto out_nolock;
3323		}
3324		/*
3325		 * Sometimes userspace doesn't give us a principal.
3326		 * Which is a bug, really.  Anyway, we can't enforce
3327		 * MACH_CRED in that case, better to give up now:
3328		 */
3329		if (!new->cl_cred.cr_principal &&
3330					!new->cl_cred.cr_raw_principal) {
3331			status = nfserr_serverfault;
3332			goto out_nolock;
3333		}
3334		new->cl_mach_cred = true;
3335		break;
3336	case SP4_NONE:
3337		break;
3338	default:				/* checked by xdr code */
3339		WARN_ON_ONCE(1);
3340		fallthrough;
3341	case SP4_SSV:
3342		status = nfserr_encr_alg_unsupp;
3343		goto out_nolock;
3344	}
3345
3346	/* Cases below refer to rfc 5661 section 18.35.4: */
3347	spin_lock(&nn->client_lock);
3348	conf = find_confirmed_client_by_name(&exid->clname, nn);
3349	if (conf) {
3350		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
3351		bool verfs_match = same_verf(&verf, &conf->cl_verifier);
3352
3353		if (update) {
3354			if (!clp_used_exchangeid(conf)) { /* buggy client */
3355				status = nfserr_inval;
3356				goto out;
3357			}
3358			if (!nfsd4_mach_creds_match(conf, rqstp)) {
3359				status = nfserr_wrong_cred;
3360				goto out;
3361			}
3362			if (!creds_match) { /* case 9 */
3363				status = nfserr_perm;
3364				goto out;
3365			}
3366			if (!verfs_match) { /* case 8 */
3367				status = nfserr_not_same;
3368				goto out;
3369			}
3370			/* case 6 */
3371			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
3372			trace_nfsd_clid_confirmed_r(conf);
3373			goto out_copy;
3374		}
3375		if (!creds_match) { /* case 3 */
3376			if (client_has_state(conf)) {
3377				status = nfserr_clid_inuse;
3378				trace_nfsd_clid_cred_mismatch(conf, rqstp);
3379				goto out;
3380			}
 
3381			goto out_new;
3382		}
3383		if (verfs_match) { /* case 2 */
3384			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
3385			trace_nfsd_clid_confirmed_r(conf);
3386			goto out_copy;
3387		}
3388		/* case 5, client reboot */
3389		trace_nfsd_clid_verf_mismatch(conf, rqstp, &verf);
3390		conf = NULL;
3391		goto out_new;
3392	}
3393
3394	if (update) { /* case 7 */
3395		status = nfserr_noent;
3396		goto out;
3397	}
3398
3399	unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
3400	if (unconf) /* case 4, possible retry or client restart */
3401		unhash_client_locked(unconf);
3402
3403	/* case 1, new owner ID */
3404	trace_nfsd_clid_fresh(new);
3405
 
3406out_new:
3407	if (conf) {
3408		status = mark_client_expired_locked(conf);
3409		if (status)
3410			goto out;
3411		trace_nfsd_clid_replaced(&conf->cl_clientid);
3412	}
3413	new->cl_minorversion = cstate->minorversion;
3414	new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
3415	new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
3416
3417	add_to_unconfirmed(new);
3418	swap(new, conf);
3419out_copy:
3420	exid->clientid.cl_boot = conf->cl_clientid.cl_boot;
3421	exid->clientid.cl_id = conf->cl_clientid.cl_id;
3422
3423	exid->seqid = conf->cl_cs_slot.sl_seqid + 1;
3424	nfsd4_set_ex_flags(conf, exid);
3425
3426	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
3427		conf->cl_cs_slot.sl_seqid, conf->cl_exchange_flags);
3428	status = nfs_ok;
3429
3430out:
3431	spin_unlock(&nn->client_lock);
3432out_nolock:
3433	if (new)
3434		expire_client(new);
3435	if (unconf) {
3436		trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
3437		expire_client(unconf);
3438	}
3439	return status;
3440}
3441
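/*
 * Compare an incoming seqid against the slot's recorded seqid:
 *  - slot in use: a matching seqid is a retransmission of a request still
 *    being processed (jukebox), anything else is misordered;
 *  - seqid == slot_seqid + 1: the expected next request;
 *  - seqid == slot_seqid: a replay to be answered from the cache;
 *  - anything else: misordered.
 */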
3442static __be32
3443check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
3444{
3445	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
3446		slot_seqid);
3447
3448	/* The slot is in use, and no response has been sent. */
3449	if (slot_inuse) {
3450		if (seqid == slot_seqid)
3451			return nfserr_jukebox;
3452		else
3453			return nfserr_seq_misordered;
3454	}
3455	/* Note unsigned 32-bit arithmetic handles wraparound: */
3456	if (likely(seqid == slot_seqid + 1))
3457		return nfs_ok;
3458	if (seqid == slot_seqid)
3459		return nfserr_replay_cache;
3460	return nfserr_seq_misordered;
3461}
3462
3463/*
3464 * Cache the create session result into the create session single DRC
3465 * slot cache by saving the xdr structure. sl_seqid has been set.
3466 * Do this for solo or embedded create session operations.
3467 */
3468static void
3469nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
3470			   struct nfsd4_clid_slot *slot, __be32 nfserr)
3471{
3472	slot->sl_status = nfserr;
3473	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
3474}
3475
3476static __be32
3477nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
3478			    struct nfsd4_clid_slot *slot)
3479{
3480	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
3481	return slot->sl_status;
3482}
3483
3484#define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
3485			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
3486			1 +	/* minimal tag is empty, so just the length word */ \
3487			3 +	/* version, opcount, opcode */ \
3488			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3489				/* seqid, slotID, slotID, cache */ \
3490			4 ) * sizeof(__be32))
3491
3492#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
3493			2 +	/* verifier: AUTH_NULL, length 0 */\
3494			1 +	/* status */ \
3495			1 +	/* minimal tag is empty, so just the length word */ \
3496			3 +	/* opcount, opcode, opstatus*/ \
3497			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
3498				/* seqid, slotID, slotID, slotID, status */ \
3499			5 ) * sizeof(__be32))
3500
3501static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
3502{
3503	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
3504
3505	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
3506		return nfserr_toosmall;
3507	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
3508		return nfserr_toosmall;
3509	ca->headerpadsz = 0;
3510	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
3511	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
3512	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
3513	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
3514			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
3515	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
3516	/*
3517	 * Note decreasing slot size below client's request may make it
3518	 * difficult for client to function correctly, whereas
3519	 * decreasing the number of slots will (just?) affect
3520	 * performance.  When short on memory we therefore prefer to
3521	 * decrease number of slots instead of their size.  Clients that
3522	 * request larger slots than they need will get poor results:
3523	 * Note that we always allow at least one slot, because our
3524	 * accounting is soft and provides no guarantees either way.
3525	 */
3526	ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
3527
3528	return nfs_ok;
3529}
3530
3531/*
3532 * Server's NFSv4.1 backchannel support is AUTH_SYS-only for now.
3533 * These are based on similar macros in linux/sunrpc/msg_prot.h .
3534 */
3535#define RPC_MAX_HEADER_WITH_AUTH_SYS \
3536	(RPC_CALLHDRSIZE + 2 * (2 + UNX_CALLSLACK))
3537
3538#define RPC_MAX_REPHEADER_WITH_AUTH_SYS \
3539	(RPC_REPHDRSIZE + (2 + NUL_REPLYSLACK))
3540
3541#define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
3542				 RPC_MAX_HEADER_WITH_AUTH_SYS) * sizeof(__be32))
3543#define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
3544				 RPC_MAX_REPHEADER_WITH_AUTH_SYS) * \
3545				 sizeof(__be32))
3546
3547static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
3548{
3549	ca->headerpadsz = 0;
3550
3551	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
3552		return nfserr_toosmall;
3553	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
3554		return nfserr_toosmall;
3555	ca->maxresp_cached = 0;
3556	if (ca->maxops < 2)
3557		return nfserr_toosmall;
3558
3559	return nfs_ok;
3560}
3561
3562static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
3563{
3564	switch (cbs->flavor) {
3565	case RPC_AUTH_NULL:
3566	case RPC_AUTH_UNIX:
3567		return nfs_ok;
3568	default:
3569		/*
3570		 * GSS case: the spec doesn't allow us to return this
3571		 * error.  But it also doesn't allow us not to support
3572		 * GSS.
3573		 * I'd rather this fail hard than return some error the
3574		 * client might think it can already handle:
3575		 */
3576		return nfserr_encr_alg_unsupp;
3577	}
3578}
3579
3580__be32
3581nfsd4_create_session(struct svc_rqst *rqstp,
3582		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
3583{
3584	struct nfsd4_create_session *cr_ses = &u->create_session;
3585	struct sockaddr *sa = svc_addr(rqstp);
3586	struct nfs4_client *conf, *unconf;
3587	struct nfs4_client *old = NULL;
3588	struct nfsd4_session *new;
3589	struct nfsd4_conn *conn;
3590	struct nfsd4_clid_slot *cs_slot = NULL;
 
3591	__be32 status = 0;
3592	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3593
3594	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
3595		return nfserr_inval;
3596	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
3597	if (status)
3598		return status;
3599	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
3600	if (status)
3601		return status;
3602	status = check_backchannel_attrs(&cr_ses->back_channel);
3603	if (status)
3604		goto out_release_drc_mem;
3605	status = nfserr_jukebox;
3606	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
3607	if (!new)
3608		goto out_release_drc_mem;
3609	conn = alloc_conn_from_crses(rqstp, cr_ses);
3610	if (!conn)
3611		goto out_free_session;
3612
3613	spin_lock(&nn->client_lock);
3614	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
3615	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
3616	WARN_ON_ONCE(conf && unconf);
3617
3618	if (conf) {
3619		status = nfserr_wrong_cred;
3620		if (!nfsd4_mach_creds_match(conf, rqstp))
3621			goto out_free_conn;
3622		cs_slot = &conf->cl_cs_slot;
3623		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3624		if (status) {
3625			if (status == nfserr_replay_cache)
3626				status = nfsd4_replay_create_session(cr_ses, cs_slot);
3627			goto out_free_conn;
3628		}
3629	} else if (unconf) {
3630		status = nfserr_clid_inuse;
3631		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
3632		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
3633			trace_nfsd_clid_cred_mismatch(unconf, rqstp);
3634			goto out_free_conn;
3635		}
3636		status = nfserr_wrong_cred;
3637		if (!nfsd4_mach_creds_match(unconf, rqstp))
3638			goto out_free_conn;
3639		cs_slot = &unconf->cl_cs_slot;
3640		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
3641		if (status) {
3642			/* an unconfirmed replay returns misordered */
3643			status = nfserr_seq_misordered;
3644			goto out_free_conn;
3645		}
3646		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
3647		if (old) {
3648			status = mark_client_expired_locked(old);
3649			if (status) {
3650				old = NULL;
3651				goto out_free_conn;
3652			}
3653			trace_nfsd_clid_replaced(&old->cl_clientid);
3654		}
3655		move_to_confirmed(unconf);
3656		conf = unconf;
3657	} else {
3658		status = nfserr_stale_clientid;
3659		goto out_free_conn;
3660	}
3661	status = nfs_ok;
3662	/* Persistent sessions are not supported */
3663	cr_ses->flags &= ~SESSION4_PERSIST;
3664	/* Upshifting from TCP to RDMA is not supported */
3665	cr_ses->flags &= ~SESSION4_RDMA;
3666
3667	init_session(rqstp, new, conf, cr_ses);
3668	nfsd4_get_session_locked(new);
 
3669
3670	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
3671	       NFS4_MAX_SESSIONID_LEN);
3672	cs_slot->sl_seqid++;
3673	cr_ses->seqid = cs_slot->sl_seqid;
3674
3675	/* cache solo and embedded create sessions under the client_lock */
3676	nfsd4_cache_create_session(cr_ses, cs_slot, status);
3677	spin_unlock(&nn->client_lock);
3678	if (conf == unconf)
3679		fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
3680	/* init connection and backchannel */
3681	nfsd4_init_conn(rqstp, conn, new);
3682	nfsd4_put_session(new);
3683	if (old)
3684		expire_client(old);
3685	return status;
3686out_free_conn:
3687	spin_unlock(&nn->client_lock);
3688	free_conn(conn);
3689	if (old)
3690		expire_client(old);
3691out_free_session:
3692	__free_session(new);
3693out_release_drc_mem:
3694	nfsd4_put_drc_mem(&cr_ses->fore_channel);
3695	return status;
3696}
3697
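/*
 * Normalize the BIND_CONN_TO_SESSION direction argument: the *_OR_BOTH
 * variants are collapsed to NFS4_CDFC4_BOTH, and any unknown value is
 * rejected with NFS4ERR_INVAL.
 */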
3698static __be32 nfsd4_map_bcts_dir(u32 *dir)
3699{
3700	switch (*dir) {
3701	case NFS4_CDFC4_FORE:
3702	case NFS4_CDFC4_BACK:
3703		return nfs_ok;
3704	case NFS4_CDFC4_FORE_OR_BOTH:
3705	case NFS4_CDFC4_BACK_OR_BOTH:
3706		*dir = NFS4_CDFC4_BOTH;
3707		return nfs_ok;
3708	}
3709	return nfserr_inval;
3710}
3711
3712__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp,
3713		struct nfsd4_compound_state *cstate,
3714		union nfsd4_op_u *u)
3715{
3716	struct nfsd4_backchannel_ctl *bc = &u->backchannel_ctl;
3717	struct nfsd4_session *session = cstate->session;
3718	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3719	__be32 status;
3720
3721	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
3722	if (status)
3723		return status;
3724	spin_lock(&nn->client_lock);
3725	session->se_cb_prog = bc->bc_cb_program;
3726	session->se_cb_sec = bc->bc_cb_sec;
3727	spin_unlock(&nn->client_lock);
3728
3729	nfsd4_probe_callback(session->se_client);
3730
3731	return nfs_ok;
3732}
3733
3734static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
3735{
3736	struct nfsd4_conn *c;
3737
3738	list_for_each_entry(c, &s->se_conns, cn_persession) {
3739		if (c->cn_xprt == xpt) {
3740			return c;
3741		}
3742	}
3743	return NULL;
3744}
3745
3746static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
3747		struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
3748{
3749	struct nfs4_client *clp = session->se_client;
3750	struct svc_xprt *xpt = rqst->rq_xprt;
3751	struct nfsd4_conn *c;
3752	__be32 status;
3753
3754	/* Following the last paragraph of RFC 5661 Section 18.34.3: */
3755	spin_lock(&clp->cl_lock);
3756	c = __nfsd4_find_conn(xpt, session);
3757	if (!c)
3758		status = nfserr_noent;
3759	else if (req == c->cn_flags)
3760		status = nfs_ok;
3761	else if (req == NFS4_CDFC4_FORE_OR_BOTH &&
3762				c->cn_flags != NFS4_CDFC4_BACK)
3763		status = nfs_ok;
3764	else if (req == NFS4_CDFC4_BACK_OR_BOTH &&
3765				c->cn_flags != NFS4_CDFC4_FORE)
3766		status = nfs_ok;
3767	else
3768		status = nfserr_inval;
3769	spin_unlock(&clp->cl_lock);
3770	if (status == nfs_ok && conn)
3771		*conn = c;
3772	return status;
3773}
3774
3775__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
3776		     struct nfsd4_compound_state *cstate,
3777		     union nfsd4_op_u *u)
3778{
3779	struct nfsd4_bind_conn_to_session *bcts = &u->bind_conn_to_session;
3780	__be32 status;
3781	struct nfsd4_conn *conn;
3782	struct nfsd4_session *session;
3783	struct net *net = SVC_NET(rqstp);
3784	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3785
3786	if (!nfsd4_last_compound_op(rqstp))
3787		return nfserr_not_only_op;
3788	spin_lock(&nn->client_lock);
3789	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
3790	spin_unlock(&nn->client_lock);
3791	if (!session)
3792		goto out_no_session;
3793	status = nfserr_wrong_cred;
3794	if (!nfsd4_mach_creds_match(session->se_client, rqstp))
3795		goto out;
3796	status = nfsd4_match_existing_connection(rqstp, session,
3797			bcts->dir, &conn);
3798	if (status == nfs_ok) {
3799		if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
3800				bcts->dir == NFS4_CDFC4_BACK)
3801			conn->cn_flags |= NFS4_CDFC4_BACK;
3802		nfsd4_probe_callback(session->se_client);
3803		goto out;
3804	}
3805	if (status == nfserr_inval)
3806		goto out;
3807	status = nfsd4_map_bcts_dir(&bcts->dir);
3808	if (status)
3809		goto out;
3810	conn = alloc_conn(rqstp, bcts->dir);
3811	status = nfserr_jukebox;
3812	if (!conn)
3813		goto out;
3814	nfsd4_init_conn(rqstp, conn, session);
3815	status = nfs_ok;
3816out:
3817	nfsd4_put_session(session);
3818out_no_session:
3819	return status;
3820}
3821
3822static bool nfsd4_compound_in_session(struct nfsd4_compound_state *cstate, struct nfs4_sessionid *sid)
3823{
3824	if (!cstate->session)
3825		return false;
3826	return !memcmp(sid, &cstate->session->se_sessionid, sizeof(*sid));
3827}
3828
3829__be32
3830nfsd4_destroy_session(struct svc_rqst *r, struct nfsd4_compound_state *cstate,
3831		union nfsd4_op_u *u)
3832{
3833	struct nfs4_sessionid *sessionid = &u->destroy_session.sessionid;
3834	struct nfsd4_session *ses;
3835	__be32 status;
3836	int ref_held_by_me = 0;
3837	struct net *net = SVC_NET(r);
3838	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3839
3840	status = nfserr_not_only_op;
3841	if (nfsd4_compound_in_session(cstate, sessionid)) {
3842		if (!nfsd4_last_compound_op(r))
3843			goto out;
3844		ref_held_by_me++;
3845	}
3846	dump_sessionid(__func__, sessionid);
3847	spin_lock(&nn->client_lock);
3848	ses = find_in_sessionid_hashtbl(sessionid, net, &status);
3849	if (!ses)
3850		goto out_client_lock;
3851	status = nfserr_wrong_cred;
3852	if (!nfsd4_mach_creds_match(ses->se_client, r))
3853		goto out_put_session;
3854	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
3855	if (status)
3856		goto out_put_session;
3857	unhash_session(ses);
3858	spin_unlock(&nn->client_lock);
3859
 
3860	nfsd4_probe_callback_sync(ses->se_client);
 
3861
3862	spin_lock(&nn->client_lock);
3863	status = nfs_ok;
3864out_put_session:
3865	nfsd4_put_session_locked(ses);
3866out_client_lock:
3867	spin_unlock(&nn->client_lock);
3868out:
 
3869	return status;
3870}
3871
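/*
 * Bind the connection this SEQUENCE arrived on to the session, unless an
 * equivalent connection is already bound (the new one is then freed) or
 * the client demands machine-credential binding, in which case
 * NFS4ERR_CONN_NOT_BOUND_TO_SESSION is returned and the connection must
 * be bound explicitly with BIND_CONN_TO_SESSION.
 */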
3872static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
3873{
3874	struct nfs4_client *clp = ses->se_client;
3875	struct nfsd4_conn *c;
3876	__be32 status = nfs_ok;
3877	int ret;
3878
3879	spin_lock(&clp->cl_lock);
3880	c = __nfsd4_find_conn(new->cn_xprt, ses);
3881	if (c)
3882		goto out_free;
3883	status = nfserr_conn_not_bound_to_session;
3884	if (clp->cl_mach_cred)
3885		goto out_free;
3886	__nfsd4_hash_conn(new, ses);
3887	spin_unlock(&clp->cl_lock);
3888	ret = nfsd4_register_conn(new);
3889	if (ret)
3890		/* oops; xprt is already down: */
3891		nfsd4_conn_lost(&new->cn_xpt_user);
3892	return nfs_ok;
3893out_free:
3894	spin_unlock(&clp->cl_lock);
3895	free_conn(new);
3896	return status;
3897}
3898
3899static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
3900{
3901	struct nfsd4_compoundargs *args = rqstp->rq_argp;
3902
3903	return args->opcnt > session->se_fchannel.maxops;
3904}
3905
3906static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
3907				  struct nfsd4_session *session)
3908{
3909	struct xdr_buf *xb = &rqstp->rq_arg;
3910
3911	return xb->len > session->se_fchannel.maxreq_sz;
3912}
3913
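/*
 * Sanity-check that a request claiming to be a replay plausibly matches
 * the reply cached in the slot: the cachethis flag, the operation count,
 * and the credential must all be consistent with what was cached.
 */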
3914static bool replay_matches_cache(struct svc_rqst *rqstp,
3915		 struct nfsd4_sequence *seq, struct nfsd4_slot *slot)
3916{
3917	struct nfsd4_compoundargs *argp = rqstp->rq_argp;
3918
3919	if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) !=
3920	    (bool)seq->cachethis)
3921		return false;
3922	/*
3923	 * If there's an error then the reply can have fewer ops than
3924	 * the call.
3925	 */
3926	if (slot->sl_opcnt < argp->opcnt && !slot->sl_status)
3927		return false;
3928	/*
3929	 * But if we cached a reply with *more* ops than the call you're
3930	 * sending us now, then this new call is clearly not really a
3931	 * replay of the old one:
3932	 */
3933	if (slot->sl_opcnt > argp->opcnt)
3934		return false;
3935	/* This is the only check explicitly called for by the spec: */
3936	if (!same_creds(&rqstp->rq_cred, &slot->sl_cred))
3937		return false;
3938	/*
3939	 * There may be more comparisons we could actually do, but the
3940	 * spec doesn't require us to catch every case where the calls
3941	 * don't match (that would require caching the call as well as
3942	 * the reply), so we don't bother.
3943	 */
3944	return true;
3945}
3946
3947__be32
3948nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3949		union nfsd4_op_u *u)
3950{
3951	struct nfsd4_sequence *seq = &u->sequence;
3952	struct nfsd4_compoundres *resp = rqstp->rq_resp;
3953	struct xdr_stream *xdr = resp->xdr;
3954	struct nfsd4_session *session;
3955	struct nfs4_client *clp;
3956	struct nfsd4_slot *slot;
3957	struct nfsd4_conn *conn;
3958	__be32 status;
3959	int buflen;
3960	struct net *net = SVC_NET(rqstp);
3961	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3962
3963	if (resp->opcnt != 1)
3964		return nfserr_sequence_pos;
3965
3966	/*
3967	 * Will be either used or freed by nfsd4_sequence_check_conn
3968	 * below.
3969	 */
3970	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
3971	if (!conn)
3972		return nfserr_jukebox;
3973
3974	spin_lock(&nn->client_lock);
3975	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
 
3976	if (!session)
3977		goto out_no_session;
3978	clp = session->se_client;
3979
3980	status = nfserr_too_many_ops;
3981	if (nfsd4_session_too_many_ops(rqstp, session))
3982		goto out_put_session;
3983
3984	status = nfserr_req_too_big;
3985	if (nfsd4_request_too_big(rqstp, session))
3986		goto out_put_session;
3987
3988	status = nfserr_badslot;
3989	if (seq->slotid >= session->se_fchannel.maxreqs)
3990		goto out_put_session;
3991
3992	slot = session->se_slots[seq->slotid];
3993	dprintk("%s: slotid %d\n", __func__, seq->slotid);
3994
3995	/* We do not negotiate the number of slots yet, so set the
3996	 * maxslots to the session maxreqs which is used to encode
3997	 * sr_highest_slotid and the sr_target_slotid to maxslots */
3998	seq->maxslots = session->se_fchannel.maxreqs;
3999
4000	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
4001					slot->sl_flags & NFSD4_SLOT_INUSE);
4002	if (status == nfserr_replay_cache) {
4003		status = nfserr_seq_misordered;
4004		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
4005			goto out_put_session;
4006		status = nfserr_seq_false_retry;
4007		if (!replay_matches_cache(rqstp, seq, slot))
4008			goto out_put_session;
4009		cstate->slot = slot;
4010		cstate->session = session;
4011		cstate->clp = clp;
4012		/* Return the cached reply status and set cstate->status
4013		 * for nfsd4_proc_compound processing */
4014		status = nfsd4_replay_cache_entry(resp, seq);
4015		cstate->status = nfserr_replay_cache;
4016		goto out;
4017	}
4018	if (status)
4019		goto out_put_session;
4020
4021	status = nfsd4_sequence_check_conn(conn, session);
4022	conn = NULL;
4023	if (status)
4024		goto out_put_session;
4025
4026	buflen = (seq->cachethis) ?
4027			session->se_fchannel.maxresp_cached :
4028			session->se_fchannel.maxresp_sz;
4029	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
4030				    nfserr_rep_too_big;
4031	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
4032		goto out_put_session;
4033	svc_reserve(rqstp, buflen);
4034
4035	status = nfs_ok;
4036	/* Success! bump slot seqid */
4037	slot->sl_seqid = seq->seqid;
4038	slot->sl_flags |= NFSD4_SLOT_INUSE;
4039	if (seq->cachethis)
4040		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
4041	else
4042		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
4043
4044	cstate->slot = slot;
4045	cstate->session = session;
4046	cstate->clp = clp;
4047
4048out:
4049	switch (clp->cl_cb_state) {
4050	case NFSD4_CB_DOWN:
4051		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
4052		break;
4053	case NFSD4_CB_FAULT:
4054		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
4055		break;
4056	default:
4057		seq->status_flags = 0;
4058	}
4059	if (!list_empty(&clp->cl_revoked))
4060		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
4061out_no_session:
4062	if (conn)
4063		free_conn(conn);
4064	spin_unlock(&nn->client_lock);
4065	return status;
4066out_put_session:
4067	nfsd4_put_session_locked(session);
4068	goto out_no_session;
4069}
4070
4071void
4072nfsd4_sequence_done(struct nfsd4_compoundres *resp)
4073{
4074	struct nfsd4_compound_state *cs = &resp->cstate;
4075
4076	if (nfsd4_has_session(cs)) {
4077		if (cs->status != nfserr_replay_cache) {
4078			nfsd4_store_cache_entry(resp);
4079			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
4080		}
4081		/* Drop session reference that was taken in nfsd4_sequence() */
4082		nfsd4_put_session(cs->session);
4083	} else if (cs->clp)
4084		put_client_renew(cs->clp);
4085}
4086
4087__be32
4088nfsd4_destroy_clientid(struct svc_rqst *rqstp,
4089		struct nfsd4_compound_state *cstate,
4090		union nfsd4_op_u *u)
4091{
4092	struct nfsd4_destroy_clientid *dc = &u->destroy_clientid;
4093	struct nfs4_client *conf, *unconf;
4094	struct nfs4_client *clp = NULL;
4095	__be32 status = 0;
4096	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4097
4098	spin_lock(&nn->client_lock);
4099	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
4100	conf = find_confirmed_client(&dc->clientid, true, nn);
4101	WARN_ON_ONCE(conf && unconf);
4102
4103	if (conf) {
4104		if (client_has_state(conf)) {
4105			status = nfserr_clientid_busy;
4106			goto out;
4107		}
4108		status = mark_client_expired_locked(conf);
4109		if (status)
4110			goto out;
4111		clp = conf;
4112	} else if (unconf)
4113		clp = unconf;
4114	else {
4115		status = nfserr_stale_clientid;
4116		goto out;
4117	}
4118	if (!nfsd4_mach_creds_match(clp, rqstp)) {
4119		clp = NULL;
4120		status = nfserr_wrong_cred;
4121		goto out;
4122	}
4123	trace_nfsd_clid_destroyed(&clp->cl_clientid);
4124	unhash_client_locked(clp);
4125out:
4126	spin_unlock(&nn->client_lock);
4127	if (clp)
4128		expire_client(clp);
4129	return status;
4130}
4131
4132__be32
4133nfsd4_reclaim_complete(struct svc_rqst *rqstp,
4134		struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
4135{
4136	struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
4137	struct nfs4_client *clp = cstate->clp;
4138	__be32 status = 0;
4139
4140	if (rc->rca_one_fs) {
4141		if (!cstate->current_fh.fh_dentry)
4142			return nfserr_nofilehandle;
4143		/*
4144		 * We don't take advantage of the rca_one_fs case.
4145		 * That's OK, it's optional, we can safely ignore it.
4146		 */
4147		return nfs_ok;
4148	}
4149
 
4150	status = nfserr_complete_already;
4151	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
4152		goto out;
4153
4154	status = nfserr_stale_clientid;
4155	if (is_client_expired(clp))
4156		/*
4157		 * The following error isn't really legal.
4158		 * But we only get here if the client has just explicitly
4159		 * destroyed itself.  Surely it no longer cares what
4160		 * error it gets back on an operation for the dead
4161		 * client.
4162		 */
4163		goto out;
4164
4165	status = nfs_ok;
4166	trace_nfsd_clid_reclaim_complete(&clp->cl_clientid);
4167	nfsd4_client_record_create(clp);
4168	inc_reclaim_complete(clp);
4169out:
 
4170	return status;
4171}
4172
4173__be32
4174nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4175		  union nfsd4_op_u *u)
4176{
4177	struct nfsd4_setclientid *setclid = &u->setclientid;
4178	struct xdr_netobj 	clname = setclid->se_name;
4179	nfs4_verifier		clverifier = setclid->se_verf;
4180	struct nfs4_client	*conf, *new;
4181	struct nfs4_client	*unconf = NULL;
4182	__be32 			status;
4183	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4184
4185	new = create_client(clname, rqstp, &clverifier);
4186	if (new == NULL)
4187		return nfserr_jukebox;
4188	spin_lock(&nn->client_lock);
4189	conf = find_confirmed_client_by_name(&clname, nn);
4190	if (conf && client_has_state(conf)) {
4191		status = nfserr_clid_inuse;
4192		if (clp_used_exchangeid(conf))
4193			goto out;
4194		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
4195			trace_nfsd_clid_cred_mismatch(conf, rqstp);
4196			goto out;
4197		}
4198	}
4199	unconf = find_unconfirmed_client_by_name(&clname, nn);
4200	if (unconf)
4201		unhash_client_locked(unconf);
4202	if (conf) {
4203		if (same_verf(&conf->cl_verifier, &clverifier)) {
4204			copy_clid(new, conf);
4205			gen_confirm(new, nn);
4206		} else
4207			trace_nfsd_clid_verf_mismatch(conf, rqstp,
4208						      &clverifier);
4209	} else
4210		trace_nfsd_clid_fresh(new);
4211	new->cl_minorversion = 0;
4212	gen_callback(new, setclid, rqstp);
4213	add_to_unconfirmed(new);
4214	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
4215	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
4216	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
4217	new = NULL;
4218	status = nfs_ok;
4219out:
4220	spin_unlock(&nn->client_lock);
4221	if (new)
4222		free_client(new);
4223	if (unconf) {
4224		trace_nfsd_clid_expire_unconf(&unconf->cl_clientid);
4225		expire_client(unconf);
4226	}
4227	return status;
4228}
4229
 
4230__be32
4231nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
4232			struct nfsd4_compound_state *cstate,
4233			union nfsd4_op_u *u)
4234{
4235	struct nfsd4_setclientid_confirm *setclientid_confirm =
4236			&u->setclientid_confirm;
4237	struct nfs4_client *conf, *unconf;
4238	struct nfs4_client *old = NULL;
4239	nfs4_verifier confirm = setclientid_confirm->sc_confirm; 
4240	clientid_t * clid = &setclientid_confirm->sc_clientid;
4241	__be32 status;
4242	struct nfsd_net	*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4243
4244	if (STALE_CLIENTID(clid, nn))
4245		return nfserr_stale_clientid;
 
4246
4247	spin_lock(&nn->client_lock);
4248	conf = find_confirmed_client(clid, false, nn);
4249	unconf = find_unconfirmed_client(clid, false, nn);
4250	/*
4251	 * We try hard to give out unique clientids, so if we get an
4252	 * attempt to confirm the same clientid with a different cred,
4253	 * the client may be buggy; this should never happen.
4254	 *
4255	 * Nevertheless, RFC 7530 recommends INUSE for this case:
4256	 */
4257	status = nfserr_clid_inuse;
4258	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
4259		trace_nfsd_clid_cred_mismatch(unconf, rqstp);
4260		goto out;
4261	}
4262	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
4263		trace_nfsd_clid_cred_mismatch(conf, rqstp);
4264		goto out;
4265	}
4266	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
4267		if (conf && same_verf(&confirm, &conf->cl_confirm)) {
4268			status = nfs_ok;
4269		} else
4270			status = nfserr_stale_clientid;
4271		goto out;
4272	}
4273	status = nfs_ok;
4274	if (conf) {
4275		old = unconf;
4276		unhash_client_locked(old);
4277		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
4278	} else {
4279		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
4280		if (old) {
4281			status = nfserr_clid_inuse;
4282			if (client_has_state(old)
4283					&& !same_creds(&unconf->cl_cred,
4284							&old->cl_cred)) {
4285				old = NULL;
4286				goto out;
4287			}
4288			status = mark_client_expired_locked(old);
4289			if (status) {
4290				old = NULL;
4291				goto out;
4292			}
4293			trace_nfsd_clid_replaced(&old->cl_clientid);
4294		}
4295		move_to_confirmed(unconf);
4296		conf = unconf;
4297	}
4298	get_client_locked(conf);
4299	spin_unlock(&nn->client_lock);
4300	if (conf == unconf)
4301		fsnotify_dentry(conf->cl_nfsd_info_dentry, FS_MODIFY);
4302	nfsd4_probe_callback(conf);
4303	spin_lock(&nn->client_lock);
4304	put_client_renew_locked(conf);
4305out:
4306	spin_unlock(&nn->client_lock);
4307	if (old)
4308		expire_client(old);
4309	return status;
4310}
4311
4312static struct nfs4_file *nfsd4_alloc_file(void)
4313{
4314	return kmem_cache_alloc(file_slab, GFP_KERNEL);
4315}
4316
4317/* OPEN Share state helper functions */
4318
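/*
 * Initialize a newly allocated nfs4_file: take the initial reference,
 * record the filehandle and inode it describes, and clear all access,
 * deny, delegation and pNFS layout bookkeeping.
 */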
4319static void nfsd4_file_init(const struct svc_fh *fh, struct nfs4_file *fp)
4320{
4321	refcount_set(&fp->fi_ref, 1);
4322	spin_lock_init(&fp->fi_lock);
4323	INIT_LIST_HEAD(&fp->fi_stateids);
4324	INIT_LIST_HEAD(&fp->fi_delegations);
4325	INIT_LIST_HEAD(&fp->fi_clnt_odstate);
4326	fh_copy_shallow(&fp->fi_fhandle, &fh->fh_handle);
4327	fp->fi_deleg_file = NULL;
4328	fp->fi_had_conflict = false;
4329	fp->fi_share_deny = 0;
4330	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
4331	memset(fp->fi_access, 0, sizeof(fp->fi_access));
4332	fp->fi_aliased = false;
4333	fp->fi_inode = d_inode(fh->fh_dentry);
4334#ifdef CONFIG_NFSD_PNFS
4335	INIT_LIST_HEAD(&fp->fi_lo_states);
4336	atomic_set(&fp->fi_lo_recalls, 0);
4337#endif
4338}
4339
4340void
4341nfsd4_free_slabs(void)
4342{
4343	kmem_cache_destroy(client_slab);
4344	kmem_cache_destroy(openowner_slab);
4345	kmem_cache_destroy(lockowner_slab);
4346	kmem_cache_destroy(file_slab);
4347	kmem_cache_destroy(stateid_slab);
4348	kmem_cache_destroy(deleg_slab);
4349	kmem_cache_destroy(odstate_slab);
4350}
4351
4352int
4353nfsd4_init_slabs(void)
4354{
4355	client_slab = kmem_cache_create("nfsd4_clients",
4356			sizeof(struct nfs4_client), 0, 0, NULL);
4357	if (client_slab == NULL)
4358		goto out;
4359	openowner_slab = kmem_cache_create("nfsd4_openowners",
4360			sizeof(struct nfs4_openowner), 0, 0, NULL);
4361	if (openowner_slab == NULL)
4362		goto out_free_client_slab;
4363	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
4364			sizeof(struct nfs4_lockowner), 0, 0, NULL);
4365	if (lockowner_slab == NULL)
4366		goto out_free_openowner_slab;
4367	file_slab = kmem_cache_create("nfsd4_files",
4368			sizeof(struct nfs4_file), 0, 0, NULL);
4369	if (file_slab == NULL)
4370		goto out_free_lockowner_slab;
4371	stateid_slab = kmem_cache_create("nfsd4_stateids",
4372			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
4373	if (stateid_slab == NULL)
4374		goto out_free_file_slab;
4375	deleg_slab = kmem_cache_create("nfsd4_delegations",
4376			sizeof(struct nfs4_delegation), 0, 0, NULL);
4377	if (deleg_slab == NULL)
4378		goto out_free_stateid_slab;
4379	odstate_slab = kmem_cache_create("nfsd4_odstate",
4380			sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
4381	if (odstate_slab == NULL)
4382		goto out_free_deleg_slab;
4383	return 0;
4384
4385out_free_deleg_slab:
4386	kmem_cache_destroy(deleg_slab);
4387out_free_stateid_slab:
4388	kmem_cache_destroy(stateid_slab);
4389out_free_file_slab:
4390	kmem_cache_destroy(file_slab);
4391out_free_lockowner_slab:
4392	kmem_cache_destroy(lockowner_slab);
4393out_free_openowner_slab:
4394	kmem_cache_destroy(openowner_slab);
4395out_free_client_slab:
4396	kmem_cache_destroy(client_slab);
4397out:
4398	return -ENOMEM;
4399}
4400
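/*
 * Shrinker callbacks for nfsd4 state: under memory pressure, report how
 * many courtesy clients (or, failing that, delegations) might be
 * trimmed and kick the per-net shrinker work.  The real work happens
 * asynchronously, so the scan callback always returns SHRINK_STOP.
 */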
4401static unsigned long
4402nfsd4_state_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
4403{
4404	int count;
4405	struct nfsd_net *nn = shrink->private_data;
4406
4407	count = atomic_read(&nn->nfsd_courtesy_clients);
4408	if (!count)
4409		count = atomic_long_read(&num_delegations);
4410	if (count)
4411		queue_work(laundry_wq, &nn->nfsd_shrinker_work);
4412	return (unsigned long)count;
4413}
4414
4415static unsigned long
4416nfsd4_state_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
4417{
4418	return SHRINK_STOP;
4419}
4420
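/*
 * Set the per-net lease defaults: 90 second lease and grace periods,
 * randomized clientid and verifier bases, and a client-count limit
 * scaled to RAM (NFS4_CLIENTS_PER_GB for each gigabyte of memory, with
 * a floor of one gigabyte's worth).
 */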
4421void
4422nfsd4_init_leases_net(struct nfsd_net *nn)
4423{
4424	struct sysinfo si;
4425	u64 max_clients;
4426
4427	nn->nfsd4_lease = 90;	/* default lease time */
4428	nn->nfsd4_grace = 90;
4429	nn->somebody_reclaimed = false;
4430	nn->track_reclaim_completes = false;
4431	nn->clverifier_counter = get_random_u32();
4432	nn->clientid_base = get_random_u32();
4433	nn->clientid_counter = nn->clientid_base + 1;
4434	nn->s2s_cp_cl_id = nn->clientid_counter++;
4435
4436	atomic_set(&nn->nfs4_client_count, 0);
4437	si_meminfo(&si);
4438	max_clients = (u64)si.totalram * si.mem_unit / (1024 * 1024 * 1024);
4439	max_clients *= NFS4_CLIENTS_PER_GB;
4440	nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB);
4441
4442	atomic_set(&nn->nfsd_courtesy_clients, 0);
4443}
4444
4445static void init_nfs4_replay(struct nfs4_replay *rp)
4446{
4447	rp->rp_status = nfserr_serverfault;
4448	rp->rp_buflen = 0;
4449	rp->rp_buf = rp->rp_ibuf;
4450	mutex_init(&rp->rp_mutex);
4451}
4452
4453static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
4454		struct nfs4_stateowner *so)
4455{
4456	if (!nfsd4_has_session(cstate)) {
4457		mutex_lock(&so->so_replay.rp_mutex);
4458		cstate->replay_owner = nfs4_get_stateowner(so);
4459	}
4460}
4461
4462void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
4463{
4464	struct nfs4_stateowner *so = cstate->replay_owner;
4465
4466	if (so != NULL) {
4467		cstate->replay_owner = NULL;
4468		mutex_unlock(&so->so_replay.rp_mutex);
4469		nfs4_put_stateowner(so);
4470	}
4471}
4472
4473static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
4474{
4475	struct nfs4_stateowner *sop;
4476
4477	sop = kmem_cache_alloc(slab, GFP_KERNEL);
4478	if (!sop)
4479		return NULL;
4480
4481	xdr_netobj_dup(&sop->so_owner, owner, GFP_KERNEL);
4482	if (!sop->so_owner.data) {
4483		kmem_cache_free(slab, sop);
4484		return NULL;
4485	}
 
4486
4487	INIT_LIST_HEAD(&sop->so_stateids);
4488	sop->so_client = clp;
4489	init_nfs4_replay(&sop->so_replay);
4490	atomic_set(&sop->so_count, 1);
4491	return sop;
4492}
4493
4494static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
4495{
4496	lockdep_assert_held(&clp->cl_lock);
4497
4498	list_add(&oo->oo_owner.so_strhash,
4499		 &clp->cl_ownerstr_hashtbl[strhashval]);
4500	list_add(&oo->oo_perclient, &clp->cl_openowners);
4501}
4502
4503static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
4504{
4505	unhash_openowner_locked(openowner(so));
4506}
4507
4508static void nfs4_free_openowner(struct nfs4_stateowner *so)
4509{
4510	struct nfs4_openowner *oo = openowner(so);
4511
4512	kmem_cache_free(openowner_slab, oo);
4513}
4514
4515static const struct nfs4_stateowner_operations openowner_ops = {
4516	.so_unhash =	nfs4_unhash_openowner,
4517	.so_free =	nfs4_free_openowner,
4518};
4519
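/*
 * Search a file's stateid list for an OPEN stateid belonging to this
 * open's openowner, taking a reference on any match.  The caller must
 * hold fp->fi_lock.
 */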
4520static struct nfs4_ol_stateid *
4521nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4522{
4523	struct nfs4_ol_stateid *local, *ret = NULL;
4524	struct nfs4_openowner *oo = open->op_openowner;
4525
4526	lockdep_assert_held(&fp->fi_lock);
4527
4528	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
4529		/* ignore lock owners */
4530		if (local->st_stateowner->so_is_open_owner == 0)
4531			continue;
4532		if (local->st_stateowner != &oo->oo_owner)
4533			continue;
4534		if (local->st_stid.sc_type == NFS4_OPEN_STID) {
4535			ret = local;
4536			refcount_inc(&ret->st_stid.sc_count);
4537			break;
4538		}
4539	}
4540	return ret;
4541}
4542
4543static __be32
4544nfsd4_verify_open_stid(struct nfs4_stid *s)
4545{
4546	__be32 ret = nfs_ok;
4547
4548	switch (s->sc_type) {
4549	default:
4550		break;
4551	case 0:
4552	case NFS4_CLOSED_STID:
4553	case NFS4_CLOSED_DELEG_STID:
4554		ret = nfserr_bad_stateid;
4555		break;
4556	case NFS4_REVOKED_DELEG_STID:
4557		ret = nfserr_deleg_revoked;
4558	}
4559	return ret;
4560}
4561
4562/* Lock the stateid st_mutex, and deal with races with CLOSE */
4563static __be32
4564nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
4565{
4566	__be32 ret;
4567
4568	mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
4569	ret = nfsd4_verify_open_stid(&stp->st_stid);
4570	if (ret != nfs_ok)
4571		mutex_unlock(&stp->st_mutex);
4572	return ret;
4573}
4574
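/*
 * Find an existing open stateid for this file/openowner and lock its
 * st_mutex, retrying the lookup if the stateid is closed before the
 * mutex can be taken.
 */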
4575static struct nfs4_ol_stateid *
4576nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
4577{
4578	struct nfs4_ol_stateid *stp;
4579	for (;;) {
4580		spin_lock(&fp->fi_lock);
4581		stp = nfsd4_find_existing_open(fp, open);
4582		spin_unlock(&fp->fi_lock);
4583		if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
4584			break;
4585		nfs4_put_stid(&stp->st_stid);
4586	}
4587	return stp;
4588}
4589
4590static struct nfs4_openowner *
4591alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
4592			   struct nfsd4_compound_state *cstate)
4593{
4594	struct nfs4_client *clp = cstate->clp;
4595	struct nfs4_openowner *oo, *ret;
4596
4597	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
4598	if (!oo)
4599		return NULL;
4600	oo->oo_owner.so_ops = &openowner_ops;
4601	oo->oo_owner.so_is_open_owner = 1;
4602	oo->oo_owner.so_seqid = open->op_seqid;
4603	oo->oo_flags = 0;
4604	if (nfsd4_has_session(cstate))
4605		oo->oo_flags |= NFS4_OO_CONFIRMED;
4606	oo->oo_time = 0;
4607	oo->oo_last_closed_stid = NULL;
4608	INIT_LIST_HEAD(&oo->oo_close_lru);
4609	spin_lock(&clp->cl_lock);
4610	ret = find_openstateowner_str_locked(strhashval, open, clp);
4611	if (ret == NULL) {
4612		hash_openowner(oo, clp, strhashval);
4613		ret = oo;
4614	} else
4615		nfs4_free_stateowner(&oo->oo_owner);
4616
4617	spin_unlock(&clp->cl_lock);
4618	return ret;
4619}
4620
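/*
 * Hash the pre-allocated open stateid (open->op_stp) for this file and
 * openowner.  If another thread raced in and hashed one first, that
 * existing stateid is locked and returned instead, and op_stp is left
 * for the caller to release.
 */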
4621static struct nfs4_ol_stateid *
4622init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
4623{
4624
4625	struct nfs4_openowner *oo = open->op_openowner;
4626	struct nfs4_ol_stateid *retstp = NULL;
4627	struct nfs4_ol_stateid *stp;
4628
4629	stp = open->op_stp;
4630	/* We are moving these outside of the spinlocks to avoid the warnings */
4631	mutex_init(&stp->st_mutex);
4632	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
4633
4634retry:
4635	spin_lock(&oo->oo_owner.so_client->cl_lock);
4636	spin_lock(&fp->fi_lock);
4637
4638	retstp = nfsd4_find_existing_open(fp, open);
4639	if (retstp)
4640		goto out_unlock;
4641
4642	open->op_stp = NULL;
4643	refcount_inc(&stp->st_stid.sc_count);
4644	stp->st_stid.sc_type = NFS4_OPEN_STID;
4645	INIT_LIST_HEAD(&stp->st_locks);
4646	stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner);
4647	get_nfs4_file(fp);
4648	stp->st_stid.sc_file = fp;
4649	stp->st_access_bmap = 0;
4650	stp->st_deny_bmap = 0;
4651	stp->st_openstp = NULL;
4652	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
4653	list_add(&stp->st_perfile, &fp->fi_stateids);
4654
4655out_unlock:
4656	spin_unlock(&fp->fi_lock);
4657	spin_unlock(&oo->oo_owner.so_client->cl_lock);
4658	if (retstp) {
4659		/* Handle races with CLOSE */
4660		if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
4661			nfs4_put_stid(&retstp->st_stid);
4662			goto retry;
4663		}
4664		/* To keep mutex tracking happy */
4665		mutex_unlock(&stp->st_mutex);
4666		stp = retstp;
4667	}
4668	return stp;
4669}
4670
4671/*
4672 * In the 4.0 case we need to keep the owners around a little while to handle
4673 * CLOSE replay. We still do need to release any file access that is held by
4674 * them before returning however.
4675 */
4676static void
4677move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
4678{
4679	struct nfs4_ol_stateid *last;
4680	struct nfs4_openowner *oo = openowner(s->st_stateowner);
4681	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
4682						nfsd_net_id);
 
4683
4684	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
4685
4686	/*
4687	 * We know that we hold one reference via nfsd4_close, and another
4688	 * "persistent" reference for the client. If the refcount is higher
4689	 * than 2, then there are still calls in progress that are using this
4690	 * stateid. We can't put the sc_file reference until they are finished.
4691	 * Wait for the refcount to drop to 2. Since it has been unhashed,
4692	 * there should be no danger of the refcount going back up again at
4693	 * this point.
4694	 */
4695	wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2);
4696
4697	release_all_access(s);
4698	if (s->st_stid.sc_file) {
4699		put_nfs4_file(s->st_stid.sc_file);
4700		s->st_stid.sc_file = NULL;
4701	}
4702
4703	spin_lock(&nn->client_lock);
4704	last = oo->oo_last_closed_stid;
4705	oo->oo_last_closed_stid = s;
4706	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
4707	oo->oo_time = ktime_get_boottime_seconds();
4708	spin_unlock(&nn->client_lock);
4709	if (last)
4710		nfs4_put_stid(&last->st_stid);
4711}
4712
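/*
 * Look up the nfs4_file for this filehandle in the global rhltable,
 * which is keyed on the inode, and take a reference on it.  Returns
 * NULL if the filehandle has no nfs4_file.
 */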
4713static noinline_for_stack struct nfs4_file *
4714nfsd4_file_hash_lookup(const struct svc_fh *fhp)
4715{
4716	struct inode *inode = d_inode(fhp->fh_dentry);
4717	struct rhlist_head *tmp, *list;
4718	struct nfs4_file *fi;
4719
4720	rcu_read_lock();
4721	list = rhltable_lookup(&nfs4_file_rhltable, &inode,
4722			       nfs4_file_rhash_params);
4723	rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) {
4724		if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
4725			if (refcount_inc_not_zero(&fi->fi_ref)) {
4726				rcu_read_unlock();
4727				return fi;
4728			}
4729		}
4730	}
4731	rcu_read_unlock();
4732	return NULL;
4733}
4734
4735/*
4736 * On hash insertion, identify entries with the same inode but
4737 * distinct filehandles. They will all be on the list returned
4738 * by rhltable_lookup().
4739 *
4740 * inode->i_lock prevents racing insertions from adding an entry
4741 * for the same inode/fhp pair twice.
4742 */
4743static noinline_for_stack struct nfs4_file *
4744nfsd4_file_hash_insert(struct nfs4_file *new, const struct svc_fh *fhp)
4745{
4746	struct inode *inode = d_inode(fhp->fh_dentry);
4747	struct rhlist_head *tmp, *list;
4748	struct nfs4_file *ret = NULL;
4749	bool alias_found = false;
4750	struct nfs4_file *fi;
4751	int err;
4752
4753	rcu_read_lock();
4754	spin_lock(&inode->i_lock);
4755
4756	list = rhltable_lookup(&nfs4_file_rhltable, &inode,
4757			       nfs4_file_rhash_params);
4758	rhl_for_each_entry_rcu(fi, tmp, list, fi_rlist) {
4759		if (fh_match(&fi->fi_fhandle, &fhp->fh_handle)) {
4760			if (refcount_inc_not_zero(&fi->fi_ref))
4761				ret = fi;
4762		} else
4763			fi->fi_aliased = alias_found = true;
4764	}
4765	if (ret)
4766		goto out_unlock;
4767
4768	nfsd4_file_init(fhp, new);
4769	err = rhltable_insert(&nfs4_file_rhltable, &new->fi_rlist,
4770			      nfs4_file_rhash_params);
4771	if (err)
4772		goto out_unlock;
4773
4774	new->fi_aliased = alias_found;
4775	ret = new;
4776
4777out_unlock:
4778	spin_unlock(&inode->i_lock);
4779	rcu_read_unlock();
4780	return ret;
4781}
4782
4783static noinline_for_stack void nfsd4_file_hash_remove(struct nfs4_file *fi)
4784{
4785	rhltable_remove(&nfs4_file_rhltable, &fi->fi_rlist,
4786			nfs4_file_rhash_params);
4787}
4788
4789/*
4790 * Called to check deny when READ with all zero stateid or
4791 * WRITE with all zero or all one stateid
4792 */
4793static __be32
4794nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
4795{
 
4796	struct nfs4_file *fp;
4797	__be32 ret = nfs_ok;
4798
4799	fp = nfsd4_file_hash_lookup(current_fh);
4800	if (!fp)
4801		return ret;
4802
4803	/* Check for conflicting share reservations */
4804	spin_lock(&fp->fi_lock);
4805	if (fp->fi_share_deny & deny_type)
4806		ret = nfserr_locked;
4807	spin_unlock(&fp->fi_lock);
4808	put_nfs4_file(fp);
4809	return ret;
4810}
4811
4812static bool nfsd4_deleg_present(const struct inode *inode)
4813{
4814	struct file_lock_context *ctx = locks_inode_context(inode);
4815
4816	return ctx && !list_empty_careful(&ctx->flc_lease);
4817}
4818
4819/**
4820 * nfsd_wait_for_delegreturn - wait for delegations to be returned
4821 * @rqstp: the RPC transaction being executed
4822 * @inode: in-core inode of the file being waited for
4823 *
4824 * The timeout prevents deadlock if all nfsd threads happen to be
4825 * tied up waiting for returning delegations.
4826 *
4827 * Return values:
4828 *   %true: delegation was returned
4829 *   %false: timed out waiting for delegreturn
4830 */
4831bool nfsd_wait_for_delegreturn(struct svc_rqst *rqstp, struct inode *inode)
4832{
4833	long __maybe_unused timeo;
4834
4835	timeo = wait_var_event_timeout(inode, !nfsd4_deleg_present(inode),
4836				       NFSD_DELEGRETURN_TIMEOUT);
4837	trace_nfsd_delegret_wakeup(rqstp, inode, timeo);
4838	return timeo > 0;
4839}
4840
4841static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
4842{
4843	struct nfs4_delegation *dp = cb_to_delegation(cb);
4844	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
4845					  nfsd_net_id);
4846
4847	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
4848
4849	/*
4850	 * We can't do this in nfsd_break_deleg_cb because it is
4851	 * already holding inode->i_lock.
4852	 *
4853	 * If the dl_time != 0, then we know that it has already been
4854	 * queued for a lease break. Don't queue it again.
4855	 */
4856	spin_lock(&state_lock);
4857	if (delegation_hashed(dp) && dp->dl_time == 0) {
4858		dp->dl_time = ktime_get_boottime_seconds();
4859		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
4860	}
4861	spin_unlock(&state_lock);
4862}
4863
4864static int nfsd4_cb_recall_done(struct nfsd4_callback *cb,
4865		struct rpc_task *task)
4866{
4867	struct nfs4_delegation *dp = cb_to_delegation(cb);
4868
4869	trace_nfsd_cb_recall_done(&dp->dl_stid.sc_stateid, task);
4870
4871	if (dp->dl_stid.sc_type == NFS4_CLOSED_DELEG_STID ||
4872	    dp->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID)
4873	        return 1;
4874
4875	switch (task->tk_status) {
4876	case 0:
4877		return 1;
4878	case -NFS4ERR_DELAY:
4879		rpc_delay(task, 2 * HZ);
4880		return 0;
4881	case -EBADHANDLE:
4882	case -NFS4ERR_BAD_STATEID:
4883		/*
4884		 * Race: client probably got cb_recall before open reply
4885		 * granting delegation.
4886		 */
4887		if (dp->dl_retries--) {
4888			rpc_delay(task, 2 * HZ);
4889			return 0;
4890		}
4891		fallthrough;
4892	default:
4893		return 1;
4894	}
4895}
4896
4897static void nfsd4_cb_recall_release(struct nfsd4_callback *cb)
4898{
4899	struct nfs4_delegation *dp = cb_to_delegation(cb);
4900
4901	nfs4_put_stid(&dp->dl_stid);
4902}
4903
4904static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
4905	.prepare	= nfsd4_cb_recall_prepare,
4906	.done		= nfsd4_cb_recall_done,
4907	.release	= nfsd4_cb_recall_release,
4908};
4909
4910static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
4911{
4912	/*
4913	 * We're assuming the state code never drops its reference
4914	 * without first removing the lease.  Since we're in this lease
4915	 * callback (and since the lease code is serialized by the
4916	 * flc_lock) we know the server hasn't removed the lease yet, and
4917	 * we know it's safe to take a reference.
4918	 */
4919	refcount_inc(&dp->dl_stid.sc_count);
4920	WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall));
4921}
4922
4923/* Called from break_lease() with flc_lock held. */
4924static bool
4925nfsd_break_deleg_cb(struct file_lock *fl)
4926{
4927	struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
4928	struct nfs4_file *fp = dp->dl_stid.sc_file;
4929	struct nfs4_client *clp = dp->dl_stid.sc_client;
4930	struct nfsd_net *nn;
4931
4932	trace_nfsd_cb_recall(&dp->dl_stid);
4933
4934	dp->dl_recalled = true;
4935	atomic_inc(&clp->cl_delegs_in_recall);
4936	if (try_to_expire_client(clp)) {
4937		nn = net_generic(clp->net, nfsd_net_id);
4938		mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
4939	}
4940
4941	/*
4942	 * We don't want the locks code to timeout the lease for us;
4943	 * we'll remove it ourselves if a delegation isn't returned
4944	 * in time:
4945	 */
4946	fl->fl_break_time = 0;
4947
 
4948	fp->fi_had_conflict = true;
4949	nfsd_break_one_deleg(dp);
4950	return false;
 
4951}
4952
4953/**
4954 * nfsd_breaker_owns_lease - Check if lease conflict was resolved
4955 * @fl: Lock state to check
4956 *
4957 * Return values:
4958 *   %true: Lease conflict was resolved
4959 *   %false: Lease conflict was not resolved.
4960 */
4961static bool nfsd_breaker_owns_lease(struct file_lock *fl)
4962{
4963	struct nfs4_delegation *dl = fl->fl_owner;
4964	struct svc_rqst *rqst;
4965	struct nfs4_client *clp;
4966
4967	if (!i_am_nfsd())
4968		return false;
4969	rqst = kthread_data(current);
4970	/* Note rq_prog == NFS_ACL_PROGRAM is also possible: */
4971	if (rqst->rq_prog != NFS_PROGRAM || rqst->rq_vers < 4)
4972		return false;
4973	clp = *(rqst->rq_lease_breaker);
4974	return dl->dl_stid.sc_client == clp;
4975}
4976
4977static int
4978nfsd_change_deleg_cb(struct file_lock *onlist, int arg,
4979		     struct list_head *dispose)
4980{
4981	struct nfs4_delegation *dp = (struct nfs4_delegation *)onlist->fl_owner;
4982	struct nfs4_client *clp = dp->dl_stid.sc_client;
4983
4984	if (arg & F_UNLCK) {
4985		if (dp->dl_recalled)
4986			atomic_dec(&clp->cl_delegs_in_recall);
4987		return lease_modify(onlist, arg, dispose);
4988	} else
4989		return -EAGAIN;
4990}
4991
4992static const struct lock_manager_operations nfsd_lease_mng_ops = {
4993	.lm_breaker_owns_lease = nfsd_breaker_owns_lease,
4994	.lm_break = nfsd_break_deleg_cb,
4995	.lm_change = nfsd_change_deleg_cb,
4996};
4997
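/*
 * NFSv4.0 seqid checking: a seqid one less than the owner's current
 * value is a retransmission and is answered from the replay cache
 * (nfserr_replay_me); the current value is a new request; anything
 * else is a bad seqid.  Sessions provide their own replay protection,
 * so the check is skipped for NFSv4.1+.
 */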
4998static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
4999{
5000	if (nfsd4_has_session(cstate))
5001		return nfs_ok;
5002	if (seqid == so->so_seqid - 1)
5003		return nfserr_replay_me;
5004	if (seqid == so->so_seqid)
5005		return nfs_ok;
5006	return nfserr_bad_seqid;
5007}
5008
5009static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions,
5010						struct nfsd_net *nn)
5011{
5012	struct nfs4_client *found;
5013
5014	spin_lock(&nn->client_lock);
5015	found = find_confirmed_client(clid, sessions, nn);
5016	if (found)
5017		atomic_inc(&found->cl_rpc_users);
5018	spin_unlock(&nn->client_lock);
5019	return found;
5020}
5021
5022static __be32 set_client(clientid_t *clid,
5023		struct nfsd4_compound_state *cstate,
5024		struct nfsd_net *nn)
5025{
5026	if (cstate->clp) {
5027		if (!same_clid(&cstate->clp->cl_clientid, clid))
5028			return nfserr_stale_clientid;
5029		return nfs_ok;
5030	}
5031	if (STALE_CLIENTID(clid, nn))
5032		return nfserr_stale_clientid;
5033	/*
5034	 * We're in the 4.0 case (otherwise the SEQUENCE op would have
5035	 * set cstate->clp), so session = false:
5036	 */
5037	cstate->clp = lookup_clientid(clid, false, nn);
5038	if (!cstate->clp)
5039		return nfserr_expired;
5040	return nfs_ok;
5041}
5042
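/*
 * First phase of OPEN processing: find or create the openowner and
 * pre-allocate the nfs4_file, the open stateid and, for pNFS exports,
 * the client odstate, so that the second phase cannot fail on an
 * allocation after the file has already been created.
 */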
5043__be32
5044nfsd4_process_open1(struct nfsd4_compound_state *cstate,
5045		    struct nfsd4_open *open, struct nfsd_net *nn)
5046{
5047	clientid_t *clientid = &open->op_clientid;
5048	struct nfs4_client *clp = NULL;
5049	unsigned int strhashval;
5050	struct nfs4_openowner *oo = NULL;
5051	__be32 status;
5052
5053	/*
5054	 * In case we need it later, after we've already created the
5055	 * file and don't want to risk a further failure:
5056	 */
5057	open->op_file = nfsd4_alloc_file();
5058	if (open->op_file == NULL)
5059		return nfserr_jukebox;
5060
5061	status = set_client(clientid, cstate, nn);
5062	if (status)
5063		return status;
5064	clp = cstate->clp;
5065
5066	strhashval = ownerstr_hashval(&open->op_owner);
5067	oo = find_openstateowner_str(strhashval, open, clp);
5068	open->op_openowner = oo;
5069	if (!oo) {
5070		goto new_owner;
5071	}
5072	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
5073		/* Replace unconfirmed owners without checking for replay. */
 
5074		release_openowner(oo);
5075		open->op_openowner = NULL;
5076		goto new_owner;
5077	}
5078	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
5079	if (status)
5080		return status;
 
5081	goto alloc_stateid;
5082new_owner:
5083	oo = alloc_init_open_stateowner(strhashval, open, cstate);
5084	if (oo == NULL)
5085		return nfserr_jukebox;
5086	open->op_openowner = oo;
5087alloc_stateid:
5088	open->op_stp = nfs4_alloc_open_stateid(clp);
5089	if (!open->op_stp)
5090		return nfserr_jukebox;
5091
5092	if (nfsd4_has_session(cstate) &&
5093	    (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
5094		open->op_odstate = alloc_clnt_odstate(clp);
5095		if (!open->op_odstate)
5096			return nfserr_jukebox;
5097	}
5098
5099	return nfs_ok;
5100}
5101
5102static inline __be32
5103nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
5104{
5105	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
5106		return nfserr_openmode;
5107	else
5108		return nfs_ok;
5109}
5110
5111static int share_access_to_flags(u32 share_access)
5112{
5113	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
5114}
5115
5116static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
5117{
5118	struct nfs4_stid *ret;
5119
5120	ret = find_stateid_by_type(cl, s,
5121				NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
5122	if (!ret)
5123		return NULL;
5124	return delegstateid(ret);
5125}
5126
5127static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
5128{
5129	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
5130	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
5131}
5132
5133static __be32
5134nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
5135		struct nfs4_delegation **dp)
5136{
5137	int flags;
5138	__be32 status = nfserr_bad_stateid;
5139	struct nfs4_delegation *deleg;
5140
5141	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
5142	if (deleg == NULL)
5143		goto out;
5144	if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
5145		nfs4_put_stid(&deleg->dl_stid);
5146		if (cl->cl_minorversion)
5147			status = nfserr_deleg_revoked;
5148		goto out;
5149	}
5150	flags = share_access_to_flags(open->op_share_access);
5151	status = nfs4_check_delegmode(deleg, flags);
5152	if (status) {
5153		nfs4_put_stid(&deleg->dl_stid);
5154		goto out;
5155	}
5156	*dp = deleg;
5157out:
5158	if (!nfsd4_is_deleg_cur(open))
5159		return nfs_ok;
5160	if (status)
5161		return status;
5162	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
5163	return nfs_ok;
5164}
5165
5166static inline int nfs4_access_to_access(u32 nfs4_access)
5167{
5168	int flags = 0;
5169
5170	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
5171		flags |= NFSD_MAY_READ;
5172	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
5173		flags |= NFSD_MAY_WRITE;
5174	return flags;
5175}
5176
5177static inline __be32
5178nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
5179		struct nfsd4_open *open)
5180{
5181	struct iattr iattr = {
5182		.ia_valid = ATTR_SIZE,
5183		.ia_size = 0,
5184	};
5185	struct nfsd_attrs attrs = {
5186		.na_iattr	= &iattr,
5187	};
5188	if (!open->op_truncate)
5189		return 0;
5190	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
5191		return nfserr_inval;
5192	return nfsd_setattr(rqstp, fh, &attrs, 0, (time64_t)0);
5193}
5194
5195static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
5196		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
5197		struct nfsd4_open *open, bool new_stp)
5198{
5199	struct nfsd_file *nf = NULL;
 
5200	__be32 status;
5201	int oflag = nfs4_access_to_omode(open->op_share_access);
5202	int access = nfs4_access_to_access(open->op_share_access);
5203	unsigned char old_access_bmap, old_deny_bmap;
5204
5205	spin_lock(&fp->fi_lock);
5206
5207	/*
5208	 * Are we trying to set a deny mode that would conflict with
5209	 * current access?
5210	 */
5211	status = nfs4_file_check_deny(fp, open->op_share_deny);
5212	if (status != nfs_ok) {
5213		if (status != nfserr_share_denied) {
5214			spin_unlock(&fp->fi_lock);
5215			goto out;
5216		}
5217		if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
5218				stp, open->op_share_deny, false))
5219			status = nfserr_jukebox;
5220		spin_unlock(&fp->fi_lock);
5221		goto out;
5222	}
5223
5224	/* set access to the file */
5225	status = nfs4_file_get_access(fp, open->op_share_access);
5226	if (status != nfs_ok) {
5227		if (status != nfserr_share_denied) {
5228			spin_unlock(&fp->fi_lock);
5229			goto out;
5230		}
5231		if (nfs4_resolve_deny_conflicts_locked(fp, new_stp,
5232				stp, open->op_share_access, true))
5233			status = nfserr_jukebox;
5234		spin_unlock(&fp->fi_lock);
5235		goto out;
5236	}
5237
5238	/* Set access bits in stateid */
5239	old_access_bmap = stp->st_access_bmap;
5240	set_access(open->op_share_access, stp);
5241
5242	/* Set new deny mask */
5243	old_deny_bmap = stp->st_deny_bmap;
5244	set_deny(open->op_share_deny, stp);
5245	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);
5246
5247	if (!fp->fi_fds[oflag]) {
5248		spin_unlock(&fp->fi_lock);
5249
5250		status = nfsd_file_acquire_opened(rqstp, cur_fh, access,
5251						  open->op_filp, &nf);
5252		if (status != nfs_ok)
5253			goto out_put_access;
5254
5255		spin_lock(&fp->fi_lock);
5256		if (!fp->fi_fds[oflag]) {
5257			fp->fi_fds[oflag] = nf;
5258			nf = NULL;
5259		}
5260	}
5261	spin_unlock(&fp->fi_lock);
5262	if (nf)
5263		nfsd_file_put(nf);
5264
5265	status = nfserrno(nfsd_open_break_lease(cur_fh->fh_dentry->d_inode,
5266								access));
5267	if (status)
5268		goto out_put_access;
5269
5270	status = nfsd4_truncate(rqstp, cur_fh, open);
5271	if (status)
5272		goto out_put_access;
5273out:
5274	return status;
5275out_put_access:
5276	stp->st_access_bmap = old_access_bmap;
5277	nfs4_file_put_access(fp, open->op_share_access);
5278	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
5279	goto out;
5280}
5281
5282static __be32
5283nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp,
5284		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
5285		struct nfsd4_open *open)
5286{
5287	__be32 status;
5288	unsigned char old_deny_bmap = stp->st_deny_bmap;
5289
5290	if (!test_access(open->op_share_access, stp))
5291		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open, false);
5292
5293	/* test and set deny mode */
5294	spin_lock(&fp->fi_lock);
5295	status = nfs4_file_check_deny(fp, open->op_share_deny);
5296	switch (status) {
5297	case nfs_ok:
5298		set_deny(open->op_share_deny, stp);
5299		fp->fi_share_deny |=
5300			(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
5301		break;
5302	case nfserr_share_denied:
5303		if (nfs4_resolve_deny_conflicts_locked(fp, false,
5304				stp, open->op_share_deny, false))
5305			status = nfserr_jukebox;
5306		break;
5307	}
5308	spin_unlock(&fp->fi_lock);
5309
5310	if (status != nfs_ok)
5311		return status;
5312
5313	status = nfsd4_truncate(rqstp, cur_fh, open);
5314	if (status != nfs_ok)
5315		reset_union_bmap_deny(old_deny_bmap, stp);
5316	return status;
5317}
5318
5319/* Should we give out recallable state?: */
5320static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
5321{
5322	if (clp->cl_cb_state == NFSD4_CB_UP)
5323		return true;
5324	/*
5325	 * In the sessions case, since we don't have to establish a
5326	 * separate connection for callbacks, we assume it's OK
5327	 * until we hear otherwise:
5328	 */
5329	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
5330}
5331
5332static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp,
5333						int flag)
5334{
5335	struct file_lock *fl;
5336
5337	fl = locks_alloc_lock();
5338	if (!fl)
5339		return NULL;
 
5340	fl->fl_lmops = &nfsd_lease_mng_ops;
5341	fl->fl_flags = FL_DELEG;
5342	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
5343	fl->fl_end = OFFSET_MAX;
5344	fl->fl_owner = (fl_owner_t)dp;
5345	fl->fl_pid = current->tgid;
5346	fl->fl_file = dp->dl_stid.sc_file->fi_deleg_file->nf_file;
5347	return fl;
5348}
5349
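/*
 * Refuse to hand out a delegation if anyone other than this client has
 * the file open for write: compare the inode's i_writecount against
 * the write opens we hold ourselves, then scan the file's open
 * stateids for writers belonging to other clients.
 */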
5350static int nfsd4_check_conflicting_opens(struct nfs4_client *clp,
5351					 struct nfs4_file *fp)
5352{
5353	struct nfs4_ol_stateid *st;
5354	struct file *f = fp->fi_deleg_file->nf_file;
5355	struct inode *ino = file_inode(f);
5356	int writes;
5357
5358	writes = atomic_read(&ino->i_writecount);
5359	if (!writes)
5360		return 0;
5361	/*
5362	 * There could be multiple filehandles (hence multiple
5363	 * nfs4_files) referencing this file, but that's not too
5364	 * common; let's just give up in that case rather than
5365	 * trying to go look up all the clients using that other
5366	 * nfs4_file as well:
5367	 */
5368	if (fp->fi_aliased)
5369		return -EAGAIN;
5370	/*
5371	 * If there's a close in progress, make sure that we see it
5372	 * clear any fi_fds[] entries before we see it decrement
5373	 * i_writecount:
5374	 */
5375	smp_mb__after_atomic();
5376
5377	if (fp->fi_fds[O_WRONLY])
5378		writes--;
5379	if (fp->fi_fds[O_RDWR])
5380		writes--;
5381	if (writes > 0)
5382		return -EAGAIN; /* There may be non-NFSv4 writers */
5383	/*
5384	 * It's possible there are non-NFSv4 write opens in progress,
5385	 * but if they haven't incremented i_writecount yet then they
5386	 * also haven't called break lease yet; so, they'll break this
5387	 * lease soon enough.  So, all that's left to check for is NFSv4
5388	 * opens:
5389	 */
5390	spin_lock(&fp->fi_lock);
5391	list_for_each_entry(st, &fp->fi_stateids, st_perfile) {
5392		if (st->st_openstp == NULL /* it's an open */ &&
5393		    access_permit_write(st) &&
5394		    st->st_stid.sc_client != clp) {
5395			spin_unlock(&fp->fi_lock);
5396			return -EAGAIN;
5397		}
5398	}
5399	spin_unlock(&fp->fi_lock);
5400	/*
5401	 * There's a small chance that we could be racing with another
5402	 * NFSv4 open.  However, any open that hasn't added itself to
5403	 * the fi_stateids list also hasn't called break_lease yet; so,
5404	 * they'll break this lease soon enough.
5405	 */
5406	return 0;
5407}
5408
5409/*
5410 * It's possible that between opening the dentry and setting the delegation,
5411 * that it has been renamed or unlinked. Redo the lookup to verify that this
5412 * hasn't happened.
5413 */
5414static int
5415nfsd4_verify_deleg_dentry(struct nfsd4_open *open, struct nfs4_file *fp,
5416			  struct svc_fh *parent)
5417{
5418	struct svc_export *exp;
5419	struct dentry *child;
5420	__be32 err;
5421
5422	err = nfsd_lookup_dentry(open->op_rqstp, parent,
5423				 open->op_fname, open->op_fnamelen,
5424				 &exp, &child);
5425
5426	if (err)
5427		return -EAGAIN;
5428
5429	exp_put(exp);
5430	dput(child);
5431	if (child != file_dentry(fp->fi_deleg_file->nf_file))
5432		return -EAGAIN;
5433
5434	return 0;
5435}
5436
5437/*
5438 * We avoid breaking delegations held by a client due to its own activity, but
5439 * clearing setuid/setgid bits on a write is an implicit activity and the client
5440 * may not notice and continue using the old mode. Avoid giving out a delegation
5441 * on setuid/setgid files when the client is requesting an open for write.
5442 */
5443static int
5444nfsd4_verify_setuid_write(struct nfsd4_open *open, struct nfsd_file *nf)
5445{
5446	struct inode *inode = file_inode(nf->nf_file);
5447
5448	if ((open->op_share_access & NFS4_SHARE_ACCESS_WRITE) &&
5449	    (inode->i_mode & (S_ISUID|S_ISGID)))
5450		return -EAGAIN;
5451	return 0;
5452}
5453
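/*
 * Try to establish a delegation for this open: prefer a write
 * delegation when both read and write access were requested, otherwise
 * fall back to a read delegation; install the corresponding FL_DELEG
 * lease via vfs_setlease(), then re-check for conflicts that may have
 * raced in before hashing the delegation.
 */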
5454static struct nfs4_delegation *
5455nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
5456		    struct svc_fh *parent)
5457{
5458	int status = 0;
5459	struct nfs4_client *clp = stp->st_stid.sc_client;
5460	struct nfs4_file *fp = stp->st_stid.sc_file;
5461	struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate;
5462	struct nfs4_delegation *dp;
5463	struct nfsd_file *nf = NULL;
5464	struct file_lock *fl;
5465	u32 dl_type;
5466
5467	/*
5468	 * The fi_had_conflict and nfs_get_existing_delegation checks
5469	 * here are just optimizations; we'll need to recheck them at
5470	 * the end:
5471	 */
5472	if (fp->fi_had_conflict)
5473		return ERR_PTR(-EAGAIN);
5474
5475	/*
5476	 * Try for a write delegation first. RFC8881 section 10.4 says:
5477	 *
5478	 *  "An OPEN_DELEGATE_WRITE delegation allows the client to handle,
5479	 *   on its own, all opens."
5480	 *
5481	 * Furthermore, the client can use a write delegation for most READ
5482	 * operations as well, so we require an O_RDWR file here.
5483	 *
5484	 * Offer a write delegation in the case of a BOTH open, and ensure
5485	 * we get the O_RDWR descriptor.
5486	 */
5487	if ((open->op_share_access & NFS4_SHARE_ACCESS_BOTH) == NFS4_SHARE_ACCESS_BOTH) {
5488		nf = find_rw_file(fp);
5489		dl_type = NFS4_OPEN_DELEGATE_WRITE;
5490	}
5491
5492	/*
5493	 * If the file is being opened O_RDONLY or we couldn't get an O_RDWR
5494	 * file for some reason, then try for a read delegation instead.
5495	 */
5496	if (!nf && (open->op_share_access & NFS4_SHARE_ACCESS_READ)) {
5497		nf = find_readable_file(fp);
5498		dl_type = NFS4_OPEN_DELEGATE_READ;
5499	}
5500
5501	if (!nf)
5502		return ERR_PTR(-EAGAIN);
5503
5504	spin_lock(&state_lock);
5505	spin_lock(&fp->fi_lock);
5506	if (nfs4_delegation_exists(clp, fp))
5507		status = -EAGAIN;
5508	else if (nfsd4_verify_setuid_write(open, nf))
5509		status = -EAGAIN;
5510	else if (!fp->fi_deleg_file) {
5511		fp->fi_deleg_file = nf;
5512		/* increment early to prevent fi_deleg_file from being
5513		 * cleared */
5514		fp->fi_delegees = 1;
5515		nf = NULL;
5516	} else
5517		fp->fi_delegees++;
5518	spin_unlock(&fp->fi_lock);
5519	spin_unlock(&state_lock);
5520	if (nf)
5521		nfsd_file_put(nf);
5522	if (status)
5523		return ERR_PTR(status);
5524
5525	status = -ENOMEM;
5526	dp = alloc_init_deleg(clp, fp, odstate, dl_type);
5527	if (!dp)
5528		goto out_delegees;
5529
5530	fl = nfs4_alloc_init_lease(dp, dl_type);
5531	if (!fl)
5532		goto out_clnt_odstate;
5533
5534	status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL);
5535	if (fl)
5536		locks_free_lock(fl);
5537	if (status)
5538		goto out_clnt_odstate;
5539
5540	if (parent) {
5541		status = nfsd4_verify_deleg_dentry(open, fp, parent);
5542		if (status)
5543			goto out_unlock;
5544	}
5545
5546	status = nfsd4_check_conflicting_opens(clp, fp);
5547	if (status)
5548		goto out_unlock;
5549
5550	/*
5551	 * Now that the deleg is set, check again to ensure that nothing
5552	 * raced in and changed the mode while we weren't lookng.
5553	 */
5554	status = nfsd4_verify_setuid_write(open, fp->fi_deleg_file);
5555	if (status)
5556		goto out_unlock;
5557
5558	status = -EAGAIN;
5559	if (fp->fi_had_conflict)
5560		goto out_unlock;
5561
5562	spin_lock(&state_lock);
5563	spin_lock(&fp->fi_lock);
5564	status = hash_delegation_locked(dp, fp);
5565	spin_unlock(&fp->fi_lock);
5566	spin_unlock(&state_lock);
5567
5568	if (status)
5569		goto out_unlock;
5570
5571	return dp;
5572out_unlock:
5573	vfs_setlease(fp->fi_deleg_file->nf_file, F_UNLCK, NULL, (void **)&dp);
5574out_clnt_odstate:
5575	put_clnt_odstate(dp->dl_clnt_odstate);
5576	nfs4_put_stid(&dp->dl_stid);
5577out_delegees:
5578	put_deleg_file(fp);
5579	return ERR_PTR(status);
5580}
5581
5582static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
5583{
5584	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5585	if (status == -EAGAIN)
5586		open->op_why_no_deleg = WND4_CONTENTION;
5587	else {
5588		open->op_why_no_deleg = WND4_RESOURCE;
5589		switch (open->op_deleg_want) {
5590		case NFS4_SHARE_WANT_READ_DELEG:
5591		case NFS4_SHARE_WANT_WRITE_DELEG:
5592		case NFS4_SHARE_WANT_ANY_DELEG:
5593			break;
5594		case NFS4_SHARE_WANT_CANCEL:
5595			open->op_why_no_deleg = WND4_CANCELLED;
5596			break;
5597		case NFS4_SHARE_WANT_NO_DELEG:
5598			WARN_ON_ONCE(1);
5599		}
5600	}
5601}
5602
5603/*
5604 * The Linux NFS server does not offer write delegations to NFSv4.0
5605 * clients in order to avoid conflicts between write delegations and
5606 * GETATTRs requesting CHANGE or SIZE attributes.
5607 *
5608 * With NFSv4.1 and later minorversions, the SEQUENCE operation that
5609 * begins each COMPOUND contains a client ID. Delegation recall can
5610 * be avoided when the server recognizes the client sending a
5611	 * GETATTR also holds the write delegation it conflicts with.
5612 *
5613 * However, the NFSv4.0 protocol does not enable a server to
5614 * determine that a GETATTR originated from the client holding the
5615 * conflicting delegation versus coming from some other client. Per
5616 * RFC 7530 Section 16.7.5, the server must recall or send a
5617 * CB_GETATTR even when the GETATTR originates from the client that
5618 * holds the conflicting delegation.
5619 *
5620 * An NFSv4.0 client can trigger a pathological situation if it
5621 * always sends a DELEGRETURN preceded by a conflicting GETATTR in
5622 * the same COMPOUND. COMPOUND execution will always stop at the
5623 * GETATTR and the DELEGRETURN will never get executed. The server
5624 * eventually revokes the delegation, which can result in loss of
5625 * open or lock state.
5626 */
5627static void
5628nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
5629		     struct svc_fh *currentfh)
5630{
5631	struct nfs4_delegation *dp;
5632	struct nfs4_openowner *oo = openowner(stp->st_stateowner);
5633	struct nfs4_client *clp = stp->st_stid.sc_client;
5634	struct svc_fh *parent = NULL;
5635	int cb_up;
5636	int status = 0;
5637
5638	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
5639	open->op_recall = false;
 
5640	switch (open->op_claim_type) {
5641		case NFS4_OPEN_CLAIM_PREVIOUS:
5642			if (!cb_up)
5643				open->op_recall = true;
5644			break;
5645		case NFS4_OPEN_CLAIM_NULL:
5646			parent = currentfh;
5647			fallthrough;
5648		case NFS4_OPEN_CLAIM_FH:
5649			/*
5650			 * Let's not give out any delegations till everyone's
5651			 * had the chance to reclaim theirs, *and* until
5652			 * NLM locks have all been reclaimed:
5653			 */
5654			if (locks_in_grace(clp->net))
5655				goto out_no_deleg;
5656			if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
5657				goto out_no_deleg;
5658			if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE &&
5659					!clp->cl_minorversion)
5660				goto out_no_deleg;
 
5661			break;
5662		default:
5663			goto out_no_deleg;
5664	}
5665	dp = nfs4_set_delegation(open, stp, parent);
5666	if (IS_ERR(dp))
5667		goto out_no_deleg;
5668
5669	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
5670
5671	if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) {
5672		open->op_delegate_type = NFS4_OPEN_DELEGATE_WRITE;
5673		trace_nfsd_deleg_write(&dp->dl_stid.sc_stateid);
5674	} else {
5675		open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
5676		trace_nfsd_deleg_read(&dp->dl_stid.sc_stateid);
5677	}
5678	nfs4_put_stid(&dp->dl_stid);
5679	return;
5680out_no_deleg:
5681	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
5682	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
5683	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
5684		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
5685		open->op_recall = true;
5686	}
5687
5688	/* 4.1 client asking for a delegation? */
5689	if (open->op_deleg_want)
5690		nfsd4_open_deleg_none_ext(open, status);
5691	return;
5692}
5693
5694static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
5695					struct nfs4_delegation *dp)
5696{
5697	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
5698	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5699		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5700		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
5701	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
5702		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
5703		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5704		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
5705	}
5706	/* Otherwise the client must be confused wanting a delegation
5707	 * it already has, therefore we don't return
5708	 * NFS4_OPEN_DELEGATE_NONE_EXT and a reason.
5709	 */
5710}
5711
5712/**
5713 * nfsd4_process_open2 - finish open processing
5714 * @rqstp: the RPC transaction being executed
5715 * @current_fh: NFSv4 COMPOUND's current filehandle
5716 * @open: OPEN arguments
5717 *
5718 * If successful, (1) truncate the file if open->op_truncate was
5719 * set, (2) set open->op_stateid, (3) set open->op_delegation.
5720 *
5721 * Returns %nfs_ok on success; otherwise an nfs4stat value in
5722 * network byte order is returned.
5723 */
5724__be32
5725nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
5726{
5727	struct nfsd4_compoundres *resp = rqstp->rq_resp;
5728	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
5729	struct nfs4_file *fp = NULL;
 
5730	struct nfs4_ol_stateid *stp = NULL;
5731	struct nfs4_delegation *dp = NULL;
5732	__be32 status;
5733	bool new_stp = false;
5734
5735	/*
5736	 * Lookup file; if found, lookup stateid and check open request,
5737	 * and check for delegations in the process of being recalled.
5738	 * If not found, create the nfs4_file struct
5739	 */
5740	fp = nfsd4_file_hash_insert(open->op_file, current_fh);
5741	if (unlikely(!fp))
5742		return nfserr_jukebox;
5743	if (fp != open->op_file) {
5744		status = nfs4_check_deleg(cl, open, &dp);
5745		if (status)
5746			goto out;
5747		stp = nfsd4_find_and_lock_existing_open(fp, open);
5748	} else {
5749		open->op_file = NULL;
5750		status = nfserr_bad_stateid;
5751		if (nfsd4_is_deleg_cur(open))
5752			goto out;
5753	}
5754
5755	if (!stp) {
5756		stp = init_open_stateid(fp, open);
5757		if (!open->op_stp)
5758			new_stp = true;
5759	}
5760
5761	/*
5762	 * OPEN the file, or upgrade an existing OPEN.
5763	 * If truncate fails, the OPEN fails.
5764	 *
5765	 * stp is already locked.
5766	 */
5767	if (!new_stp) {
5768		/* Stateid was found, this is an OPEN upgrade */
5769		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
5770		if (status) {
5771			mutex_unlock(&stp->st_mutex);
5772			goto out;
5773		}
5774	} else {
5775		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open, true);
5776		if (status) {
5777			stp->st_stid.sc_type = NFS4_CLOSED_STID;
5778			release_open_stateid(stp);
5779			mutex_unlock(&stp->st_mutex);
5780			goto out;
5781		}
5782
5783		stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
5784							open->op_odstate);
5785		if (stp->st_clnt_odstate == open->op_odstate)
5786			open->op_odstate = NULL;
5787	}
5788
5789	nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
5790	mutex_unlock(&stp->st_mutex);
5791
5792	if (nfsd4_has_session(&resp->cstate)) {
5793		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
5794			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
5795			open->op_why_no_deleg = WND4_NOT_WANTED;
5796			goto nodeleg;
5797		}
5798	}
5799
5800	/*
5801	* Attempt to hand out a delegation. No error return, because the
5802	* OPEN succeeds even if we fail.
5803	*/
5804	nfs4_open_delegation(open, stp, &resp->cstate.current_fh);
5805nodeleg:
5806	status = nfs_ok;
5807	trace_nfsd_open(&stp->st_stid.sc_stateid);
5808out:
5809	/* 4.1 client trying to upgrade/downgrade delegation? */
5810	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
5811	    open->op_deleg_want)
5812		nfsd4_deleg_xgrade_none_ext(open, dp);
5813
5814	if (fp)
5815		put_nfs4_file(fp);
5816	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
5817		open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
5818	/*
5819	* To finish the open response, we just need to set the rflags.
5820	*/
5821	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
5822	if (nfsd4_has_session(&resp->cstate))
5823		open->op_rflags |= NFS4_OPEN_RESULT_MAY_NOTIFY_LOCK;
5824	else if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED))
5825		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
5826
5827	if (dp)
5828		nfs4_put_stid(&dp->dl_stid);
5829	if (stp)
5830		nfs4_put_stid(&stp->st_stid);
5831
5832	return status;
5833}
5834
5835void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
5836			      struct nfsd4_open *open)
5837{
5838	if (open->op_openowner) {
5839		struct nfs4_stateowner *so = &open->op_openowner->oo_owner;
5840
5841		nfsd4_cstate_assign_replay(cstate, so);
5842		nfs4_put_stateowner(so);
5843	}
5844	if (open->op_file)
5845		kmem_cache_free(file_slab, open->op_file);
5846	if (open->op_stp)
5847		nfs4_put_stid(&open->op_stp->st_stid);
5848	if (open->op_odstate)
5849		kmem_cache_free(odstate_slab, open->op_odstate);
5850}
5851
5852__be32
5853nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5854	    union nfsd4_op_u *u)
5855{
5856	clientid_t *clid = &u->renew;
5857	struct nfs4_client *clp;
5858	__be32 status;
5859	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
5860
5861	trace_nfsd_clid_renew(clid);
5862	status = set_client(clid, cstate, nn);
5863	if (status)
5864		return status;
5865	clp = cstate->clp;
5866	if (!list_empty(&clp->cl_delegations)
5867			&& clp->cl_cb_state != NFSD4_CB_UP)
5868		return nfserr_cb_path_down;
5869	return nfs_ok;
5870}
5871
5872void
5873nfsd4_end_grace(struct nfsd_net *nn)
5874{
5875	/* do nothing if grace period already ended */
5876	if (nn->grace_ended)
5877		return;
5878
5879	trace_nfsd_grace_complete(nn);
5880	nn->grace_ended = true;
5881	/*
5882	 * If the server goes down again right now, an NFSv4
5883	 * client will still be allowed to reclaim after it comes back up,
5884	 * even if it hasn't yet had a chance to reclaim state this time.
5885	 *
5886	 */
5887	nfsd4_record_grace_done(nn);
5888	/*
5889	 * At this point, NFSv4 clients can still reclaim.  But if the
5890	 * server crashes, any that have not yet reclaimed will be out
5891	 * of luck on the next boot.
5892	 *
5893	 * (NFSv4.1+ clients are considered to have reclaimed once they
5894	 * call RECLAIM_COMPLETE.  NFSv4.0 clients are considered to
5895	 * have reclaimed after their first OPEN.)
5896	 */
5897	locks_end_grace(&nn->nfsd4_manager);
5898	/*
5899	 * At this point, and once lockd and/or any other containers
5900	 * exit their grace period, further reclaims will fail and
5901	 * regular locking can resume.
5902	 */
5903}
5904
5905/*
5906 * If we've waited a lease period but there are still clients trying to
5907 * reclaim, wait a little longer to give them a chance to finish.
5908 */
5909static bool clients_still_reclaiming(struct nfsd_net *nn)
5910{
5911	time64_t double_grace_period_end = nn->boot_time +
5912					   2 * nn->nfsd4_lease;
5913
5914	if (nn->track_reclaim_completes &&
5915			atomic_read(&nn->nr_reclaim_complete) ==
5916			nn->reclaim_str_hashtbl_size)
5917		return false;
5918	if (!nn->somebody_reclaimed)
5919		return false;
5920	nn->somebody_reclaimed = false;
5921	/*
5922	 * If we've given them *two* lease times to reclaim, and they're
5923	 * still not done, give up:
5924	 */
5925	if (ktime_get_boottime_seconds() > double_grace_period_end)
5926		return false;
5927	return true;
5928}
5929
5930struct laundry_time {
5931	time64_t cutoff;
5932	time64_t new_timeo;
5933};
5934
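/*
 * Has this piece of state gone unrefreshed for more than a lease
 * period?  If not, remember how soon the laundromat needs to look at
 * it again.
 */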
5935static bool state_expired(struct laundry_time *lt, time64_t last_refresh)
5936{
5937	time64_t time_remaining;
5938
5939	if (last_refresh < lt->cutoff)
5940		return true;
5941	time_remaining = last_refresh - lt->cutoff;
5942	lt->new_timeo = min(lt->new_timeo, time_remaining);
5943	return false;
5944}
5945
5946#ifdef CONFIG_NFSD_V4_2_INTER_SSC
5947void nfsd4_ssc_init_umount_work(struct nfsd_net *nn)
5948{
5949	spin_lock_init(&nn->nfsd_ssc_lock);
5950	INIT_LIST_HEAD(&nn->nfsd_ssc_mount_list);
5951	init_waitqueue_head(&nn->nfsd_ssc_waitq);
5952}
5953EXPORT_SYMBOL_GPL(nfsd4_ssc_init_umount_work);
5954
5955/*
5956	 * This is called when nfsd is being shut down, after all inter_ssc
5957	 * cleanup has been done, to destroy the ssc delayed unmount list.
5958 */
5959static void nfsd4_ssc_shutdown_umount(struct nfsd_net *nn)
5960{
5961	struct nfsd4_ssc_umount_item *ni = NULL;
5962	struct nfsd4_ssc_umount_item *tmp;
5963
5964	spin_lock(&nn->nfsd_ssc_lock);
5965	list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
5966		list_del(&ni->nsui_list);
5967		spin_unlock(&nn->nfsd_ssc_lock);
5968		mntput(ni->nsui_vfsmount);
5969		kfree(ni);
5970		spin_lock(&nn->nfsd_ssc_lock);
5971	}
5972	spin_unlock(&nn->nfsd_ssc_lock);
5973}
5974
5975static void nfsd4_ssc_expire_umount(struct nfsd_net *nn)
5976{
5977	bool do_wakeup = false;
5978	struct nfsd4_ssc_umount_item *ni = NULL;
5979	struct nfsd4_ssc_umount_item *tmp;
5980
5981	spin_lock(&nn->nfsd_ssc_lock);
5982	list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
5983		if (time_after(jiffies, ni->nsui_expire)) {
5984			if (refcount_read(&ni->nsui_refcnt) > 1)
5985				continue;
5986
5987			/* mark as being unmounted */
5988			ni->nsui_busy = true;
5989			spin_unlock(&nn->nfsd_ssc_lock);
5990			mntput(ni->nsui_vfsmount);
5991			spin_lock(&nn->nfsd_ssc_lock);
5992
5993			/* waiters need to start from the beginning of the list */
5994			list_del(&ni->nsui_list);
5995			kfree(ni);
5996
5997			/* wakeup ssc_connect waiters */
5998			do_wakeup = true;
5999			continue;
6000		}
6001		break;
6002	}
6003	if (do_wakeup)
6004		wake_up_all(&nn->nfsd_ssc_waitq);
6005	spin_unlock(&nn->nfsd_ssc_lock);
6006}
6007#endif
6008
6009/* Check if any lock belonging to this lockowner has any blockers */
6010static bool
6011nfs4_lockowner_has_blockers(struct nfs4_lockowner *lo)
6012{
6013	struct file_lock_context *ctx;
6014	struct nfs4_ol_stateid *stp;
6015	struct nfs4_file *nf;
6016
6017	list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
6018		nf = stp->st_stid.sc_file;
6019		ctx = locks_inode_context(nf->fi_inode);
6020		if (!ctx)
6021			continue;
6022		if (locks_owner_has_blockers(ctx, lo))
6023			return true;
6024	}
6025	return false;
6026}
6027
6028static bool
6029nfs4_anylock_blockers(struct nfs4_client *clp)
6030{
6031	int i;
6032	struct nfs4_stateowner *so;
6033	struct nfs4_lockowner *lo;
6034
6035	if (atomic_read(&clp->cl_delegs_in_recall))
6036		return true;
6037	spin_lock(&clp->cl_lock);
6038	for (i = 0; i < OWNER_HASH_SIZE; i++) {
6039		list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[i],
6040				so_strhash) {
6041			if (so->so_is_open_owner)
6042				continue;
6043			lo = lockowner(so);
6044			if (nfs4_lockowner_has_blockers(lo)) {
6045				spin_unlock(&clp->cl_lock);
6046				return true;
6047			}
6048		}
6049	}
6050	spin_unlock(&clp->cl_lock);
6051	return false;
6052}
6053
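/*
 * Build the list of clients to expire.  Clients already marked
 * NFSD4_EXPIRABLE are always reaped.  Lease-expired clients become
 * courtesy clients, and are reaped if they hold no state, if they hold
 * state that another client is waiting on, or, when the client count
 * exceeds nn->nfs4_max_clients, until NFSD_CLIENT_MAX_TRIM_PER_RUN
 * clients have been collected in this pass.
 */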
6054static void
6055nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
6056				struct laundry_time *lt)
6057{
6058	unsigned int maxreap, reapcnt = 0;
6059	struct list_head *pos, *next;
6060	struct nfs4_client *clp;
6061
6062	maxreap = (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) ?
6063			NFSD_CLIENT_MAX_TRIM_PER_RUN : 0;
6064	INIT_LIST_HEAD(reaplist);
6065	spin_lock(&nn->client_lock);
6066	list_for_each_safe(pos, next, &nn->client_lru) {
6067		clp = list_entry(pos, struct nfs4_client, cl_lru);
6068		if (clp->cl_state == NFSD4_EXPIRABLE)
6069			goto exp_client;
6070		if (!state_expired(lt, clp->cl_time))
6071			break;
6072		if (!atomic_read(&clp->cl_rpc_users)) {
6073			if (clp->cl_state == NFSD4_ACTIVE)
6074				atomic_inc(&nn->nfsd_courtesy_clients);
6075			clp->cl_state = NFSD4_COURTESY;
6076		}
6077		if (!client_has_state(clp))
6078			goto exp_client;
6079		if (!nfs4_anylock_blockers(clp))
6080			if (reapcnt >= maxreap)
6081				continue;
6082exp_client:
6083		if (!mark_client_expired_locked(clp)) {
6084			list_add(&clp->cl_lru, reaplist);
6085			reapcnt++;
6086		}
6087	}
6088	spin_unlock(&nn->client_lock);
6089}
6090
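/*
 * Collect up to NFSD_CLIENT_MAX_TRIM_PER_RUN non-active (courtesy or
 * expirable) clients from the front of the LRU, for the state shrinker
 * to expire.
 */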
6091static void
6092nfs4_get_courtesy_client_reaplist(struct nfsd_net *nn,
6093				struct list_head *reaplist)
6094{
6095	unsigned int maxreap = 0, reapcnt = 0;
6096	struct list_head *pos, *next;
6097	struct nfs4_client *clp;
6098
6099	maxreap = NFSD_CLIENT_MAX_TRIM_PER_RUN;
6100	INIT_LIST_HEAD(reaplist);
6101
6102	spin_lock(&nn->client_lock);
6103	list_for_each_safe(pos, next, &nn->client_lru) {
6104		clp = list_entry(pos, struct nfs4_client, cl_lru);
6105		if (clp->cl_state == NFSD4_ACTIVE)
6106			break;
6107		if (reapcnt >= maxreap)
6108			break;
6109		if (!mark_client_expired_locked(clp)) {
6110			list_add(&clp->cl_lru, reaplist);
6111			reapcnt++;
6112		}
6113	}
6114	spin_unlock(&nn->client_lock);
6115}
6116
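/* Expire every client on a reaplist built by one of the helpers above. */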
6117static void
6118nfs4_process_client_reaplist(struct list_head *reaplist)
6119{
6120	struct list_head *pos, *next;
6121	struct nfs4_client *clp;
6122
6123	list_for_each_safe(pos, next, reaplist) {
6124		clp = list_entry(pos, struct nfs4_client, cl_lru);
6125		trace_nfsd_clid_purged(&clp->cl_clientid);
6126		list_del_init(&clp->cl_lru);
6127		expire_client(clp);
6128	}
6129}
6130
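/*
 * The laundromat proper: walk the time-ordered LRU lists and expire any
 * clients, copy-notify stateids, delegations, closed stateids and blocked
 * locks whose time has run out.  Returns the number of seconds until the
 * next run.
 */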
6131static time64_t
6132nfs4_laundromat(struct nfsd_net *nn)
6133{
6134	struct nfs4_openowner *oo;
6135	struct nfs4_delegation *dp;
6136	struct nfs4_ol_stateid *stp;
6137	struct nfsd4_blocked_lock *nbl;
6138	struct list_head *pos, *next, reaplist;
6139	struct laundry_time lt = {
6140		.cutoff = ktime_get_boottime_seconds() - nn->nfsd4_lease,
6141		.new_timeo = nn->nfsd4_lease
6142	};
6143	struct nfs4_cpntf_state *cps;
6144	copy_stateid_t *cps_t;
6145	int i;
6146
6147	if (clients_still_reclaiming(nn)) {
6148		lt.new_timeo = 0;
6149		goto out;
6150	}
6151	nfsd4_end_grace(nn);
6152
6153	spin_lock(&nn->s2s_cp_lock);
6154	idr_for_each_entry(&nn->s2s_cp_stateids, cps_t, i) {
6155		cps = container_of(cps_t, struct nfs4_cpntf_state, cp_stateid);
6156		if (cps->cp_stateid.cs_type == NFS4_COPYNOTIFY_STID &&
6157				state_expired(&lt, cps->cpntf_time))
6158			_free_cpntf_state_locked(nn, cps);
6159	}
6160	spin_unlock(&nn->s2s_cp_lock);
6161	nfs4_get_client_reaplist(nn, &reaplist, &lt);
6162	nfs4_process_client_reaplist(&reaplist);
6163
6164	spin_lock(&state_lock);
6165	list_for_each_safe(pos, next, &nn->del_recall_lru) {
6166		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
6167		if (!state_expired(&lt, dp->dl_time))
6168			break;
6169		WARN_ON(!unhash_delegation_locked(dp));
6170		list_add(&dp->dl_recall_lru, &reaplist);
6171	}
6172	spin_unlock(&state_lock);
6173	while (!list_empty(&reaplist)) {
6174		dp = list_first_entry(&reaplist, struct nfs4_delegation,
6175					dl_recall_lru);
6176		list_del_init(&dp->dl_recall_lru);
6177		revoke_delegation(dp);
6178	}
6179
6180	spin_lock(&nn->client_lock);
6181	while (!list_empty(&nn->close_lru)) {
6182		oo = list_first_entry(&nn->close_lru, struct nfs4_openowner,
6183					oo_close_lru);
6184		if (!state_expired(&lt, oo->oo_time))
6185			break;
6186		list_del_init(&oo->oo_close_lru);
6187		stp = oo->oo_last_closed_stid;
6188		oo->oo_last_closed_stid = NULL;
6189		spin_unlock(&nn->client_lock);
6190		nfs4_put_stid(&stp->st_stid);
6191		spin_lock(&nn->client_lock);
6192	}
6193	spin_unlock(&nn->client_lock);
6194
6195	/*
6196	 * It's possible for a client to try to acquire an already held lock
6197	 * that is being held for a long time, and then lose interest in it.
6198	 * So, we clean out any un-revisited request after a lease period
6199	 * under the assumption that the client is no longer interested.
6200	 *
6201	 * RFC5661, sec. 9.6 states that the client must not rely on getting
6202	 * notifications and must continue to poll for locks, even when the
6203	 * server supports them. Thus this shouldn't lead to clients blocking
6204	 * indefinitely once the lock does become free.
6205	 */
6206	BUG_ON(!list_empty(&reaplist));
6207	spin_lock(&nn->blocked_locks_lock);
6208	while (!list_empty(&nn->blocked_locks_lru)) {
6209		nbl = list_first_entry(&nn->blocked_locks_lru,
6210					struct nfsd4_blocked_lock, nbl_lru);
6211		if (!state_expired(&lt, nbl->nbl_time))
6212			break;
6213		list_move(&nbl->nbl_lru, &reaplist);
6214		list_del_init(&nbl->nbl_list);
6215	}
6216	spin_unlock(&nn->blocked_locks_lock);
6217
6218	while (!list_empty(&reaplist)) {
6219		nbl = list_first_entry(&reaplist,
6220					struct nfsd4_blocked_lock, nbl_lru);
6221		list_del_init(&nbl->nbl_lru);
6222		free_blocked_lock(nbl);
6223	}
6224#ifdef CONFIG_NFSD_V4_2_INTER_SSC
6225	/* service the server-to-server copy delayed unmount list */
6226	nfsd4_ssc_expire_umount(nn);
6227#endif
6228out:
6229	return max_t(time64_t, lt.new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
6230}
6231
6232static void laundromat_main(struct work_struct *);
6233
6234static void
6235laundromat_main(struct work_struct *laundry)
6236{
6237	time64_t t;
6238	struct delayed_work *dwork = to_delayed_work(laundry);
6239	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
6240					   laundromat_work);
6241
6242	t = nfs4_laundromat(nn);
6243	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
6244}
6245
6246static void
6247courtesy_client_reaper(struct nfsd_net *nn)
6248{
6249	struct list_head reaplist;
6250
6251	nfs4_get_courtesy_client_reaplist(nn, &reaplist);
6252	nfs4_process_client_reaplist(&reaplist);
6253}
6254
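/*
 * Ask active clients that hold delegations (and have not been asked
 * recently) to voluntarily return read delegations, by sending a
 * CB_RECALL_ANY callback.
 */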
6255static void
6256deleg_reaper(struct nfsd_net *nn)
6257{
6258	struct list_head *pos, *next;
6259	struct nfs4_client *clp;
6260	struct list_head cblist;
6261
6262	INIT_LIST_HEAD(&cblist);
6263	spin_lock(&nn->client_lock);
6264	list_for_each_safe(pos, next, &nn->client_lru) {
6265		clp = list_entry(pos, struct nfs4_client, cl_lru);
6266		if (clp->cl_state != NFSD4_ACTIVE ||
6267			list_empty(&clp->cl_delegations) ||
6268			atomic_read(&clp->cl_delegs_in_recall) ||
6269			test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags) ||
6270			(ktime_get_boottime_seconds() -
6271				clp->cl_ra_time < 5)) {
6272			continue;
6273		}
6274		list_add(&clp->cl_ra_cblist, &cblist);
6275
6276		/* release in nfsd4_cb_recall_any_release */
6277		atomic_inc(&clp->cl_rpc_users);
6278		set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
6279		clp->cl_ra_time = ktime_get_boottime_seconds();
6280	}
6281	spin_unlock(&nn->client_lock);
6282
6283	while (!list_empty(&cblist)) {
6284		clp = list_first_entry(&cblist, struct nfs4_client,
6285					cl_ra_cblist);
6286		list_del_init(&clp->cl_ra_cblist);
6287		clp->cl_ra->ra_keep = 0;
6288		clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG);
6289		trace_nfsd_cb_recall_any(clp->cl_ra);
6290		nfsd4_run_cb(&clp->cl_ra->ra_cb);
6291	}
6292}
6293
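/*
 * Worker for nfsd's state shrinker: trim courtesy clients and ask active
 * clients to return delegations via CB_RECALL_ANY.
 */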
6294static void
6295nfsd4_state_shrinker_worker(struct work_struct *work)
6296{
6297	struct nfsd_net *nn = container_of(work, struct nfsd_net,
6298				nfsd_shrinker_work);
6299
6300	courtesy_client_reaper(nn);
6301	deleg_reaper(nn);
6302}
6303
6304static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
6305{
6306	if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
6307		return nfserr_bad_stateid;
6308	return nfs_ok;
6309}
6310
6311static
6312__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
6313{
6314	__be32 status = nfserr_openmode;
6315
6316	/* For lock stateids, we test the parent open, not the lock: */
6317	if (stp->st_openstp)
6318		stp = stp->st_openstp;
6319	if ((flags & WR_STATE) && !access_permit_write(stp))
6320		goto out;
6321	if ((flags & RD_STATE) && !access_permit_read(stp))
6322		goto out;
6323	status = nfs_ok;
6324out:
6325	return status;
6326}
6327
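/*
 * Handle the special all-zeros and all-ones stateids: reads using the
 * all-ones stateid always succeed here; otherwise the answer depends on
 * conflicting share reservations, and on whether we are still in grace.
 */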
6328static inline __be32
6329check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
6330{
6331	if (ONE_STATEID(stateid) && (flags & RD_STATE))
6332		return nfs_ok;
6333	else if (opens_in_grace(net)) {
6334		/* The answer in the remaining cases depends on the existence
6335		 * of conflicting state, so we must wait out the grace period. */
6336		return nfserr_grace;
6337	} else if (flags & WR_STATE)
6338		return nfs4_share_conflict(current_fh,
6339				NFS4_SHARE_DENY_WRITE);
6340	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
6341		return nfs4_share_conflict(current_fh,
6342				NFS4_SHARE_DENY_READ);
6343}
6344
6345static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
6346{
6347	/*
6348	 * When sessions are used the stateid generation number is ignored
6349	 * when it is zero.
6350	 */
6351	if (has_session && in->si_generation == 0)
6352		return nfs_ok;
6353
6354	if (in->si_generation == ref->si_generation)
6355		return nfs_ok;
6356
6357	/* If the client sends us a stateid from the future, it's buggy: */
6358	if (nfsd4_stateid_generation_after(in, ref))
6359		return nfserr_bad_stateid;
6360	/*
6361	 * However, we could see a stateid from the past, even from a
6362	 * non-buggy client.  For example, if the client sends a lock
6363	 * while some IO is outstanding, the lock may bump si_generation
6364	 * while the IO is still in flight.  The client could avoid that
6365	 * situation by waiting for responses on all the IO requests,
6366	 * but better performance may result in retrying IO that
6367	 * receives an old_stateid error if requests are rarely
6368	 * reordered in flight:
6369	 */
6370	return nfserr_old_stateid;
6371}
6372
6373static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
6374{
6375	__be32 ret;
6376
6377	spin_lock(&s->sc_lock);
6378	ret = nfsd4_verify_open_stid(s);
6379	if (ret == nfs_ok)
6380		ret = check_stateid_generation(in, &s->sc_stateid, has_session);
6381	spin_unlock(&s->sc_lock);
6382	return ret;
6383}
6384
6385static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
6386{
6387	if (ols->st_stateowner->so_is_open_owner &&
6388	    !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
6389		return nfserr_bad_stateid;
6390	return nfs_ok;
6391}
6392
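/*
 * Report the current status of a single stateid for TEST_STATEID, without
 * taking a reference on it.
 */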
6393static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
6394{
6395	struct nfs4_stid *s;
6396	__be32 status = nfserr_bad_stateid;
6397
6398	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
6399		CLOSE_STATEID(stateid))
6400		return status;
6401	spin_lock(&cl->cl_lock);
6402	s = find_stateid_locked(cl, stateid);
6403	if (!s)
6404		goto out_unlock;
6405	status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
6406	if (status)
6407		goto out_unlock;
6408	switch (s->sc_type) {
6409	case NFS4_DELEG_STID:
6410		status = nfs_ok;
6411		break;
6412	case NFS4_REVOKED_DELEG_STID:
6413		status = nfserr_deleg_revoked;
6414		break;
6415	case NFS4_OPEN_STID:
6416	case NFS4_LOCK_STID:
6417		status = nfsd4_check_openowner_confirmed(openlockstateid(s));
6418		break;
6419	default:
6420		printk("unknown stateid type %x\n", s->sc_type);
6421		fallthrough;
6422	case NFS4_CLOSED_STID:
6423	case NFS4_CLOSED_DELEG_STID:
6424		status = nfserr_bad_stateid;
6425	}
6426out_unlock:
6427	spin_unlock(&cl->cl_lock);
6428	return status;
6429}
6430
6431__be32
6432nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
6433		     stateid_t *stateid, unsigned char typemask,
6434		     struct nfs4_stid **s, struct nfsd_net *nn)
6435{
6436	__be32 status;
6437	struct nfs4_stid *stid;
6438	bool return_revoked = false;
6439
6440	/*
6441	 * Only return revoked delegations if explicitly asked;
6442	 * otherwise we report revoked or bad_stateid status.
6443	 */
6444	if (typemask & NFS4_REVOKED_DELEG_STID)
6445		return_revoked = true;
6446	else if (typemask & NFS4_DELEG_STID)
6447		typemask |= NFS4_REVOKED_DELEG_STID;
6448
6449	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
6450		CLOSE_STATEID(stateid))
6451		return nfserr_bad_stateid;
6452	status = set_client(&stateid->si_opaque.so_clid, cstate, nn);
6453	if (status == nfserr_stale_clientid) {
6454		if (cstate->session)
6455			return nfserr_bad_stateid;
6456		return nfserr_stale_stateid;
6457	}
6458	if (status)
6459		return status;
6460	stid = find_stateid_by_type(cstate->clp, stateid, typemask);
6461	if (!stid)
6462		return nfserr_bad_stateid;
6463	if ((stid->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
6464		nfs4_put_stid(stid);
6465		if (cstate->minorversion)
6466			return nfserr_deleg_revoked;
6467		return nfserr_bad_stateid;
6468	}
6469	*s = stid;
6470	return nfs_ok;
6471}
6472
6473static struct nfsd_file *
6474nfs4_find_file(struct nfs4_stid *s, int flags)
6475{
6476	struct nfsd_file *ret = NULL;
6477
6478	if (!s)
6479		return NULL;
6480
6481	switch (s->sc_type) {
6482	case NFS4_DELEG_STID:
6483		spin_lock(&s->sc_file->fi_lock);
6484		ret = nfsd_file_get(s->sc_file->fi_deleg_file);
6485		spin_unlock(&s->sc_file->fi_lock);
6486		break;
6487	case NFS4_OPEN_STID:
6488	case NFS4_LOCK_STID:
6489		if (flags & RD_STATE)
6490			ret = find_readable_file(s->sc_file);
6491		else
6492			ret = find_writeable_file(s->sc_file);
6493	}
6494
6495	return ret;
6496}
6497
6498static __be32
6499nfs4_check_olstateid(struct nfs4_ol_stateid *ols, int flags)
6500{
6501	__be32 status;
6502
6503	status = nfsd4_check_openowner_confirmed(ols);
6504	if (status)
6505		return status;
6506	return nfs4_check_openmode(ols, flags);
6507}
6508
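/*
 * Find an nfsd_file for this stateid and access mode and verify the
 * permission on it; if the stateid carries no usable open, fall back to
 * opening the file by filehandle.
 */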
6509static __be32
6510nfs4_check_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_stid *s,
6511		struct nfsd_file **nfp, int flags)
6512{
6513	int acc = (flags & RD_STATE) ? NFSD_MAY_READ : NFSD_MAY_WRITE;
6514	struct nfsd_file *nf;
6515	__be32 status;
6516
6517	nf = nfs4_find_file(s, flags);
6518	if (nf) {
6519		status = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
6520				acc | NFSD_MAY_OWNER_OVERRIDE);
6521		if (status) {
6522			nfsd_file_put(nf);
6523			goto out;
6524		}
6525	} else {
6526		status = nfsd_file_acquire(rqstp, fhp, acc, &nf);
6527		if (status)
6528			return status;
6529	}
6530	*nfp = nf;
6531out:
6532	return status;
6533}
6534static void
6535_free_cpntf_state_locked(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
6536{
6537	WARN_ON_ONCE(cps->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID);
6538	if (!refcount_dec_and_test(&cps->cp_stateid.cs_count))
6539		return;
6540	list_del(&cps->cp_list);
6541	idr_remove(&nn->s2s_cp_stateids,
6542		   cps->cp_stateid.cs_stid.si_opaque.so_id);
6543	kfree(cps);
6544}
6545/*
6546 * A READ from an inter-server-to-server COPY will have a
6547 * copy stateid. Look up the copy notify stateid from the
6548 * idr structure and take a reference on it.
6549 */
6550__be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,
6551			  struct nfs4_client *clp,
6552			  struct nfs4_cpntf_state **cps)
6553{
6554	copy_stateid_t *cps_t;
6555	struct nfs4_cpntf_state *state = NULL;
6556
6557	if (st->si_opaque.so_clid.cl_id != nn->s2s_cp_cl_id)
6558		return nfserr_bad_stateid;
6559	spin_lock(&nn->s2s_cp_lock);
6560	cps_t = idr_find(&nn->s2s_cp_stateids, st->si_opaque.so_id);
6561	if (cps_t) {
6562		state = container_of(cps_t, struct nfs4_cpntf_state,
6563				     cp_stateid);
6564		if (state->cp_stateid.cs_type != NFS4_COPYNOTIFY_STID) {
6565			state = NULL;
6566			goto unlock;
6567		}
6568		if (!clp)
6569			refcount_inc(&state->cp_stateid.cs_count);
6570		else
6571			_free_cpntf_state_locked(nn, state);
6572	}
6573unlock:
6574	spin_unlock(&nn->s2s_cp_lock);
6575	if (!state)
6576		return nfserr_bad_stateid;
6577	if (!clp)
6578		*cps = state;
6579	return 0;
6580}
6581
6582static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
6583			       struct nfs4_stid **stid)
6584{
6585	__be32 status;
6586	struct nfs4_cpntf_state *cps = NULL;
6587	struct nfs4_client *found;
6588
6589	status = manage_cpntf_state(nn, st, NULL, &cps);
6590	if (status)
6591		return status;
6592
6593	cps->cpntf_time = ktime_get_boottime_seconds();
6594
6595	status = nfserr_expired;
6596	found = lookup_clientid(&cps->cp_p_clid, true, nn);
6597	if (!found)
6598		goto out;
6599
6600	*stid = find_stateid_by_type(found, &cps->cp_p_stateid,
6601			NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID);
6602	if (*stid)
6603		status = nfs_ok;
6604	else
6605		status = nfserr_bad_stateid;
6606
6607	put_client_renew(found);
6608out:
6609	nfs4_put_cpntf_state(nn, cps);
6610	return status;
6611}
6612
6613void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
6614{
6615	spin_lock(&nn->s2s_cp_lock);
6616	_free_cpntf_state_locked(nn, cps);
6617	spin_unlock(&nn->s2s_cp_lock);
6618}
6619
6620/**
6621 * nfs4_preprocess_stateid_op - find and prep stateid for an operation
6622 * @rqstp: incoming request from client
6623 * @cstate: current compound state
6624 * @fhp: filehandle associated with requested stateid
6625 * @stateid: stateid (provided by client)
6626 * @flags: flags describing type of operation to be done
6627 * @nfp: optional nfsd_file return pointer (may be NULL)
6628 * @cstid: optional returned nfs4_stid pointer (may be NULL)
6629 *
6630 * Given info from the client, look up an nfs4_stid for the operation. On
6631 * success, it returns a reference to the nfs4_stid and/or the nfsd_file
6632 * associated with it.
6633 */
6634__be32
6635nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
6636		struct nfsd4_compound_state *cstate, struct svc_fh *fhp,
6637		stateid_t *stateid, int flags, struct nfsd_file **nfp,
6638		struct nfs4_stid **cstid)
6639{
6640	struct net *net = SVC_NET(rqstp);
6641	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
6642	struct nfs4_stid *s = NULL;
6643	__be32 status;
6644
6645	if (nfp)
6646		*nfp = NULL;
6647
6648	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
6649		if (cstid)
6650			status = nfserr_bad_stateid;
6651		else
6652			status = check_special_stateids(net, fhp, stateid,
6653									flags);
6654		goto done;
6655	}
6656
6657	status = nfsd4_lookup_stateid(cstate, stateid,
6658				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
6659				&s, nn);
6660	if (status == nfserr_bad_stateid)
6661		status = find_cpntf_state(nn, stateid, &s);
6662	if (status)
6663		return status;
6664	status = nfsd4_stid_check_stateid_generation(stateid, s,
6665			nfsd4_has_session(cstate));
6666	if (status)
6667		goto out;
6668
6669	switch (s->sc_type) {
6670	case NFS4_DELEG_STID:
6671		status = nfs4_check_delegmode(delegstateid(s), flags);
6672		break;
6673	case NFS4_OPEN_STID:
6674	case NFS4_LOCK_STID:
6675		status = nfs4_check_olstateid(openlockstateid(s), flags);
6676		break;
6677	default:
6678		status = nfserr_bad_stateid;
6679		break;
6680	}
6681	if (status)
6682		goto out;
6683	status = nfs4_check_fh(fhp, s);
6684
6685done:
6686	if (status == nfs_ok && nfp)
6687		status = nfs4_check_file(rqstp, fhp, s, nfp, flags);
6688out:
6689	if (s) {
6690		if (!status && cstid)
6691			*cstid = s;
6692		else
6693			nfs4_put_stid(s);
6694	}
6695	return status;
6696}
6697
6698/*
6699 * Test if the stateid is valid
6700 */
6701__be32
6702nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6703		   union nfsd4_op_u *u)
6704{
6705	struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
6706	struct nfsd4_test_stateid_id *stateid;
6707	struct nfs4_client *cl = cstate->clp;
6708
6709	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
6710		stateid->ts_id_status =
6711			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
6712
6713	return nfs_ok;
6714}
6715
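/*
 * FREE_STATEID helper for lock stateids: release the stateid unless the
 * lockowner still holds locks on the file.
 */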
6716static __be32
6717nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s)
6718{
6719	struct nfs4_ol_stateid *stp = openlockstateid(s);
6720	__be32 ret;
6721
6722	ret = nfsd4_lock_ol_stateid(stp);
6723	if (ret)
6724		goto out_put_stid;
6725
6726	ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
6727	if (ret)
6728		goto out;
6729
6730	ret = nfserr_locks_held;
6731	if (check_for_locks(stp->st_stid.sc_file,
6732			    lockowner(stp->st_stateowner)))
6733		goto out;
6734
6735	release_lock_stateid(stp);
6736	ret = nfs_ok;
6737
6738out:
6739	mutex_unlock(&stp->st_mutex);
6740out_put_stid:
6741	nfs4_put_stid(s);
6742	return ret;
6743}
6744
6745__be32
6746nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6747		   union nfsd4_op_u *u)
6748{
6749	struct nfsd4_free_stateid *free_stateid = &u->free_stateid;
6750	stateid_t *stateid = &free_stateid->fr_stateid;
6751	struct nfs4_stid *s;
6752	struct nfs4_delegation *dp;
6753	struct nfs4_client *cl = cstate->clp;
6754	__be32 ret = nfserr_bad_stateid;
6755
6756	spin_lock(&cl->cl_lock);
6757	s = find_stateid_locked(cl, stateid);
6758	if (!s)
6759		goto out_unlock;
6760	spin_lock(&s->sc_lock);
6761	switch (s->sc_type) {
6762	case NFS4_DELEG_STID:
6763		ret = nfserr_locks_held;
6764		break;
6765	case NFS4_OPEN_STID:
6766		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
6767		if (ret)
6768			break;
6769		ret = nfserr_locks_held;
6770		break;
6771	case NFS4_LOCK_STID:
6772		spin_unlock(&s->sc_lock);
6773		refcount_inc(&s->sc_count);
6774		spin_unlock(&cl->cl_lock);
6775		ret = nfsd4_free_lock_stateid(stateid, s);
6776		goto out;
6777	case NFS4_REVOKED_DELEG_STID:
6778		spin_unlock(&s->sc_lock);
6779		dp = delegstateid(s);
6780		list_del_init(&dp->dl_recall_lru);
6781		spin_unlock(&cl->cl_lock);
6782		nfs4_put_stid(s);
6783		ret = nfs_ok;
6784		goto out;
6785	/* Default falls through and returns nfserr_bad_stateid */
6786	}
6787	spin_unlock(&s->sc_lock);
6788out_unlock:
6789	spin_unlock(&cl->cl_lock);
6790out:
6791	return ret;
6792}
6793
6794static inline int
6795setlkflg (int type)
6796{
6797	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
6798		RD_STATE : WR_STATE;
6799}
6800
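/*
 * Checks common to all seqid-morphing operations: verify the seqid, lock
 * st_mutex, and check stateid generation and filehandle.  On an nfs_ok
 * return, st_mutex is left held.
 */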
6801static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
6802{
6803	struct svc_fh *current_fh = &cstate->current_fh;
6804	struct nfs4_stateowner *sop = stp->st_stateowner;
6805	__be32 status;
6806
6807	status = nfsd4_check_seqid(cstate, sop, seqid);
6808	if (status)
6809		return status;
6810	status = nfsd4_lock_ol_stateid(stp);
6811	if (status != nfs_ok)
6812		return status;
6813	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
6814	if (status == nfs_ok)
6815		status = nfs4_check_fh(current_fh, &stp->st_stid);
6816	if (status != nfs_ok)
6817		mutex_unlock(&stp->st_mutex);
6818	return status;
6819}
6820
6821/**
6822 * nfs4_preprocess_seqid_op - find and prep an ol_stateid for a seqid-morphing op
6823 * @cstate: compound state
6824 * @seqid: seqid (provided by client)
6825 * @stateid: stateid (provided by client)
6826 * @typemask: mask of allowable types for this operation
6827 * @stpp: return pointer for the stateid found
6828 * @nn: net namespace for request
6829 *
6830 * Given a stateid+seqid from a client, look up an nfs4_ol_stateid and
6831 * return it in @stpp. On an nfs_ok return, the returned stateid will
6832 * have its st_mutex locked.
6833 */
6834static __be32
6835nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6836			 stateid_t *stateid, char typemask,
6837			 struct nfs4_ol_stateid **stpp,
6838			 struct nfsd_net *nn)
6839{
6840	__be32 status;
6841	struct nfs4_stid *s;
6842	struct nfs4_ol_stateid *stp = NULL;
6843
6844	trace_nfsd_preprocess(seqid, stateid);
6845
6846	*stpp = NULL;
6847	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
6848	if (status)
6849		return status;
6850	stp = openlockstateid(s);
6851	nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);
6852
6853	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
6854	if (!status)
6855		*stpp = stp;
6856	else
6857		nfs4_put_stid(&stp->st_stid);
6858	return status;
6859}
6860
6861static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
6862						 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
6863{
6864	__be32 status;
6865	struct nfs4_openowner *oo;
6866	struct nfs4_ol_stateid *stp;
6867
6868	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
6869						NFS4_OPEN_STID, &stp, nn);
6870	if (status)
6871		return status;
6872	oo = openowner(stp->st_stateowner);
6873	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
6874		mutex_unlock(&stp->st_mutex);
6875		nfs4_put_stid(&stp->st_stid);
6876		return nfserr_bad_stateid;
6877	}
6878	*stpp = stp;
6879	return nfs_ok;
6880}
6881
6882__be32
6883nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6884		   union nfsd4_op_u *u)
6885{
6886	struct nfsd4_open_confirm *oc = &u->open_confirm;
6887	__be32 status;
6888	struct nfs4_openowner *oo;
6889	struct nfs4_ol_stateid *stp;
6890	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6891
6892	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
6893			cstate->current_fh.fh_dentry);
6894
6895	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
6896	if (status)
6897		return status;
6898
6899	status = nfs4_preprocess_seqid_op(cstate,
6900					oc->oc_seqid, &oc->oc_req_stateid,
6901					NFS4_OPEN_STID, &stp, nn);
6902	if (status)
6903		goto out;
6904	oo = openowner(stp->st_stateowner);
6905	status = nfserr_bad_stateid;
6906	if (oo->oo_flags & NFS4_OO_CONFIRMED) {
6907		mutex_unlock(&stp->st_mutex);
6908		goto put_stateid;
6909	}
6910	oo->oo_flags |= NFS4_OO_CONFIRMED;
6911	nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
6912	mutex_unlock(&stp->st_mutex);
6913	trace_nfsd_open_confirm(oc->oc_seqid, &stp->st_stid.sc_stateid);
6914	nfsd4_client_record_create(oo->oo_owner.so_client);
6915	status = nfs_ok;
6916put_stateid:
6917	nfs4_put_stid(&stp->st_stid);
6918out:
6919	nfsd4_bump_seqid(cstate, status);
6920	return status;
6921}
6922
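/* Drop one share-access bit from an open stateid, if it is currently set. */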
6923static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
6924{
6925	if (!test_access(access, stp))
6926		return;
6927	nfs4_file_put_access(stp->st_stid.sc_file, access);
6928	clear_access(access, stp);
6929}
6930
6931static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
6932{
6933	switch (to_access) {
6934	case NFS4_SHARE_ACCESS_READ:
6935		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
6936		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6937		break;
6938	case NFS4_SHARE_ACCESS_WRITE:
6939		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
6940		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
6941		break;
6942	case NFS4_SHARE_ACCESS_BOTH:
6943		break;
6944	default:
6945		WARN_ON_ONCE(1);
6946	}
6947}
6948
6949__be32
6950nfsd4_open_downgrade(struct svc_rqst *rqstp,
6951		     struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
6952{
6953	struct nfsd4_open_downgrade *od = &u->open_downgrade;
6954	__be32 status;
6955	struct nfs4_ol_stateid *stp;
6956	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
6957
6958	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
6959			cstate->current_fh.fh_dentry);
6960
6961	/* We don't yet support WANT bits: */
6962	if (od->od_deleg_want)
6963		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
6964			od->od_deleg_want);
6965
6966	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
6967					&od->od_stateid, &stp, nn);
6968	if (status)
6969		goto out; 
6970	status = nfserr_inval;
6971	if (!test_access(od->od_share_access, stp)) {
6972		dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
6973			stp->st_access_bmap, od->od_share_access);
6974		goto put_stateid;
6975	}
6976	if (!test_deny(od->od_share_deny, stp)) {
6977		dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
6978			stp->st_deny_bmap, od->od_share_deny);
6979		goto put_stateid;
6980	}
6981	nfs4_stateid_downgrade(stp, od->od_share_access);
6982	reset_union_bmap_deny(od->od_share_deny, stp);
6983	nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
6984	status = nfs_ok;
6985put_stateid:
6986	mutex_unlock(&stp->st_mutex);
6987	nfs4_put_stid(&stp->st_stid);
6988out:
6989	nfsd4_bump_seqid(cstate, status);
6990	return status;
6991}
6992
6993static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
6994{
6995	struct nfs4_client *clp = s->st_stid.sc_client;
6996	bool unhashed;
6997	LIST_HEAD(reaplist);
6998	struct nfs4_ol_stateid *stp;
6999
7000	spin_lock(&clp->cl_lock);
7001	unhashed = unhash_open_stateid(s, &reaplist);
7002
7003	if (clp->cl_minorversion) {
7004		if (unhashed)
7005			put_ol_stateid_locked(s, &reaplist);
7006		spin_unlock(&clp->cl_lock);
7007		list_for_each_entry(stp, &reaplist, st_locks)
7008			nfs4_free_cpntf_statelist(clp->net, &stp->st_stid);
7009		free_ol_stateid_reaplist(&reaplist);
7010	} else {
7011		spin_unlock(&clp->cl_lock);
7012		free_ol_stateid_reaplist(&reaplist);
7013		if (unhashed)
7014			move_to_close_lru(s, clp->net);
7015	}
7016}
7017
7018/*
7019 * nfs4_unlock_state() called after encode
7020 */
7021__be32
7022nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7023		union nfsd4_op_u *u)
7024{
7025	struct nfsd4_close *close = &u->close;
7026	__be32 status;
7027	struct nfs4_ol_stateid *stp;
7028	struct net *net = SVC_NET(rqstp);
7029	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7030
7031	dprintk("NFSD: nfsd4_close on file %pd\n",
7032			cstate->current_fh.fh_dentry);
7033
7034	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
7035					&close->cl_stateid,
7036					NFS4_OPEN_STID|NFS4_CLOSED_STID,
7037					&stp, nn);
7038	nfsd4_bump_seqid(cstate, status);
7039	if (status)
7040		goto out; 
7041
7042	stp->st_stid.sc_type = NFS4_CLOSED_STID;
7043
7044	/*
7045	 * Technically we don't _really_ have to increment or copy it, since
7046	 * it should just be gone after this operation and we clobber the
7047	 * copied value below, but we continue to do so here just to ensure
7048	 * that racing ops see that there was a state change.
7049	 */
7050	nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
7051
7052	nfsd4_close_open_stateid(stp);
7053	mutex_unlock(&stp->st_mutex);
7054
7055	/* v4.1+ suggests that we send a special stateid in here, since the
7056	 * clients should just ignore this anyway. Since this is not useful
7057	 * for v4.0 clients either, we set it to the special close_stateid
7058	 * universally.
7059	 *
7060	 * See RFC5661 section 18.2.4, and RFC7530 section 16.2.5
7061	 */
7062	memcpy(&close->cl_stateid, &close_stateid, sizeof(close->cl_stateid));
7063
7064	/* put reference from nfs4_preprocess_seqid_op */
7065	nfs4_put_stid(&stp->st_stid);
7066out:
7067	return status;
7068}
7069
7070__be32
7071nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7072		  union nfsd4_op_u *u)
7073{
7074	struct nfsd4_delegreturn *dr = &u->delegreturn;
7075	struct nfs4_delegation *dp;
7076	stateid_t *stateid = &dr->dr_stateid;
7077	struct nfs4_stid *s;
7078	__be32 status;
7079	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7080
7081	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
7082		return status;
7083
7084	status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
7085	if (status)
7086		goto out;
7087	dp = delegstateid(s);
7088	status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
7089	if (status)
7090		goto put_stateid;
7091
7092	trace_nfsd_deleg_return(stateid);
7093	wake_up_var(d_inode(cstate->current_fh.fh_dentry));
7094	destroy_delegation(dp);
7095put_stateid:
7096	nfs4_put_stid(&dp->dl_stid);
7097out:
7098	return status;
7099}
7100
7101/* last octet in a range */
7102static inline u64
7103last_byte_offset(u64 start, u64 len)
7104{
7105	u64 end;
7106
7107	WARN_ON_ONCE(!len);
7108	end = start + len;
7109	return end > start ? end - 1: NFS4_MAX_UINT64;
7110}
7111
7112/*
7113 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
7114 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
7115 * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
7116 * locking, this prevents us from being completely protocol-compliant.  The
7117 * real solution to this problem is to start using unsigned file offsets in
7118 * the VFS, but this is a very deep change!
7119 */
7120static inline void
7121nfs4_transform_lock_offset(struct file_lock *lock)
7122{
7123	if (lock->fl_start < 0)
7124		lock->fl_start = OFFSET_MAX;
7125	if (lock->fl_end < 0)
7126		lock->fl_end = OFFSET_MAX;
7127}
7128
7129static fl_owner_t
7130nfsd4_lm_get_owner(fl_owner_t owner)
7131{
7132	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
7133
7134	nfs4_get_stateowner(&lo->lo_owner);
7135	return owner;
7136}
7137
7138static void
7139nfsd4_lm_put_owner(fl_owner_t owner)
7140{
7141	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)owner;
7142
7143	if (lo)
7144		nfs4_put_stateowner(&lo->lo_owner);
7145}
7146
7147/* return true if the client owning this lock is expirable */
7148static bool
7149nfsd4_lm_lock_expirable(struct file_lock *cfl)
7150{
7151	struct nfs4_lockowner *lo = (struct nfs4_lockowner *)cfl->fl_owner;
7152	struct nfs4_client *clp = lo->lo_owner.so_client;
7153	struct nfsd_net *nn;
7154
7155	if (try_to_expire_client(clp)) {
7156		nn = net_generic(clp->net, nfsd_net_id);
7157		mod_delayed_work(laundry_wq, &nn->laundromat_work, 0);
7158		return true;
7159	}
7160	return false;
7161}
7162
7163/* schedule laundromat to run immediately and wait for it to complete */
7164static void
7165nfsd4_lm_expire_lock(void)
7166{
7167	flush_workqueue(laundry_wq);
7168}
7169
7170static void
7171nfsd4_lm_notify(struct file_lock *fl)
7172{
7173	struct nfs4_lockowner		*lo = (struct nfs4_lockowner *)fl->fl_owner;
7174	struct net			*net = lo->lo_owner.so_client->net;
7175	struct nfsd_net			*nn = net_generic(net, nfsd_net_id);
7176	struct nfsd4_blocked_lock	*nbl = container_of(fl,
7177						struct nfsd4_blocked_lock, nbl_lock);
7178	bool queue = false;
7179
7180	/* An empty list means that something else is going to be using it */
7181	spin_lock(&nn->blocked_locks_lock);
7182	if (!list_empty(&nbl->nbl_list)) {
7183		list_del_init(&nbl->nbl_list);
7184		list_del_init(&nbl->nbl_lru);
7185		queue = true;
7186	}
7187	spin_unlock(&nn->blocked_locks_lock);
7188
7189	if (queue) {
7190		trace_nfsd_cb_notify_lock(lo, nbl);
7191		nfsd4_run_cb(&nbl->nbl_cb);
7192	}
7193}
7194
7195static const struct lock_manager_operations nfsd_posix_mng_ops  = {
7196	.lm_mod_owner = THIS_MODULE,
7197	.lm_notify = nfsd4_lm_notify,
7198	.lm_get_owner = nfsd4_lm_get_owner,
7199	.lm_put_owner = nfsd4_lm_put_owner,
7200	.lm_lock_expirable = nfsd4_lm_lock_expirable,
7201	.lm_expire_lock = nfsd4_lm_expire_lock,
7202};
7203
7204static inline void
7205nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
7206{
7207	struct nfs4_lockowner *lo;
7208
7209	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
7210		lo = (struct nfs4_lockowner *) fl->fl_owner;
7211		xdr_netobj_dup(&deny->ld_owner, &lo->lo_owner.so_owner,
7212						GFP_KERNEL);
7213		if (!deny->ld_owner.data)
7214			/* We just don't care that much */
7215			goto nevermind;
7216		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
7217	} else {
7218nevermind:
7219		deny->ld_owner.len = 0;
7220		deny->ld_owner.data = NULL;
7221		deny->ld_clientid.cl_boot = 0;
7222		deny->ld_clientid.cl_id = 0;
7223	}
7224	deny->ld_start = fl->fl_start;
7225	deny->ld_length = NFS4_MAX_UINT64;
7226	if (fl->fl_end != NFS4_MAX_UINT64)
7227		deny->ld_length = fl->fl_end - fl->fl_start + 1;        
7228	deny->ld_type = NFS4_READ_LT;
7229	if (fl->fl_type != F_RDLCK)
7230		deny->ld_type = NFS4_WRITE_LT;
7231}
7232
7233static struct nfs4_lockowner *
7234find_lockowner_str_locked(struct nfs4_client *clp, struct xdr_netobj *owner)
7235{
7236	unsigned int strhashval = ownerstr_hashval(owner);
7237	struct nfs4_stateowner *so;
7238
7239	lockdep_assert_held(&clp->cl_lock);
7240
7241	list_for_each_entry(so, &clp->cl_ownerstr_hashtbl[strhashval],
7242			    so_strhash) {
7243		if (so->so_is_open_owner)
7244			continue;
7245		if (same_owner_str(so, owner))
7246			return lockowner(nfs4_get_stateowner(so));
7247	}
7248	return NULL;
7249}
7250
7251static struct nfs4_lockowner *
7252find_lockowner_str(struct nfs4_client *clp, struct xdr_netobj *owner)
7253{
7254	struct nfs4_lockowner *lo;
7255
7256	spin_lock(&clp->cl_lock);
7257	lo = find_lockowner_str_locked(clp, owner);
7258	spin_unlock(&clp->cl_lock);
7259	return lo;
7260}
7261
7262static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
7263{
7264	unhash_lockowner_locked(lockowner(sop));
7265}
7266
7267static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
7268{
7269	struct nfs4_lockowner *lo = lockowner(sop);
7270
7271	kmem_cache_free(lockowner_slab, lo);
7272}
7273
7274static const struct nfs4_stateowner_operations lockowner_ops = {
7275	.so_unhash =	nfs4_unhash_lockowner,
7276	.so_free =	nfs4_free_lockowner,
7277};
7278
7279/*
7280 * Alloc a lock owner structure.
7281 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
7282 * occurred.
7283 *
7284 * strhashval = ownerstr_hashval
7285 */
7286static struct nfs4_lockowner *
7287alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
7288			   struct nfs4_ol_stateid *open_stp,
7289			   struct nfsd4_lock *lock)
7290{
7291	struct nfs4_lockowner *lo, *ret;
7292
7293	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
7294	if (!lo)
7295		return NULL;
7296	INIT_LIST_HEAD(&lo->lo_blocked);
7297	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
7298	lo->lo_owner.so_is_open_owner = 0;
7299	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
7300	lo->lo_owner.so_ops = &lockowner_ops;
7301	spin_lock(&clp->cl_lock);
7302	ret = find_lockowner_str_locked(clp, &lock->lk_new_owner);
7303	if (ret == NULL) {
7304		list_add(&lo->lo_owner.so_strhash,
7305			 &clp->cl_ownerstr_hashtbl[strhashval]);
7306		ret = lo;
7307	} else
7308		nfs4_free_stateowner(&lo->lo_owner);
7309
7310	spin_unlock(&clp->cl_lock);
7311	return ret;
7312}
7313
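/*
 * Find an existing lock stateid for this lockowner under the given open
 * stateid; returns it with an extra reference, or NULL if there is none.
 */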
7314static struct nfs4_ol_stateid *
7315find_lock_stateid(const struct nfs4_lockowner *lo,
7316		  const struct nfs4_ol_stateid *ost)
7317{
7318	struct nfs4_ol_stateid *lst;
7319
7320	lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
7321
7322	/* If ost is not hashed, ost->st_locks will not be valid */
7323	if (!nfs4_ol_stateid_unhashed(ost))
7324		list_for_each_entry(lst, &ost->st_locks, st_locks) {
7325			if (lst->st_stateowner == &lo->lo_owner) {
7326				refcount_inc(&lst->st_stid.sc_count);
7327				return lst;
7328			}
7329		}
7330	return NULL;
7331}
7332
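/*
 * Initialize and hash a new lock stateid under the open stateid.  If another
 * thread races us and hashes one for the same lockowner first, return that
 * one instead; returns NULL if the open stateid has been unhashed.
 */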
7333static struct nfs4_ol_stateid *
7334init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
7335		  struct nfs4_file *fp, struct inode *inode,
7336		  struct nfs4_ol_stateid *open_stp)
7337{
7338	struct nfs4_client *clp = lo->lo_owner.so_client;
7339	struct nfs4_ol_stateid *retstp;
7340
7341	mutex_init(&stp->st_mutex);
7342	mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
7343retry:
7344	spin_lock(&clp->cl_lock);
7345	if (nfs4_ol_stateid_unhashed(open_stp))
7346		goto out_close;
7347	retstp = find_lock_stateid(lo, open_stp);
7348	if (retstp)
7349		goto out_found;
7350	refcount_inc(&stp->st_stid.sc_count);
7351	stp->st_stid.sc_type = NFS4_LOCK_STID;
7352	stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
7353	get_nfs4_file(fp);
7354	stp->st_stid.sc_file = fp;
7355	stp->st_access_bmap = 0;
7356	stp->st_deny_bmap = open_stp->st_deny_bmap;
7357	stp->st_openstp = open_stp;
7358	spin_lock(&fp->fi_lock);
7359	list_add(&stp->st_locks, &open_stp->st_locks);
7360	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
7361	list_add(&stp->st_perfile, &fp->fi_stateids);
7362	spin_unlock(&fp->fi_lock);
7363	spin_unlock(&clp->cl_lock);
7364	return stp;
7365out_found:
7366	spin_unlock(&clp->cl_lock);
7367	if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
7368		nfs4_put_stid(&retstp->st_stid);
7369		goto retry;
7370	}
7371	/* To keep mutex tracking happy */
7372	mutex_unlock(&stp->st_mutex);
7373	return retstp;
7374out_close:
7375	spin_unlock(&clp->cl_lock);
7376	mutex_unlock(&stp->st_mutex);
7377	return NULL;
7378}
7379
7380static struct nfs4_ol_stateid *
7381find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
7382			    struct inode *inode, struct nfs4_ol_stateid *ost,
7383			    bool *new)
7384{
7385	struct nfs4_stid *ns = NULL;
7386	struct nfs4_ol_stateid *lst;
7387	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
7388	struct nfs4_client *clp = oo->oo_owner.so_client;
7389
7390	*new = false;
7391	spin_lock(&clp->cl_lock);
7392	lst = find_lock_stateid(lo, ost);
7393	spin_unlock(&clp->cl_lock);
7394	if (lst != NULL) {
7395		if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
7396			goto out;
7397		nfs4_put_stid(&lst->st_stid);
7398	}
7399	ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
7400	if (ns == NULL)
7401		return NULL;
7402
7403	lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
7404	if (lst == openlockstateid(ns))
7405		*new = true;
7406	else
7407		nfs4_put_stid(ns);
7408out:
7409	return lst;
7410}
7411
7412static int
7413check_lock_length(u64 offset, u64 length)
7414{
7415	return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
7416		(length > ~offset)));
7417}
7418
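/* Make sure the lock stateid holds the given share access on the file. */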
7419static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
7420{
7421	struct nfs4_file *fp = lock_stp->st_stid.sc_file;
7422
7423	lockdep_assert_held(&fp->fi_lock);
7424
7425	if (test_access(access, lock_stp))
7426		return;
7427	__nfs4_file_get_access(fp, access);
7428	set_access(access, lock_stp);
7429}
7430
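/*
 * Find or create the lockowner and lock stateid needed for a LOCK request;
 * *new is set if a brand-new lock stateid was created.
 */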
7431static __be32
7432lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
7433			    struct nfs4_ol_stateid *ost,
7434			    struct nfsd4_lock *lock,
7435			    struct nfs4_ol_stateid **plst, bool *new)
7436{
7437	__be32 status;
7438	struct nfs4_file *fi = ost->st_stid.sc_file;
7439	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
7440	struct nfs4_client *cl = oo->oo_owner.so_client;
7441	struct inode *inode = d_inode(cstate->current_fh.fh_dentry);
7442	struct nfs4_lockowner *lo;
7443	struct nfs4_ol_stateid *lst;
7444	unsigned int strhashval;
7445
7446	lo = find_lockowner_str(cl, &lock->lk_new_owner);
7447	if (!lo) {
7448		strhashval = ownerstr_hashval(&lock->lk_new_owner);
7449		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
7450		if (lo == NULL)
7451			return nfserr_jukebox;
7452	} else {
7453		/* with an existing lockowner, seqids must be the same */
7454		status = nfserr_bad_seqid;
7455		if (!cstate->minorversion &&
7456		    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
7457			goto out;
7458	}
7459
7460	lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
7461	if (lst == NULL) {
7462		status = nfserr_jukebox;
7463		goto out;
7464	}
7465
7466	status = nfs_ok;
7467	*plst = lst;
7468out:
7469	nfs4_put_stateowner(&lo->lo_owner);
7470	return status;
7471}
7472
7473/*
7474 * LOCK operation
7475 */
7476__be32
7477nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7478	   union nfsd4_op_u *u)
7479{
7480	struct nfsd4_lock *lock = &u->lock;
7481	struct nfs4_openowner *open_sop = NULL;
7482	struct nfs4_lockowner *lock_sop = NULL;
7483	struct nfs4_ol_stateid *lock_stp = NULL;
7484	struct nfs4_ol_stateid *open_stp = NULL;
7485	struct nfs4_file *fp;
7486	struct nfsd_file *nf = NULL;
7487	struct nfsd4_blocked_lock *nbl = NULL;
7488	struct file_lock *file_lock = NULL;
7489	struct file_lock *conflock = NULL;
7490	struct super_block *sb;
7491	__be32 status = 0;
7492	int lkflg;
7493	int err;
7494	bool new = false;
7495	unsigned char fl_type;
7496	unsigned int fl_flags = FL_POSIX;
7497	struct net *net = SVC_NET(rqstp);
7498	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
7499
7500	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
7501		(long long) lock->lk_offset,
7502		(long long) lock->lk_length);
7503
7504	if (check_lock_length(lock->lk_offset, lock->lk_length))
7505		 return nfserr_inval;
7506
7507	if ((status = fh_verify(rqstp, &cstate->current_fh,
7508				S_IFREG, NFSD_MAY_LOCK))) {
7509		dprintk("NFSD: nfsd4_lock: permission denied!\n");
7510		return status;
7511	}
7512	sb = cstate->current_fh.fh_dentry->d_sb;
7513
7514	if (lock->lk_is_new) {
7515		if (nfsd4_has_session(cstate))
7516			/* See rfc 5661 18.10.3: given clientid is ignored: */
7517			memcpy(&lock->lk_new_clientid,
7518				&cstate->clp->cl_clientid,
7519				sizeof(clientid_t));
7520
7521		/* validate and update open stateid and open seqid */
7522		status = nfs4_preprocess_confirmed_seqid_op(cstate,
7523				        lock->lk_new_open_seqid,
7524		                        &lock->lk_new_open_stateid,
7525					&open_stp, nn);
7526		if (status)
7527			goto out;
7528		mutex_unlock(&open_stp->st_mutex);
7529		open_sop = openowner(open_stp->st_stateowner);
7530		status = nfserr_bad_stateid;
7531		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
7532						&lock->lk_new_clientid))
7533			goto out;
7534		status = lookup_or_create_lock_state(cstate, open_stp, lock,
7535							&lock_stp, &new);
7536	} else {
7537		status = nfs4_preprocess_seqid_op(cstate,
7538				       lock->lk_old_lock_seqid,
7539				       &lock->lk_old_lock_stateid,
7540				       NFS4_LOCK_STID, &lock_stp, nn);
7541	}
7542	if (status)
7543		goto out;
7544	lock_sop = lockowner(lock_stp->st_stateowner);
7545
7546	lkflg = setlkflg(lock->lk_type);
7547	status = nfs4_check_openmode(lock_stp, lkflg);
7548	if (status)
7549		goto out;
7550
7551	status = nfserr_grace;
7552	if (locks_in_grace(net) && !lock->lk_reclaim)
7553		goto out;
7554	status = nfserr_no_grace;
7555	if (!locks_in_grace(net) && lock->lk_reclaim)
7556		goto out;
7557
7558	if (lock->lk_reclaim)
7559		fl_flags |= FL_RECLAIM;
7560
7561	fp = lock_stp->st_stid.sc_file;
7562	switch (lock->lk_type) {
7563		case NFS4_READW_LT:
7564			if (nfsd4_has_session(cstate) ||
7565			    exportfs_lock_op_is_async(sb->s_export_op))
7566				fl_flags |= FL_SLEEP;
7567			fallthrough;
7568		case NFS4_READ_LT:
7569			spin_lock(&fp->fi_lock);
7570			nf = find_readable_file_locked(fp);
7571			if (nf)
7572				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
7573			spin_unlock(&fp->fi_lock);
7574			fl_type = F_RDLCK;
7575			break;
7576		case NFS4_WRITEW_LT:
7577			if (nfsd4_has_session(cstate) ||
7578			    exportfs_lock_op_is_async(sb->s_export_op))
7579				fl_flags |= FL_SLEEP;
7580			fallthrough;
7581		case NFS4_WRITE_LT:
7582			spin_lock(&fp->fi_lock);
7583			nf = find_writeable_file_locked(fp);
7584			if (nf)
7585				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
7586			spin_unlock(&fp->fi_lock);
7587			fl_type = F_WRLCK;
7588			break;
7589		default:
7590			status = nfserr_inval;
7591		goto out;
7592	}
7593
7594	if (!nf) {
7595		status = nfserr_openmode;
7596		goto out;
7597	}
7598
7599	/*
7600	 * Most filesystems with their own ->lock operations will block
7601	 * the nfsd thread waiting to acquire the lock.  That leads to
7602	 * deadlocks (we don't want every nfsd thread tied up waiting
7603	 * for file locks), so don't attempt blocking lock notifications
7604	 * on those filesystems:
7605	 */
7606	if (!exportfs_lock_op_is_async(sb->s_export_op))
7607		fl_flags &= ~FL_SLEEP;
7608
7609	nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
7610	if (!nbl) {
7611		dprintk("NFSD: %s: unable to allocate block!\n", __func__);
7612		status = nfserr_jukebox;
7613		goto out;
7614	}
7615
7616	file_lock = &nbl->nbl_lock;
7617	file_lock->fl_type = fl_type;
7618	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(&lock_sop->lo_owner));
7619	file_lock->fl_pid = current->tgid;
7620	file_lock->fl_file = nf->nf_file;
7621	file_lock->fl_flags = fl_flags;
7622	file_lock->fl_lmops = &nfsd_posix_mng_ops;
7623	file_lock->fl_start = lock->lk_offset;
7624	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
7625	nfs4_transform_lock_offset(file_lock);
7626
7627	conflock = locks_alloc_lock();
7628	if (!conflock) {
7629		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7630		status = nfserr_jukebox;
7631		goto out;
7632	}
7633
7634	if (fl_flags & FL_SLEEP) {
7635		nbl->nbl_time = ktime_get_boottime_seconds();
7636		spin_lock(&nn->blocked_locks_lock);
7637		list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
7638		list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
7639		kref_get(&nbl->nbl_kref);
7640		spin_unlock(&nn->blocked_locks_lock);
7641	}
7642
7643	err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, conflock);
7644	switch (err) {
7645	case 0: /* success! */
7646		nfs4_inc_and_copy_stateid(&lock->lk_resp_stateid, &lock_stp->st_stid);
7647		status = 0;
7648		if (lock->lk_reclaim)
7649			nn->somebody_reclaimed = true;
7650		break;
7651	case FILE_LOCK_DEFERRED:
7652		kref_put(&nbl->nbl_kref, free_nbl);
7653		nbl = NULL;
7654		fallthrough;
7655	case -EAGAIN:		/* conflock holds conflicting lock */
7656		status = nfserr_denied;
7657		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
7658		nfs4_set_lock_denied(conflock, &lock->lk_denied);
7659		break;
7660	case -EDEADLK:
7661		status = nfserr_deadlock;
7662		break;
7663	default:
7664		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
7665		status = nfserrno(err);
7666		break;
7667	}
7668out:
7669	if (nbl) {
7670		/* dequeue it if we queued it before */
7671		if (fl_flags & FL_SLEEP) {
7672			spin_lock(&nn->blocked_locks_lock);
7673			if (!list_empty(&nbl->nbl_list) &&
7674			    !list_empty(&nbl->nbl_lru)) {
7675				list_del_init(&nbl->nbl_list);
7676				list_del_init(&nbl->nbl_lru);
7677				kref_put(&nbl->nbl_kref, free_nbl);
7678			}
7679			/* nbl can use one of lists to be linked to reaplist */
7680			spin_unlock(&nn->blocked_locks_lock);
7681		}
7682		free_blocked_lock(nbl);
7683	}
7684	if (nf)
7685		nfsd_file_put(nf);
7686	if (lock_stp) {
7687		/* Bump seqid manually if the 4.0 replay owner is openowner */
7688		if (cstate->replay_owner &&
7689		    cstate->replay_owner != &lock_sop->lo_owner &&
7690		    seqid_mutating_err(ntohl(status)))
7691			lock_sop->lo_owner.so_seqid++;
7692
7693		/*
7694		 * If this is a new, never-before-used stateid, and we are
7695		 * returning an error, then just go ahead and release it.
7696		 */
7697		if (status && new)
7698			release_lock_stateid(lock_stp);
7699
7700		mutex_unlock(&lock_stp->st_mutex);
7701
7702		nfs4_put_stid(&lock_stp->st_stid);
7703	}
7704	if (open_stp)
7705		nfs4_put_stid(&open_stp->st_stid);
7706	nfsd4_bump_seqid(cstate, status);
7707	if (conflock)
7708		locks_free_lock(conflock);
7709	return status;
7710}
7711
7712void nfsd4_lock_release(union nfsd4_op_u *u)
7713{
7714	struct nfsd4_lock *lock = &u->lock;
7715	struct nfsd4_lock_denied *deny = &lock->lk_denied;
7716
7717	kfree(deny->ld_owner.data);
7718}
7719
7720/*
7721 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
7722 * so we do a temporary open here just to get an open file to pass to
7723 * vfs_test_lock.
7724 */
7725static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
7726{
7727	struct nfsd_file *nf;
7728	struct inode *inode;
7729	__be32 err;
7730
7731	err = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
7732	if (err)
7733		return err;
7734	inode = fhp->fh_dentry->d_inode;
7735	inode_lock(inode); /* to block new leases till after test_lock: */
7736	err = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
7737	if (err)
7738		goto out;
7739	lock->fl_file = nf->nf_file;
7740	err = nfserrno(vfs_test_lock(nf->nf_file, lock));
7741	lock->fl_file = NULL;
7742out:
7743	inode_unlock(inode);
7744	nfsd_file_put(nf);
7745	return err;
7746}
7747
7748/*
7749 * LOCKT operation
7750 */
7751__be32
7752nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7753	    union nfsd4_op_u *u)
7754{
7755	struct nfsd4_lockt *lockt = &u->lockt;
7756	struct file_lock *file_lock = NULL;
7757	struct nfs4_lockowner *lo = NULL;
7758	__be32 status;
7759	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7760
7761	if (locks_in_grace(SVC_NET(rqstp)))
7762		return nfserr_grace;
7763
7764	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
7765		 return nfserr_inval;
7766
7767	if (!nfsd4_has_session(cstate)) {
7768		status = set_client(&lockt->lt_clientid, cstate, nn);
7769		if (status)
7770			goto out;
7771	}
7772
7773	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
7774		goto out;
7775
7776	file_lock = locks_alloc_lock();
7777	if (!file_lock) {
7778		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7779		status = nfserr_jukebox;
7780		goto out;
7781	}
7782
7783	switch (lockt->lt_type) {
7784		case NFS4_READ_LT:
7785		case NFS4_READW_LT:
7786			file_lock->fl_type = F_RDLCK;
7787			break;
7788		case NFS4_WRITE_LT:
7789		case NFS4_WRITEW_LT:
7790			file_lock->fl_type = F_WRLCK;
7791			break;
7792		default:
7793			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
7794			status = nfserr_inval;
7795			goto out;
7796	}
7797
7798	lo = find_lockowner_str(cstate->clp, &lockt->lt_owner);
7799	if (lo)
7800		file_lock->fl_owner = (fl_owner_t)lo;
7801	file_lock->fl_pid = current->tgid;
7802	file_lock->fl_flags = FL_POSIX;
7803
7804	file_lock->fl_start = lockt->lt_offset;
7805	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
7806
7807	nfs4_transform_lock_offset(file_lock);
7808
7809	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
7810	if (status)
7811		goto out;
7812
7813	if (file_lock->fl_type != F_UNLCK) {
7814		status = nfserr_denied;
7815		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
7816	}
7817out:
7818	if (lo)
7819		nfs4_put_stateowner(&lo->lo_owner);
7820	if (file_lock)
7821		locks_free_lock(file_lock);
7822	return status;
7823}
7824
7825void nfsd4_lockt_release(union nfsd4_op_u *u)
7826{
7827	struct nfsd4_lockt *lockt = &u->lockt;
7828	struct nfsd4_lock_denied *deny = &lockt->lt_denied;
7829
7830	kfree(deny->ld_owner.data);
7831}
7832
7833__be32
7834nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
7835	    union nfsd4_op_u *u)
7836{
7837	struct nfsd4_locku *locku = &u->locku;
7838	struct nfs4_ol_stateid *stp;
7839	struct nfsd_file *nf = NULL;
7840	struct file_lock *file_lock = NULL;
7841	__be32 status;
7842	int err;
7843	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7844
7845	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
7846		(long long) locku->lu_offset,
7847		(long long) locku->lu_length);
7848
7849	if (check_lock_length(locku->lu_offset, locku->lu_length))
7850		 return nfserr_inval;
7851
7852	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
7853					&locku->lu_stateid, NFS4_LOCK_STID,
7854					&stp, nn);
7855	if (status)
7856		goto out;
7857	nf = find_any_file(stp->st_stid.sc_file);
7858	if (!nf) {
7859		status = nfserr_lock_range;
7860		goto put_stateid;
7861	}
7862	file_lock = locks_alloc_lock();
7863	if (!file_lock) {
7864		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
7865		status = nfserr_jukebox;
7866		goto put_file;
7867	}
7868
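	/*
	 * LOCKU is carried out as a POSIX unlock: build an F_UNLCK request
	 * covering the byte range and submit it through vfs_lock_file().
	 */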
7869	file_lock->fl_type = F_UNLCK;
7870	file_lock->fl_owner = (fl_owner_t)lockowner(nfs4_get_stateowner(stp->st_stateowner));
7871	file_lock->fl_pid = current->tgid;
7872	file_lock->fl_file = nf->nf_file;
7873	file_lock->fl_flags = FL_POSIX;
7874	file_lock->fl_lmops = &nfsd_posix_mng_ops;
7875	file_lock->fl_start = locku->lu_offset;
7876
7877	file_lock->fl_end = last_byte_offset(locku->lu_offset,
7878						locku->lu_length);
7879	nfs4_transform_lock_offset(file_lock);
7880
7881	err = vfs_lock_file(nf->nf_file, F_SETLK, file_lock, NULL);
7882	if (err) {
7883		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
7884		goto out_nfserr;
7885	}
7886	nfs4_inc_and_copy_stateid(&locku->lu_stateid, &stp->st_stid);
7887put_file:
7888	nfsd_file_put(nf);
7889put_stateid:
7890	mutex_unlock(&stp->st_mutex);
7891	nfs4_put_stid(&stp->st_stid);
7892out:
7893	nfsd4_bump_seqid(cstate, status);
7894	if (file_lock)
7895		locks_free_lock(file_lock);
7896	return status;
7897
7898out_nfserr:
7899	status = nfserrno(err);
7900	goto put_file;
7901}
7902
7903/*
7904 * returns
7905 * 	true:  locks held by lockowner
7906 * 	false: no locks held by lockowner
7907 */
7908static bool
7909check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
7910{
7911	struct file_lock *fl;
7912	int status = false;
7913	struct nfsd_file *nf;
7914	struct inode *inode;
7915	struct file_lock_context *flctx;
7916
7917	spin_lock(&fp->fi_lock);
7918	nf = find_any_file_locked(fp);
7919	if (!nf) {
7920		/* Any valid lock stateid should have some sort of access */
7921		WARN_ON_ONCE(1);
7922		goto out;
7923	}
7924
7925	inode = file_inode(nf->nf_file);
7926	flctx = locks_inode_context(inode);
7927
7928	if (flctx && !list_empty_careful(&flctx->flc_posix)) {
7929		spin_lock(&flctx->flc_lock);
7930		list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
7931			if (fl->fl_owner == (fl_owner_t)lowner) {
7932				status = true;
7933				break;
7934			}
7935		}
7936		spin_unlock(&flctx->flc_lock);
7937	}
7938out:
7939	spin_unlock(&fp->fi_lock);
7940	return status;
7941}
7942
7943/**
7944 * nfsd4_release_lockowner - process NFSv4.0 RELEASE_LOCKOWNER operations
7945 * @rqstp: RPC transaction
7946 * @cstate: NFSv4 COMPOUND state
7947 * @u: RELEASE_LOCKOWNER arguments
7948 *
7949 * Check if there are any locks still held and, if not, free the lockowner
7950 * and any lock state that is owned.
7951 *
7952 * Return values:
7953 *   %nfs_ok: lockowner released or not found
7954 *   %nfserr_locks_held: lockowner still in use
7955 *   %nfserr_stale_clientid: clientid no longer active
7956 *   %nfserr_expired: clientid not recognized
7957 */
7958__be32
7959nfsd4_release_lockowner(struct svc_rqst *rqstp,
7960			struct nfsd4_compound_state *cstate,
7961			union nfsd4_op_u *u)
7962{
7963	struct nfsd4_release_lockowner *rlockowner = &u->release_lockowner;
7964	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
7965	clientid_t *clid = &rlockowner->rl_clientid;
7966	struct nfs4_ol_stateid *stp;
7967	struct nfs4_lockowner *lo;
7968	struct nfs4_client *clp;
7969	LIST_HEAD(reaplist);
7970	__be32 status;
7971
7972	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
7973		clid->cl_boot, clid->cl_id);
7974
7975	status = set_client(clid, cstate, nn);
7976	if (status)
7977		return status;
7978	clp = cstate->clp;
7979
7980	spin_lock(&clp->cl_lock);
7981	lo = find_lockowner_str_locked(clp, &rlockowner->rl_owner);
7982	if (!lo) {
7983		spin_unlock(&clp->cl_lock);
7984		return nfs_ok;
7985	}
7986
7987	list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
7988		if (check_for_locks(stp->st_stid.sc_file, lo)) {
7989			spin_unlock(&clp->cl_lock);
7990			nfs4_put_stateowner(&lo->lo_owner);
7991			return nfserr_locks_held;
7992		}
7993	}
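	/*
	 * No locks are held by this owner: unhash it and move all of its
	 * lock stateids onto a local reaplist so they can be freed once
	 * cl_lock is dropped.
	 */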
7994	unhash_lockowner_locked(lo);
7995	while (!list_empty(&lo->lo_owner.so_stateids)) {
7996		stp = list_first_entry(&lo->lo_owner.so_stateids,
7997				       struct nfs4_ol_stateid,
7998				       st_perstateowner);
7999		WARN_ON(!unhash_lock_stateid(stp));
8000		put_ol_stateid_locked(stp, &reaplist);
8001	}
8002	spin_unlock(&clp->cl_lock);
8003
8004	free_ol_stateid_reaplist(&reaplist);
8005	remove_blocked_locks(lo);
8006	nfs4_put_stateowner(&lo->lo_owner);
8007	return nfs_ok;
8008}
8009
8010static inline struct nfs4_client_reclaim *
8011alloc_reclaim(void)
8012{
8013	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
8014}
8015
8016bool
8017nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn)
8018{
8019	struct nfs4_client_reclaim *crp;
8020
8021	crp = nfsd4_find_reclaim_client(name, nn);
8022	return (crp && crp->cr_clp);
8023}
8024
8025/*
8026 * failure => all reset bets are off, nfserr_no_grace...
8027 *
8028 * The caller is responsible for freeing name.data if NULL is returned (it
8029 * will be freed in nfs4_remove_reclaim_record in the normal case).
8030 */
8031struct nfs4_client_reclaim *
8032nfs4_client_to_reclaim(struct xdr_netobj name, struct xdr_netobj princhash,
8033		struct nfsd_net *nn)
8034{
8035	unsigned int strhashval;
8036	struct nfs4_client_reclaim *crp;
8037
8038	crp = alloc_reclaim();
8039	if (crp) {
8040		strhashval = clientstr_hashval(name);
8041		INIT_LIST_HEAD(&crp->cr_strhash);
8042		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
8043		crp->cr_name.data = name.data;
8044		crp->cr_name.len = name.len;
8045		crp->cr_princhash.data = princhash.data;
8046		crp->cr_princhash.len = princhash.len;
8047		crp->cr_clp = NULL;
8048		nn->reclaim_str_hashtbl_size++;
8049	}
8050	return crp;
8051}
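/*
 * Illustrative caller sketch (not taken from this file): per the comment
 * above, ownership of name.data passes to the reclaim record only when a
 * record is returned, so a caller would do something like:
 *
 *	crp = nfs4_client_to_reclaim(name, princhash, nn);
 *	if (!crp)
 *		kfree(name.data);
 */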
8052
8053void
8054nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
8055{
8056	list_del(&crp->cr_strhash);
8057	kfree(crp->cr_name.data);
8058	kfree(crp->cr_princhash.data);
8059	kfree(crp);
8060	nn->reclaim_str_hashtbl_size--;
8061}
8062
8063void
8064nfs4_release_reclaim(struct nfsd_net *nn)
8065{
8066	struct nfs4_client_reclaim *crp = NULL;
8067	int i;
8068
8069	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8070		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
8071			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
8072			                struct nfs4_client_reclaim, cr_strhash);
8073			nfs4_remove_reclaim_record(crp, nn);
8074		}
8075	}
8076	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
8077}
8078
8079/*
8080 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
8081struct nfs4_client_reclaim *
8082nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
8083{
8084	unsigned int strhashval;
8085	struct nfs4_client_reclaim *crp = NULL;
8086
8087	strhashval = clientstr_hashval(name);
8088	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
8089		if (compare_blob(&crp->cr_name, &name) == 0) {
8090			return crp;
8091		}
8092	}
8093	return NULL;
8094}
8095
8096__be32
8097nfs4_check_open_reclaim(struct nfs4_client *clp)
8098{
8099	if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
8100		return nfserr_no_grace;
8101
8102	if (nfsd4_client_record_check(clp))
8103		return nfserr_reclaim_bad;
8104
8105	return nfs_ok;
8106}
8107
8108/*
8109 * Since the lifetime of a delegation isn't limited to that of an open, a
8110 * client may quite reasonably hang on to a delegation as long as it has
8111 * the inode cached.  This becomes an obvious problem the first time a
8112 * client's inode cache approaches the size of the server's total memory.
8113 *
8114 * For now we avoid this problem by imposing a hard limit on the number
8115 * of delegations, which varies according to the server's memory size.
8116 */
8117static void
8118set_max_delegations(void)
8119{
8120	/*
8121	 * Allow at most 4 delegations per megabyte of RAM.  Quick
8122	 * estimates suggest that in the worst case (where every delegation
8123	 * is for a different inode), a delegation could take about 1.5K,
8124	 * giving a worst case usage of about 6% of memory.
8125	 */
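	/*
	 * The shift below folds that together: pages >> (20 - PAGE_SHIFT)
	 * is the free buffer memory in megabytes, and shifting by two bits
	 * fewer multiplies that by four.  With 4KiB pages (PAGE_SHIFT == 12)
	 * the combined shift is 6, i.e. one delegation per 64 free pages.
	 */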
8126	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
8127}
8128
8129static int nfs4_state_create_net(struct net *net)
8130{
8131	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8132	int i;
8133
8134	nn->conf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
8135					    sizeof(struct list_head),
8136					    GFP_KERNEL);
8137	if (!nn->conf_id_hashtbl)
8138		goto err;
8139	nn->unconf_id_hashtbl = kmalloc_array(CLIENT_HASH_SIZE,
8140					      sizeof(struct list_head),
8141					      GFP_KERNEL);
8142	if (!nn->unconf_id_hashtbl)
8143		goto err_unconf_id;
8144	nn->sessionid_hashtbl = kmalloc_array(SESSION_HASH_SIZE,
8145					      sizeof(struct list_head),
8146					      GFP_KERNEL);
8147	if (!nn->sessionid_hashtbl)
8148		goto err_sessionid;
8149
8150	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8151		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
8152		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
8153	}
8154	for (i = 0; i < SESSION_HASH_SIZE; i++)
8155		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
8156	nn->conf_name_tree = RB_ROOT;
8157	nn->unconf_name_tree = RB_ROOT;
8158	nn->boot_time = ktime_get_real_seconds();
8159	nn->grace_ended = false;
8160	nn->nfsd4_manager.block_opens = true;
8161	INIT_LIST_HEAD(&nn->nfsd4_manager.list);
8162	INIT_LIST_HEAD(&nn->client_lru);
8163	INIT_LIST_HEAD(&nn->close_lru);
8164	INIT_LIST_HEAD(&nn->del_recall_lru);
8165	spin_lock_init(&nn->client_lock);
8166	spin_lock_init(&nn->s2s_cp_lock);
8167	idr_init(&nn->s2s_cp_stateids);
8168
8169	spin_lock_init(&nn->blocked_locks_lock);
8170	INIT_LIST_HEAD(&nn->blocked_locks_lru);
8171
8172	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
8173	INIT_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker);
8174	get_net(net);
8175
8176	nn->nfsd_client_shrinker = shrinker_alloc(0, "nfsd-client");
8177	if (!nn->nfsd_client_shrinker)
8178		goto err_shrinker;
8179
8180	nn->nfsd_client_shrinker->scan_objects = nfsd4_state_shrinker_scan;
8181	nn->nfsd_client_shrinker->count_objects = nfsd4_state_shrinker_count;
8182	nn->nfsd_client_shrinker->private_data = nn;
8183
8184	shrinker_register(nn->nfsd_client_shrinker);
8185
8186	return 0;
8187
8188err_shrinker:
8189	put_net(net);
8190	kfree(nn->sessionid_hashtbl);
8191err_sessionid:
8192	kfree(nn->unconf_id_hashtbl);
8193err_unconf_id:
8194	kfree(nn->conf_id_hashtbl);
8195err:
8196	return -ENOMEM;
8197}
8198
8199static void
8200nfs4_state_destroy_net(struct net *net)
8201{
8202	int i;
8203	struct nfs4_client *clp = NULL;
8204	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8205
8206	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8207		while (!list_empty(&nn->conf_id_hashtbl[i])) {
8208			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
8209			destroy_client(clp);
8210		}
8211	}
8212
8213	WARN_ON(!list_empty(&nn->blocked_locks_lru));
8214
8215	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
8216		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
8217			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
8218			destroy_client(clp);
8219		}
8220	}
8221
8222	kfree(nn->sessionid_hashtbl);
8223	kfree(nn->unconf_id_hashtbl);
8224	kfree(nn->conf_id_hashtbl);
8225	put_net(net);
8226}
8227
8228int
8229nfs4_state_start_net(struct net *net)
8230{
8231	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8232	int ret;
8233
8234	ret = nfs4_state_create_net(net);
8235	if (ret)
8236		return ret;
8237	locks_start_grace(net, &nn->nfsd4_manager);
8238	nfsd4_client_tracking_init(net);
8239	if (nn->track_reclaim_completes && nn->reclaim_str_hashtbl_size == 0)
8240		goto skip_grace;
8241	printk(KERN_INFO "NFSD: starting %lld-second grace period (net %x)\n",
8242	       nn->nfsd4_grace, net->ns.inum);
8243	trace_nfsd_grace_start(nn);
8244	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
8245	return 0;
8246
8247skip_grace:
8248	printk(KERN_INFO "NFSD: no clients to reclaim, skipping NFSv4 grace period (net %x)\n",
8249			net->ns.inum);
8250	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_lease * HZ);
8251	nfsd4_end_grace(nn);
8252	return 0;
8253}
8254
8255/* initialization to perform when the nfsd service is started: */
8256
8257int
8258nfs4_state_start(void)
8259{
8260	int ret;
8261
8262	ret = rhltable_init(&nfs4_file_rhltable, &nfs4_file_rhash_params);
8263	if (ret)
8264		return ret;
8265
8266	ret = nfsd4_create_callback_queue();
8267	if (ret) {
8268		rhltable_destroy(&nfs4_file_rhltable);
8269		return ret;
8270	}
8271
8272	set_max_delegations();
8273	return 0;
8274}
8275
8276void
8277nfs4_state_shutdown_net(struct net *net)
8278{
8279	struct nfs4_delegation *dp = NULL;
8280	struct list_head *pos, *next, reaplist;
8281	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
8282
8283	shrinker_free(nn->nfsd_client_shrinker);
8284	cancel_work(&nn->nfsd_shrinker_work);
8285	cancel_delayed_work_sync(&nn->laundromat_work);
8286	locks_end_grace(&nn->nfsd4_manager);
8287
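	/*
	 * Unhash every delegation on the per-net recall list under
	 * state_lock, collecting them on a local reaplist; the actual
	 * teardown happens after the lock is dropped, since it may sleep.
	 */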
8288	INIT_LIST_HEAD(&reaplist);
8289	spin_lock(&state_lock);
8290	list_for_each_safe(pos, next, &nn->del_recall_lru) {
8291		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
8292		WARN_ON(!unhash_delegation_locked(dp));
8293		list_add(&dp->dl_recall_lru, &reaplist);
8294	}
8295	spin_unlock(&state_lock);
8296	list_for_each_safe(pos, next, &reaplist) {
8297		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
8298		list_del_init(&dp->dl_recall_lru);
8299		destroy_unhashed_deleg(dp);
8300	}
8301
8302	nfsd4_client_tracking_exit(net);
8303	nfs4_state_destroy_net(net);
8304#ifdef CONFIG_NFSD_V4_2_INTER_SSC
8305	nfsd4_ssc_shutdown_umount(nn);
8306#endif
8307}
8308
8309void
8310nfs4_state_shutdown(void)
8311{
8312	nfsd4_destroy_callback_queue();
8313	rhltable_destroy(&nfs4_file_rhltable);
8314}
8315
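/*
 * NFSv4.1 "current stateid" handling: a client may send the special stateid
 * (seqid of one, all-zero "other" field; see RFC 8881) to mean "the stateid
 * most recently set by an earlier operation in this compound".  get_stateid()
 * substitutes the saved value on input; put_stateid() records each newly
 * returned stateid.
 */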
8316static void
8317get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
8318{
8319	if (HAS_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG) &&
8320	    CURRENT_STATEID(stateid))
8321		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
8322}
8323
8324static void
8325put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
8326{
8327	if (cstate->minorversion) {
8328		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
8329		SET_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
8330	}
8331}
8332
8333void
8334clear_current_stateid(struct nfsd4_compound_state *cstate)
8335{
8336	CLEAR_CSTATE_FLAG(cstate, CURRENT_STATE_ID_FLAG);
8337}
8338
8339/*
8340 * functions to set current state id
8341 */
8342void
8343nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate,
8344		union nfsd4_op_u *u)
8345{
8346	put_stateid(cstate, &u->open_downgrade.od_stateid);
8347}
8348
8349void
8350nfsd4_set_openstateid(struct nfsd4_compound_state *cstate,
8351		union nfsd4_op_u *u)
8352{
8353	put_stateid(cstate, &u->open.op_stateid);
8354}
8355
8356void
8357nfsd4_set_closestateid(struct nfsd4_compound_state *cstate,
8358		union nfsd4_op_u *u)
8359{
8360	put_stateid(cstate, &u->close.cl_stateid);
8361}
8362
8363void
8364nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate,
8365		union nfsd4_op_u *u)
8366{
8367	put_stateid(cstate, &u->lock.lk_resp_stateid);
8368}
8369
8370/*
8371 * functions to consume current state id
8372 */
8373
8374void
8375nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate,
8376		union nfsd4_op_u *u)
8377{
8378	get_stateid(cstate, &u->open_downgrade.od_stateid);
8379}
8380
8381void
8382nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate,
8383		union nfsd4_op_u *u)
8384{
8385	get_stateid(cstate, &u->delegreturn.dr_stateid);
8386}
8387
8388void
8389nfsd4_get_freestateid(struct nfsd4_compound_state *cstate,
8390		union nfsd4_op_u *u)
8391{
8392	get_stateid(cstate, &u->free_stateid.fr_stateid);
8393}
8394
8395void
8396nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate,
8397		union nfsd4_op_u *u)
8398{
8399	get_stateid(cstate, &u->setattr.sa_stateid);
8400}
8401
8402void
8403nfsd4_get_closestateid(struct nfsd4_compound_state *cstate,
8404		union nfsd4_op_u *u)
8405{
8406	get_stateid(cstate, &u->close.cl_stateid);
8407}
8408
8409void
8410nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate,
8411		union nfsd4_op_u *u)
8412{
8413	get_stateid(cstate, &u->locku.lu_stateid);
8414}
8415
8416void
8417nfsd4_get_readstateid(struct nfsd4_compound_state *cstate,
8418		union nfsd4_op_u *u)
8419{
8420	get_stateid(cstate, &u->read.rd_stateid);
8421}
8422
8423void
8424nfsd4_get_writestateid(struct nfsd4_compound_state *cstate,
8425		union nfsd4_op_u *u)
8426{
8427	get_stateid(cstate, &u->write.wr_stateid);
8428}
8429
8430/**
8431 * nfsd4_deleg_getattr_conflict - Recall if GETATTR causes conflict
8432 * @rqstp: RPC transaction context
8433 * @inode: file to be checked for a conflict
8434 *
8435 * This function is called when there is a conflict between a write
8436 * delegation and a change/size GETATTR from another client. The server
8437 * must either use the CB_GETATTR to get the current values of the
8438 * attributes from the client that holds the delegation or recall the
8439 * delegation before replying to the GETATTR. See RFC 8881 section
8440 * 18.7.4.
8441 *
8442 * The current implementation does not support CB_GETATTR yet. However,
8443 * support for it, which would avoid recalling the delegation, could be
8444 * added in follow-up work.
8445 *
8446 * Returns 0 if there is no conflict; otherwise an nfs_stat
8447 * code is returned.
8448 */
8449__be32
8450nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct inode *inode)
8451{
8452	__be32 status;
8453	struct file_lock_context *ctx;
8454	struct file_lock *fl;
8455	struct nfs4_delegation *dp;
8456
8457	ctx = locks_inode_context(inode);
8458	if (!ctx)
8459		return 0;
8460	spin_lock(&ctx->flc_lock);
8461	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
8462		if (fl->fl_flags == FL_LAYOUT)
8463			continue;
8464		if (fl->fl_lmops != &nfsd_lease_mng_ops) {
8465			/*
8466			 * non-nfs lease, if it's a lease with F_RDLCK then
8467			 * we are done; there isn't any write delegation
8468			 * on this inode
8469			 */
8470			if (fl->fl_type == F_RDLCK)
8471				break;
8472			goto break_lease;
8473		}
8474		if (fl->fl_type == F_WRLCK) {
8475			dp = fl->fl_owner;
8476			if (dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) {
8477				spin_unlock(&ctx->flc_lock);
8478				return 0;
8479			}
8480break_lease:
8481			spin_unlock(&ctx->flc_lock);
8482			nfsd_stats_wdeleg_getattr_inc();
8483			status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
8484			if (status != nfserr_jukebox ||
8485					!nfsd_wait_for_delegreturn(rqstp, inode))
8486				return status;
8487			return 0;
8488		}
8489		break;
8490	}
8491	spin_unlock(&ctx->flc_lock);
8492	return 0;
8493}