v4.6
 
/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK

__be32 nfs4_callback_getattr(struct cb_getattrargs *args,
			     struct cb_getattrres *res,
			     struct cb_process_state *cps)
{
	struct nfs_delegation *delegation;
	struct nfs_inode *nfsi;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (inode == NULL) {
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				-ntohl(res->status));
		goto out;
	}
	nfsi = NFS_I(inode);
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
	if (nfsi->nrequests != 0)
		res->change_attr++;
	res->ctime = inode->i_ctime;
	res->mtime = inode->i_mtime;
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}

__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
			    struct cb_process_state *cps)
{
	struct inode *inode;
	__be32 res;

	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (inode == NULL) {
		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
				&args->stateid, -ntohl(res));
		goto out;
	}
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
			&args->stateid, -ntohl(res));
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout by filehandle.
 *
 * Note: gets a refcount on the layout hdr and on its respective inode.
 * Caller must put the layout hdr and the inode.
 *
 * TODO: keep track of all layouts (and delegations) in a hash table
 * hashed by filehandle.
 */
static struct pnfs_layout_hdr * get_layout_by_fh_locked(struct nfs_client *clp,
		struct nfs_fh *fh, nfs4_stateid *stateid)
{
	struct nfs_server *server;
	struct inode *ino;
	struct pnfs_layout_hdr *lo;

	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			if (!nfs4_stateid_match_other(&lo->plh_stateid, stateid))
				continue;
			if (nfs_compare_fh(fh, &NFS_I(lo->plh_inode)->fh))
				continue;
			ino = igrab(lo->plh_inode);
			if (!ino)
				break;
			spin_lock(&ino->i_lock);
			/* Is this layout in the process of being freed? */
			if (NFS_I(ino)->layout != lo) {
				spin_unlock(&ino->i_lock);
				iput(ino);
				break;
			}
			pnfs_get_layout_hdr(lo);
			spin_unlock(&ino->i_lock);
			return lo;
		}
	}

	return NULL;
}

static struct pnfs_layout_hdr * get_layout_by_fh(struct nfs_client *clp,
		struct nfs_fh *fh, nfs4_stateid *stateid)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	lo = get_layout_by_fh_locked(clp, fh, stateid);
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	return lo;
}

/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static bool pnfs_check_stateid_sequence(struct pnfs_layout_hdr *lo,
					const nfs4_stateid *new)
{
	u32 oldseq, newseq;

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);

	if (newseq > oldseq + 1)
		return false;
	return true;
}

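/*
 * CB_LAYOUTRECALL of a single file: look the layout up by filehandle,
 * check the recall stateid sequence, update our copy of the stateid,
 * flush any pending layoutcommit and then mark the matching layout
 * segments for return.
 */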
static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	lo = get_layout_by_fh(clp, &args->cbl_fh, &args->cbl_stateid);
	if (!lo) {
		trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, NULL,
				&args->cbl_stateid, -rv);
		goto out;
	}

	ino = lo->plh_inode;

	spin_lock(&ino->i_lock);
	if (!pnfs_check_stateid_sequence(lo, &args->cbl_stateid)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}
	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
	spin_unlock(&ino->i_lock);

	pnfs_layoutcommit_inode(ino, false);

	spin_lock(&ino->i_lock);
	/*
	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
	 */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	if (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
					&args->cbl_range)) {
		rv = NFS4_OK;
		goto unlock;
	}

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
			&args->cbl_range);
	}
	pnfs_mark_layout_returned_if_empty(lo);
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	/* Free all lsegs that are attached to commit buckets */
	nfs_commit_inode(ino, 0);
	pnfs_put_layout_hdr(lo);
	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
			&args->cbl_stateid, -rv);
	iput(ino);
out:
	return rv;
}

static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	int stat;

	if (args->cbl_recall_type == RETURN_FSID)
		stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
	else
		stat = pnfs_destroy_layouts_byclid(clp, true);
	if (stat != 0)
		return NFS4ERR_DELAY;
	return NFS4ERR_NOMATCHING_LAYOUT;
}

static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args)
{
	u32 res;

	dprintk("%s enter, type=%i\n", __func__, args->cbl_recall_type);
	if (args->cbl_recall_type == RETURN_FILE)
		res = initiate_file_draining(clp, args);
	else
		res = initiate_bulk_draining(clp, args);
	dprintk("%s returning %i\n", __func__, res);
	return res;

}

__be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args,
				  void *dummy, struct cb_process_state *cps)
{
	u32 res;

	dprintk("%s: -->\n", __func__);

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args);
	else
		res = NFS4ERR_OP_NOT_IN_SESSION;

	dprintk("%s: exit with status = %d\n", __func__, res);
	return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args);
}

__be32 nfs4_callback_devicenotify(struct cb_devicenotifyargs *args,
				  void *dummy, struct cb_process_state *cps)
{
	int i;
	__be32 res = 0;
	struct nfs_client *clp = cps->clp;
	struct nfs_server *server = NULL;

	dprintk("%s: -->\n", __func__);

	if (!clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		if (!server ||
		    server->pnfs_curr_ld->id != dev->cbd_layout_type) {
			rcu_read_lock();
			list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
				if (server->pnfs_curr_ld &&
				    server->pnfs_curr_ld->id == dev->cbd_layout_type) {
					rcu_read_unlock();
					goto found;
				}
			rcu_read_unlock();
			dprintk("%s: layout type %u not found\n",
				__func__, dev->cbd_layout_type);
			continue;
		}

	found:
		nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
	}

out:
	kfree(args->devs);
	dprintk("%s: exit with status = %u\n",
		__func__, be32_to_cpu(res));
	return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound.  Increments the slot's sequence.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table.  The lower layer guarantees
 * a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
		const struct cb_sequenceargs * args)
{
	dprintk("%s enter. slotid %u seqid %u, slot table seqid: %u\n",
		__func__, args->csa_slotid, args->csa_sequenceid, slot->seq_nr);

	if (args->csa_slotid > tbl->server_highest_slotid)
		return htonl(NFS4ERR_BADSLOT);

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		dprintk("%s seqid %u is a replay\n",
			__func__, args->csa_sequenceid);
		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
			return htonl(NFS4ERR_DELAY);
		/* Signal process_op to set this error on next op */
		if (args->csa_cachethis == 0)
			return htonl(NFS4ERR_RETRY_UNCACHED_REP);

		/* Liar! We never allowed you to set csa_cachethis != 0 */
		return htonl(NFS4ERR_SEQ_FALSE_RETRY);
	}

	/* Wraparound */
	if (unlikely(slot->seq_nr == 0xFFFFFFFFU)) {
		if (args->csa_sequenceid == 1)
			return htonl(NFS4_OK);
	} else if (likely(args->csa_sequenceid == slot->seq_nr + 1))
		return htonl(NFS4_OK);

	/* Misordered request */
	return htonl(NFS4ERR_SEQ_MISORDERED);
}

/*
 * For each referring call triple, check the session's slot table for
 * a match.  If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static bool referring_call_exists(struct nfs_client *clp,
				  uint32_t nrclists,
				  struct referring_call_list *rclists)
{
	bool status = 0;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];

			dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u "
				"slotid %u\n", __func__,
				((u32 *)&rclist->rcl_sessionid.data)[0],
				((u32 *)&rclist->rcl_sessionid.data)[1],
				((u32 *)&rclist->rcl_sessionid.data)[2],
				((u32 *)&rclist->rcl_sessionid.data)[3],
				ref->rc_sequenceid, ref->rc_slotid);

			spin_lock(&tbl->slot_tbl_lock);
			status = (test_bit(ref->rc_slotid, tbl->used_slots) &&
				  tbl->slots[ref->rc_slotid].seq_nr ==
					ref->rc_sequenceid);
			spin_unlock(&tbl->slot_tbl_lock);
			if (status)
				goto out;
		}
	}

out:
	return status;
}

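/*
 * CB_SEQUENCE: find the client by session id, validate the backchannel
 * slot and sequence id, reject requests that ask us to cache the reply
 * (we advertise a ca_maxresponsesize_cached of zero) and delay the
 * callback while any referring call is still outstanding.
 */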
__be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
			      struct cb_sequenceres *res,
			      struct cb_process_state *cps)
{
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;
	slot = tbl->slots + args->csa_slotid;

	/* Set up res before grabbing the spinlock */
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out_unlock;
	}

	status = htonl(NFS4ERR_BADSLOT);
	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
	if (IS_ERR(slot))
		goto out_unlock;

	res->csr_highestslotid = tbl->server_highest_slotid;
	res->csr_target_highestslotid = tbl->target_highest_slotid;

	status = validate_seqid(tbl, slot, args);
	if (status)
		goto out_unlock;
	if (!nfs4_try_to_lock_slot(tbl, slot)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
	cps->slot = slot;

	/* The ca_maxresponsesize_cached is 0 with no DRC */
	if (args->csa_cachethis != 0)
		return htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);

	/*
	 * Check for pending referring calls.  If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}

	/*
	 * RFC5661 20.9.3
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.
	 */
	slot->seq_nr = args->csa_sequenceid;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	dprintk("%s: exit with status = %d res->csr_status %d\n", __func__,
		ntohl(status), ntohl(res->csr_status));
	return status;
}

static bool
validate_bitmap_values(unsigned long mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

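/*
 * CB_RECALL_ANY: craa_type_mask selects which classes of state the
 * server wants back.  The delegation bits are translated into an fmode
 * mask for nfs_expire_unused_delegation_types(), and the file layout
 * bit triggers a simulated CB_LAYOUTRECALL(ALL).
 */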
__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy,
			       struct cb_process_state *cps)
{
	__be32 status;
	fmode_t flags = 0;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
		     &args->craa_type_mask))
		flags = FMODE_READ;
	if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
		     &args->craa_type_mask))
		flags |= FMODE_WRITE;
	if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
		     &args->craa_type_mask))
		pnfs_recall_all_layouts(cps->clp);
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy,
				struct cb_process_state *cps)
{
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_highest_slotid);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4_OK);

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
	nfs41_notify_server(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}
#endif /* CONFIG_NFS_V4_1 */
v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */

#include <linux/errno.h>
#include <linux/math.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK

__be32 nfs4_callback_getattr(void *argp, void *resp,
			     struct cb_process_state *cps)
{
	struct cb_getattrargs *args = argp;
	struct cb_getattrres *res = resp;
	struct nfs_delegation *delegation;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	memset(res->bitmap, 0, sizeof(res->bitmap));
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res->status = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				-ntohl(res->status));
		goto out;
	}
	rcu_read_lock();
	delegation = nfs4_get_valid_delegation(inode);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
	if (nfs_have_writebacks(inode))
		res->change_attr++;
	res->atime = inode_get_atime(inode);
	res->ctime = inode_get_ctime(inode);
	res->mtime = inode_get_mtime(inode);
	res->bitmap[0] = (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE) &
			 args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_ACCESS |
			  FATTR4_WORD1_TIME_METADATA |
			  FATTR4_WORD1_TIME_MODIFY) & args->bitmap[1];
	res->bitmap[2] = (FATTR4_WORD2_TIME_DELEG_ACCESS |
			  FATTR4_WORD2_TIME_DELEG_MODIFY) & args->bitmap[2];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}

__be32 nfs4_callback_recall(void *argp, void *resp,
			    struct cb_process_state *cps)
{
	struct cb_recallargs *args = argp;
	struct inode *inode;
	__be32 res;

	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
				&args->stateid, -ntohl(res));
		goto out;
	}
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
			&args->stateid, -ntohl(res));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout inode by stateid
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
		const nfs4_stateid *stateid)
	__must_hold(RCU)
{
	struct nfs_server *server;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
			if (!pnfs_layout_is_valid(lo))
				continue;
			if (!nfs4_stateid_match_other(stateid, &lo->plh_stateid))
				continue;
			if (nfs_sb_active(server->super))
				inode = igrab(lo->plh_inode);
			else
				inode = ERR_PTR(-EAGAIN);
			rcu_read_unlock();
			if (inode)
				return inode;
			nfs_sb_deactive(server->super);
			return ERR_PTR(-EAGAIN);
		}
	}
	rcu_read_unlock();
	return ERR_PTR(-ENOENT);
}

/*
 * Lookup a layout inode by filehandle.
 *
 * Note: returns a refcount on the inode and superblock
 *
 */
static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
		const struct nfs_fh *fh)
{
	struct nfs_server *server;
	struct nfs_inode *nfsi;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
			nfsi = NFS_I(lo->plh_inode);
			if (nfs_compare_fh(fh, &nfsi->fh))
				continue;
			if (nfsi->layout != lo)
				continue;
			if (nfs_sb_active(server->super))
				inode = igrab(lo->plh_inode);
			else
				inode = ERR_PTR(-EAGAIN);
			rcu_read_unlock();
			if (inode)
				return inode;
			nfs_sb_deactive(server->super);
			return ERR_PTR(-EAGAIN);
		}
	}
	rcu_read_unlock();
	return ERR_PTR(-ENOENT);
}

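/*
 * Look up the inode for a recalled layout: try to match the recall
 * stateid first, and fall back to a lookup by filehandle if no layout
 * carries that stateid.
 */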
static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
		const struct nfs_fh *fh,
		const nfs4_stateid *stateid)
{
	struct inode *inode;

	inode = nfs_layout_find_inode_by_stateid(clp, stateid);
	if (inode == ERR_PTR(-ENOENT))
		inode = nfs_layout_find_inode_by_fh(clp, fh);
	return inode;
}

/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
					const nfs4_stateid *new,
					struct cb_process_state *cps)
{
	u32 oldseq, newseq;

	/* Is the stateid not initialised? */
	if (!pnfs_layout_is_valid(lo))
		return NFS4ERR_NOMATCHING_LAYOUT;

	/* Mismatched stateid? */
	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
		return NFS4ERR_BAD_STATEID;

	newseq = be32_to_cpu(new->seqid);
	/* Are we already in a layout recall situation? */
	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
		return NFS4ERR_DELAY;

	/*
	 * Check that the stateid matches what we think it should be.
	 * Note that if the server sent us a list of referring calls,
	 * and we know that those have completed, then we trust the
	 * stateid argument is correct.
	 */
	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	if (newseq > oldseq + 1 && !cps->referring_calls)
		return NFS4ERR_DELAY;

	/* Crazy server! */
	if (newseq <= oldseq)
		return NFS4ERR_OLD_STATEID;

	return NFS_OK;
}

static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args,
				  struct cb_process_state *cps)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
	if (IS_ERR(ino)) {
		if (ino == ERR_PTR(-EAGAIN))
			rv = NFS4ERR_DELAY;
		goto out_noput;
	}

	pnfs_layoutcommit_inode(ino, false);


	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		goto out;
	}
	pnfs_get_layout_hdr(lo);
	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid, cps);
	if (rv != NFS_OK)
		goto unlock;

	/*
	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
	 */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	pnfs_set_layout_stateid(lo, &args->cbl_stateid, NULL, true);
	switch (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
				&args->cbl_range,
				be32_to_cpu(args->cbl_stateid.seqid))) {
	case 0:
	case -EBUSY:
		/* There are layout segments that need to be returned */
		rv = NFS4_OK;
		break;
	case -ENOENT:
		set_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags);
		/* Embrace your forgetfulness! */
		rv = NFS4ERR_NOMATCHING_LAYOUT;

		if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
			NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
				&args->cbl_range);
		}
	}
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	/* Free all lsegs that are attached to commit buckets */
	nfs_commit_inode(ino, 0);
	pnfs_put_layout_hdr(lo);
out:
	nfs_iput_and_deactive(ino);
out_noput:
	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
			&args->cbl_stateid, -rv);
	return rv;
}

static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	int stat;

	if (args->cbl_recall_type == RETURN_FSID)
		stat = pnfs_layout_destroy_byfsid(clp, &args->cbl_fsid,
						  PNFS_LAYOUT_BULK_RETURN);
	else
		stat = pnfs_layout_destroy_byclid(clp, PNFS_LAYOUT_BULK_RETURN);
	if (stat != 0)
		return NFS4ERR_DELAY;
	return NFS4ERR_NOMATCHING_LAYOUT;
}

static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args,
				    struct cb_process_state *cps)
{
	if (args->cbl_recall_type == RETURN_FILE)
		return initiate_file_draining(clp, args, cps);
	return initiate_bulk_draining(clp, args);
}

__be32 nfs4_callback_layoutrecall(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_layoutrecallargs *args = argp;
	u32 res = NFS4ERR_OP_NOT_IN_SESSION;

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args, cps);
	return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp,
				    struct cb_process_state *cps)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args, cps);
}

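/*
 * CB_NOTIFY_DEVICEID: for each notification, look up the layout driver
 * for the given layout type and drop the matching device ID from its
 * cache.
 */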
__be32 nfs4_callback_devicenotify(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_devicenotifyargs *args = argp;
	const struct pnfs_layoutdriver_type *ld = NULL;
	uint32_t i;
	__be32 res = 0;

	if (!cps->clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		if (!ld || ld->id != dev->cbd_layout_type) {
			pnfs_put_layoutdriver(ld);
			ld = pnfs_find_layoutdriver(dev->cbd_layout_type);
			if (!ld)
				continue;
		}
		nfs4_delete_deviceid(ld, cps->clp, &dev->cbd_dev_id);
	}
	pnfs_put_layoutdriver(ld);
out:
	kfree(args->devs);
	return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound.  Increments the slot's sequence.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table.  The lower layer guarantees
 * a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
		const struct cb_sequenceargs * args)
{
	__be32 ret;

	ret = cpu_to_be32(NFS4ERR_BADSLOT);
	if (args->csa_slotid > tbl->server_highest_slotid)
		goto out_err;

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		ret = cpu_to_be32(NFS4ERR_DELAY);
		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
			goto out_err;

		/* Signal process_op to set this error on next op */
		ret = cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP);
		if (args->csa_cachethis == 0)
			goto out_err;

		/* Liar! We never allowed you to set csa_cachethis != 0 */
		ret = cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY);
		goto out_err;
	}

	/* Note: wraparound relies on seq_nr being of type u32 */
	/* Misordered request */
	ret = cpu_to_be32(NFS4ERR_SEQ_MISORDERED);
	if (args->csa_sequenceid != slot->seq_nr + 1)
		goto out_err;

	return cpu_to_be32(NFS4_OK);

out_err:
	trace_nfs4_cb_seqid_err(args, ret);
	return ret;
}

/*
 * For each referring call triple, check the session's slot table for
 * a match.  If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static int referring_call_exists(struct nfs_client *clp,
				  uint32_t nrclists,
				  struct referring_call_list *rclists,
				  spinlock_t *lock)
	__releases(lock)
	__acquires(lock)
{
	int status = 0;
	int found = 0;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];
			spin_unlock(lock);
			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
					ref->rc_sequenceid, HZ >> 1) < 0;
			spin_lock(lock);
			if (status)
				goto out;
			found++;
		}
	}

out:
	return status < 0 ? status : found;
}

__be32 nfs4_callback_sequence(void *argp, void *resp,
			      struct cb_process_state *cps)
{
	struct cb_sequenceargs *args = argp;
	struct cb_sequenceres *res = resp;
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int ret;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;

	/* Set up res before grabbing the spinlock */
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out_unlock;
	}

	status = htonl(NFS4ERR_BADSLOT);
	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
	if (IS_ERR(slot))
		goto out_unlock;

	res->csr_highestslotid = tbl->server_highest_slotid;
	res->csr_target_highestslotid = tbl->target_highest_slotid;

	status = validate_seqid(tbl, slot, args);
	if (status)
		goto out_unlock;
	if (!nfs4_try_to_lock_slot(tbl, slot)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
	cps->slot = slot;

	/* The ca_maxresponsesize_cached is 0 with no DRC */
	if (args->csa_cachethis != 0) {
		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
		goto out_unlock;
	}

	/*
	 * Check for pending referring calls.  If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	ret = referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
				    &tbl->slot_tbl_lock);
	if (ret < 0) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
	cps->referring_calls = ret;

	/*
	 * RFC5661 20.9.3
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.
	 */
	slot->seq_nr = args->csa_sequenceid;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	return status;
}

static bool
validate_bitmap_values(unsigned int mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

__be32 nfs4_callback_recallany(void *argp, void *resp,
			       struct cb_process_state *cps)
{
	struct cb_recallanyargs *args = argp;
	__be32 status;
	fmode_t flags = 0;
	bool schedule_manager = false;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
		flags = FMODE_READ;
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
		flags |= FMODE_WRITE;
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);

	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
		pnfs_recall_all_layouts(cps->clp, cps);

	if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_READ)) {
		set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_READ, &cps->clp->cl_state);
		schedule_manager = true;
	}
	if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_RW)) {
		set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_RW, &cps->clp->cl_state);
		schedule_manager = true;
	}
	if (schedule_manager)
		nfs4_schedule_state_manager(cps->clp);

out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(void *argp, void *resp,
				struct cb_process_state *cps)
{
	struct cb_recallslotargs *args = argp;
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_highest_slotid);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4_OK);

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
	nfs41_notify_server(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

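/*
 * CB_NOTIFY_LOCK: a byte-range lock we were waiting for may now be
 * available, so wake any tasks blocked on the client's lock wait queue.
 */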
__be32 nfs4_callback_notify_lock(void *argp, void *resp,
				 struct cb_process_state *cps)
{
	struct cb_notify_lock_args *args = argp;

	if (!cps->clp) /* set in cb_sequence */
		return htonl(NFS4ERR_OP_NOT_IN_SESSION);

	dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	/* Don't wake anybody if the string looked bogus */
	if (args->cbnl_valid)
		__wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);

	return htonl(NFS4_OK);
}
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
static void nfs4_copy_cb_args(struct nfs4_copy_state *cp_state,
				struct cb_offloadargs *args)
{
	cp_state->count = args->wr_count;
	cp_state->error = args->error;
	if (!args->error) {
		cp_state->verf.committed = args->wr_writeverf.committed;
		memcpy(&cp_state->verf.verifier.data[0],
			&args->wr_writeverf.verifier.data[0],
			NFS4_VERIFIER_SIZE);
	}
}

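/*
 * CB_OFFLOAD: match the copy stateid against the pending server-to-server
 * copies and complete the waiter.  If the callback arrives before the
 * COPY reply, stash the result on clp->pending_cb_stateids so the copy
 * can find it later.
 */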
__be32 nfs4_callback_offload(void *data, void *dummy,
			     struct cb_process_state *cps)
{
	struct cb_offloadargs *args = data;
	struct nfs_server *server;
	struct nfs4_copy_state *copy, *tmp_copy;
	bool found = false;

	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL);
	if (!copy)
		return htonl(NFS4ERR_SERVERFAULT);

	spin_lock(&cps->clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
				client_link) {
		list_for_each_entry(tmp_copy, &server->ss_copies, copies) {
			if (memcmp(args->coa_stateid.other,
					tmp_copy->stateid.other,
					sizeof(args->coa_stateid.other)))
				continue;
			nfs4_copy_cb_args(tmp_copy, args);
			complete(&tmp_copy->completion);
			found = true;
			goto out;
		}
	}
out:
	rcu_read_unlock();
	if (!found) {
		memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
		nfs4_copy_cb_args(copy, args);
		list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids);
	} else
		kfree(copy);
	spin_unlock(&cps->clp->cl_lock);

	trace_nfs4_cb_offload(&args->coa_fh, &args->coa_stateid,
			args->wr_count, args->error,
			args->wr_writeverf.committed);
	return 0;
}
#endif /* CONFIG_NFS_V4_2 */