v6.9.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * linux/fs/nfs/callback_proc.c
  4 *
  5 * Copyright (C) 2004 Trond Myklebust
  6 *
  7 * NFSv4 callback procedures
  8 */
  9
 10#include <linux/errno.h>
 11#include <linux/math.h>
 12#include <linux/nfs4.h>
 13#include <linux/nfs_fs.h>
 14#include <linux/slab.h>
 15#include <linux/rcupdate.h>
 16#include <linux/types.h>
 17
 18#include "nfs4_fs.h"
 19#include "callback.h"
 20#include "delegation.h"
 21#include "internal.h"
 22#include "pnfs.h"
 23#include "nfs4session.h"
 24#include "nfs4trace.h"
 25
 26#define NFSDBG_FACILITY NFSDBG_CALLBACK
 27
 28__be32 nfs4_callback_getattr(void *argp, void *resp,
 29			     struct cb_process_state *cps)
 30{
 31	struct cb_getattrargs *args = argp;
 32	struct cb_getattrres *res = resp;
 33	struct nfs_delegation *delegation;
 34	struct inode *inode;
 35
 36	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
 37	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
 38		goto out;
 39
 40	res->bitmap[0] = res->bitmap[1] = 0;
 41	res->status = htonl(NFS4ERR_BADHANDLE);
 42
 43	dprintk_rcu("NFS: GETATTR callback request from %s\n",
 44		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
 45
 46	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
 47	if (IS_ERR(inode)) {
 48		if (inode == ERR_PTR(-EAGAIN))
 49			res->status = htonl(NFS4ERR_DELAY);
 50		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
 51				-ntohl(res->status));
 52		goto out;
 53	}
 54	rcu_read_lock();
 55	delegation = nfs4_get_valid_delegation(inode);
 56	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
 57		goto out_iput;
 58	res->size = i_size_read(inode);
 59	res->change_attr = delegation->change_attr;
 60	if (nfs_have_writebacks(inode))
 61		res->change_attr++;
 62	res->ctime = inode_get_ctime(inode);
 63	res->mtime = inode_get_mtime(inode);
 64	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
 65		args->bitmap[0];
 66	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
 67		args->bitmap[1];
 68	res->status = 0;
 69out_iput:
 70	rcu_read_unlock();
 71	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
 72	nfs_iput_and_deactive(inode);
 73out:
 74	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
 75	return res->status;
 76}
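/*
 * Note on the status convention above: res->status is carried in network
 * byte order (__be32), so NFS4ERR_* values are stored with htonl() and
 * converted back with ntohl() for the dprintk and tracepoint calls; the
 * tracepoints take the negated host-order value so errors are recorded as
 * negative NFS4ERR_* codes, matching the kernel's negative-errno style.
 */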
 77
 78__be32 nfs4_callback_recall(void *argp, void *resp,
 79			    struct cb_process_state *cps)
 80{
 81	struct cb_recallargs *args = argp;
 82	struct inode *inode;
 83	__be32 res;
 84	
 85	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
 86	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
 87		goto out;
 88
 89	dprintk_rcu("NFS: RECALL callback request from %s\n",
 90		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
 91
 92	res = htonl(NFS4ERR_BADHANDLE);
 93	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
 94	if (IS_ERR(inode)) {
 95		if (inode == ERR_PTR(-EAGAIN))
 96			res = htonl(NFS4ERR_DELAY);
 97		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
 98				&args->stateid, -ntohl(res));
 99		goto out;
100	}
101	/* Set up a helper thread to actually return the delegation */
102	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
103	case 0:
104		res = 0;
105		break;
106	case -ENOENT:
107		res = htonl(NFS4ERR_BAD_STATEID);
108		break;
109	default:
110		res = htonl(NFS4ERR_RESOURCE);
111	}
112	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
113			&args->stateid, -ntohl(res));
114	nfs_iput_and_deactive(inode);
115out:
116	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
117	return res;
118}
119
120#if defined(CONFIG_NFS_V4_1)
121
122/*
123 * Lookup a layout inode by stateid
124 *
125 * Note: returns a refcount on the inode and superblock
126 */
127static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
128		const nfs4_stateid *stateid)
129	__must_hold(RCU)
130{
131	struct nfs_server *server;
132	struct inode *inode;
133	struct pnfs_layout_hdr *lo;
134
135	rcu_read_lock();
136	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
137		list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
138			if (!pnfs_layout_is_valid(lo))
139				continue;
140			if (!nfs4_stateid_match_other(stateid, &lo->plh_stateid))
141				continue;
142			if (nfs_sb_active(server->super))
143				inode = igrab(lo->plh_inode);
144			else
145				inode = ERR_PTR(-EAGAIN);
146			rcu_read_unlock();
147			if (inode)
148				return inode;
149			nfs_sb_deactive(server->super);
150			return ERR_PTR(-EAGAIN);
151		}
152	}
153	rcu_read_unlock();
154	return ERR_PTR(-ENOENT);
155}
156
157/*
158 * Lookup a layout inode by filehandle.
159 *
160 * Note: returns a refcount on the inode and superblock
161 *
162 */
163static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
164		const struct nfs_fh *fh)
165{
166	struct nfs_server *server;
167	struct nfs_inode *nfsi;
168	struct inode *inode;
169	struct pnfs_layout_hdr *lo;
170
171	rcu_read_lock();
172	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
173		list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
174			nfsi = NFS_I(lo->plh_inode);
175			if (nfs_compare_fh(fh, &nfsi->fh))
176				continue;
177			if (nfsi->layout != lo)
178				continue;
179			if (nfs_sb_active(server->super))
180				inode = igrab(lo->plh_inode);
181			else
182				inode = ERR_PTR(-EAGAIN);
183			rcu_read_unlock();
184			if (inode)
185				return inode;
186			nfs_sb_deactive(server->super);
187			return ERR_PTR(-EAGAIN);
188		}
189	}
190	rcu_read_unlock();
191	return ERR_PTR(-ENOENT);
192}
193
194static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
195		const struct nfs_fh *fh,
196		const nfs4_stateid *stateid)
197{
198	struct inode *inode;
199
200	inode = nfs_layout_find_inode_by_stateid(clp, stateid);
201	if (inode == ERR_PTR(-ENOENT))
202		inode = nfs_layout_find_inode_by_fh(clp, fh);
203	return inode;
204}
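/*
 * Note on the lookup helpers above: a successful return carries both an
 * inode reference (from igrab()) and an active superblock reference
 * (from nfs_sb_active()); callers drop both via nfs_iput_and_deactive().
 * ERR_PTR(-EAGAIN) means the superblock is shutting down or the inode is
 * being evicted, and callers translate it into NFS4ERR_DELAY so the
 * server retries the recall later.
 */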
205
206/*
207 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
208 */
209static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
210					const nfs4_stateid *new,
211					struct cb_process_state *cps)
212{
213	u32 oldseq, newseq;
214
215	/* Is the stateid not initialised? */
216	if (!pnfs_layout_is_valid(lo))
217		return NFS4ERR_NOMATCHING_LAYOUT;
218
219	/* Mismatched stateid? */
220	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
221		return NFS4ERR_BAD_STATEID;
222
223	newseq = be32_to_cpu(new->seqid);
224	/* Are we already in a layout recall situation? */
225	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
226		return NFS4ERR_DELAY;
227
228	/*
229	 * Check that the stateid matches what we think it should be.
230	 * Note that if the server sent us a list of referring calls,
231	 * and we know that those have completed, then we trust the
232	 * stateid argument is correct.
233	 */
234	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
235	if (newseq > oldseq + 1 && !cps->referring_calls)
236		return NFS4ERR_DELAY;
237
238	/* Crazy server! */
239	if (newseq <= oldseq)
240		return NFS4ERR_OLD_STATEID;
241
242	return NFS_OK;
243}
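/*
 * Worked example of the sequencing check above, with the client holding
 * plh_stateid.seqid == 5:
 *   - a recall carrying seqid 6 is the expected next change -> NFS_OK
 *   - seqid 8 with no completed referring calls means intervening
 *     callbacks were missed or reordered -> NFS4ERR_DELAY (retry later)
 *   - seqid 5 or lower is stale -> NFS4ERR_OLD_STATEID
 */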
244
245static u32 initiate_file_draining(struct nfs_client *clp,
246				  struct cb_layoutrecallargs *args,
247				  struct cb_process_state *cps)
248{
249	struct inode *ino;
250	struct pnfs_layout_hdr *lo;
251	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
252	LIST_HEAD(free_me_list);
253
254	ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
255	if (IS_ERR(ino)) {
256		if (ino == ERR_PTR(-EAGAIN))
257			rv = NFS4ERR_DELAY;
258		goto out_noput;
259	}
260
261	pnfs_layoutcommit_inode(ino, false);
262
263
264	spin_lock(&ino->i_lock);
265	lo = NFS_I(ino)->layout;
266	if (!lo) {
267		spin_unlock(&ino->i_lock);
268		goto out;
269	}
270	pnfs_get_layout_hdr(lo);
271	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid, cps);
272	if (rv != NFS_OK)
273		goto unlock;
274
275	/*
276	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
277	 */
278	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
279		rv = NFS4ERR_DELAY;
280		goto unlock;
281	}
282
283	pnfs_set_layout_stateid(lo, &args->cbl_stateid, NULL, true);
284	switch (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
285				&args->cbl_range,
286				be32_to_cpu(args->cbl_stateid.seqid))) {
287	case 0:
288	case -EBUSY:
289		/* There are layout segments that need to be returned */
290		rv = NFS4_OK;
291		break;
292	case -ENOENT:
293		set_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags);
294		/* Embrace your forgetfulness! */
295		rv = NFS4ERR_NOMATCHING_LAYOUT;
296
297		if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
298			NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
299				&args->cbl_range);
300		}
301	}
302unlock:
303	spin_unlock(&ino->i_lock);
304	pnfs_free_lseg_list(&free_me_list);
305	/* Free all lsegs that are attached to commit buckets */
306	nfs_commit_inode(ino, 0);
307	pnfs_put_layout_hdr(lo);
308out:
309	nfs_iput_and_deactive(ino);
310out_noput:
311	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
312			&args->cbl_stateid, -rv);
313	return rv;
314}
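/*
 * Note: NFS4ERR_NOMATCHING_LAYOUT is the "success" answer of a forgetful
 * client model -- it tells the server the client no longer holds (or never
 * held) matching layout segments, so no LAYOUTRETURN should be expected;
 * NFS4_OK instead promises that a LAYOUTRETURN for the recalled range will
 * follow.
 */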
315
316static u32 initiate_bulk_draining(struct nfs_client *clp,
317				  struct cb_layoutrecallargs *args)
318{
319	int stat;
320
321	if (args->cbl_recall_type == RETURN_FSID)
322		stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
323	else
324		stat = pnfs_destroy_layouts_byclid(clp, true);
325	if (stat != 0)
326		return NFS4ERR_DELAY;
327	return NFS4ERR_NOMATCHING_LAYOUT;
328}
329
330static u32 do_callback_layoutrecall(struct nfs_client *clp,
331				    struct cb_layoutrecallargs *args,
332				    struct cb_process_state *cps)
333{
334	if (args->cbl_recall_type == RETURN_FILE)
335		return initiate_file_draining(clp, args, cps);
336	return initiate_bulk_draining(clp, args);
337}
338
339__be32 nfs4_callback_layoutrecall(void *argp, void *resp,
340				  struct cb_process_state *cps)
341{
342	struct cb_layoutrecallargs *args = argp;
343	u32 res = NFS4ERR_OP_NOT_IN_SESSION;
344
345	if (cps->clp)
346		res = do_callback_layoutrecall(cps->clp, args, cps);
347	return cpu_to_be32(res);
348}
349
350static void pnfs_recall_all_layouts(struct nfs_client *clp,
351				    struct cb_process_state *cps)
352{
353	struct cb_layoutrecallargs args;
354
355	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
356	memset(&args, 0, sizeof(args));
357	args.cbl_recall_type = RETURN_ALL;
358	/* FIXME we ignore errors, what should we do? */
359	do_callback_layoutrecall(clp, &args, cps);
360}
361
362__be32 nfs4_callback_devicenotify(void *argp, void *resp,
363				  struct cb_process_state *cps)
364{
365	struct cb_devicenotifyargs *args = argp;
366	const struct pnfs_layoutdriver_type *ld = NULL;
367	uint32_t i;
368	__be32 res = 0;
369
370	if (!cps->clp) {
371		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
372		goto out;
373	}
374
375	for (i = 0; i < args->ndevs; i++) {
376		struct cb_devicenotifyitem *dev = &args->devs[i];
377
378		if (!ld || ld->id != dev->cbd_layout_type) {
379			pnfs_put_layoutdriver(ld);
380			ld = pnfs_find_layoutdriver(dev->cbd_layout_type);
381			if (!ld)
382				continue;
383		}
384		nfs4_delete_deviceid(ld, cps->clp, &dev->cbd_dev_id);
385	}
386	pnfs_put_layoutdriver(ld);
387out:
388	kfree(args->devs);
389	return res;
390}
391
392/*
393 * Validate the sequenceID sent by the server.
394 * Return success if the sequenceID is one more than what we last saw on
395 * this slot, accounting for wraparound.  Increments the slot's sequence.
396 *
397 * We don't yet implement a duplicate request cache, instead we set the
398 * back channel ca_maxresponsesize_cached to zero. This is OK for now
399 * since we only currently implement idempotent callbacks anyway.
400 *
401 * We have a single slot backchannel at this time, so we don't bother
402 * checking the used_slots bit array on the table.  The lower layer guarantees
403 * a single outstanding callback request at a time.
404 */
405static __be32
406validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
407		const struct cb_sequenceargs * args)
408{
409	__be32 ret;
410
411	ret = cpu_to_be32(NFS4ERR_BADSLOT);
412	if (args->csa_slotid > tbl->server_highest_slotid)
413		goto out_err;
414
415	/* Replay */
416	if (args->csa_sequenceid == slot->seq_nr) {
417		ret = cpu_to_be32(NFS4ERR_DELAY);
418		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
419			goto out_err;
420
421		/* Signal process_op to set this error on next op */
422		ret = cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP);
423		if (args->csa_cachethis == 0)
424			goto out_err;
425
426		/* Liar! We never allowed you to set csa_cachethis != 0 */
427		ret = cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY);
428		goto out_err;
429	}
430
431	/* Note: wraparound relies on seq_nr being of type u32 */
432	/* Misordered request */
433	ret = cpu_to_be32(NFS4ERR_SEQ_MISORDERED);
434	if (args->csa_sequenceid != slot->seq_nr + 1)
435		goto out_err;
436
437	return cpu_to_be32(NFS4_OK);
438
439out_err:
440	trace_nfs4_cb_seqid_err(args, ret);
441	return ret;
442}
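/*
 * Worked example of the wraparound note above: because seq_nr and
 * csa_sequenceid are 32-bit, a slot sitting at seq_nr == 0xffffffff
 * accepts csa_sequenceid == 0 as the next request, since
 * slot->seq_nr + 1 wraps to 0 in u32 arithmetic.
 */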
443
444/*
445 * For each referring call triple, check the session's slot table for
446 * a match.  If the slot is in use and the sequence numbers match, the
447 * client is still waiting for a response to the original request.
448 */
449static int referring_call_exists(struct nfs_client *clp,
450				  uint32_t nrclists,
451				  struct referring_call_list *rclists,
452				  spinlock_t *lock)
453	__releases(lock)
454	__acquires(lock)
455{
456	int status = 0;
457	int found = 0;
458	int i, j;
459	struct nfs4_session *session;
460	struct nfs4_slot_table *tbl;
461	struct referring_call_list *rclist;
462	struct referring_call *ref;
463
464	/*
465	 * XXX When client trunking is implemented, this becomes
466	 * a session lookup from within the loop
467	 */
468	session = clp->cl_session;
469	tbl = &session->fc_slot_table;
470
471	for (i = 0; i < nrclists; i++) {
472		rclist = &rclists[i];
473		if (memcmp(session->sess_id.data,
474			   rclist->rcl_sessionid.data,
475			   NFS4_MAX_SESSIONID_LEN) != 0)
476			continue;
477
478		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
479			ref = &rclist->rcl_refcalls[j];
480			spin_unlock(lock);
481			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
482					ref->rc_sequenceid, HZ >> 1) < 0;
483			spin_lock(lock);
484			if (status)
485				goto out;
486			found++;
487		}
488	}
489
490out:
491	return status < 0 ? status : found;
492}
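/*
 * Note: the slot table lock passed in is dropped around each
 * nfs4_slot_wait_on_seqid() call, so a CB_SEQUENCE carrying referring
 * calls may block for up to half a second (HZ >> 1) per triple while the
 * matching forechannel request completes.
 */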
493
494__be32 nfs4_callback_sequence(void *argp, void *resp,
495			      struct cb_process_state *cps)
496{
497	struct cb_sequenceargs *args = argp;
498	struct cb_sequenceres *res = resp;
499	struct nfs4_slot_table *tbl;
500	struct nfs4_slot *slot;
501	struct nfs_client *clp;
502	int ret;
503	int i;
504	__be32 status = htonl(NFS4ERR_BADSESSION);
505
506	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
507					 &args->csa_sessionid, cps->minorversion);
508	if (clp == NULL)
509		goto out;
510
511	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
512		goto out;
513
514	tbl = &clp->cl_session->bc_slot_table;
515
516	/* Set up res before grabbing the spinlock */
517	memcpy(&res->csr_sessionid, &args->csa_sessionid,
518	       sizeof(res->csr_sessionid));
519	res->csr_sequenceid = args->csa_sequenceid;
520	res->csr_slotid = args->csa_slotid;
521
522	spin_lock(&tbl->slot_tbl_lock);
523	/* state manager is resetting the session */
524	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
525		status = htonl(NFS4ERR_DELAY);
526		/* Return NFS4ERR_BADSESSION if we're draining the session
527		 * in order to reset it.
528		 */
529		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
530			status = htonl(NFS4ERR_BADSESSION);
531		goto out_unlock;
532	}
533
534	status = htonl(NFS4ERR_BADSLOT);
535	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
536	if (IS_ERR(slot))
537		goto out_unlock;
538
539	res->csr_highestslotid = tbl->server_highest_slotid;
540	res->csr_target_highestslotid = tbl->target_highest_slotid;
541
542	status = validate_seqid(tbl, slot, args);
543	if (status)
544		goto out_unlock;
545	if (!nfs4_try_to_lock_slot(tbl, slot)) {
546		status = htonl(NFS4ERR_DELAY);
547		goto out_unlock;
548	}
549	cps->slot = slot;
550
551	/* The ca_maxresponsesize_cached is 0 with no DRC */
552	if (args->csa_cachethis != 0) {
553		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
554		goto out_unlock;
555	}
556
557	/*
558	 * Check for pending referring calls.  If a match is found, a
559	 * related callback was received before the response to the original
560	 * call.
561	 */
562	ret = referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
563				    &tbl->slot_tbl_lock);
564	if (ret < 0) {
565		status = htonl(NFS4ERR_DELAY);
566		goto out_unlock;
567	}
568	cps->referring_calls = ret;
569
570	/*
571	 * RFC5661 20.9.3
572	 * If CB_SEQUENCE returns an error, then the state of the slot
573	 * (sequence ID, cached reply) MUST NOT change.
574	 */
575	slot->seq_nr = args->csa_sequenceid;
576out_unlock:
577	spin_unlock(&tbl->slot_tbl_lock);
578
579out:
580	cps->clp = clp; /* put in nfs4_callback_compound */
581	for (i = 0; i < args->csa_nrclists; i++)
582		kfree(args->csa_rclists[i].rcl_refcalls);
583	kfree(args->csa_rclists);
584
585	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
586		cps->drc_status = status;
587		status = 0;
588	} else
589		res->csr_status = status;
590
591	trace_nfs4_cb_sequence(args, res, status);
592	return status;
593}
594
595static bool
596validate_bitmap_values(unsigned int mask)
597{
598	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
599}
600
601__be32 nfs4_callback_recallany(void *argp, void *resp,
602			       struct cb_process_state *cps)
603{
604	struct cb_recallanyargs *args = argp;
605	__be32 status;
606	fmode_t flags = 0;
607	bool schedule_manager = false;
608
609	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
610	if (!cps->clp) /* set in cb_sequence */
611		goto out;
612
613	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
614		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
615
616	status = cpu_to_be32(NFS4ERR_INVAL);
617	if (!validate_bitmap_values(args->craa_type_mask))
618		goto out;
619
620	status = cpu_to_be32(NFS4_OK);
621	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
622		flags = FMODE_READ;
623	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
624		flags |= FMODE_WRITE;
625	if (flags)
626		nfs_expire_unused_delegation_types(cps->clp, flags);
627
628	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
629		pnfs_recall_all_layouts(cps->clp, cps);
630
631	if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_READ)) {
632		set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_READ, &cps->clp->cl_state);
633		schedule_manager = true;
634	}
635	if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_RW)) {
636		set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_RW, &cps->clp->cl_state);
637		schedule_manager = true;
638	}
639	if (schedule_manager)
640		nfs4_schedule_state_manager(cps->clp);
641
642out:
643	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
644	return status;
645}
646
647/* Reduce the fore channel's max_slots to the target value */
648__be32 nfs4_callback_recallslot(void *argp, void *resp,
649				struct cb_process_state *cps)
650{
651	struct cb_recallslotargs *args = argp;
652	struct nfs4_slot_table *fc_tbl;
653	__be32 status;
654
655	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
656	if (!cps->clp) /* set in cb_sequence */
657		goto out;
658
659	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
660		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
661		args->crsa_target_highest_slotid);
662
663	fc_tbl = &cps->clp->cl_session->fc_slot_table;
664
665	status = htonl(NFS4_OK);
666
667	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
668	nfs41_notify_server(cps->clp);
669out:
670	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
671	return status;
672}
673
674__be32 nfs4_callback_notify_lock(void *argp, void *resp,
675				 struct cb_process_state *cps)
676{
677	struct cb_notify_lock_args *args = argp;
678
679	if (!cps->clp) /* set in cb_sequence */
680		return htonl(NFS4ERR_OP_NOT_IN_SESSION);
681
682	dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
683		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
684
685	/* Don't wake anybody if the string looked bogus */
686	if (args->cbnl_valid)
687		__wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);
688
689	return htonl(NFS4_OK);
690}
691#endif /* CONFIG_NFS_V4_1 */
692#ifdef CONFIG_NFS_V4_2
693static void nfs4_copy_cb_args(struct nfs4_copy_state *cp_state,
694				struct cb_offloadargs *args)
695{
696	cp_state->count = args->wr_count;
697	cp_state->error = args->error;
698	if (!args->error) {
699		cp_state->verf.committed = args->wr_writeverf.committed;
700		memcpy(&cp_state->verf.verifier.data[0],
701			&args->wr_writeverf.verifier.data[0],
702			NFS4_VERIFIER_SIZE);
703	}
704}
705
706__be32 nfs4_callback_offload(void *data, void *dummy,
707			     struct cb_process_state *cps)
708{
709	struct cb_offloadargs *args = data;
710	struct nfs_server *server;
711	struct nfs4_copy_state *copy, *tmp_copy;
712	bool found = false;
713
714	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL);
715	if (!copy)
716		return htonl(NFS4ERR_SERVERFAULT);
717
718	spin_lock(&cps->clp->cl_lock);
719	rcu_read_lock();
720	list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
721				client_link) {
722		list_for_each_entry(tmp_copy, &server->ss_copies, copies) {
723			if (memcmp(args->coa_stateid.other,
724					tmp_copy->stateid.other,
725					sizeof(args->coa_stateid.other)))
726				continue;
727			nfs4_copy_cb_args(tmp_copy, args);
728			complete(&tmp_copy->completion);
729			found = true;
730			goto out;
731		}
732	}
733out:
734	rcu_read_unlock();
735	if (!found) {
736		memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
737		nfs4_copy_cb_args(copy, args);
738		list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids);
739	} else
740		kfree(copy);
741	spin_unlock(&cps->clp->cl_lock);
742
743	trace_nfs4_cb_offload(&args->coa_fh, &args->coa_stateid,
744			args->wr_count, args->error,
745			args->wr_writeverf.committed);
746	return 0;
747}
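/*
 * Note: a CB_OFFLOAD can race with the reply to the COPY that started the
 * operation.  If no in-flight copy on ss_copies matches the stateid, the
 * callback's results are parked on clp->pending_cb_stateids so the copy
 * path can pick them up once the COPY reply arrives; otherwise the waiter
 * is completed directly and the preallocated state is freed.
 */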
748#endif /* CONFIG_NFS_V4_2 */
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * linux/fs/nfs/callback_proc.c
  4 *
  5 * Copyright (C) 2004 Trond Myklebust
  6 *
  7 * NFSv4 callback procedures
  8 */
  9
 10#include <linux/errno.h>
 11#include <linux/math.h>
 12#include <linux/nfs4.h>
 13#include <linux/nfs_fs.h>
 14#include <linux/slab.h>
 15#include <linux/rcupdate.h>
 16#include <linux/types.h>
 17
 18#include "nfs4_fs.h"
 19#include "callback.h"
 20#include "delegation.h"
 21#include "internal.h"
 22#include "pnfs.h"
 23#include "nfs4session.h"
 24#include "nfs4trace.h"
 25
 26#define NFSDBG_FACILITY NFSDBG_CALLBACK
 27
 28__be32 nfs4_callback_getattr(void *argp, void *resp,
 29			     struct cb_process_state *cps)
 30{
 31	struct cb_getattrargs *args = argp;
 32	struct cb_getattrres *res = resp;
 33	struct nfs_delegation *delegation;
 34	struct inode *inode;
 35
 36	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
 37	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
 38		goto out;
 39
 40	memset(res->bitmap, 0, sizeof(res->bitmap));
 41	res->status = htonl(NFS4ERR_BADHANDLE);
 42
 43	dprintk_rcu("NFS: GETATTR callback request from %s\n",
 44		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
 45
 46	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
 47	if (IS_ERR(inode)) {
 48		if (inode == ERR_PTR(-EAGAIN))
 49			res->status = htonl(NFS4ERR_DELAY);
 50		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
 51				-ntohl(res->status));
 52		goto out;
 53	}
 54	rcu_read_lock();
 55	delegation = nfs4_get_valid_delegation(inode);
 56	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
 57		goto out_iput;
 58	res->size = i_size_read(inode);
 59	res->change_attr = delegation->change_attr;
 60	if (nfs_have_writebacks(inode))
 61		res->change_attr++;
 62	res->atime = inode_get_atime(inode);
 63	res->ctime = inode_get_ctime(inode);
 64	res->mtime = inode_get_mtime(inode);
 65	res->bitmap[0] = (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE) &
 66			 args->bitmap[0];
 67	res->bitmap[1] = (FATTR4_WORD1_TIME_ACCESS |
 68			  FATTR4_WORD1_TIME_METADATA |
 69			  FATTR4_WORD1_TIME_MODIFY) & args->bitmap[1];
 70	res->bitmap[2] = (FATTR4_WORD2_TIME_DELEG_ACCESS |
 71			  FATTR4_WORD2_TIME_DELEG_MODIFY) & args->bitmap[2];
 72	res->status = 0;
 73out_iput:
 74	rcu_read_unlock();
 75	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
 76	nfs_iput_and_deactive(inode);
 77out:
 78	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
 79	return res->status;
 80}
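/*
 * Note: the word-2 bits above cover the delegated access/modify time
 * attributes, which appear to let a client holding a write delegation
 * report the atime and mtime it has been maintaining locally instead of
 * forcing the server to guess.
 */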
 81
 82__be32 nfs4_callback_recall(void *argp, void *resp,
 83			    struct cb_process_state *cps)
 84{
 85	struct cb_recallargs *args = argp;
 86	struct inode *inode;
 87	__be32 res;
 88	
 89	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
 90	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
 91		goto out;
 92
 93	dprintk_rcu("NFS: RECALL callback request from %s\n",
 94		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
 95
 96	res = htonl(NFS4ERR_BADHANDLE);
 97	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
 98	if (IS_ERR(inode)) {
 99		if (inode == ERR_PTR(-EAGAIN))
100			res = htonl(NFS4ERR_DELAY);
101		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
102				&args->stateid, -ntohl(res));
103		goto out;
104	}
105	/* Set up a helper thread to actually return the delegation */
106	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
107	case 0:
108		res = 0;
109		break;
110	case -ENOENT:
111		res = htonl(NFS4ERR_BAD_STATEID);
112		break;
113	default:
114		res = htonl(NFS4ERR_RESOURCE);
115	}
116	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
117			&args->stateid, -ntohl(res));
118	nfs_iput_and_deactive(inode);
119out:
120	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
121	return res;
122}
123
124#if defined(CONFIG_NFS_V4_1)
125
126/*
127 * Lookup a layout inode by stateid
128 *
129 * Note: returns a refcount on the inode and superblock
130 */
131static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
132		const nfs4_stateid *stateid)
133	__must_hold(RCU)
134{
135	struct nfs_server *server;
136	struct inode *inode;
137	struct pnfs_layout_hdr *lo;
138
139	rcu_read_lock();
140	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
141		list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
142			if (!pnfs_layout_is_valid(lo))
143				continue;
144			if (!nfs4_stateid_match_other(stateid, &lo->plh_stateid))
145				continue;
146			if (nfs_sb_active(server->super))
147				inode = igrab(lo->plh_inode);
148			else
149				inode = ERR_PTR(-EAGAIN);
150			rcu_read_unlock();
151			if (inode)
152				return inode;
153			nfs_sb_deactive(server->super);
154			return ERR_PTR(-EAGAIN);
155		}
156	}
157	rcu_read_unlock();
158	return ERR_PTR(-ENOENT);
159}
160
161/*
162 * Lookup a layout inode by filehandle.
163 *
164 * Note: returns a refcount on the inode and superblock
165 *
166 */
167static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
168		const struct nfs_fh *fh)
169{
170	struct nfs_server *server;
171	struct nfs_inode *nfsi;
172	struct inode *inode;
173	struct pnfs_layout_hdr *lo;
174
175	rcu_read_lock();
176	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
177		list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
178			nfsi = NFS_I(lo->plh_inode);
179			if (nfs_compare_fh(fh, &nfsi->fh))
180				continue;
181			if (nfsi->layout != lo)
182				continue;
183			if (nfs_sb_active(server->super))
184				inode = igrab(lo->plh_inode);
185			else
186				inode = ERR_PTR(-EAGAIN);
187			rcu_read_unlock();
188			if (inode)
189				return inode;
190			nfs_sb_deactive(server->super);
191			return ERR_PTR(-EAGAIN);
192		}
193	}
194	rcu_read_unlock();
195	return ERR_PTR(-ENOENT);
196}
197
198static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
199		const struct nfs_fh *fh,
200		const nfs4_stateid *stateid)
201{
202	struct inode *inode;
203
204	inode = nfs_layout_find_inode_by_stateid(clp, stateid);
205	if (inode == ERR_PTR(-ENOENT))
206		inode = nfs_layout_find_inode_by_fh(clp, fh);
207	return inode;
208}
209
210/*
211 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
212 */
213static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
214					const nfs4_stateid *new,
215					struct cb_process_state *cps)
216{
217	u32 oldseq, newseq;
218
219	/* Is the stateid not initialised? */
220	if (!pnfs_layout_is_valid(lo))
221		return NFS4ERR_NOMATCHING_LAYOUT;
222
223	/* Mismatched stateid? */
224	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
225		return NFS4ERR_BAD_STATEID;
226
227	newseq = be32_to_cpu(new->seqid);
228	/* Are we already in a layout recall situation? */
229	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
230		return NFS4ERR_DELAY;
231
232	/*
233	 * Check that the stateid matches what we think it should be.
234	 * Note that if the server sent us a list of referring calls,
235	 * and we know that those have completed, then we trust the
236	 * stateid argument is correct.
237	 */
238	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
239	if (newseq > oldseq + 1 && !cps->referring_calls)
240		return NFS4ERR_DELAY;
241
242	/* Crazy server! */
243	if (newseq <= oldseq)
244		return NFS4ERR_OLD_STATEID;
245
246	return NFS_OK;
247}
248
249static u32 initiate_file_draining(struct nfs_client *clp,
250				  struct cb_layoutrecallargs *args,
251				  struct cb_process_state *cps)
252{
253	struct inode *ino;
254	struct pnfs_layout_hdr *lo;
255	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
256	LIST_HEAD(free_me_list);
257
258	ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
259	if (IS_ERR(ino)) {
260		if (ino == ERR_PTR(-EAGAIN))
261			rv = NFS4ERR_DELAY;
262		goto out_noput;
263	}
264
265	pnfs_layoutcommit_inode(ino, false);
266
267
268	spin_lock(&ino->i_lock);
269	lo = NFS_I(ino)->layout;
270	if (!lo) {
271		spin_unlock(&ino->i_lock);
272		goto out;
273	}
274	pnfs_get_layout_hdr(lo);
275	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid, cps);
276	if (rv != NFS_OK)
277		goto unlock;
278
279	/*
280	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
281	 */
282	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
283		rv = NFS4ERR_DELAY;
284		goto unlock;
285	}
286
287	pnfs_set_layout_stateid(lo, &args->cbl_stateid, NULL, true);
288	switch (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
289				&args->cbl_range,
290				be32_to_cpu(args->cbl_stateid.seqid))) {
291	case 0:
292	case -EBUSY:
293		/* There are layout segments that need to be returned */
294		rv = NFS4_OK;
295		break;
296	case -ENOENT:
297		set_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags);
298		/* Embrace your forgetfulness! */
299		rv = NFS4ERR_NOMATCHING_LAYOUT;
300
301		if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
302			NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
303				&args->cbl_range);
304		}
305	}
306unlock:
307	spin_unlock(&ino->i_lock);
308	pnfs_free_lseg_list(&free_me_list);
309	/* Free all lsegs that are attached to commit buckets */
310	nfs_commit_inode(ino, 0);
311	pnfs_put_layout_hdr(lo);
312out:
313	nfs_iput_and_deactive(ino);
314out_noput:
315	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
316			&args->cbl_stateid, -rv);
317	return rv;
318}
319
320static u32 initiate_bulk_draining(struct nfs_client *clp,
321				  struct cb_layoutrecallargs *args)
322{
323	int stat;
324
325	if (args->cbl_recall_type == RETURN_FSID)
326		stat = pnfs_layout_destroy_byfsid(clp, &args->cbl_fsid,
327						  PNFS_LAYOUT_BULK_RETURN);
328	else
329		stat = pnfs_layout_destroy_byclid(clp, PNFS_LAYOUT_BULK_RETURN);
330	if (stat != 0)
331		return NFS4ERR_DELAY;
332	return NFS4ERR_NOMATCHING_LAYOUT;
333}
334
335static u32 do_callback_layoutrecall(struct nfs_client *clp,
336				    struct cb_layoutrecallargs *args,
337				    struct cb_process_state *cps)
338{
339	if (args->cbl_recall_type == RETURN_FILE)
340		return initiate_file_draining(clp, args, cps);
341	return initiate_bulk_draining(clp, args);
342}
343
344__be32 nfs4_callback_layoutrecall(void *argp, void *resp,
345				  struct cb_process_state *cps)
346{
347	struct cb_layoutrecallargs *args = argp;
348	u32 res = NFS4ERR_OP_NOT_IN_SESSION;
349
350	if (cps->clp)
351		res = do_callback_layoutrecall(cps->clp, args, cps);
352	return cpu_to_be32(res);
353}
354
355static void pnfs_recall_all_layouts(struct nfs_client *clp,
356				    struct cb_process_state *cps)
357{
358	struct cb_layoutrecallargs args;
359
360	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
361	memset(&args, 0, sizeof(args));
362	args.cbl_recall_type = RETURN_ALL;
363	/* FIXME we ignore errors, what should we do? */
364	do_callback_layoutrecall(clp, &args, cps);
365}
366
367__be32 nfs4_callback_devicenotify(void *argp, void *resp,
368				  struct cb_process_state *cps)
369{
370	struct cb_devicenotifyargs *args = argp;
371	const struct pnfs_layoutdriver_type *ld = NULL;
372	uint32_t i;
373	__be32 res = 0;
374
375	if (!cps->clp) {
376		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
377		goto out;
378	}
379
380	for (i = 0; i < args->ndevs; i++) {
381		struct cb_devicenotifyitem *dev = &args->devs[i];
382
383		if (!ld || ld->id != dev->cbd_layout_type) {
384			pnfs_put_layoutdriver(ld);
385			ld = pnfs_find_layoutdriver(dev->cbd_layout_type);
386			if (!ld)
387				continue;
388		}
389		nfs4_delete_deviceid(ld, cps->clp, &dev->cbd_dev_id);
390	}
391	pnfs_put_layoutdriver(ld);
392out:
393	kfree(args->devs);
394	return res;
395}
396
397/*
398 * Validate the sequenceID sent by the server.
399 * Return success if the sequenceID is one more than what we last saw on
400 * this slot, accounting for wraparound.  Increments the slot's sequence.
401 *
402 * We don't yet implement a duplicate request cache, instead we set the
403 * back channel ca_maxresponsesize_cached to zero. This is OK for now
404 * since we only currently implement idempotent callbacks anyway.
405 *
406 * We have a single slot backchannel at this time, so we don't bother
407 * checking the used_slots bit array on the table.  The lower layer guarantees
408 * a single outstanding callback request at a time.
409 */
410static __be32
411validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
412		const struct cb_sequenceargs * args)
413{
414	__be32 ret;
415
416	ret = cpu_to_be32(NFS4ERR_BADSLOT);
417	if (args->csa_slotid > tbl->server_highest_slotid)
418		goto out_err;
419
420	/* Replay */
421	if (args->csa_sequenceid == slot->seq_nr) {
422		ret = cpu_to_be32(NFS4ERR_DELAY);
423		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
424			goto out_err;
425
426		/* Signal process_op to set this error on next op */
427		ret = cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP);
428		if (args->csa_cachethis == 0)
429			goto out_err;
430
431		/* Liar! We never allowed you to set csa_cachethis != 0 */
432		ret = cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY);
433		goto out_err;
434	}
435
436	/* Note: wraparound relies on seq_nr being of type u32 */
437	/* Misordered request */
438	ret = cpu_to_be32(NFS4ERR_SEQ_MISORDERED);
439	if (args->csa_sequenceid != slot->seq_nr + 1)
440		goto out_err;
441
442	return cpu_to_be32(NFS4_OK);
443
444out_err:
445	trace_nfs4_cb_seqid_err(args, ret);
446	return ret;
447}
448
449/*
450 * For each referring call triple, check the session's slot table for
451 * a match.  If the slot is in use and the sequence numbers match, the
452 * client is still waiting for a response to the original request.
453 */
454static int referring_call_exists(struct nfs_client *clp,
455				  uint32_t nrclists,
456				  struct referring_call_list *rclists,
457				  spinlock_t *lock)
458	__releases(lock)
459	__acquires(lock)
460{
461	int status = 0;
462	int found = 0;
463	int i, j;
464	struct nfs4_session *session;
465	struct nfs4_slot_table *tbl;
466	struct referring_call_list *rclist;
467	struct referring_call *ref;
468
469	/*
470	 * XXX When client trunking is implemented, this becomes
471	 * a session lookup from within the loop
472	 */
473	session = clp->cl_session;
474	tbl = &session->fc_slot_table;
475
476	for (i = 0; i < nrclists; i++) {
477		rclist = &rclists[i];
478		if (memcmp(session->sess_id.data,
479			   rclist->rcl_sessionid.data,
480			   NFS4_MAX_SESSIONID_LEN) != 0)
481			continue;
482
483		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
484			ref = &rclist->rcl_refcalls[j];
485			spin_unlock(lock);
486			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
487					ref->rc_sequenceid, HZ >> 1) < 0;
488			spin_lock(lock);
489			if (status)
490				goto out;
491			found++;
492		}
493	}
494
495out:
496	return status < 0 ? status : found;
497}
498
499__be32 nfs4_callback_sequence(void *argp, void *resp,
500			      struct cb_process_state *cps)
501{
502	struct cb_sequenceargs *args = argp;
503	struct cb_sequenceres *res = resp;
504	struct nfs4_slot_table *tbl;
505	struct nfs4_slot *slot;
506	struct nfs_client *clp;
507	int ret;
508	int i;
509	__be32 status = htonl(NFS4ERR_BADSESSION);
510
511	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
512					 &args->csa_sessionid, cps->minorversion);
513	if (clp == NULL)
514		goto out;
515
516	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
517		goto out;
518
519	tbl = &clp->cl_session->bc_slot_table;
520
521	/* Set up res before grabbing the spinlock */
522	memcpy(&res->csr_sessionid, &args->csa_sessionid,
523	       sizeof(res->csr_sessionid));
524	res->csr_sequenceid = args->csa_sequenceid;
525	res->csr_slotid = args->csa_slotid;
526
527	spin_lock(&tbl->slot_tbl_lock);
528	/* state manager is resetting the session */
529	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
530		status = htonl(NFS4ERR_DELAY);
531		/* Return NFS4ERR_BADSESSION if we're draining the session
532		 * in order to reset it.
533		 */
534		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
535			status = htonl(NFS4ERR_BADSESSION);
536		goto out_unlock;
537	}
538
539	status = htonl(NFS4ERR_BADSLOT);
540	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
541	if (IS_ERR(slot))
542		goto out_unlock;
543
544	res->csr_highestslotid = tbl->server_highest_slotid;
545	res->csr_target_highestslotid = tbl->target_highest_slotid;
546
547	status = validate_seqid(tbl, slot, args);
548	if (status)
549		goto out_unlock;
550	if (!nfs4_try_to_lock_slot(tbl, slot)) {
551		status = htonl(NFS4ERR_DELAY);
552		goto out_unlock;
553	}
554	cps->slot = slot;
555
556	/* The ca_maxresponsesize_cached is 0 with no DRC */
557	if (args->csa_cachethis != 0) {
558		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
559		goto out_unlock;
560	}
561
562	/*
563	 * Check for pending referring calls.  If a match is found, a
564	 * related callback was received before the response to the original
565	 * call.
566	 */
567	ret = referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
568				    &tbl->slot_tbl_lock);
569	if (ret < 0) {
570		status = htonl(NFS4ERR_DELAY);
571		goto out_unlock;
572	}
573	cps->referring_calls = ret;
574
575	/*
576	 * RFC5661 20.9.3
577	 * If CB_SEQUENCE returns an error, then the state of the slot
578	 * (sequence ID, cached reply) MUST NOT change.
579	 */
580	slot->seq_nr = args->csa_sequenceid;
581out_unlock:
582	spin_unlock(&tbl->slot_tbl_lock);
583
584out:
585	cps->clp = clp; /* put in nfs4_callback_compound */
586	for (i = 0; i < args->csa_nrclists; i++)
587		kfree(args->csa_rclists[i].rcl_refcalls);
588	kfree(args->csa_rclists);
589
590	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
591		cps->drc_status = status;
592		status = 0;
593	} else
594		res->csr_status = status;
595
596	trace_nfs4_cb_sequence(args, res, status);
597	return status;
598}
599
600static bool
601validate_bitmap_values(unsigned int mask)
602{
603	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
604}
605
606__be32 nfs4_callback_recallany(void *argp, void *resp,
607			       struct cb_process_state *cps)
608{
609	struct cb_recallanyargs *args = argp;
610	__be32 status;
611	fmode_t flags = 0;
612	bool schedule_manager = false;
613
614	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
615	if (!cps->clp) /* set in cb_sequence */
616		goto out;
617
618	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
619		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
620
621	status = cpu_to_be32(NFS4ERR_INVAL);
622	if (!validate_bitmap_values(args->craa_type_mask))
623		goto out;
624
625	status = cpu_to_be32(NFS4_OK);
626	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
627		flags = FMODE_READ;
628	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
629		flags |= FMODE_WRITE;
630	if (flags)
631		nfs_expire_unused_delegation_types(cps->clp, flags);
632
633	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
634		pnfs_recall_all_layouts(cps->clp, cps);
635
636	if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_READ)) {
637		set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_READ, &cps->clp->cl_state);
638		schedule_manager = true;
639	}
640	if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_RW)) {
641		set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_RW, &cps->clp->cl_state);
642		schedule_manager = true;
643	}
644	if (schedule_manager)
645		nfs4_schedule_state_manager(cps->clp);
646
647out:
648	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
649	return status;
650}
651
652/* Reduce the fore channel's max_slots to the target value */
653__be32 nfs4_callback_recallslot(void *argp, void *resp,
654				struct cb_process_state *cps)
655{
656	struct cb_recallslotargs *args = argp;
657	struct nfs4_slot_table *fc_tbl;
658	__be32 status;
659
660	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
661	if (!cps->clp) /* set in cb_sequence */
662		goto out;
663
664	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
665		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
666		args->crsa_target_highest_slotid);
667
668	fc_tbl = &cps->clp->cl_session->fc_slot_table;
669
670	status = htonl(NFS4_OK);
671
672	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
673	nfs41_notify_server(cps->clp);
674out:
675	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
676	return status;
677}
678
679__be32 nfs4_callback_notify_lock(void *argp, void *resp,
680				 struct cb_process_state *cps)
681{
682	struct cb_notify_lock_args *args = argp;
683
684	if (!cps->clp) /* set in cb_sequence */
685		return htonl(NFS4ERR_OP_NOT_IN_SESSION);
686
687	dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
688		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));
689
690	/* Don't wake anybody if the string looked bogus */
691	if (args->cbnl_valid)
692		__wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);
693
694	return htonl(NFS4_OK);
695}
696#endif /* CONFIG_NFS_V4_1 */
697#ifdef CONFIG_NFS_V4_2
698static void nfs4_copy_cb_args(struct nfs4_copy_state *cp_state,
699				struct cb_offloadargs *args)
700{
701	cp_state->count = args->wr_count;
702	cp_state->error = args->error;
703	if (!args->error) {
704		cp_state->verf.committed = args->wr_writeverf.committed;
705		memcpy(&cp_state->verf.verifier.data[0],
706			&args->wr_writeverf.verifier.data[0],
707			NFS4_VERIFIER_SIZE);
708	}
709}
710
711__be32 nfs4_callback_offload(void *data, void *dummy,
712			     struct cb_process_state *cps)
713{
714	struct cb_offloadargs *args = data;
715	struct nfs_server *server;
716	struct nfs4_copy_state *copy, *tmp_copy;
717	bool found = false;
718
719	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL);
720	if (!copy)
721		return htonl(NFS4ERR_SERVERFAULT);
722
723	spin_lock(&cps->clp->cl_lock);
724	rcu_read_lock();
725	list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
726				client_link) {
727		list_for_each_entry(tmp_copy, &server->ss_copies, copies) {
728			if (memcmp(args->coa_stateid.other,
729					tmp_copy->stateid.other,
730					sizeof(args->coa_stateid.other)))
731				continue;
732			nfs4_copy_cb_args(tmp_copy, args);
733			complete(&tmp_copy->completion);
734			found = true;
735			goto out;
736		}
737	}
738out:
739	rcu_read_unlock();
740	if (!found) {
741		memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
742		nfs4_copy_cb_args(copy, args);
743		list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids);
744	} else
745		kfree(copy);
746	spin_unlock(&cps->clp->cl_lock);
747
748	trace_nfs4_cb_offload(&args->coa_fh, &args->coa_stateid,
749			args->wr_count, args->error,
750			args->wr_writeverf.committed);
751	return 0;
752}
753#endif /* CONFIG_NFS_V4_2 */