// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK

__be32 nfs4_callback_getattr(void *argp, void *resp,
			     struct cb_process_state *cps)
{
	struct cb_getattrargs *args = argp;
	struct cb_getattrres *res = resp;
	struct nfs_delegation *delegation;
	struct nfs_inode *nfsi;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
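		/* A transient lookup failure (-EAGAIN) becomes NFS4ERR_DELAY
		 * so the server retries the callback later. */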
		if (inode == ERR_PTR(-EAGAIN))
			res->status = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				-ntohl(res->status));
		goto out;
	}
	nfsi = NFS_I(inode);
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
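	/* Outstanding writebacks mean the file has changed beyond what the
	 * delegation recorded, so report a newer change attribute. */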
	if (nfs_have_writebacks(inode))
		res->change_attr++;
	res->ctime = timespec64_to_timespec(inode->i_ctime);
	res->mtime = timespec64_to_timespec(inode->i_mtime);
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}

__be32 nfs4_callback_recall(void *argp, void *resp,
			    struct cb_process_state *cps)
{
	struct cb_recallargs *args = argp;
	struct inode *inode;
	__be32 res;

	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
				&args->stateid, -ntohl(res));
		goto out;
	}
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
			&args->stateid, -ntohl(res));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout inode by stateid
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
		const nfs4_stateid *stateid)
{
	struct nfs_server *server;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			if (stateid != NULL &&
			    !nfs4_stateid_match_other(stateid, &lo->plh_stateid))
				continue;
			inode = igrab(lo->plh_inode);
			if (!inode)
				return ERR_PTR(-EAGAIN);
			if (!nfs_sb_active(inode->i_sb)) {
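				/* The superblock is going away. iput() may
				 * sleep, so drop the locks around it and
				 * return -EAGAIN. */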
				rcu_read_unlock();
				spin_unlock(&clp->cl_lock);
				iput(inode);
				spin_lock(&clp->cl_lock);
				rcu_read_lock();
				return ERR_PTR(-EAGAIN);
			}
			return inode;
		}
	}

	return ERR_PTR(-ENOENT);
}

/*
 * Lookup a layout inode by filehandle.
 *
 * Note: returns a refcount on the inode and superblock
 *
 */
static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
		const struct nfs_fh *fh)
{
	struct nfs_server *server;
	struct nfs_inode *nfsi;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			nfsi = NFS_I(lo->plh_inode);
			if (nfs_compare_fh(fh, &nfsi->fh))
				continue;
			if (nfsi->layout != lo)
				continue;
			inode = igrab(lo->plh_inode);
			if (!inode)
				return ERR_PTR(-EAGAIN);
			if (!nfs_sb_active(inode->i_sb)) {
				rcu_read_unlock();
				spin_unlock(&clp->cl_lock);
				iput(inode);
				spin_lock(&clp->cl_lock);
				rcu_read_lock();
				return ERR_PTR(-EAGAIN);
			}
			return inode;
		}
	}

	return ERR_PTR(-ENOENT);
}

static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
		const struct nfs_fh *fh,
		const nfs4_stateid *stateid)
{
	struct inode *inode;

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	inode = nfs_layout_find_inode_by_stateid(clp, stateid);
	if (inode == ERR_PTR(-ENOENT))
		inode = nfs_layout_find_inode_by_fh(clp, fh);
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	return inode;
}

/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
					const nfs4_stateid *new)
{
	u32 oldseq, newseq;

	/* Is the stateid not initialised? */
	if (!pnfs_layout_is_valid(lo))
		return NFS4ERR_NOMATCHING_LAYOUT;

	/* Mismatched stateid? */
	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
		return NFS4ERR_BAD_STATEID;

	newseq = be32_to_cpu(new->seqid);
	/* Are we already in a layout recall situation? */
	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
	    lo->plh_return_seq != 0) {
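		/* Only the seqid of the return already in progress is
		 * acceptable: older seqids are stale, newer ones must wait
		 * for that return to complete. */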
		if (newseq < lo->plh_return_seq)
			return NFS4ERR_OLD_STATEID;
		if (newseq > lo->plh_return_seq)
			return NFS4ERR_DELAY;
		goto out;
	}

	/* Check that the stateid matches what we think it should be. */
	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	if (newseq > oldseq + 1)
		return NFS4ERR_DELAY;
	/* Crazy server! */
	if (newseq <= oldseq)
		return NFS4ERR_OLD_STATEID;
out:
	return NFS_OK;
}

static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
	if (IS_ERR(ino)) {
		if (ino == ERR_PTR(-EAGAIN))
			rv = NFS4ERR_DELAY;
		goto out_noput;
	}

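	/* Push out any pending layoutcommit so the MDS has up-to-date
	 * size/change information before the recall is processed. */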
	pnfs_layoutcommit_inode(ino, false);

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		goto out;
	}
	pnfs_get_layout_hdr(lo);
	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid);
	if (rv != NFS_OK)
		goto unlock;

	/*
	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
	 */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
	switch (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
				&args->cbl_range,
				be32_to_cpu(args->cbl_stateid.seqid))) {
	case 0:
	case -EBUSY:
		/* There are layout segments that need to be returned */
		rv = NFS4_OK;
		break;
	case -ENOENT:
		/* Embrace your forgetfulness! */
		rv = NFS4ERR_NOMATCHING_LAYOUT;

		if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
			NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
				&args->cbl_range);
		}
	}
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	/* Free all lsegs that are attached to commit buckets */
	nfs_commit_inode(ino, 0);
	pnfs_put_layout_hdr(lo);
out:
	nfs_iput_and_deactive(ino);
out_noput:
	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
			&args->cbl_stateid, -rv);
	return rv;
}

static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	int stat;

	if (args->cbl_recall_type == RETURN_FSID)
		stat = pnfs_destroy_layouts_byfsid(clp, &args->cbl_fsid, true);
	else
		stat = pnfs_destroy_layouts_byclid(clp, true);
	if (stat != 0)
		return NFS4ERR_DELAY;
	return NFS4ERR_NOMATCHING_LAYOUT;
}

static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args)
{
	if (args->cbl_recall_type == RETURN_FILE)
		return initiate_file_draining(clp, args);
	return initiate_bulk_draining(clp, args);
}

__be32 nfs4_callback_layoutrecall(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_layoutrecallargs *args = argp;
	u32 res = NFS4ERR_OP_NOT_IN_SESSION;

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args);
	return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args);
}

__be32 nfs4_callback_devicenotify(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_devicenotifyargs *args = argp;
	int i;
	__be32 res = 0;
	struct nfs_client *clp = cps->clp;
	struct nfs_server *server = NULL;

	if (!clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

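		/* Reuse the server found for the previous entry when the
		 * layout type matches; otherwise search this client's
		 * superblock list for one that does. */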
		if (!server ||
		    server->pnfs_curr_ld->id != dev->cbd_layout_type) {
			rcu_read_lock();
			list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
				if (server->pnfs_curr_ld &&
				    server->pnfs_curr_ld->id == dev->cbd_layout_type) {
					rcu_read_unlock();
					goto found;
				}
			rcu_read_unlock();
			continue;
		}

	found:
		nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
	}

out:
	kfree(args->devs);
	return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound. Increments the slot's sequence.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table. The lower layer guarantees
 * a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
		const struct cb_sequenceargs * args)
{
	__be32 ret;

	ret = cpu_to_be32(NFS4ERR_BADSLOT);
	if (args->csa_slotid > tbl->server_highest_slotid)
		goto out_err;

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		ret = cpu_to_be32(NFS4ERR_DELAY);
		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
			goto out_err;

		/* Signal process_op to set this error on next op */
		ret = cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP);
		if (args->csa_cachethis == 0)
			goto out_err;

		/* Liar! We never allowed you to set csa_cachethis != 0 */
		ret = cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY);
		goto out_err;
	}

	/* Note: wraparound relies on seq_nr being of type u32 */
	/* Misordered request */
	ret = cpu_to_be32(NFS4ERR_SEQ_MISORDERED);
	if (args->csa_sequenceid != slot->seq_nr + 1)
		goto out_err;

	return cpu_to_be32(NFS4_OK);

out_err:
	trace_nfs4_cb_seqid_err(args, ret);
	return ret;
}

/*
 * For each referring call triple, check the session's slot table for
 * a match. If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static int referring_call_exists(struct nfs_client *clp,
				 uint32_t nrclists,
				 struct referring_call_list *rclists,
				 spinlock_t *lock)
	__releases(lock)
	__acquires(lock)
{
	int status = 0;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];
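			/* nfs4_slot_wait_on_seqid() can sleep, so drop the
			 * caller's lock across the wait. */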
			spin_unlock(lock);
			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
					ref->rc_sequenceid, HZ >> 1) < 0;
			spin_lock(lock);
			if (status)
				goto out;
		}
	}

out:
	return status;
}

__be32 nfs4_callback_sequence(void *argp, void *resp,
			      struct cb_process_state *cps)
{
	struct cb_sequenceargs *args = argp;
	struct cb_sequenceres *res = resp;
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;

	/* Set up res before grabbing the spinlock */
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out_unlock;
	}

	status = htonl(NFS4ERR_BADSLOT);
	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
	if (IS_ERR(slot))
		goto out_unlock;

	res->csr_highestslotid = tbl->server_highest_slotid;
	res->csr_target_highestslotid = tbl->target_highest_slotid;

	status = validate_seqid(tbl, slot, args);
	if (status)
		goto out_unlock;
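	/* If the slot is still locked, a previous callback is in progress;
	 * ask the server to retry. */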
	if (!nfs4_try_to_lock_slot(tbl, slot)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
	cps->slot = slot;

	/* The ca_maxresponsesize_cached is 0 with no DRC */
	if (args->csa_cachethis != 0) {
		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
		goto out_unlock;
	}

	/*
	 * Check for pending referring calls. If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
				&tbl->slot_tbl_lock) < 0) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}

	/*
	 * RFC5661 20.9.3
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.
	 */
	slot->seq_nr = args->csa_sequenceid;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	return status;
}

static bool
validate_bitmap_values(unsigned int mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

__be32 nfs4_callback_recallany(void *argp, void *resp,
			       struct cb_process_state *cps)
{
	struct cb_recallanyargs *args = argp;
	__be32 status;
	fmode_t flags = 0;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
		flags = FMODE_READ;
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
		flags |= FMODE_WRITE;
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);

	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
		pnfs_recall_all_layouts(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(void *argp, void *resp,
				struct cb_process_state *cps)
{
	struct cb_recallslotargs *args = argp;
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_highest_slotid);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4_OK);

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
	nfs41_notify_server(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

__be32 nfs4_callback_notify_lock(void *argp, void *resp,
				 struct cb_process_state *cps)
{
	struct cb_notify_lock_args *args = argp;

	if (!cps->clp) /* set in cb_sequence */
		return htonl(NFS4ERR_OP_NOT_IN_SESSION);

	dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	/* Don't wake anybody if the string looked bogus */
	if (args->cbnl_valid)
		__wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);

	return htonl(NFS4_OK);
}
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
static void nfs4_copy_cb_args(struct nfs4_copy_state *cp_state,
			      struct cb_offloadargs *args)
{
	cp_state->count = args->wr_count;
	cp_state->error = args->error;
	if (!args->error) {
		cp_state->verf.committed = args->wr_writeverf.committed;
		memcpy(&cp_state->verf.verifier.data[0],
		       &args->wr_writeverf.verifier.data[0],
		       NFS4_VERIFIER_SIZE);
	}
}

__be32 nfs4_callback_offload(void *data, void *dummy,
			     struct cb_process_state *cps)
{
	struct cb_offloadargs *args = data;
	struct nfs_server *server;
	struct nfs4_copy_state *copy, *tmp_copy;
	bool found = false;

	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_NOFS);
	if (!copy)
		return htonl(NFS4ERR_SERVERFAULT);

	spin_lock(&cps->clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
				client_link) {
		list_for_each_entry(tmp_copy, &server->ss_copies, copies) {
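			/* Match the notification to a waiting copy by the
			 * stateid "other" field. */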
			if (memcmp(args->coa_stateid.other,
				   tmp_copy->stateid.other,
				   sizeof(args->coa_stateid.other)))
				continue;
			nfs4_copy_cb_args(tmp_copy, args);
			complete(&tmp_copy->completion);
			found = true;
			goto out;
		}
	}
out:
	rcu_read_unlock();
	if (!found) {
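		/* The CB_OFFLOAD arrived before the reply to the matching
		 * COPY; stash the result so the copy can pick it up when its
		 * reply comes in. */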
		memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
		nfs4_copy_cb_args(copy, args);
		list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids);
	} else
		kfree(copy);
	spin_unlock(&cps->clp->cl_lock);

	return 0;
}
#endif /* CONFIG_NFS_V4_2 */