/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"

#ifdef NFS_DEBUG
#define NFSDBG_FACILITY NFSDBG_CALLBACK
#endif

__be32 nfs4_callback_getattr(struct cb_getattrargs *args,
			     struct cb_getattrres *res,
			     struct cb_process_state *cps)
{
	struct nfs_delegation *delegation;
	struct nfs_inode *nfsi;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	res->bitmap[0] = res->bitmap[1] = 0;
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (inode == NULL)
		goto out;
	nfsi = NFS_I(inode);
	rcu_read_lock();
	delegation = rcu_dereference(nfsi->delegation);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
	if (nfsi->npages != 0)
		res->change_attr++;
	res->ctime = inode->i_ctime;
	res->mtime = inode->i_mtime;
	res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
		args->bitmap[1];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}

__be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy,
			    struct cb_process_state *cps)
{
	struct inode *inode;
	__be32 res;

	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (inode == NULL)
		goto out;
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		if (res != 0)
			res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	iput(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

int nfs4_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid)
{
	if (delegation == NULL || memcmp(delegation->stateid.data, stateid->data,
					 sizeof(delegation->stateid.data)) != 0)
		return 0;
	return 1;
}

#if defined(CONFIG_NFS_V4_1)

static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct nfs_server *server;
	struct pnfs_layout_hdr *lo;
	struct inode *ino;
	bool found = false;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			if (nfs_compare_fh(&args->cbl_fh,
					   &NFS_I(lo->plh_inode)->fh))
				continue;
			ino = igrab(lo->plh_inode);
			if (!ino)
				continue;
			found = true;
			/* Without this, layout can be freed as soon
			 * as we release cl_lock.
			 */
			get_layout_hdr(lo);
			break;
		}
		if (found)
			break;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (!found)
		return NFS4ERR_NOMATCHING_LAYOUT;

	spin_lock(&ino->i_lock);
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
	    mark_matching_lsegs_invalid(lo, &free_me_list,
					&args->cbl_range))
		rv = NFS4ERR_DELAY;
	else
		rv = NFS4ERR_NOMATCHING_LAYOUT;
	pnfs_set_layout_stateid(lo, &args->cbl_stateid, true);
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	put_layout_hdr(lo);
	iput(ino);
	return rv;
}

static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	struct nfs_server *server;
	struct pnfs_layout_hdr *lo;
	struct inode *ino;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	struct pnfs_layout_hdr *tmp;
	LIST_HEAD(recall_list);
	LIST_HEAD(free_me_list);
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if ((args->cbl_recall_type == RETURN_FSID) &&
		    memcmp(&server->fsid, &args->cbl_fsid,
			   sizeof(struct nfs_fsid)))
			continue;

		list_for_each_entry(lo, &server->layouts, plh_layouts) {
			if (!igrab(lo->plh_inode))
				continue;
			get_layout_hdr(lo);
			BUG_ON(!list_empty(&lo->plh_bulk_recall));
			list_add(&lo->plh_bulk_recall, &recall_list);
		}
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	list_for_each_entry_safe(lo, tmp,
				 &recall_list, plh_bulk_recall) {
		ino = lo->plh_inode;
		spin_lock(&ino->i_lock);
		set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
		if (mark_matching_lsegs_invalid(lo, &free_me_list, &range))
			rv = NFS4ERR_DELAY;
		list_del_init(&lo->plh_bulk_recall);
		spin_unlock(&ino->i_lock);
		pnfs_free_lseg_list(&free_me_list);
		put_layout_hdr(lo);
		iput(ino);
	}
	return rv;
}

static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args)
{
	u32 res = NFS4ERR_DELAY;

	dprintk("%s enter, type=%i\n", __func__, args->cbl_recall_type);
	if (test_and_set_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state))
		goto out;
	if (args->cbl_recall_type == RETURN_FILE)
		res = initiate_file_draining(clp, args);
	else
		res = initiate_bulk_draining(clp, args);
	clear_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state);
out:
	dprintk("%s returning %i\n", __func__, res);
	return res;
}

__be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args,
				  void *dummy, struct cb_process_state *cps)
{
	u32 res;

	dprintk("%s: -->\n", __func__);

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args);
	else
		res = NFS4ERR_OP_NOT_IN_SESSION;

	dprintk("%s: exit with status = %d\n", __func__, res);
	return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args);
}

__be32 nfs4_callback_devicenotify(struct cb_devicenotifyargs *args,
				  void *dummy, struct cb_process_state *cps)
{
	int i;
	__be32 res = 0;
	struct nfs_client *clp = cps->clp;
	struct nfs_server *server = NULL;

	dprintk("%s: -->\n", __func__);

	if (!clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		if (!server ||
		    server->pnfs_curr_ld->id != dev->cbd_layout_type) {
			rcu_read_lock();
			list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
				if (server->pnfs_curr_ld &&
				    server->pnfs_curr_ld->id == dev->cbd_layout_type) {
					rcu_read_unlock();
					goto found;
				}
			rcu_read_unlock();
			dprintk("%s: layout type %u not found\n",
				__func__, dev->cbd_layout_type);
			continue;
		}

	found:
		if (dev->cbd_notify_type == NOTIFY_DEVICEID4_CHANGE)
			dprintk("%s: NOTIFY_DEVICEID4_CHANGE not supported, "
				"deleting instead\n", __func__);
		nfs4_delete_deviceid(server->pnfs_curr_ld, clp, &dev->cbd_dev_id);
	}

out:
	kfree(args->devs);
	dprintk("%s: exit with status = %u\n",
		__func__, be32_to_cpu(res));
	return res;
}

int nfs41_validate_delegation_stateid(struct nfs_delegation *delegation, const nfs4_stateid *stateid)
{
	if (delegation == NULL)
		return 0;

	if (stateid->stateid.seqid != 0)
		return 0;
	if (memcmp(&delegation->stateid.stateid.other,
		   &stateid->stateid.other,
		   NFS4_STATEID_OTHER_SIZE))
		return 0;

	return 1;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound. Increments the slot's sequence.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table. The lower layer guarantees
 * a single outstanding callback request at a time.
 */
static __be32
validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs *args)
{
	struct nfs4_slot *slot;

	dprintk("%s enter. slotid %d seqid %d\n",
		__func__, args->csa_slotid, args->csa_sequenceid);
	if (args->csa_slotid >= NFS41_BC_MAX_CALLBACKS)
		return htonl(NFS4ERR_BADSLOT);

	slot = tbl->slots + args->csa_slotid;
	dprintk("%s slot table seqid: %d\n", __func__, slot->seq_nr);

	/* Normal */
	if (likely(args->csa_sequenceid == slot->seq_nr + 1)) {
		slot->seq_nr++;
		goto out_ok;
	}

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		dprintk("%s seqid %d is a replay\n",
			__func__, args->csa_sequenceid);
		/* Signal process_op to set this error on next op */
		if (args->csa_cachethis == 0)
			return htonl(NFS4ERR_RETRY_UNCACHED_REP);

		/* The ca_maxresponsesize_cached is 0 with no DRC */
		else if (args->csa_cachethis == 1)
			return htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
	}

	/* Wraparound */
	if (args->csa_sequenceid == 1 && (slot->seq_nr + 1) == 0) {
		slot->seq_nr = 1;
		goto out_ok;
	}

	/* Misordered request */
	return htonl(NFS4ERR_SEQ_MISORDERED);
out_ok:
	tbl->highest_used_slotid = args->csa_slotid;
	return htonl(NFS4_OK);
}
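
/*
 * Illustrative walk-through (editor's note, not part of the original
 * source), assuming slot->seq_nr == 5 on entry to validate_seqid():
 *
 *   csa_sequenceid == 6 -> normal case: seq_nr is bumped to 6, NFS4_OK
 *   csa_sequenceid == 5 -> replay: NFS4ERR_RETRY_UNCACHED_REP if the
 *                          server did not ask for caching, otherwise
 *                          NFS4ERR_REP_TOO_BIG_TO_CACHE (we advertise a
 *                          zero-sized reply cache)
 *   csa_sequenceid == 1 with slot->seq_nr == 0xffffffff
 *                       -> wraparound: seq_nr resets to 1, NFS4_OK
 *   anything else       -> NFS4ERR_SEQ_MISORDERED
 */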

/*
 * For each referring call triple, check the session's slot table for
 * a match. If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static bool referring_call_exists(struct nfs_client *clp,
				  uint32_t nrclists,
				  struct referring_call_list *rclists)
{
	bool status = false;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];

			dprintk("%s: sessionid %x:%x:%x:%x sequenceid %u "
				"slotid %u\n", __func__,
				((u32 *)&rclist->rcl_sessionid.data)[0],
				((u32 *)&rclist->rcl_sessionid.data)[1],
				((u32 *)&rclist->rcl_sessionid.data)[2],
				((u32 *)&rclist->rcl_sessionid.data)[3],
				ref->rc_sequenceid, ref->rc_slotid);

			spin_lock(&tbl->slot_tbl_lock);
			status = (test_bit(ref->rc_slotid, tbl->used_slots) &&
				  tbl->slots[ref->rc_slotid].seq_nr ==
					ref->rc_sequenceid);
			spin_unlock(&tbl->slot_tbl_lock);
			if (status)
				goto out;
		}
	}

out:
	return status;
}

__be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
			      struct cb_sequenceres *res,
			      struct cb_process_state *cps)
{
	struct nfs4_slot_table *tbl;
	struct nfs_client *clp;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(args->csa_addr, &args->csa_sessionid);
	if (clp == NULL)
		goto out;

	tbl = &clp->cl_session->bc_slot_table;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) {
		spin_unlock(&tbl->slot_tbl_lock);
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out;
	}

	status = validate_seqid(tbl, args);
	spin_unlock(&tbl->slot_tbl_lock);
	if (status)
		goto out;

	cps->slotid = args->csa_slotid;

	/*
	 * Check for pending referring calls. If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) {
		status = htonl(NFS4ERR_DELAY);
		goto out;
	}

	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;
	res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;
	res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1;

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	dprintk("%s: exit with status = %d res->csr_status %d\n", __func__,
		ntohl(status), ntohl(res->csr_status));
	return status;
}

static bool
validate_bitmap_values(unsigned long mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}

__be32 nfs4_callback_recallany(struct cb_recallanyargs *args, void *dummy,
			       struct cb_process_state *cps)
{
	__be32 status;
	fmode_t flags = 0;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	if (test_bit(RCA4_TYPE_MASK_RDATA_DLG, (const unsigned long *)
		     &args->craa_type_mask))
		flags = FMODE_READ;
	if (test_bit(RCA4_TYPE_MASK_WDATA_DLG, (const unsigned long *)
		     &args->craa_type_mask))
		flags |= FMODE_WRITE;
	if (test_bit(RCA4_TYPE_MASK_FILE_LAYOUT, (const unsigned long *)
		     &args->craa_type_mask))
		pnfs_recall_all_layouts(cps->clp);
	if (flags)
		nfs_expire_all_delegation_types(cps->clp, flags);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(struct cb_recallslotargs *args, void *dummy,
				struct cb_process_state *cps)
{
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk("NFS: CB_RECALL_SLOT request from %s target max slots %d\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_max_slots);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4ERR_BAD_HIGH_SLOT);
	if (args->crsa_target_max_slots > fc_tbl->max_slots ||
	    args->crsa_target_max_slots < 1)
		goto out;

	status = htonl(NFS4_OK);
	if (args->crsa_target_max_slots == fc_tbl->max_slots)
		goto out;

	fc_tbl->target_max_slots = args->crsa_target_max_slots;
	nfs41_handle_recall_slot(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}
#endif /* CONFIG_NFS_V4_1 */
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/nfs/callback_proc.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback procedures
 */

#include <linux/errno.h>
#include <linux/math.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/types.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"
#include "pnfs.h"
#include "nfs4session.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK

__be32 nfs4_callback_getattr(void *argp, void *resp,
			     struct cb_process_state *cps)
{
	struct cb_getattrargs *args = argp;
	struct cb_getattrres *res = resp;
	struct nfs_delegation *delegation;
	struct inode *inode;

	res->status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	memset(res->bitmap, 0, sizeof(res->bitmap));
	res->status = htonl(NFS4ERR_BADHANDLE);

	dprintk_rcu("NFS: GETATTR callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res->status = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_getattr(cps->clp, &args->fh, NULL,
				      -ntohl(res->status));
		goto out;
	}
	rcu_read_lock();
	delegation = nfs4_get_valid_delegation(inode);
	if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
		goto out_iput;
	res->size = i_size_read(inode);
	res->change_attr = delegation->change_attr;
	if (nfs_have_writebacks(inode))
		res->change_attr++;
	res->atime = inode_get_atime(inode);
	res->ctime = inode_get_ctime(inode);
	res->mtime = inode_get_mtime(inode);
	res->bitmap[0] = (FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE) &
		args->bitmap[0];
	res->bitmap[1] = (FATTR4_WORD1_TIME_ACCESS |
			  FATTR4_WORD1_TIME_METADATA |
			  FATTR4_WORD1_TIME_MODIFY) & args->bitmap[1];
	res->bitmap[2] = (FATTR4_WORD2_TIME_DELEG_ACCESS |
			  FATTR4_WORD2_TIME_DELEG_MODIFY) & args->bitmap[2];
	res->status = 0;
out_iput:
	rcu_read_unlock();
	trace_nfs4_cb_getattr(cps->clp, &args->fh, inode, -ntohl(res->status));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
	return res->status;
}

__be32 nfs4_callback_recall(void *argp, void *resp,
			    struct cb_process_state *cps)
{
	struct cb_recallargs *args = argp;
	struct inode *inode;
	__be32 res;

	res = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* Always set for v4.0. Set in cb_sequence for v4.1 */
		goto out;

	dprintk_rcu("NFS: RECALL callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	res = htonl(NFS4ERR_BADHANDLE);
	inode = nfs_delegation_find_inode(cps->clp, &args->fh);
	if (IS_ERR(inode)) {
		if (inode == ERR_PTR(-EAGAIN))
			res = htonl(NFS4ERR_DELAY);
		trace_nfs4_cb_recall(cps->clp, &args->fh, NULL,
				     &args->stateid, -ntohl(res));
		goto out;
	}
	/* Set up a helper thread to actually return the delegation */
	switch (nfs_async_inode_return_delegation(inode, &args->stateid)) {
	case 0:
		res = 0;
		break;
	case -ENOENT:
		res = htonl(NFS4ERR_BAD_STATEID);
		break;
	default:
		res = htonl(NFS4ERR_RESOURCE);
	}
	trace_nfs4_cb_recall(cps->clp, &args->fh, inode,
			     &args->stateid, -ntohl(res));
	nfs_iput_and_deactive(inode);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
	return res;
}

#if defined(CONFIG_NFS_V4_1)

/*
 * Lookup a layout inode by stateid
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_stateid(struct nfs_client *clp,
		const nfs4_stateid *stateid)
	__must_hold(RCU)
{
	struct nfs_server *server;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
			if (!pnfs_layout_is_valid(lo))
				continue;
			if (!nfs4_stateid_match_other(stateid, &lo->plh_stateid))
				continue;
			if (nfs_sb_active(server->super))
				inode = igrab(lo->plh_inode);
			else
				inode = ERR_PTR(-EAGAIN);
			rcu_read_unlock();
			if (inode)
				return inode;
			nfs_sb_deactive(server->super);
			return ERR_PTR(-EAGAIN);
		}
	}
	rcu_read_unlock();
	return ERR_PTR(-ENOENT);
}

/*
 * Lookup a layout inode by filehandle.
 *
 * Note: returns a refcount on the inode and superblock
 */
static struct inode *nfs_layout_find_inode_by_fh(struct nfs_client *clp,
		const struct nfs_fh *fh)
{
	struct nfs_server *server;
	struct nfs_inode *nfsi;
	struct inode *inode;
	struct pnfs_layout_hdr *lo;

	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		list_for_each_entry_rcu(lo, &server->layouts, plh_layouts) {
			nfsi = NFS_I(lo->plh_inode);
			if (nfs_compare_fh(fh, &nfsi->fh))
				continue;
			if (nfsi->layout != lo)
				continue;
			if (nfs_sb_active(server->super))
				inode = igrab(lo->plh_inode);
			else
				inode = ERR_PTR(-EAGAIN);
			rcu_read_unlock();
			if (inode)
				return inode;
			nfs_sb_deactive(server->super);
			return ERR_PTR(-EAGAIN);
		}
	}
	rcu_read_unlock();
	return ERR_PTR(-ENOENT);
}

static struct inode *nfs_layout_find_inode(struct nfs_client *clp,
					   const struct nfs_fh *fh,
					   const nfs4_stateid *stateid)
{
	struct inode *inode;

	inode = nfs_layout_find_inode_by_stateid(clp, stateid);
	if (inode == ERR_PTR(-ENOENT))
		inode = nfs_layout_find_inode_by_fh(clp, fh);
	return inode;
}
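
/*
 * Editor's illustrative sketch (not part of the original file): the
 * expected calling pattern for nfs_layout_find_inode(). On success the
 * helper returns references on both the inode and its superblock, which
 * the caller drops with nfs_iput_and_deactive(), exactly as
 * initiate_file_draining() does below. The error pointers map naturally
 * onto NFSv4.1 callback status codes.
 */
static u32 __maybe_unused example_layout_lookup(struct nfs_client *clp,
						const struct nfs_fh *fh,
						const nfs4_stateid *stateid)
{
	struct inode *inode = nfs_layout_find_inode(clp, fh, stateid);

	if (IS_ERR(inode))
		return inode == ERR_PTR(-EAGAIN) ?
			NFS4ERR_DELAY : NFS4ERR_NOMATCHING_LAYOUT;
	/* ... inspect NFS_I(inode)->layout under inode->i_lock ... */
	nfs_iput_and_deactive(inode);	/* releases inode + superblock refs */
	return NFS4_OK;
}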

/*
 * Enforce RFC5661 section 12.5.5.2.1. (Layout Recall and Return Sequencing)
 */
static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
				       const nfs4_stateid *new,
				       struct cb_process_state *cps)
{
	u32 oldseq, newseq;

	/* Is the stateid not initialised? */
	if (!pnfs_layout_is_valid(lo))
		return NFS4ERR_NOMATCHING_LAYOUT;

	/* Mismatched stateid? */
	if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
		return NFS4ERR_BAD_STATEID;

	newseq = be32_to_cpu(new->seqid);
	/* Are we already in a layout recall situation? */
	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
		return NFS4ERR_DELAY;

	/*
	 * Check that the stateid matches what we think it should be.
	 * Note that if the server sent us a list of referring calls,
	 * and we know that those have completed, then we trust the
	 * stateid argument is correct.
	 */
	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	if (newseq > oldseq + 1 && !cps->referring_calls)
		return NFS4ERR_DELAY;

	/* Crazy server! */
	if (newseq <= oldseq)
		return NFS4ERR_OLD_STATEID;

	return NFS_OK;
}
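
/*
 * Worked example (editor's note, not from the original source): if
 * lo->plh_stateid.seqid currently decodes to 5, a CB_LAYOUTRECALL
 * carrying seqid 6 is accepted (NFS_OK); seqid 8 draws NFS4ERR_DELAY
 * unless cps->referring_calls shows the intervening replies were
 * already processed; seqid 5 or below draws NFS4ERR_OLD_STATEID.
 */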

static u32 initiate_file_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args,
				  struct cb_process_state *cps)
{
	struct inode *ino;
	struct pnfs_layout_hdr *lo;
	u32 rv = NFS4ERR_NOMATCHING_LAYOUT;
	LIST_HEAD(free_me_list);

	ino = nfs_layout_find_inode(clp, &args->cbl_fh, &args->cbl_stateid);
	if (IS_ERR(ino)) {
		if (ino == ERR_PTR(-EAGAIN))
			rv = NFS4ERR_DELAY;
		goto out_noput;
	}

	pnfs_layoutcommit_inode(ino, false);

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		goto out;
	}
	pnfs_get_layout_hdr(lo);
	rv = pnfs_check_callback_stateid(lo, &args->cbl_stateid, cps);
	if (rv != NFS_OK)
		goto unlock;

	/*
	 * Enforce RFC5661 Section 12.5.5.2.1.5 (Bulk Recall and Return)
	 */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		rv = NFS4ERR_DELAY;
		goto unlock;
	}

	pnfs_set_layout_stateid(lo, &args->cbl_stateid, NULL, true);
	switch (pnfs_mark_matching_lsegs_return(lo, &free_me_list,
				&args->cbl_range,
				be32_to_cpu(args->cbl_stateid.seqid))) {
	case 0:
	case -EBUSY:
		/* There are layout segments that need to be returned */
		rv = NFS4_OK;
		break;
	case -ENOENT:
		set_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags);
		/* Embrace your forgetfulness! */
		rv = NFS4ERR_NOMATCHING_LAYOUT;

		if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
			NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo,
				&args->cbl_range);
		}
	}
unlock:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me_list);
	/* Free all lsegs that are attached to commit buckets */
	nfs_commit_inode(ino, 0);
	pnfs_put_layout_hdr(lo);
out:
	nfs_iput_and_deactive(ino);
out_noput:
	trace_nfs4_cb_layoutrecall_file(clp, &args->cbl_fh, ino,
					&args->cbl_stateid, -rv);
	return rv;
}

static u32 initiate_bulk_draining(struct nfs_client *clp,
				  struct cb_layoutrecallargs *args)
{
	int stat;

	if (args->cbl_recall_type == RETURN_FSID)
		stat = pnfs_layout_destroy_byfsid(clp, &args->cbl_fsid,
						  PNFS_LAYOUT_BULK_RETURN);
	else
		stat = pnfs_layout_destroy_byclid(clp, PNFS_LAYOUT_BULK_RETURN);
	if (stat != 0)
		return NFS4ERR_DELAY;
	return NFS4ERR_NOMATCHING_LAYOUT;
}

static u32 do_callback_layoutrecall(struct nfs_client *clp,
				    struct cb_layoutrecallargs *args,
				    struct cb_process_state *cps)
{
	if (args->cbl_recall_type == RETURN_FILE)
		return initiate_file_draining(clp, args, cps);
	return initiate_bulk_draining(clp, args);
}

__be32 nfs4_callback_layoutrecall(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_layoutrecallargs *args = argp;
	u32 res = NFS4ERR_OP_NOT_IN_SESSION;

	if (cps->clp)
		res = do_callback_layoutrecall(cps->clp, args, cps);
	return cpu_to_be32(res);
}

static void pnfs_recall_all_layouts(struct nfs_client *clp,
				    struct cb_process_state *cps)
{
	struct cb_layoutrecallargs args;

	/* Pretend we got a CB_LAYOUTRECALL(ALL) */
	memset(&args, 0, sizeof(args));
	args.cbl_recall_type = RETURN_ALL;
	/* FIXME we ignore errors, what should we do? */
	do_callback_layoutrecall(clp, &args, cps);
}

__be32 nfs4_callback_devicenotify(void *argp, void *resp,
				  struct cb_process_state *cps)
{
	struct cb_devicenotifyargs *args = argp;
	const struct pnfs_layoutdriver_type *ld = NULL;
	uint32_t i;
	__be32 res = 0;

	if (!cps->clp) {
		res = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
		goto out;
	}

	for (i = 0; i < args->ndevs; i++) {
		struct cb_devicenotifyitem *dev = &args->devs[i];

		if (!ld || ld->id != dev->cbd_layout_type) {
			pnfs_put_layoutdriver(ld);
			ld = pnfs_find_layoutdriver(dev->cbd_layout_type);
			if (!ld)
				continue;
		}
		nfs4_delete_deviceid(ld, cps->clp, &dev->cbd_dev_id);
	}
	pnfs_put_layoutdriver(ld);
out:
	kfree(args->devs);
	return res;
}

/*
 * Validate the sequenceID sent by the server.
 * Return success if the sequenceID is one more than what we last saw on
 * this slot, accounting for wraparound. Increments the slot's sequence.
 *
 * We don't yet implement a duplicate request cache, instead we set the
 * back channel ca_maxresponsesize_cached to zero. This is OK for now
 * since we only currently implement idempotent callbacks anyway.
 *
 * We have a single slot backchannel at this time, so we don't bother
 * checking the used_slots bit array on the table. The lower layer guarantees
 * a single outstanding callback request at a time.
 */
static __be32
validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
	       const struct cb_sequenceargs *args)
{
	__be32 ret;

	ret = cpu_to_be32(NFS4ERR_BADSLOT);
	if (args->csa_slotid > tbl->server_highest_slotid)
		goto out_err;

	/* Replay */
	if (args->csa_sequenceid == slot->seq_nr) {
		ret = cpu_to_be32(NFS4ERR_DELAY);
		if (nfs4_test_locked_slot(tbl, slot->slot_nr))
			goto out_err;

		/* Signal process_op to set this error on next op */
		ret = cpu_to_be32(NFS4ERR_RETRY_UNCACHED_REP);
		if (args->csa_cachethis == 0)
			goto out_err;

		/* Liar! We never allowed you to set csa_cachethis != 0 */
		ret = cpu_to_be32(NFS4ERR_SEQ_FALSE_RETRY);
		goto out_err;
	}

	/* Note: wraparound relies on seq_nr being of type u32 */
	/* Misordered request */
	ret = cpu_to_be32(NFS4ERR_SEQ_MISORDERED);
	if (args->csa_sequenceid != slot->seq_nr + 1)
		goto out_err;

	return cpu_to_be32(NFS4_OK);

out_err:
	trace_nfs4_cb_seqid_err(args, ret);
	return ret;
}
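
/*
 * Illustrative walk-through (editor's note, not part of the original
 * source), assuming slot->seq_nr == 5 on entry to validate_seqid():
 *
 *   csa_slotid beyond tbl->server_highest_slotid -> NFS4ERR_BADSLOT
 *   csa_sequenceid == 5 -> replay: NFS4ERR_DELAY while the slot is
 *                          still locked, NFS4ERR_RETRY_UNCACHED_REP if
 *                          the server did not ask for caching, else
 *                          NFS4ERR_SEQ_FALSE_RETRY (we never allow
 *                          csa_cachethis)
 *   csa_sequenceid == 6 -> NFS4_OK; the caller commits slot->seq_nr
 *                          only once the whole CB_SEQUENCE succeeds,
 *                          per RFC5661 section 20.9.3
 *   anything else       -> NFS4ERR_SEQ_MISORDERED; the u32 addition in
 *                          slot->seq_nr + 1 makes wraparound work
 */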

/*
 * For each referring call triple, check the session's slot table for
 * a match. If the slot is in use and the sequence numbers match, the
 * client is still waiting for a response to the original request.
 */
static int referring_call_exists(struct nfs_client *clp,
				 uint32_t nrclists,
				 struct referring_call_list *rclists,
				 spinlock_t *lock)
	__releases(lock)
	__acquires(lock)
{
	int status = 0;
	int found = 0;
	int i, j;
	struct nfs4_session *session;
	struct nfs4_slot_table *tbl;
	struct referring_call_list *rclist;
	struct referring_call *ref;

	/*
	 * XXX When client trunking is implemented, this becomes
	 * a session lookup from within the loop
	 */
	session = clp->cl_session;
	tbl = &session->fc_slot_table;

	for (i = 0; i < nrclists; i++) {
		rclist = &rclists[i];
		if (memcmp(session->sess_id.data,
			   rclist->rcl_sessionid.data,
			   NFS4_MAX_SESSIONID_LEN) != 0)
			continue;

		for (j = 0; j < rclist->rcl_nrefcalls; j++) {
			ref = &rclist->rcl_refcalls[j];
			spin_unlock(lock);
			status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid,
					ref->rc_sequenceid, HZ >> 1) < 0;
			spin_lock(lock);
			if (status)
				goto out;
			found++;
		}
	}

out:
	return status < 0 ? status : found;
}

__be32 nfs4_callback_sequence(void *argp, void *resp,
			      struct cb_process_state *cps)
{
	struct cb_sequenceargs *args = argp;
	struct cb_sequenceres *res = resp;
	struct nfs4_slot_table *tbl;
	struct nfs4_slot *slot;
	struct nfs_client *clp;
	int ret;
	int i;
	__be32 status = htonl(NFS4ERR_BADSESSION);

	clp = nfs4_find_client_sessionid(cps->net, args->csa_addr,
					 &args->csa_sessionid, cps->minorversion);
	if (clp == NULL)
		goto out;

	if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
		goto out;

	tbl = &clp->cl_session->bc_slot_table;

	/* Set up res before grabbing the spinlock */
	memcpy(&res->csr_sessionid, &args->csa_sessionid,
	       sizeof(res->csr_sessionid));
	res->csr_sequenceid = args->csa_sequenceid;
	res->csr_slotid = args->csa_slotid;

	spin_lock(&tbl->slot_tbl_lock);
	/* state manager is resetting the session */
	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
		status = htonl(NFS4ERR_DELAY);
		/* Return NFS4ERR_BADSESSION if we're draining the session
		 * in order to reset it.
		 */
		if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state))
			status = htonl(NFS4ERR_BADSESSION);
		goto out_unlock;
	}

	status = htonl(NFS4ERR_BADSLOT);
	slot = nfs4_lookup_slot(tbl, args->csa_slotid);
	if (IS_ERR(slot))
		goto out_unlock;

	res->csr_highestslotid = tbl->server_highest_slotid;
	res->csr_target_highestslotid = tbl->target_highest_slotid;

	status = validate_seqid(tbl, slot, args);
	if (status)
		goto out_unlock;
	if (!nfs4_try_to_lock_slot(tbl, slot)) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
	cps->slot = slot;

	/* The ca_maxresponsesize_cached is 0 with no DRC */
	if (args->csa_cachethis != 0) {
		status = htonl(NFS4ERR_REP_TOO_BIG_TO_CACHE);
		goto out_unlock;
	}

	/*
	 * Check for pending referring calls. If a match is found, a
	 * related callback was received before the response to the original
	 * call.
	 */
	ret = referring_call_exists(clp, args->csa_nrclists, args->csa_rclists,
				    &tbl->slot_tbl_lock);
	if (ret < 0) {
		status = htonl(NFS4ERR_DELAY);
		goto out_unlock;
	}
	cps->referring_calls = ret;

	/*
	 * RFC5661 20.9.3
	 * If CB_SEQUENCE returns an error, then the state of the slot
	 * (sequence ID, cached reply) MUST NOT change.
	 */
	slot->seq_nr = args->csa_sequenceid;
out_unlock:
	spin_unlock(&tbl->slot_tbl_lock);

out:
	cps->clp = clp; /* put in nfs4_callback_compound */
	for (i = 0; i < args->csa_nrclists; i++)
		kfree(args->csa_rclists[i].rcl_refcalls);
	kfree(args->csa_rclists);

	if (status == htonl(NFS4ERR_RETRY_UNCACHED_REP)) {
		cps->drc_status = status;
		status = 0;
	} else
		res->csr_status = status;

	trace_nfs4_cb_sequence(args, res, status);
	return status;
}

static bool
validate_bitmap_values(unsigned int mask)
{
	return (mask & ~RCA4_TYPE_MASK_ALL) == 0;
}
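
/*
 * Editor's example (illustrative, assuming the usual RCA4_TYPE_MASK_*
 * bit definitions): a mask of
 * BIT(RCA4_TYPE_MASK_RDATA_DLG) | BIT(RCA4_TYPE_MASK_FILE_LAYOUT)
 * passes this check, while any bit outside RCA4_TYPE_MASK_ALL makes
 * nfs4_callback_recallany() reject the request with NFS4ERR_INVAL.
 */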

__be32 nfs4_callback_recallany(void *argp, void *resp,
			       struct cb_process_state *cps)
{
	struct cb_recallanyargs *args = argp;
	__be32 status;
	fmode_t flags = 0;
	bool schedule_manager = false;

	status = cpu_to_be32(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: RECALL_ANY callback request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	status = cpu_to_be32(NFS4ERR_INVAL);
	if (!validate_bitmap_values(args->craa_type_mask))
		goto out;

	status = cpu_to_be32(NFS4_OK);
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_RDATA_DLG))
		flags = FMODE_READ;
	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_WDATA_DLG))
		flags |= FMODE_WRITE;
	if (flags)
		nfs_expire_unused_delegation_types(cps->clp, flags);

	if (args->craa_type_mask & BIT(RCA4_TYPE_MASK_FILE_LAYOUT))
		pnfs_recall_all_layouts(cps->clp, cps);

	if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_READ)) {
		set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_READ, &cps->clp->cl_state);
		schedule_manager = true;
	}
	if (args->craa_type_mask & BIT(PNFS_FF_RCA4_TYPE_MASK_RW)) {
		set_bit(NFS4CLNT_RECALL_ANY_LAYOUT_RW, &cps->clp->cl_state);
		schedule_manager = true;
	}
	if (schedule_manager)
		nfs4_schedule_state_manager(cps->clp);

out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

/* Reduce the fore channel's max_slots to the target value */
__be32 nfs4_callback_recallslot(void *argp, void *resp,
				struct cb_process_state *cps)
{
	struct cb_recallslotargs *args = argp;
	struct nfs4_slot_table *fc_tbl;
	__be32 status;

	status = htonl(NFS4ERR_OP_NOT_IN_SESSION);
	if (!cps->clp) /* set in cb_sequence */
		goto out;

	dprintk_rcu("NFS: CB_RECALL_SLOT request from %s target highest slotid %u\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR),
		args->crsa_target_highest_slotid);

	fc_tbl = &cps->clp->cl_session->fc_slot_table;

	status = htonl(NFS4_OK);

	nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid);
	nfs41_notify_server(cps->clp);
out:
	dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
	return status;
}

__be32 nfs4_callback_notify_lock(void *argp, void *resp,
				 struct cb_process_state *cps)
{
	struct cb_notify_lock_args *args = argp;

	if (!cps->clp) /* set in cb_sequence */
		return htonl(NFS4ERR_OP_NOT_IN_SESSION);

	dprintk_rcu("NFS: CB_NOTIFY_LOCK request from %s\n",
		rpc_peeraddr2str(cps->clp->cl_rpcclient, RPC_DISPLAY_ADDR));

	/* Don't wake anybody if the string looked bogus */
	if (args->cbnl_valid)
		__wake_up(&cps->clp->cl_lock_waitq, TASK_NORMAL, 0, args);

	return htonl(NFS4_OK);
}
#endif /* CONFIG_NFS_V4_1 */
#ifdef CONFIG_NFS_V4_2
static void nfs4_copy_cb_args(struct nfs4_copy_state *cp_state,
			      struct cb_offloadargs *args)
{
	cp_state->count = args->wr_count;
	cp_state->error = args->error;
	if (!args->error) {
		cp_state->verf.committed = args->wr_writeverf.committed;
		memcpy(&cp_state->verf.verifier.data[0],
		       &args->wr_writeverf.verifier.data[0],
		       NFS4_VERIFIER_SIZE);
	}
}

__be32 nfs4_callback_offload(void *data, void *dummy,
			     struct cb_process_state *cps)
{
	struct cb_offloadargs *args = data;
	struct nfs_server *server;
	struct nfs4_copy_state *copy, *tmp_copy;
	bool found = false;

	copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL);
	if (!copy)
		return htonl(NFS4ERR_SERVERFAULT);

	spin_lock(&cps->clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &cps->clp->cl_superblocks,
				client_link) {
		list_for_each_entry(tmp_copy, &server->ss_copies, copies) {
			if (memcmp(args->coa_stateid.other,
				   tmp_copy->stateid.other,
				   sizeof(args->coa_stateid.other)))
				continue;
			nfs4_copy_cb_args(tmp_copy, args);
			complete(&tmp_copy->completion);
			found = true;
			goto out;
		}
	}
out:
	rcu_read_unlock();
	if (!found) {
		memcpy(&copy->stateid, &args->coa_stateid, NFS4_STATEID_SIZE);
		nfs4_copy_cb_args(copy, args);
		list_add_tail(&copy->copies, &cps->clp->pending_cb_stateids);
	} else
		kfree(copy);
	spin_unlock(&cps->clp->cl_lock);

	trace_nfs4_cb_offload(&args->coa_fh, &args->coa_stateid,
			      args->wr_count, args->error,
			      args->wr_writeverf.committed);
	return 0;
}
#endif /* CONFIG_NFS_V4_2 */