// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/nfs/nfs4session.c
 *
 * Copyright (c) 2012 Trond Myklebust <Trond.Myklebust@netapp.com>
 *
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "nfs4session.h"
#include "callback.h"

#define NFSDBG_FACILITY		NFSDBG_STATE

static void nfs4_init_slot_table(struct nfs4_slot_table *tbl, const char *queue)
{
	tbl->highest_used_slotid = NFS4_NO_SLOT;
	spin_lock_init(&tbl->slot_tbl_lock);
	rpc_init_priority_wait_queue(&tbl->slot_tbl_waitq, queue);
	init_waitqueue_head(&tbl->slot_waitq);
	init_completion(&tbl->complete);
}

/*
 * nfs4_shrink_slot_table - free retired slots from the slot table
 */
static void nfs4_shrink_slot_table(struct nfs4_slot_table *tbl, u32 newsize)
{
	struct nfs4_slot **p;
	if (newsize >= tbl->max_slots)
		return;

	p = &tbl->slots;
	while (newsize--)
		p = &(*p)->next;
	while (*p) {
		struct nfs4_slot *slot = *p;

		*p = slot->next;
		kfree(slot);
		tbl->max_slots--;
	}
}
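/*
 * Note on nfs4_shrink_slot_table(): the slot list is kept in slot_nr order
 * (see nfs4_find_or_create_slot() below), so skipping the first @newsize
 * entries lands on the lowest slot being retired; every slot from that
 * point onwards is freed.
 */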

/**
 * nfs4_slot_tbl_drain_complete - wake waiters when drain is complete
 * @tbl: controlling slot table
 *
 */
void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl)
{
	if (nfs4_slot_tbl_draining(tbl))
		complete(&tbl->complete);
}

/*
 * nfs4_free_slot - free a slot and efficiently update slot table.
 *
 * Freeing a slot is trivially done by clearing its respective bit
 * in the bitmap.
 * If the freed slotid equals highest_used_slotid we want to update it
 * so that the server can size down the slot table if needed,
 * otherwise we know that the highest_used_slotid is still in use.
 * When updating highest_used_slotid there may be "holes" in the bitmap
 * so we need to scan down from highest_used_slotid to 0 looking for the now
 * highest slotid in use.
 * If none found, highest_used_slotid is set to NFS4_NO_SLOT.
 *
 * Must be called while holding tbl->slot_tbl_lock
 */
void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot)
{
	u32 slotid = slot->slot_nr;

	/* clear used bit in bitmap */
	__clear_bit(slotid, tbl->used_slots);

	/* update highest_used_slotid when it is freed */
	if (slotid == tbl->highest_used_slotid) {
		u32 new_max = find_last_bit(tbl->used_slots, slotid);
		if (new_max < slotid)
			tbl->highest_used_slotid = new_max;
		else {
			tbl->highest_used_slotid = NFS4_NO_SLOT;
			nfs4_slot_tbl_drain_complete(tbl);
		}
	}
	dprintk("%s: slotid %u highest_used_slotid %u\n", __func__,
		slotid, tbl->highest_used_slotid);
}
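/*
 * Example for nfs4_free_slot(): if slots 0, 1 and 3 are marked used and
 * slot 3 (the current highest_used_slotid) is freed, find_last_bit()
 * scans down past the hole at slot 2 and highest_used_slotid becomes 1.
 * Freeing the last used slot sets NFS4_NO_SLOT and, if the table is
 * draining, completes tbl->complete.
 */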

static struct nfs4_slot *nfs4_new_slot(struct nfs4_slot_table *tbl,
		u32 slotid, u32 seq_init, gfp_t gfp_mask)
{
	struct nfs4_slot *slot;

	slot = kzalloc(sizeof(*slot), gfp_mask);
	if (slot) {
		slot->table = tbl;
		slot->slot_nr = slotid;
		slot->seq_nr = seq_init;
		slot->seq_nr_highest_sent = seq_init;
		slot->seq_nr_last_acked = seq_init - 1;
	}
	return slot;
}

static struct nfs4_slot *nfs4_find_or_create_slot(struct nfs4_slot_table *tbl,
		u32 slotid, u32 seq_init, gfp_t gfp_mask)
{
	struct nfs4_slot **p, *slot;

	p = &tbl->slots;
	for (;;) {
		if (*p == NULL) {
			*p = nfs4_new_slot(tbl, tbl->max_slots,
					seq_init, gfp_mask);
			if (*p == NULL)
				break;
			tbl->max_slots++;
		}
		slot = *p;
		if (slot->slot_nr == slotid)
			return slot;
		p = &slot->next;
	}
	return ERR_PTR(-ENOMEM);
}
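/*
 * Note on nfs4_find_or_create_slot(): new slots are always appended with
 * slot_nr == tbl->max_slots, so the list stays sorted by slot number.
 * Looking up @slotid therefore allocates any missing slots below it as a
 * side effect, and only fails with -ENOMEM if such an allocation fails.
 */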

static void nfs4_lock_slot(struct nfs4_slot_table *tbl,
		struct nfs4_slot *slot)
{
	u32 slotid = slot->slot_nr;

	__set_bit(slotid, tbl->used_slots);
	if (slotid > tbl->highest_used_slotid ||
	    tbl->highest_used_slotid == NFS4_NO_SLOT)
		tbl->highest_used_slotid = slotid;
	slot->generation = tbl->generation;
}

/*
 * nfs4_try_to_lock_slot - Given a slot try to allocate it
 *
 * Note: must be called with the slot_tbl_lock held.
 */
bool nfs4_try_to_lock_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot)
{
	if (nfs4_test_locked_slot(tbl, slot->slot_nr))
		return false;
	nfs4_lock_slot(tbl, slot);
	return true;
}

/*
 * nfs4_lookup_slot - Find a slot but don't allocate it
 *
 * Note: must be called with the slot_tbl_lock held.
 */
struct nfs4_slot *nfs4_lookup_slot(struct nfs4_slot_table *tbl, u32 slotid)
{
	if (slotid <= tbl->max_slotid)
		return nfs4_find_or_create_slot(tbl, slotid, 0, GFP_NOWAIT);
	return ERR_PTR(-E2BIG);
}

static int nfs4_slot_get_seqid(struct nfs4_slot_table *tbl, u32 slotid,
		u32 *seq_nr)
	__must_hold(&tbl->slot_tbl_lock)
{
	struct nfs4_slot *slot;
	int ret;

	slot = nfs4_lookup_slot(tbl, slotid);
	ret = PTR_ERR_OR_ZERO(slot);
	if (!ret)
		*seq_nr = slot->seq_nr;

	return ret;
}

/*
 * nfs4_slot_seqid_in_use - test if a slot sequence id is still in use
 *
 * Given a slot table, slot id and sequence number, determine if the
 * RPC call in question is still in flight. This function is mainly
 * intended for use by the callback channel.
 */
static bool nfs4_slot_seqid_in_use(struct nfs4_slot_table *tbl,
		u32 slotid, u32 seq_nr)
{
	u32 cur_seq = 0;
	bool ret = false;

	spin_lock(&tbl->slot_tbl_lock);
	if (nfs4_slot_get_seqid(tbl, slotid, &cur_seq) == 0 &&
	    cur_seq == seq_nr && test_bit(slotid, tbl->used_slots))
		ret = true;
	spin_unlock(&tbl->slot_tbl_lock);
	return ret;
}

/*
 * nfs4_slot_wait_on_seqid - wait until a slot sequence id is complete
 *
 * Given a slot table, slot id and sequence number, wait until the
 * corresponding RPC call completes. This function is mainly
 * intended for use by the callback channel.
 */
int nfs4_slot_wait_on_seqid(struct nfs4_slot_table *tbl,
		u32 slotid, u32 seq_nr,
		unsigned long timeout)
{
	if (wait_event_timeout(tbl->slot_waitq,
			!nfs4_slot_seqid_in_use(tbl, slotid, seq_nr),
			timeout) == 0)
		return -ETIMEDOUT;
	return 0;
}

/*
 * nfs4_alloc_slot - efficiently look for a free slot
 *
 * nfs4_alloc_slot looks for an unset bit in the used_slots bitmap.
 * If found, we mark the slot as used, update the highest_used_slotid,
 * and set up the sequence operation args accordingly.
 *
 * Note: must be called under the slot_tbl_lock.
 */
struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl)
{
	struct nfs4_slot *ret = ERR_PTR(-EBUSY);
	u32 slotid;

	dprintk("--> %s used_slots=%04lx highest_used=%u max_slots=%u\n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
		tbl->max_slotid + 1);
	slotid = find_first_zero_bit(tbl->used_slots, tbl->max_slotid + 1);
	if (slotid <= tbl->max_slotid) {
		ret = nfs4_find_or_create_slot(tbl, slotid, 1, GFP_NOWAIT);
		if (!IS_ERR(ret))
			nfs4_lock_slot(tbl, ret);
	}
	dprintk("<-- %s used_slots=%04lx highest_used=%u slotid=%u\n",
		__func__, tbl->used_slots[0], tbl->highest_used_slotid,
		!IS_ERR(ret) ? ret->slot_nr : NFS4_NO_SLOT);
	return ret;
}
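/*
 * Illustrative caller pattern for nfs4_alloc_slot() (a rough sketch only;
 * the real callers live outside this file, in the SEQUENCE setup paths):
 *
 *	spin_lock(&tbl->slot_tbl_lock);
 *	slot = nfs4_alloc_slot(tbl);
 *	if (IS_ERR(slot)) {
 *		rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
 *		spin_unlock(&tbl->slot_tbl_lock);
 *		return;		(the task retries when a slot is freed)
 *	}
 *	spin_unlock(&tbl->slot_tbl_lock);
 */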

static int nfs4_grow_slot_table(struct nfs4_slot_table *tbl,
		u32 max_reqs, u32 ivalue)
{
	if (max_reqs <= tbl->max_slots)
		return 0;
	if (!IS_ERR(nfs4_find_or_create_slot(tbl, max_reqs - 1, ivalue, GFP_NOFS)))
		return 0;
	return -ENOMEM;
}

static void nfs4_reset_slot_table(struct nfs4_slot_table *tbl,
		u32 server_highest_slotid,
		u32 ivalue)
{
	struct nfs4_slot **p;

	nfs4_shrink_slot_table(tbl, server_highest_slotid + 1);
	p = &tbl->slots;
	while (*p) {
		(*p)->seq_nr = ivalue;
		(*p)->seq_nr_highest_sent = ivalue;
		(*p)->seq_nr_last_acked = ivalue - 1;
		p = &(*p)->next;
	}
	tbl->highest_used_slotid = NFS4_NO_SLOT;
	tbl->target_highest_slotid = server_highest_slotid;
	tbl->server_highest_slotid = server_highest_slotid;
	tbl->d_target_highest_slotid = 0;
	tbl->d2_target_highest_slotid = 0;
	tbl->max_slotid = server_highest_slotid;
}

/*
 * (re)Initialise a slot table
 */
static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl,
		u32 max_reqs, u32 ivalue)
{
	int ret;

	dprintk("--> %s: max_reqs=%u, tbl->max_slots %u\n", __func__,
		max_reqs, tbl->max_slots);

	if (max_reqs > NFS4_MAX_SLOT_TABLE)
		max_reqs = NFS4_MAX_SLOT_TABLE;

	ret = nfs4_grow_slot_table(tbl, max_reqs, ivalue);
	if (ret)
		goto out;

	spin_lock(&tbl->slot_tbl_lock);
	nfs4_reset_slot_table(tbl, max_reqs - 1, ivalue);
	spin_unlock(&tbl->slot_tbl_lock);

	dprintk("%s: tbl=%p slots=%p max_slots=%u\n", __func__,
		tbl, tbl->slots, tbl->max_slots);
out:
	dprintk("<-- %s: return %d\n", __func__, ret);
	return ret;
}

/*
 * nfs4_release_slot_table - release all slot table entries
 */
static void nfs4_release_slot_table(struct nfs4_slot_table *tbl)
{
	nfs4_shrink_slot_table(tbl, 0);
}

/**
 * nfs4_shutdown_slot_table - release resources attached to a slot table
 * @tbl: slot table to shut down
 *
 */
void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl)
{
	nfs4_release_slot_table(tbl);
	rpc_destroy_wait_queue(&tbl->slot_tbl_waitq);
}

/**
 * nfs4_setup_slot_table - prepare a stand-alone slot table for use
 * @tbl: slot table to set up
 * @max_reqs: maximum number of requests allowed
 * @queue: name to give RPC wait queue
 *
 * Returns zero on success, or a negative errno.
 */
int nfs4_setup_slot_table(struct nfs4_slot_table *tbl, unsigned int max_reqs,
		const char *queue)
{
	nfs4_init_slot_table(tbl, queue);
	return nfs4_realloc_slot_table(tbl, max_reqs, 0);
}

static bool nfs41_assign_slot(struct rpc_task *task, void *pslot)
{
	struct nfs4_sequence_args *args = task->tk_msg.rpc_argp;
	struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
	struct nfs4_slot *slot = pslot;
	struct nfs4_slot_table *tbl = slot->table;

	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
		return false;
	slot->generation = tbl->generation;
	args->sa_slot = slot;
	res->sr_timestamp = jiffies;
	res->sr_slot = slot;
	res->sr_status_flags = 0;
	res->sr_status = 1;
	return true;
}

static bool __nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
		struct nfs4_slot *slot)
{
	if (rpc_wake_up_first(&tbl->slot_tbl_waitq, nfs41_assign_slot, slot))
		return true;
	return false;
}

bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
		struct nfs4_slot *slot)
{
	if (slot->slot_nr > tbl->max_slotid)
		return false;
	return __nfs41_wake_and_assign_slot(tbl, slot);
}

static bool nfs41_try_wake_next_slot_table_entry(struct nfs4_slot_table *tbl)
{
	struct nfs4_slot *slot = nfs4_alloc_slot(tbl);
	if (!IS_ERR(slot)) {
		bool ret = __nfs41_wake_and_assign_slot(tbl, slot);
		if (ret)
			return ret;
		nfs4_free_slot(tbl, slot);
	}
	return false;
}
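/*
 * Note on nfs41_try_wake_next_slot_table_entry(): a slot is allocated
 * speculatively and handed to the first queued task via the
 * nfs41_assign_slot() callback; if no waiter accepts it (or the queue is
 * empty), the slot is released again so it does not leak.
 */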

void nfs41_wake_slot_table(struct nfs4_slot_table *tbl)
{
	for (;;) {
		if (!nfs41_try_wake_next_slot_table_entry(tbl))
			break;
	}
}

#if defined(CONFIG_NFS_V4_1)

static void nfs41_set_max_slotid_locked(struct nfs4_slot_table *tbl,
		u32 target_highest_slotid)
{
	u32 max_slotid;

	max_slotid = min(NFS4_MAX_SLOT_TABLE - 1, target_highest_slotid);
	if (max_slotid > tbl->server_highest_slotid)
		max_slotid = tbl->server_highest_slotid;
	if (max_slotid > tbl->target_highest_slotid)
		max_slotid = tbl->target_highest_slotid;
	tbl->max_slotid = max_slotid;
	nfs41_wake_slot_table(tbl);
}

/* Update the client's idea of target_highest_slotid */
static void nfs41_set_target_slotid_locked(struct nfs4_slot_table *tbl,
		u32 target_highest_slotid)
{
	if (tbl->target_highest_slotid == target_highest_slotid)
		return;
	tbl->target_highest_slotid = target_highest_slotid;
	tbl->generation++;
}

void nfs41_set_target_slotid(struct nfs4_slot_table *tbl,
		u32 target_highest_slotid)
{
	spin_lock(&tbl->slot_tbl_lock);
	nfs41_set_target_slotid_locked(tbl, target_highest_slotid);
	tbl->d_target_highest_slotid = 0;
	tbl->d2_target_highest_slotid = 0;
	nfs41_set_max_slotid_locked(tbl, target_highest_slotid);
	spin_unlock(&tbl->slot_tbl_lock);
}

static void nfs41_set_server_slotid_locked(struct nfs4_slot_table *tbl,
		u32 highest_slotid)
{
	if (tbl->server_highest_slotid == highest_slotid)
		return;
	if (tbl->highest_used_slotid > highest_slotid)
		return;
	/* Deallocate slots */
	nfs4_shrink_slot_table(tbl, highest_slotid + 1);
	tbl->server_highest_slotid = highest_slotid;
}

static s32 nfs41_derivative_target_slotid(s32 s1, s32 s2)
{
	s1 -= s2;
	if (s1 == 0)
		return 0;
	if (s1 < 0)
		return (s1 - 1) >> 1;
	return (s1 + 1) >> 1;
}
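/*
 * Example for nfs41_derivative_target_slotid(): a target that moves from
 * 10 to 16 gives s1 - s2 = 6, so the function returns (6 + 1) >> 1 = 3,
 * i.e. roughly half of the observed change while preserving its sign.
 */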

static int nfs41_sign_s32(s32 s1)
{
	if (s1 > 0)
		return 1;
	if (s1 < 0)
		return -1;
	return 0;
}

static bool nfs41_same_sign_or_zero_s32(s32 s1, s32 s2)
{
	if (!s1 || !s2)
		return true;
	return nfs41_sign_s32(s1) == nfs41_sign_s32(s2);
}

/* Try to eliminate outliers by checking for sharp changes in the
 * derivatives and second derivatives
 */
static bool nfs41_is_outlier_target_slotid(struct nfs4_slot_table *tbl,
		u32 new_target)
{
	s32 d_target, d2_target;
	bool ret = true;

	d_target = nfs41_derivative_target_slotid(new_target,
			tbl->target_highest_slotid);
	d2_target = nfs41_derivative_target_slotid(d_target,
			tbl->d_target_highest_slotid);
	/* Is first derivative same sign? */
	if (nfs41_same_sign_or_zero_s32(d_target, tbl->d_target_highest_slotid))
		ret = false;
	/* Is second derivative same sign? */
	if (nfs41_same_sign_or_zero_s32(d2_target, tbl->d2_target_highest_slotid))
		ret = false;
	tbl->d_target_highest_slotid = d_target;
	tbl->d2_target_highest_slotid = d2_target;
	return ret;
}
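/*
 * Note on nfs41_is_outlier_target_slotid(): the new target is treated as
 * an outlier only when both the first and the second derivative are
 * non-zero and have flipped sign relative to the previously recorded
 * values; the recorded derivatives are updated either way.
 */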

void nfs41_update_target_slotid(struct nfs4_slot_table *tbl,
		struct nfs4_slot *slot,
		struct nfs4_sequence_res *res)
{
	u32 target_highest_slotid = min(res->sr_target_highest_slotid,
			NFS4_MAX_SLOTID);
	u32 highest_slotid = min(res->sr_highest_slotid, NFS4_MAX_SLOTID);

	spin_lock(&tbl->slot_tbl_lock);
	if (!nfs41_is_outlier_target_slotid(tbl, target_highest_slotid))
		nfs41_set_target_slotid_locked(tbl, target_highest_slotid);
	if (tbl->generation == slot->generation)
		nfs41_set_server_slotid_locked(tbl, highest_slotid);
	nfs41_set_max_slotid_locked(tbl, target_highest_slotid);
	spin_unlock(&tbl->slot_tbl_lock);
}

static void nfs4_release_session_slot_tables(struct nfs4_session *session)
{
	nfs4_release_slot_table(&session->fc_slot_table);
	nfs4_release_slot_table(&session->bc_slot_table);
}

/*
 * Initialize or reset the forechannel and backchannel tables
 */
int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
{
	struct nfs4_slot_table *tbl;
	int status;

	dprintk("--> %s\n", __func__);
	/* Fore channel */
	tbl = &ses->fc_slot_table;
	tbl->session = ses;
	status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
	if (status || !(ses->flags & SESSION4_BACK_CHAN)) /* -ENOMEM */
		return status;
	/* Back channel */
	tbl = &ses->bc_slot_table;
	tbl->session = ses;
	status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0);
	if (status && tbl->slots == NULL)
		/* Fore and back channel share a connection so get
		 * both slot tables or neither */
		nfs4_release_session_slot_tables(ses);
	return status;
}

struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
{
	struct nfs4_session *session;

	session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
	if (!session)
		return NULL;

	nfs4_init_slot_table(&session->fc_slot_table, "ForeChannel Slot table");
	nfs4_init_slot_table(&session->bc_slot_table, "BackChannel Slot table");
	session->session_state = 1 << NFS4_SESSION_INITING;

	session->clp = clp;
	return session;
}

static void nfs4_destroy_session_slot_tables(struct nfs4_session *session)
{
	nfs4_shutdown_slot_table(&session->fc_slot_table);
	nfs4_shutdown_slot_table(&session->bc_slot_table);
}

void nfs4_destroy_session(struct nfs4_session *session)
{
	struct rpc_xprt *xprt;
	const struct cred *cred;

	cred = nfs4_get_clid_cred(session->clp);
	nfs4_proc_destroy_session(session, cred);
	put_cred(cred);

	rcu_read_lock();
	xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
	rcu_read_unlock();
	dprintk("%s Destroy backchannel for xprt %p\n",
		__func__, xprt);
	xprt_destroy_backchannel(xprt, NFS41_BC_MIN_CALLBACKS);
	nfs4_destroy_session_slot_tables(session);
	kfree(session);
}

/*
 * With sessions, the client is not marked ready until after a
 * successful EXCHANGE_ID and CREATE_SESSION.
 *
 * Map cl_cons_state errors to EPROTONOSUPPORT to indicate that
 * other versions of NFS can be tried.
 */
static int nfs41_check_session_ready(struct nfs_client *clp)
{
	int ret;

	if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
		ret = nfs4_client_recover_expired_lease(clp);
		if (ret)
			return ret;
	}
	if (clp->cl_cons_state < NFS_CS_READY)
		return -EPROTONOSUPPORT;
	smp_rmb();
	return 0;
}

int nfs4_init_session(struct nfs_client *clp)
{
	if (!nfs4_has_session(clp))
		return 0;

	clear_bit(NFS4_SESSION_INITING, &clp->cl_session->session_state);
	return nfs41_check_session_ready(clp);
}

int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
{
	struct nfs4_session *session = clp->cl_session;
	int ret;

	spin_lock(&clp->cl_lock);
	if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
		/*
		 * Do not set NFS_CS_CHECK_LEASE_TIME; instead set the
		 * DS lease to be equal to the MDS lease.
		 */
		clp->cl_lease_time = lease_time;
		clp->cl_last_renewal = jiffies;
	}
	spin_unlock(&clp->cl_lock);

	ret = nfs41_check_session_ready(clp);
	if (ret)
		return ret;
	/* Test for the DS role */
	if (!is_ds_client(clp))
		return -ENODEV;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs4_init_ds_session);

#endif /* defined(CONFIG_NFS_V4_1) */