// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"
#include "sysfs.h"
#include "fail.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	xprt_destroy(struct rpc_xprt *xprt);
static void	xprt_request_init(struct rpc_task *task);
static int	xprt_request_prepare(struct rpc_rqst *req, struct xdr_buf *buf);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
{
	unsigned long timeout = jiffies + req->rq_timeout;

	if (time_before(timeout, req->rq_majortimeo))
		return timeout;
	return req->rq_majortimeo;
}

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
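
/*
 * Illustrative sketch (not part of this file): a transport module
 * typically declares an xprt_class and registers it from its
 * module_init hook. The "example" name, ident, and setup callback
 * below are hypothetical placeholders; see the declarations in
 * net/sunrpc/xprtsock.c for real instances.
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_EXAMPLE,
 *		.setup	= xs_setup_example,
 *		.netid	= { "example", "" },
 *	};
 *
 *	static int __init example_xprt_init(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 */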

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

static void
xprt_class_release(const struct xprt_class *t)
{
	module_put(t->owner);
}

static const struct xprt_class *
xprt_class_find_by_ident_locked(int ident)
{
	const struct xprt_class *t;

	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident != ident)
			continue;
		if (!try_module_get(t->owner))
			continue;
		return t;
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_ident(int ident)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_ident_locked(ident);
	spin_unlock(&xprt_list_lock);
	return t;
}

static const struct xprt_class *
xprt_class_find_by_netid_locked(const char *netid)
{
	const struct xprt_class *t;
	unsigned int i;

	list_for_each_entry(t, &xprt_list, list) {
		for (i = 0; t->netid[i][0] != '\0'; i++) {
			if (strcmp(t->netid[i], netid) != 0)
				continue;
			if (!try_module_get(t->owner))
				continue;
			return t;
		}
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_netid(const char *netid)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_netid_locked(netid);
	if (!t) {
		spin_unlock(&xprt_list_lock);
		request_module("rpc%s", netid);
		spin_lock(&xprt_list_lock);
		t = xprt_class_find_by_netid_locked(netid);
	}
	spin_unlock(&xprt_list_lock);
	return t;
}

/**
 * xprt_find_transport_ident - convert a netid into a transport identifier
 * @netid: transport to load
 *
 * Returns:
 * > 0:		transport identifier
 * -ENOENT:	transport module not available
 */
int xprt_find_transport_ident(const char *netid)
{
	const struct xprt_class *t;
	int ret;

	t = xprt_class_find_by_netid(netid);
	if (!t)
		return -ENOENT;
	ret = t->ident;
	xprt_class_release(t);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_find_transport_ident);
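
/*
 * Usage sketch (illustrative): resolving the "tcp" netid from a mount
 * option would look roughly like
 *
 *	int ident = xprt_find_transport_ident("tcp");
 *	if (ident < 0)
 *		return ident;
 *
 * On success, ident holds a registered transport identifier such as
 * XPRT_TRANSPORT_TCP, suitable for the ident field of struct
 * xprt_create; a missing module yields -ENOENT after a single
 * request_module() attempt in xprt_class_find_by_netid().
 */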

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state))
		clear_bit_unlock(XPRT_LOCKED, &xprt->state);
	else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes. No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

out_locked:
	trace_xprt_reserve_xprt(xprt, task);
	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task) || RPC_IS_SOFTCONN(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @xprt: pointer to the target transport
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		goto out_locked;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		goto out_locked;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task) || RPC_IS_SOFTCONN(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
out_locked:
	trace_xprt_reserve_cong(xprt, task);
	return 1;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
	trace_xprt_release_xprt(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
	trace_xprt_release_cong(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	trace_xprt_get_cong(xprt, req->rq_task);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	trace_xprt_put_cong(xprt, req->rq_task);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
		__xprt_lock_write_next_cong(xprt);
}

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	- a reply is received and
 *	- a full number of requests are outstanding and
 *	- the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
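
/*
 * Worked example (assuming RPC_CWNDSCALE == 256, i.e. RPC_CWNDSHIFT == 8,
 * per linux/sunrpc/xprt.h): with cwnd == 512 (two slots' worth of
 * credit), a successful reply adds (256 * 256 + 256) / 512 == 128,
 * growing the window by half a slot, while a timeout halves it to 256
 * (one slot). In slot units the increment is roughly RPC_CWNDSCALE/cwnd
 * per reply, giving the classic additive-increase,
 * multiplicative-decrease behaviour.
 */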

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC: write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
{
	s64 delta = ktime_to_ns(ktime_get() - abstime);
	return likely(delta >= 0) ?
		jiffies - nsecs_to_jiffies(delta) :
		jiffies + nsecs_to_jiffies(-delta);
}

static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req,
		const struct rpc_timeout *to)
{
	unsigned long majortimeo = req->rq_timeout;

	if (to->to_exponential)
		majortimeo <<= to->to_retries;
	else
		majortimeo += to->to_increment * to->to_retries;
	if (majortimeo > to->to_maxval || majortimeo == 0)
		majortimeo = to->to_maxval;
	return majortimeo;
}
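
/*
 * Worked example with illustrative values (the real numbers come from
 * the transport's rpc_timeout): given to_initval = 5s, to_increment = 5s,
 * to_retries = 5 and to_maxval = 30s, rq_timeout starts at 5s, so the
 * linear case computes 5 + 5 * 5 = 30 seconds until the major timeout.
 * An exponential transport would compute 5s << 5 = 160s instead, which
 * the to_maxval test above clamps back down to 30 seconds.
 */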

static void xprt_reset_majortimeo(struct rpc_rqst *req,
		const struct rpc_timeout *to)
{
	req->rq_majortimeo += xprt_calc_majortimeo(req, to);
}

static void xprt_reset_minortimeo(struct rpc_rqst *req)
{
	req->rq_minortimeo += req->rq_timeout;
}

static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req,
		const struct rpc_timeout *to)
{
	unsigned long time_init;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (likely(xprt && xprt_connected(xprt)))
		time_init = jiffies;
	else
		time_init = xprt_abs_ktime_to_jiffies(task->tk_start);

	req->rq_timeout = to->to_initval;
	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req, to);
	req->rq_minortimeo = time_init + req->rq_timeout;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (time_before(jiffies, req->rq_minortimeo))
			return status;
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req, to);
		/* Reset the RTT counters == "slow start" */
		spin_lock(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}
	xprt_reset_minortimeo(req);

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
	unsigned int pflags = memalloc_nofs_save();

	trace_xprt_disconnect_auto(xprt);
	xprt->connect_cookie++;
	smp_mb__before_atomic();
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
	memalloc_nofs_restore(pflags);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_done(xprt);
	spin_lock(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_clear_congestion_window_wait_locked(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_schedule_autoclose_locked - Try to schedule an autoclose RPC call
 * @xprt: transport to disconnect
 */
static void xprt_schedule_autoclose_locked(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_CLOSE_WAIT, &xprt->state))
		return;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
		rpc_wake_up_queued_task_set_status(&xprt->pending,
				xprt->snd_task, -ENOTCONN);
}

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_force(xprt);

	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	xprt_schedule_autoclose_locked(xprt);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
	return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
		!xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	xprt_schedule_autoclose_locked(xprt);
out:
	spin_unlock(&xprt->transport_lock);
}

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	xprt->last_used = jiffies;
	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		return;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

#if IS_ENABLED(CONFIG_FAIL_SUNRPC)
static void xprt_inject_disconnect(struct rpc_xprt *xprt)
{
	if (!fail_sunrpc.ignore_client_disconnect &&
	    should_fail(&fail_sunrpc.attr, 1))
		xprt->ops->inject_disconnect(xprt);
}
#else
static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
{
}
#endif

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	set_bit(XPRT_SND_IS_COOKIE, &xprt->state);
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_lock_connect);

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	clear_bit(XPRT_SND_IS_COOKIE, &xprt->state);
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}
EXPORT_SYMBOL_GPL(xprt_unlock_connect);

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	trace_xprt_connect(xprt);

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
				xprt_request_timeout(task->tk_rqstp));

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}

/**
 * xprt_reconnect_delay - compute the wait before scheduling a connect
 * @xprt: transport instance
 *
 */
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
{
	unsigned long start, now = jiffies;

	start = xprt->stat.connect_start + xprt->reestablish_timeout;
	if (time_after(start, now))
		return start - now;
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_delay);

/**
 * xprt_reconnect_backoff - compute the new re-establish timeout
 * @xprt: transport instance
 * @init_to: initial reestablish timeout
 *
 */
void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
{
	xprt->reestablish_timeout <<= 1;
	if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
		xprt->reestablish_timeout = xprt->max_reconnect_timeout;
	if (xprt->reestablish_timeout < init_to)
		xprt->reestablish_timeout = init_to;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
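
/*
 * Example sequence (illustrative): with init_to == 3 * HZ and
 * max_reconnect_timeout == 300 * HZ, successive calls double the
 * reestablish timeout through 6, 12, 24, ... seconds up to the 300
 * second cap, and never let it fall below init_to. Together with
 * xprt_reconnect_delay() above this implements truncated binary
 * exponential backoff for reconnect attempts.
 */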

enum xprt_xid_rb_cmp {
	XID_RB_EQUAL,
	XID_RB_LEFT,
	XID_RB_RIGHT,
};
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
	if (xid1 == xid2)
		return XID_RB_EQUAL;
	if ((__force u32)xid1 < (__force u32)xid2)
		return XID_RB_LEFT;
	return XID_RB_RIGHT;
}

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
	struct rb_node *n = xprt->recv_queue.rb_node;
	struct rpc_rqst *req;

	while (n != NULL) {
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(xid, req->rq_xid)) {
		case XID_RB_LEFT:
			n = n->rb_left;
			break;
		case XID_RB_RIGHT:
			n = n->rb_right;
			break;
		case XID_RB_EQUAL:
			return req;
		}
	}
	return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
	struct rb_node **p = &xprt->recv_queue.rb_node;
	struct rb_node *n = NULL;
	struct rpc_rqst *req;

	while (*p != NULL) {
		n = *p;
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
		case XID_RB_LEFT:
			p = &n->rb_left;
			break;
		case XID_RB_RIGHT:
			p = &n->rb_right;
			break;
		case XID_RB_EQUAL:
			WARN_ON_ONCE(new != req);
			return;
		}
	}
	rb_link_node(&new->rq_recv, n, p);
	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	rb_erase(&req->rq_recv, &xprt->recv_queue);
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	entry = xprt_request_rb_find(xprt, xid);
	if (entry != NULL) {
		trace_xprt_lookup_rqst(xprt, xid, 0);
		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
		return entry;
	}

	dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
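
/*
 * Typical caller pattern (abridged sketch; compare the socket
 * transport's receive path): a reply handler pairs the lookup with a
 * pin so the request cannot be released while the reply data is being
 * copied outside the queue lock:
 *
 *	spin_lock(&xprt->queue_lock);
 *	req = xprt_lookup_rqst(xprt, xid);
 *	if (!req)
 *		goto out_unlock;
 *	xprt_pin_rqst(req);
 *	spin_unlock(&xprt->queue_lock);
 *	(... copy the reply into req->rq_private_buf ...)
 *	spin_lock(&xprt->queue_lock);
 *	xprt_complete_rqst(req->rq_task, copied);
 *	xprt_unpin_rqst(req);
 *	spin_unlock(&xprt->queue_lock);
 */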

static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
	return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding xprt->queue_lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding xprt->queue_lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}

static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}

/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
 * @task: RPC task
 *
 */
int
xprt_request_enqueue_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int ret;

	if (!xprt_request_need_enqueue_receive(task, req))
		return 0;

	ret = xprt_request_prepare(task->tk_rqstp, &req->rq_rcv_buf);
	if (ret)
		return ret;
	spin_lock(&xprt->queue_lock);

	/* Update the softirq receive buffer */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));

	/* Add request to the receive list */
	xprt_request_rb_insert(xprt, req);
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);

	/* Turn off autodisconnect */
	del_timer_sync(&xprt->timer);
	return 0;
}

/**
 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
 * @task: RPC task
 *
 * Caller must hold xprt->queue_lock.
 */
static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		xprt_request_rb_remove(req->rq_xprt, req);
}

/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);
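
/*
 * Only a request that has been transmitted exactly once contributes an
 * RTT sample (Karn's algorithm): a reply to a retransmitted request
 * could match any of the transmissions, so its measured time is
 * ambiguous. The retransmission count still feeds rpc_set_timeo() so
 * that the per-procedure backoff state tracks observed losses.
 */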

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	xprt->stat.recvs++;

	xdr_free_bvec(&req->rq_rcv_buf);
	req->rq_private_buf.bvec = NULL;
	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	xprt_request_dequeue_receive_locked(task);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;

	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_wait_for_reply_request_def - wait for reply
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters. Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_def(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			xprt_request_timeout(req));
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);

/**
 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout using the RTT estimator,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
	unsigned long timeout;

	timeout = rpc_calc_rto(rtt, timer);
	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (timeout > max_timeout || timeout == 0)
		timeout = max_timeout;
	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			jiffies + timeout);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);
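
/*
 * Worked example (illustrative numbers): if the estimator currently
 * yields rpc_calc_rto() == 2s and rpc_ntimeo() == 1, a request on its
 * first retry (rq_retries == 1) sleeps for 2s << (1 + 1) == 8 seconds,
 * subject to the to_maxval clamp above.
 */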

/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		return;
	/*
	 * Sleep on the pending queue if we're expecting a reply.
	 * The spinlock ensures atomicity between the test of
	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
	 */
	spin_lock(&xprt->queue_lock);
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		xprt->ops->wait_for_reply_request(task);
		/*
		 * Send an extra queue wakeup call if the
		 * connection was dropped in case the call to
		 * rpc_sleep_on() raced.
		 */
		if (xprt_request_retransmit_after_disconnect(task))
			rpc_wake_up_queued_task_set_status(&xprt->pending,
					task, -ENOTCONN);
	}
	spin_unlock(&xprt->queue_lock);
}

static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int ret;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		ret = xprt_request_prepare(task->tk_rqstp, &req->rq_snd_buf);
		if (ret) {
			task->tk_status = ret;
			return;
		}
		req->rq_bytes_sent = 0;
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				goto out;
			}
		} else if (!req->rq_seqno) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				goto out;
			}
		}
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
		INIT_LIST_HEAD(&req->rq_xmit2);
out:
		atomic_long_inc(&xprt->xmit_queuelen);
		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		list_del(&req->rq_xmit);
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		list_del(&req->rq_xmit2);
	atomic_long_dec(&req->rq_xprt->xmit_queuelen);
	xdr_free_bvec(&req->rq_snd_buf);
}

/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}

/**
 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmit and receive queues, and ensure that
 * it is not pinned by the receive work item.
 */
void
xprt_request_dequeue_xprt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
	    xprt_is_pinned_rqst(req)) {
		spin_lock(&xprt->queue_lock);
		while (xprt_is_pinned_rqst(req)) {
			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
			spin_unlock(&xprt->queue_lock);
			xprt_wait_on_pinned_rqst(req);
			spin_lock(&xprt->queue_lock);
			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
		}
		xprt_request_dequeue_transmit_locked(task);
		xprt_request_dequeue_receive_locked(task);
		spin_unlock(&xprt->queue_lock);
		xdr_free_bvec(&req->rq_rcv_buf);
	}
}

/**
 * xprt_request_prepare - prepare an encoded request for transport
 * @req: pointer to rpc_rqst
 * @buf: pointer to send/rcv xdr_buf
 *
 * Calls into the transport layer to do whatever is needed to prepare
 * the request for transmission or receive.
 * Returns error, or zero.
 */
static int
xprt_request_prepare(struct rpc_rqst *req, struct xdr_buf *buf)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt->ops->prepare_request)
		return xprt->ops->prepare_request(req, buf);
	return 0;
}

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_lock_write(xprt, task)) {
		/* Race breaker: someone may have transmitted us */
		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			rpc_wake_up_queued_task_set_status(&xprt->sending,
					task, 0);
		return false;
	}
	if (atomic_read(&xprt->swapper))
		/* This will be clear in __rpc_execute */
		current->flags |= PF_MEMALLOC;
	return true;
}

void xprt_end_transmit(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	xprt_inject_disconnect(xprt);
	xprt_release_write(xprt, task);
}

/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	if (!req->rq_bytes_sent) {
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
		if (RPC_SIGNALLED(task)) {
			status = -ERESTARTSYS;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	if (status != 0) {
		req->rq_ntrans--;
		trace_xprt_transmit(req, status);
		return status;
	}

	if (is_retrans) {
		task->tk_client->cl_stats->rpcretrans++;
		trace_xprt_retransmit(req);
	}

	xprt_inject_disconnect(xprt);

	task->tk_flags |= RPC_TASK_SENT;
	spin_lock(&xprt->transport_lock);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	trace_xprt_transmit(req, status);
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status;

	spin_lock(&xprt->queue_lock);
	for (;;) {
		next = list_first_entry_or_null(&xprt->xmit_queue,
				struct rpc_rqst, rq_xmit);
		if (!next)
			break;
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		if (status == -EBADMSG && next != req)
			status = 0;
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status < 0) {
			if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				task->tk_status = status;
			break;
		}
		/* Was @task transmitted, and has it received a reply? */
		if (xprt_request_data_received(task) &&
		    !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			break;
		cond_resched_lock(&xprt->queue_lock);
	}
	spin_unlock(&xprt->queue_lock);
}
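
/*
 * Note that the loop above makes @task drain the whole transmit queue,
 * not just its own request: sends for other tasks are batched through
 * whichever task currently holds the transport's write lock. Each
 * request is pinned while the queue lock is dropped for the actual
 * send, so it cannot be released mid-transmission.
 */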

static void xprt_complete_request_init(struct rpc_task *task)
{
	if (task->tk_rqstp)
		xprt_request_init(task);
}

void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
}
EXPORT_SYMBOL_GPL(xprt_add_backlog);

static bool __xprt_set_rq(struct rpc_task *task, void *data)
{
	struct rpc_rqst *req = data;

	if (task->tk_rqstp == NULL) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		task->tk_rqstp = req;
		return true;
	}
	return false;
}

bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
		clear_bit(XPRT_CONGESTED, &xprt->state);
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xprt_wake_up_backlog);

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		xprt_add_backlog(xprt, task);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	++xprt->num_reqs;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(*req), rpc_task_gfp_mask());
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	--xprt->num_reqs;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC: dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC: waiting for request slot\n");
		fallthrough;
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_wake_up_backlog(xprt, req) &&
	    !xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

static DEFINE_IDA(rpc_xprt_ids);

void xprt_cleanup_ids(void)
{
	ida_destroy(&rpc_xprt_ids);
}

static int xprt_alloc_id(struct rpc_xprt *xprt)
{
	int id;

	id = ida_alloc(&rpc_xprt_ids, GFP_KERNEL);
	if (id < 0)
		return id;

	xprt->id = id;
	return 0;
}

static void xprt_free_id(struct rpc_xprt *xprt)
{
	ida_free(&rpc_xprt_ids, xprt->id);
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_alloc_id(xprt);
	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	xprt->max_reqs = max_t(unsigned int, max_alloc, num_prealloc);
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
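
/*
 * Illustrative call (compare the socket transport's setup routines,
 * which do the equivalent with their own structure sizes and slot
 * table tunables):
 *
 *	xprt = xprt_alloc(args->net, sizeof(struct sock_xprt),
 *			  xprt_udp_slot_table_entries, RPC_MAX_SLOT_TABLE);
 *	if (!xprt)
 *		return ERR_PTR(-ENOMEM);
 *
 * This preallocates num_prealloc request slots and lets the slot table
 * grow on demand up to max_alloc via xprt_dynamic_alloc_slot().
 */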
1840
1841void xprt_free(struct rpc_xprt *xprt)
1842{
1843 put_net_track(xprt->xprt_net, &xprt->ns_tracker);
1844 xprt_free_all_slots(xprt);
1845 xprt_free_id(xprt);
1846 rpc_sysfs_xprt_destroy(xprt);
1847 kfree_rcu(xprt, rcu);
1848}
1849EXPORT_SYMBOL_GPL(xprt_free);
1850
1851static void
1852xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
1853{
1854 req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
1855}
1856
1857static __be32
1858xprt_alloc_xid(struct rpc_xprt *xprt)
1859{
1860 __be32 xid;
1861
1862 spin_lock(&xprt->reserve_lock);
1863 xid = (__force __be32)xprt->xid++;
1864 spin_unlock(&xprt->reserve_lock);
1865 return xid;
1866}
1867
1868static void
1869xprt_init_xid(struct rpc_xprt *xprt)
1870{
1871 xprt->xid = get_random_u32();
1872}
1873
1874static void
1875xprt_request_init(struct rpc_task *task)
1876{
1877 struct rpc_xprt *xprt = task->tk_xprt;
1878 struct rpc_rqst *req = task->tk_rqstp;
1879
1880 req->rq_task = task;
1881 req->rq_xprt = xprt;
1882 req->rq_buffer = NULL;
1883 req->rq_xid = xprt_alloc_xid(xprt);
1884 xprt_init_connect_cookie(req, xprt);
1885 req->rq_snd_buf.len = 0;
1886 req->rq_snd_buf.buflen = 0;
1887 req->rq_rcv_buf.len = 0;
1888 req->rq_rcv_buf.buflen = 0;
1889 req->rq_snd_buf.bvec = NULL;
1890 req->rq_rcv_buf.bvec = NULL;
1891 req->rq_release_snd_buf = NULL;
1892 xprt_init_majortimeo(task, req, task->tk_client->cl_timeout);
1893
1894 trace_xprt_reserve(req);
1895}
1896
1897static void
1898xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
1899{
1900 xprt->ops->alloc_slot(xprt, task);
1901 if (task->tk_rqstp != NULL)
1902 xprt_request_init(task);
1903}
1904
1905/**
1906 * xprt_reserve - allocate an RPC request slot
1907 * @task: RPC task requesting a slot allocation
1908 *
1909 * If the transport is marked as being congested, or if no more
1910 * slots are available, place the task on the transport's
1911 * backlog queue.
1912 */
1913void xprt_reserve(struct rpc_task *task)
1914{
1915 struct rpc_xprt *xprt = task->tk_xprt;
1916
1917 task->tk_status = 0;
1918 if (task->tk_rqstp != NULL)
1919 return;
1920
1921 task->tk_status = -EAGAIN;
1922 if (!xprt_throttle_congested(xprt, task))
1923 xprt_do_reserve(xprt, task);
1924}
1925
1926/**
1927 * xprt_retry_reserve - allocate an RPC request slot
1928 * @task: RPC task requesting a slot allocation
1929 *
1930 * If no more slots are available, place the task on the transport's
1931 * backlog queue.
1932 * Note that the only difference with xprt_reserve is that we now
1933 * ignore the value of the XPRT_CONGESTED flag.
1934 */
1935void xprt_retry_reserve(struct rpc_task *task)
1936{
1937 struct rpc_xprt *xprt = task->tk_xprt;
1938
1939 task->tk_status = 0;
1940 if (task->tk_rqstp != NULL)
1941 return;
1942
1943 task->tk_status = -EAGAIN;
1944 xprt_do_reserve(xprt, task);
1945}
1946
/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	xprt_request_dequeue_xprt(task);
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt_schedule_autodisconnect(xprt);
	spin_unlock(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	task->tk_rqstp = NULL;
	if (likely(!bc_prealloc(req)))
		xprt->ops->free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

#ifdef CONFIG_SUNRPC_BACKCHANNEL
void
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task,
		const struct rpc_timeout *to)
{
	struct xdr_buf *xbufp = &req->rq_snd_buf;

	task->tk_rqstp = req;
	req->rq_task = task;
	xprt_init_connect_cookie(req, req->rq_xprt);
	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
		xbufp->tail[0].iov_len;
	/*
	 * Backchannel Replies are sent with !RPC_TASK_SOFT and
	 * RPC_TASK_NO_RETRANS_TIMEOUT. The major timeout setting
	 * affects only how long each Reply waits to be sent when
	 * a transport connection cannot be established.
	 */
	xprt_init_majortimeo(task, req, to);
}
#endif

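/*
 * Common initialization shared by all transports: locks, request
 * queues, wait queues, the initial congestion window, and a random
 * starting XID. Called from xprt_alloc() before the transport-specific
 * setup runs.
 */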
static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->queue_lock);

	INIT_LIST_HEAD(&xprt->free);
	xprt->recv_queue = RB_ROOT;
	INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net_track(net, &xprt->ns_tracker, GFP_KERNEL);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 * Returns a new transport instance on success, or an ERR_PTR.
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	const struct xprt_class *t;

	t = xprt_class_find_by_ident(args->ident);
	if (!t) {
		dprintk("RPC: transport (%d) not supported\n", args->ident);
		return ERR_PTR(-EIO);
	}

	xprt = t->setup(args);
	xprt_class_release(t);

	if (IS_ERR(xprt))
		goto out;
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
	else
		timer_setup(&xprt->timer, NULL, 0);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	trace_xprt_create(xprt);
out:
	return xprt;
}

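/*
 * Illustrative sketch only (this helper is hypothetical): how a caller
 * such as rpc_create() might fill in struct xprt_create for the TCP
 * socket transport. XPRT_TRANSPORT_TCP is the real ident for that
 * class; the server name is a placeholder.
 */
static __maybe_unused struct rpc_xprt *
xprt_create_tcp_sketch(struct net *net, struct sockaddr *sap, size_t salen)
{
	struct xprt_create args = {
		.ident		= XPRT_TRANSPORT_TCP,
		.net		= net,
		.dstaddr	= sap,
		.addrlen	= salen,
		.servername	= "server.example.net",
	};

	return xprt_create_transport(&args);	/* ERR_PTR() on failure */
}
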
static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	trace_xprt_destroy(xprt);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);
	/*
	 * Destroy any existing back channel
	 */
	xprt_destroy_backchannel(xprt, UINT_MAX);

	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	/*
	 * xprt_schedule_autodisconnect() can run after XPRT_LOCKED
	 * is cleared. We use ->transport_lock to ensure the mod_timer()
	 * can only run *before* del_timer_sync(), never after.
	 */
	spin_lock(&xprt->transport_lock);
	del_timer_sync(&xprt->timer);
	spin_unlock(&xprt->transport_lock);

	/*
	 * Destroy sockets etc from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 * Returns @xprt, or NULL if the transport is already being destroyed.
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);

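/*
 * Illustrative sketch only (this helper is hypothetical): the expected
 * xprt_get()/xprt_put() pairing when borrowing a transport pointer
 * that something else keeps alive (for example, an RCU-protected
 * lookup).
 */
static __maybe_unused void xprt_refcount_sketch(struct rpc_xprt *xprt)
{
	struct rpc_xprt *held = xprt_get(xprt);

	if (!held)
		return;		/* destruction already under way */
	/* ... safely use @held here ... */
	xprt_put(held);
}
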
void xprt_set_offline_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps)
{
	if (!test_and_set_bit(XPRT_OFFLINE, &xprt->state)) {
		spin_lock(&xps->xps_lock);
		xps->xps_nactive--;
		spin_unlock(&xps->xps_lock);
	}
}

void xprt_set_online_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps)
{
	if (test_and_clear_bit(XPRT_OFFLINE, &xprt->state)) {
		spin_lock(&xps->xps_lock);
		xps->xps_nactive++;
		spin_unlock(&xps->xps_lock);
	}
}

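/*
 * Flag @xprt for removal and force a disconnect. The transport is
 * unlinked from @xps right away only if it is still connected and has
 * no sending, pending, backlog, or queued requests.
 */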
void xprt_delete_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps)
{
	if (test_and_set_bit(XPRT_REMOVE, &xprt->state))
		return;

	xprt_force_disconnect(xprt);
	if (!test_bit(XPRT_CONNECTED, &xprt->state))
		return;

	if (!xprt->sending.qlen && !xprt->pending.qlen &&
	    !xprt->backlog.qlen && !atomic_long_read(&xprt->queuelen))
		rpc_xprt_switch_remove_xprt(xps, xprt, true);
}
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/net/sunrpc/xprt.c
4 *
5 * This is a generic RPC call interface supporting congestion avoidance,
6 * and asynchronous calls.
7 *
8 * The interface works like this:
9 *
10 * - When a process places a call, it allocates a request slot if
11 * one is available. Otherwise, it sleeps on the backlog queue
12 * (xprt_reserve).
13 * - Next, the caller puts together the RPC message, stuffs it into
14 * the request struct, and calls xprt_transmit().
15 * - xprt_transmit sends the message and installs the caller on the
16 * transport's wait list. At the same time, if a reply is expected,
17 * it installs a timer that is run after the packet's timeout has
18 * expired.
19 * - When a packet arrives, the data_ready handler walks the list of
20 * pending requests for that transport. If a matching XID is found, the
21 * caller is woken up, and the timer removed.
22 * - When no reply arrives within the timeout interval, the timer is
23 * fired by the kernel and runs xprt_timer(). It either adjusts the
24 * timeout values (minor timeout) or wakes up the caller with a status
25 * of -ETIMEDOUT.
26 * - When the caller receives a notification from RPC that a reply arrived,
27 * it should release the RPC slot, and process the reply.
28 * If the call timed out, it may choose to retry the operation by
29 * adjusting the initial timeout value, and simply calling rpc_call
30 * again.
31 *
32 * Support for async RPC is done through a set of RPC-specific scheduling
33 * primitives that `transparently' work for processes as well as async
34 * tasks that rely on callbacks.
35 *
36 * Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
37 *
38 * Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
39 */
40
41#include <linux/module.h>
42
43#include <linux/types.h>
44#include <linux/interrupt.h>
45#include <linux/workqueue.h>
46#include <linux/net.h>
47#include <linux/ktime.h>
48
49#include <linux/sunrpc/clnt.h>
50#include <linux/sunrpc/metrics.h>
51#include <linux/sunrpc/bc_xprt.h>
52#include <linux/rcupdate.h>
53#include <linux/sched/mm.h>
54
55#include <trace/events/sunrpc.h>
56
57#include "sunrpc.h"
58#include "sysfs.h"
59#include "fail.h"
60
61/*
62 * Local variables
63 */
64
65#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
66# define RPCDBG_FACILITY RPCDBG_XPRT
67#endif
68
69/*
70 * Local functions
71 */
72static void xprt_init(struct rpc_xprt *xprt, struct net *net);
73static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);
74static void xprt_destroy(struct rpc_xprt *xprt);
75static void xprt_request_init(struct rpc_task *task);
76static int xprt_request_prepare(struct rpc_rqst *req, struct xdr_buf *buf);
77
78static DEFINE_SPINLOCK(xprt_list_lock);
79static LIST_HEAD(xprt_list);
80
81static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
82{
83 unsigned long timeout = jiffies + req->rq_timeout;
84
85 if (time_before(timeout, req->rq_majortimeo))
86 return timeout;
87 return req->rq_majortimeo;
88}
89
90/**
91 * xprt_register_transport - register a transport implementation
92 * @transport: transport to register
93 *
94 * If a transport implementation is loaded as a kernel module, it can
95 * call this interface to make itself known to the RPC client.
96 *
97 * Returns:
98 * 0: transport successfully registered
99 * -EEXIST: transport already registered
100 * -EINVAL: transport module being unloaded
101 */
102int xprt_register_transport(struct xprt_class *transport)
103{
104 struct xprt_class *t;
105 int result;
106
107 result = -EEXIST;
108 spin_lock(&xprt_list_lock);
109 list_for_each_entry(t, &xprt_list, list) {
110 /* don't register the same transport class twice */
111 if (t->ident == transport->ident)
112 goto out;
113 }
114
115 list_add_tail(&transport->list, &xprt_list);
116 printk(KERN_INFO "RPC: Registered %s transport module.\n",
117 transport->name);
118 result = 0;
119
120out:
121 spin_unlock(&xprt_list_lock);
122 return result;
123}
124EXPORT_SYMBOL_GPL(xprt_register_transport);
125
126/**
127 * xprt_unregister_transport - unregister a transport implementation
128 * @transport: transport to unregister
129 *
130 * Returns:
131 * 0: transport successfully unregistered
132 * -ENOENT: transport never registered
133 */
134int xprt_unregister_transport(struct xprt_class *transport)
135{
136 struct xprt_class *t;
137 int result;
138
139 result = 0;
140 spin_lock(&xprt_list_lock);
141 list_for_each_entry(t, &xprt_list, list) {
142 if (t == transport) {
143 printk(KERN_INFO
144 "RPC: Unregistered %s transport module.\n",
145 transport->name);
146 list_del_init(&transport->list);
147 goto out;
148 }
149 }
150 result = -ENOENT;
151
152out:
153 spin_unlock(&xprt_list_lock);
154 return result;
155}
156EXPORT_SYMBOL_GPL(xprt_unregister_transport);
157
158static void
159xprt_class_release(const struct xprt_class *t)
160{
161 module_put(t->owner);
162}
163
164static const struct xprt_class *
165xprt_class_find_by_ident_locked(int ident)
166{
167 const struct xprt_class *t;
168
169 list_for_each_entry(t, &xprt_list, list) {
170 if (t->ident != ident)
171 continue;
172 if (!try_module_get(t->owner))
173 continue;
174 return t;
175 }
176 return NULL;
177}
178
179static const struct xprt_class *
180xprt_class_find_by_ident(int ident)
181{
182 const struct xprt_class *t;
183
184 spin_lock(&xprt_list_lock);
185 t = xprt_class_find_by_ident_locked(ident);
186 spin_unlock(&xprt_list_lock);
187 return t;
188}
189
190static const struct xprt_class *
191xprt_class_find_by_netid_locked(const char *netid)
192{
193 const struct xprt_class *t;
194 unsigned int i;
195
196 list_for_each_entry(t, &xprt_list, list) {
197 for (i = 0; t->netid[i][0] != '\0'; i++) {
198 if (strcmp(t->netid[i], netid) != 0)
199 continue;
200 if (!try_module_get(t->owner))
201 continue;
202 return t;
203 }
204 }
205 return NULL;
206}
207
208static const struct xprt_class *
209xprt_class_find_by_netid(const char *netid)
210{
211 const struct xprt_class *t;
212
213 spin_lock(&xprt_list_lock);
214 t = xprt_class_find_by_netid_locked(netid);
215 if (!t) {
216 spin_unlock(&xprt_list_lock);
217 request_module("rpc%s", netid);
218 spin_lock(&xprt_list_lock);
219 t = xprt_class_find_by_netid_locked(netid);
220 }
221 spin_unlock(&xprt_list_lock);
222 return t;
223}
224
225/**
226 * xprt_find_transport_ident - convert a netid into a transport identifier
227 * @netid: transport to load
228 *
229 * Returns:
230 * > 0: transport identifier
231 * -ENOENT: transport module not available
232 */
233int xprt_find_transport_ident(const char *netid)
234{
235 const struct xprt_class *t;
236 int ret;
237
238 t = xprt_class_find_by_netid(netid);
239 if (!t)
240 return -ENOENT;
241 ret = t->ident;
242 xprt_class_release(t);
243 return ret;
244}
245EXPORT_SYMBOL_GPL(xprt_find_transport_ident);
246
247static void xprt_clear_locked(struct rpc_xprt *xprt)
248{
249 xprt->snd_task = NULL;
250 if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state))
251 clear_bit_unlock(XPRT_LOCKED, &xprt->state);
252 else
253 queue_work(xprtiod_workqueue, &xprt->task_cleanup);
254}
255
256/**
257 * xprt_reserve_xprt - serialize write access to transports
258 * @task: task that is requesting access to the transport
259 * @xprt: pointer to the target transport
260 *
261 * This prevents mixing the payload of separate requests, and prevents
262 * transport connects from colliding with writes. No congestion control
263 * is provided.
264 */
265int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
266{
267 struct rpc_rqst *req = task->tk_rqstp;
268
269 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
270 if (task == xprt->snd_task)
271 goto out_locked;
272 goto out_sleep;
273 }
274 if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
275 goto out_unlock;
276 xprt->snd_task = task;
277
278out_locked:
279 trace_xprt_reserve_xprt(xprt, task);
280 return 1;
281
282out_unlock:
283 xprt_clear_locked(xprt);
284out_sleep:
285 task->tk_status = -EAGAIN;
286 if (RPC_IS_SOFT(task))
287 rpc_sleep_on_timeout(&xprt->sending, task, NULL,
288 xprt_request_timeout(req));
289 else
290 rpc_sleep_on(&xprt->sending, task, NULL);
291 return 0;
292}
293EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
294
295static bool
296xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
297{
298 return test_bit(XPRT_CWND_WAIT, &xprt->state);
299}
300
301static void
302xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
303{
304 if (!list_empty(&xprt->xmit_queue)) {
305 /* Peek at head of queue to see if it can make progress */
306 if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
307 rq_xmit)->rq_cong)
308 return;
309 }
310 set_bit(XPRT_CWND_WAIT, &xprt->state);
311}
312
313static void
314xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
315{
316 if (!RPCXPRT_CONGESTED(xprt))
317 clear_bit(XPRT_CWND_WAIT, &xprt->state);
318}
319
320/*
321 * xprt_reserve_xprt_cong - serialize write access to transports
322 * @task: task that is requesting access to the transport
323 *
324 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
325 * integrated into the decision of whether a request is allowed to be
326 * woken up and given access to the transport.
327 * Note that the lock is only granted if we know there are free slots.
328 */
329int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
330{
331 struct rpc_rqst *req = task->tk_rqstp;
332
333 if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
334 if (task == xprt->snd_task)
335 goto out_locked;
336 goto out_sleep;
337 }
338 if (req == NULL) {
339 xprt->snd_task = task;
340 goto out_locked;
341 }
342 if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
343 goto out_unlock;
344 if (!xprt_need_congestion_window_wait(xprt)) {
345 xprt->snd_task = task;
346 goto out_locked;
347 }
348out_unlock:
349 xprt_clear_locked(xprt);
350out_sleep:
351 task->tk_status = -EAGAIN;
352 if (RPC_IS_SOFT(task))
353 rpc_sleep_on_timeout(&xprt->sending, task, NULL,
354 xprt_request_timeout(req));
355 else
356 rpc_sleep_on(&xprt->sending, task, NULL);
357 return 0;
358out_locked:
359 trace_xprt_reserve_cong(xprt, task);
360 return 1;
361}
362EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
363
364static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
365{
366 int retval;
367
368 if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
369 return 1;
370 spin_lock(&xprt->transport_lock);
371 retval = xprt->ops->reserve_xprt(xprt, task);
372 spin_unlock(&xprt->transport_lock);
373 return retval;
374}
375
376static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
377{
378 struct rpc_xprt *xprt = data;
379
380 xprt->snd_task = task;
381 return true;
382}
383
384static void __xprt_lock_write_next(struct rpc_xprt *xprt)
385{
386 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
387 return;
388 if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
389 goto out_unlock;
390 if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
391 __xprt_lock_write_func, xprt))
392 return;
393out_unlock:
394 xprt_clear_locked(xprt);
395}
396
397static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
398{
399 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
400 return;
401 if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
402 goto out_unlock;
403 if (xprt_need_congestion_window_wait(xprt))
404 goto out_unlock;
405 if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
406 __xprt_lock_write_func, xprt))
407 return;
408out_unlock:
409 xprt_clear_locked(xprt);
410}
411
412/**
413 * xprt_release_xprt - allow other requests to use a transport
414 * @xprt: transport with other tasks potentially waiting
415 * @task: task that is releasing access to the transport
416 *
417 * Note that "task" can be NULL. No congestion control is provided.
418 */
419void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
420{
421 if (xprt->snd_task == task) {
422 xprt_clear_locked(xprt);
423 __xprt_lock_write_next(xprt);
424 }
425 trace_xprt_release_xprt(xprt, task);
426}
427EXPORT_SYMBOL_GPL(xprt_release_xprt);
428
429/**
430 * xprt_release_xprt_cong - allow other requests to use a transport
431 * @xprt: transport with other tasks potentially waiting
432 * @task: task that is releasing access to the transport
433 *
434 * Note that "task" can be NULL. Another task is awoken to use the
435 * transport if the transport's congestion window allows it.
436 */
437void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
438{
439 if (xprt->snd_task == task) {
440 xprt_clear_locked(xprt);
441 __xprt_lock_write_next_cong(xprt);
442 }
443 trace_xprt_release_cong(xprt, task);
444}
445EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);
446
447void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
448{
449 if (xprt->snd_task != task)
450 return;
451 spin_lock(&xprt->transport_lock);
452 xprt->ops->release_xprt(xprt, task);
453 spin_unlock(&xprt->transport_lock);
454}
455
456/*
457 * Van Jacobson congestion avoidance. Check if the congestion window
458 * overflowed. Put the task to sleep if this is the case.
459 */
460static int
461__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
462{
463 if (req->rq_cong)
464 return 1;
465 trace_xprt_get_cong(xprt, req->rq_task);
466 if (RPCXPRT_CONGESTED(xprt)) {
467 xprt_set_congestion_window_wait(xprt);
468 return 0;
469 }
470 req->rq_cong = 1;
471 xprt->cong += RPC_CWNDSCALE;
472 return 1;
473}
474
475/*
476 * Adjust the congestion window, and wake up the next task
477 * that has been sleeping due to congestion
478 */
479static void
480__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
481{
482 if (!req->rq_cong)
483 return;
484 req->rq_cong = 0;
485 xprt->cong -= RPC_CWNDSCALE;
486 xprt_test_and_clear_congestion_window_wait(xprt);
487 trace_xprt_put_cong(xprt, req->rq_task);
488 __xprt_lock_write_next_cong(xprt);
489}
490
491/**
492 * xprt_request_get_cong - Request congestion control credits
493 * @xprt: pointer to transport
494 * @req: pointer to RPC request
495 *
496 * Useful for transports that require congestion control.
497 */
498bool
499xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
500{
501 bool ret = false;
502
503 if (req->rq_cong)
504 return true;
505 spin_lock(&xprt->transport_lock);
506 ret = __xprt_get_cong(xprt, req) != 0;
507 spin_unlock(&xprt->transport_lock);
508 return ret;
509}
510EXPORT_SYMBOL_GPL(xprt_request_get_cong);
511
512/**
513 * xprt_release_rqst_cong - housekeeping when request is complete
514 * @task: RPC request that recently completed
515 *
516 * Useful for transports that require congestion control.
517 */
518void xprt_release_rqst_cong(struct rpc_task *task)
519{
520 struct rpc_rqst *req = task->tk_rqstp;
521
522 __xprt_put_cong(req->rq_xprt, req);
523}
524EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);
525
526static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
527{
528 if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
529 __xprt_lock_write_next_cong(xprt);
530}
531
532/*
533 * Clear the congestion window wait flag and wake up the next
534 * entry on xprt->sending
535 */
536static void
537xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
538{
539 if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
540 spin_lock(&xprt->transport_lock);
541 __xprt_lock_write_next_cong(xprt);
542 spin_unlock(&xprt->transport_lock);
543 }
544}
545
546/**
547 * xprt_adjust_cwnd - adjust transport congestion window
548 * @xprt: pointer to xprt
549 * @task: recently completed RPC request used to adjust window
550 * @result: result code of completed RPC request
551 *
552 * The transport code maintains an estimate on the maximum number of out-
553 * standing RPC requests, using a smoothed version of the congestion
554 * avoidance implemented in 44BSD. This is basically the Van Jacobson
555 * congestion algorithm: If a retransmit occurs, the congestion window is
556 * halved; otherwise, it is incremented by 1/cwnd when
557 *
558 * - a reply is received and
559 * - a full number of requests are outstanding and
560 * - the congestion window hasn't been updated recently.
561 */
562void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
563{
564 struct rpc_rqst *req = task->tk_rqstp;
565 unsigned long cwnd = xprt->cwnd;
566
567 if (result >= 0 && cwnd <= xprt->cong) {
568 /* The (cwnd >> 1) term makes sure
569 * the result gets rounded properly. */
570 cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
571 if (cwnd > RPC_MAXCWND(xprt))
572 cwnd = RPC_MAXCWND(xprt);
573 __xprt_lock_write_next_cong(xprt);
574 } else if (result == -ETIMEDOUT) {
575 cwnd >>= 1;
576 if (cwnd < RPC_CWNDSCALE)
577 cwnd = RPC_CWNDSCALE;
578 }
579 dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
580 xprt->cong, xprt->cwnd, cwnd);
581 xprt->cwnd = cwnd;
582 __xprt_put_cong(xprt, req);
583}
584EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
585
586/**
587 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
588 * @xprt: transport with waiting tasks
589 * @status: result code to plant in each task before waking it
590 *
591 */
592void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
593{
594 if (status < 0)
595 rpc_wake_up_status(&xprt->pending, status);
596 else
597 rpc_wake_up(&xprt->pending);
598}
599EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
600
601/**
602 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
603 * @xprt: transport
604 *
605 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
606 * we don't in general want to force a socket disconnection due to
607 * an incomplete RPC call transmission.
608 */
609void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
610{
611 set_bit(XPRT_WRITE_SPACE, &xprt->state);
612}
613EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
614
615static bool
616xprt_clear_write_space_locked(struct rpc_xprt *xprt)
617{
618 if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
619 __xprt_lock_write_next(xprt);
620 dprintk("RPC: write space: waking waiting task on "
621 "xprt %p\n", xprt);
622 return true;
623 }
624 return false;
625}
626
627/**
628 * xprt_write_space - wake the task waiting for transport output buffer space
629 * @xprt: transport with waiting tasks
630 *
631 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
632 */
633bool xprt_write_space(struct rpc_xprt *xprt)
634{
635 bool ret;
636
637 if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
638 return false;
639 spin_lock(&xprt->transport_lock);
640 ret = xprt_clear_write_space_locked(xprt);
641 spin_unlock(&xprt->transport_lock);
642 return ret;
643}
644EXPORT_SYMBOL_GPL(xprt_write_space);
645
646static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
647{
648 s64 delta = ktime_to_ns(ktime_get() - abstime);
649 return likely(delta >= 0) ?
650 jiffies - nsecs_to_jiffies(delta) :
651 jiffies + nsecs_to_jiffies(-delta);
652}
653
654static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
655{
656 const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
657 unsigned long majortimeo = req->rq_timeout;
658
659 if (to->to_exponential)
660 majortimeo <<= to->to_retries;
661 else
662 majortimeo += to->to_increment * to->to_retries;
663 if (majortimeo > to->to_maxval || majortimeo == 0)
664 majortimeo = to->to_maxval;
665 return majortimeo;
666}
667
668static void xprt_reset_majortimeo(struct rpc_rqst *req)
669{
670 req->rq_majortimeo += xprt_calc_majortimeo(req);
671}
672
673static void xprt_reset_minortimeo(struct rpc_rqst *req)
674{
675 req->rq_minortimeo += req->rq_timeout;
676}
677
678static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
679{
680 unsigned long time_init;
681 struct rpc_xprt *xprt = req->rq_xprt;
682
683 if (likely(xprt && xprt_connected(xprt)))
684 time_init = jiffies;
685 else
686 time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
687 req->rq_timeout = task->tk_client->cl_timeout->to_initval;
688 req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
689 req->rq_minortimeo = time_init + req->rq_timeout;
690}
691
692/**
693 * xprt_adjust_timeout - adjust timeout values for next retransmit
694 * @req: RPC request containing parameters to use for the adjustment
695 *
696 */
697int xprt_adjust_timeout(struct rpc_rqst *req)
698{
699 struct rpc_xprt *xprt = req->rq_xprt;
700 const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
701 int status = 0;
702
703 if (time_before(jiffies, req->rq_majortimeo)) {
704 if (time_before(jiffies, req->rq_minortimeo))
705 return status;
706 if (to->to_exponential)
707 req->rq_timeout <<= 1;
708 else
709 req->rq_timeout += to->to_increment;
710 if (to->to_maxval && req->rq_timeout >= to->to_maxval)
711 req->rq_timeout = to->to_maxval;
712 req->rq_retries++;
713 } else {
714 req->rq_timeout = to->to_initval;
715 req->rq_retries = 0;
716 xprt_reset_majortimeo(req);
717 /* Reset the RTT counters == "slow start" */
718 spin_lock(&xprt->transport_lock);
719 rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
720 spin_unlock(&xprt->transport_lock);
721 status = -ETIMEDOUT;
722 }
723 xprt_reset_minortimeo(req);
724
725 if (req->rq_timeout == 0) {
726 printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
727 req->rq_timeout = 5 * HZ;
728 }
729 return status;
730}
731
732static void xprt_autoclose(struct work_struct *work)
733{
734 struct rpc_xprt *xprt =
735 container_of(work, struct rpc_xprt, task_cleanup);
736 unsigned int pflags = memalloc_nofs_save();
737
738 trace_xprt_disconnect_auto(xprt);
739 xprt->connect_cookie++;
740 smp_mb__before_atomic();
741 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
742 xprt->ops->close(xprt);
743 xprt_release_write(xprt, NULL);
744 wake_up_bit(&xprt->state, XPRT_LOCKED);
745 memalloc_nofs_restore(pflags);
746}
747
748/**
749 * xprt_disconnect_done - mark a transport as disconnected
750 * @xprt: transport to flag for disconnect
751 *
752 */
753void xprt_disconnect_done(struct rpc_xprt *xprt)
754{
755 trace_xprt_disconnect_done(xprt);
756 spin_lock(&xprt->transport_lock);
757 xprt_clear_connected(xprt);
758 xprt_clear_write_space_locked(xprt);
759 xprt_clear_congestion_window_wait_locked(xprt);
760 xprt_wake_pending_tasks(xprt, -ENOTCONN);
761 spin_unlock(&xprt->transport_lock);
762}
763EXPORT_SYMBOL_GPL(xprt_disconnect_done);
764
765/**
766 * xprt_schedule_autoclose_locked - Try to schedule an autoclose RPC call
767 * @xprt: transport to disconnect
768 */
769static void xprt_schedule_autoclose_locked(struct rpc_xprt *xprt)
770{
771 if (test_and_set_bit(XPRT_CLOSE_WAIT, &xprt->state))
772 return;
773 if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
774 queue_work(xprtiod_workqueue, &xprt->task_cleanup);
775 else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
776 rpc_wake_up_queued_task_set_status(&xprt->pending,
777 xprt->snd_task, -ENOTCONN);
778}
779
780/**
781 * xprt_force_disconnect - force a transport to disconnect
782 * @xprt: transport to disconnect
783 *
784 */
785void xprt_force_disconnect(struct rpc_xprt *xprt)
786{
787 trace_xprt_disconnect_force(xprt);
788
789 /* Don't race with the test_bit() in xprt_clear_locked() */
790 spin_lock(&xprt->transport_lock);
791 xprt_schedule_autoclose_locked(xprt);
792 spin_unlock(&xprt->transport_lock);
793}
794EXPORT_SYMBOL_GPL(xprt_force_disconnect);
795
796static unsigned int
797xprt_connect_cookie(struct rpc_xprt *xprt)
798{
799 return READ_ONCE(xprt->connect_cookie);
800}
801
802static bool
803xprt_request_retransmit_after_disconnect(struct rpc_task *task)
804{
805 struct rpc_rqst *req = task->tk_rqstp;
806 struct rpc_xprt *xprt = req->rq_xprt;
807
808 return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
809 !xprt_connected(xprt);
810}
811
812/**
813 * xprt_conditional_disconnect - force a transport to disconnect
814 * @xprt: transport to disconnect
815 * @cookie: 'connection cookie'
816 *
817 * This attempts to break the connection if and only if 'cookie' matches
818 * the current transport 'connection cookie'. It ensures that we don't
819 * try to break the connection more than once when we need to retransmit
820 * a batch of RPC requests.
821 *
822 */
823void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
824{
825 /* Don't race with the test_bit() in xprt_clear_locked() */
826 spin_lock(&xprt->transport_lock);
827 if (cookie != xprt->connect_cookie)
828 goto out;
829 if (test_bit(XPRT_CLOSING, &xprt->state))
830 goto out;
831 xprt_schedule_autoclose_locked(xprt);
832out:
833 spin_unlock(&xprt->transport_lock);
834}
835
836static bool
837xprt_has_timer(const struct rpc_xprt *xprt)
838{
839 return xprt->idle_timeout != 0;
840}
841
842static void
843xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
844 __must_hold(&xprt->transport_lock)
845{
846 xprt->last_used = jiffies;
847 if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
848 mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
849}
850
851static void
852xprt_init_autodisconnect(struct timer_list *t)
853{
854 struct rpc_xprt *xprt = from_timer(xprt, t, timer);
855
856 if (!RB_EMPTY_ROOT(&xprt->recv_queue))
857 return;
858 /* Reset xprt->last_used to avoid connect/autodisconnect cycling */
859 xprt->last_used = jiffies;
860 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
861 return;
862 queue_work(xprtiod_workqueue, &xprt->task_cleanup);
863}
864
865#if IS_ENABLED(CONFIG_FAIL_SUNRPC)
866static void xprt_inject_disconnect(struct rpc_xprt *xprt)
867{
868 if (!fail_sunrpc.ignore_client_disconnect &&
869 should_fail(&fail_sunrpc.attr, 1))
870 xprt->ops->inject_disconnect(xprt);
871}
872#else
873static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
874{
875}
876#endif
877
878bool xprt_lock_connect(struct rpc_xprt *xprt,
879 struct rpc_task *task,
880 void *cookie)
881{
882 bool ret = false;
883
884 spin_lock(&xprt->transport_lock);
885 if (!test_bit(XPRT_LOCKED, &xprt->state))
886 goto out;
887 if (xprt->snd_task != task)
888 goto out;
889 set_bit(XPRT_SND_IS_COOKIE, &xprt->state);
890 xprt->snd_task = cookie;
891 ret = true;
892out:
893 spin_unlock(&xprt->transport_lock);
894 return ret;
895}
896EXPORT_SYMBOL_GPL(xprt_lock_connect);
897
898void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
899{
900 spin_lock(&xprt->transport_lock);
901 if (xprt->snd_task != cookie)
902 goto out;
903 if (!test_bit(XPRT_LOCKED, &xprt->state))
904 goto out;
905 xprt->snd_task =NULL;
906 clear_bit(XPRT_SND_IS_COOKIE, &xprt->state);
907 xprt->ops->release_xprt(xprt, NULL);
908 xprt_schedule_autodisconnect(xprt);
909out:
910 spin_unlock(&xprt->transport_lock);
911 wake_up_bit(&xprt->state, XPRT_LOCKED);
912}
913EXPORT_SYMBOL_GPL(xprt_unlock_connect);
914
915/**
916 * xprt_connect - schedule a transport connect operation
917 * @task: RPC task that is requesting the connect
918 *
919 */
920void xprt_connect(struct rpc_task *task)
921{
922 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
923
924 trace_xprt_connect(xprt);
925
926 if (!xprt_bound(xprt)) {
927 task->tk_status = -EAGAIN;
928 return;
929 }
930 if (!xprt_lock_write(xprt, task))
931 return;
932
933 if (!xprt_connected(xprt) && !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
934 task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
935 rpc_sleep_on_timeout(&xprt->pending, task, NULL,
936 xprt_request_timeout(task->tk_rqstp));
937
938 if (test_bit(XPRT_CLOSING, &xprt->state))
939 return;
940 if (xprt_test_and_set_connecting(xprt))
941 return;
942 /* Race breaker */
943 if (!xprt_connected(xprt)) {
944 xprt->stat.connect_start = jiffies;
945 xprt->ops->connect(xprt, task);
946 } else {
947 xprt_clear_connecting(xprt);
948 task->tk_status = 0;
949 rpc_wake_up_queued_task(&xprt->pending, task);
950 }
951 }
952 xprt_release_write(xprt, task);
953}
954
955/**
956 * xprt_reconnect_delay - compute the wait before scheduling a connect
957 * @xprt: transport instance
958 *
959 */
960unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
961{
962 unsigned long start, now = jiffies;
963
964 start = xprt->stat.connect_start + xprt->reestablish_timeout;
965 if (time_after(start, now))
966 return start - now;
967 return 0;
968}
969EXPORT_SYMBOL_GPL(xprt_reconnect_delay);
970
971/**
972 * xprt_reconnect_backoff - compute the new re-establish timeout
973 * @xprt: transport instance
974 * @init_to: initial reestablish timeout
975 *
976 */
977void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
978{
979 xprt->reestablish_timeout <<= 1;
980 if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
981 xprt->reestablish_timeout = xprt->max_reconnect_timeout;
982 if (xprt->reestablish_timeout < init_to)
983 xprt->reestablish_timeout = init_to;
984}
985EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
986
987enum xprt_xid_rb_cmp {
988 XID_RB_EQUAL,
989 XID_RB_LEFT,
990 XID_RB_RIGHT,
991};
992static enum xprt_xid_rb_cmp
993xprt_xid_cmp(__be32 xid1, __be32 xid2)
994{
995 if (xid1 == xid2)
996 return XID_RB_EQUAL;
997 if ((__force u32)xid1 < (__force u32)xid2)
998 return XID_RB_LEFT;
999 return XID_RB_RIGHT;
1000}
1001
1002static struct rpc_rqst *
1003xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
1004{
1005 struct rb_node *n = xprt->recv_queue.rb_node;
1006 struct rpc_rqst *req;
1007
1008 while (n != NULL) {
1009 req = rb_entry(n, struct rpc_rqst, rq_recv);
1010 switch (xprt_xid_cmp(xid, req->rq_xid)) {
1011 case XID_RB_LEFT:
1012 n = n->rb_left;
1013 break;
1014 case XID_RB_RIGHT:
1015 n = n->rb_right;
1016 break;
1017 case XID_RB_EQUAL:
1018 return req;
1019 }
1020 }
1021 return NULL;
1022}
1023
1024static void
1025xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
1026{
1027 struct rb_node **p = &xprt->recv_queue.rb_node;
1028 struct rb_node *n = NULL;
1029 struct rpc_rqst *req;
1030
1031 while (*p != NULL) {
1032 n = *p;
1033 req = rb_entry(n, struct rpc_rqst, rq_recv);
1034 switch(xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
1035 case XID_RB_LEFT:
1036 p = &n->rb_left;
1037 break;
1038 case XID_RB_RIGHT:
1039 p = &n->rb_right;
1040 break;
1041 case XID_RB_EQUAL:
1042 WARN_ON_ONCE(new != req);
1043 return;
1044 }
1045 }
1046 rb_link_node(&new->rq_recv, n, p);
1047 rb_insert_color(&new->rq_recv, &xprt->recv_queue);
1048}
1049
1050static void
1051xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
1052{
1053 rb_erase(&req->rq_recv, &xprt->recv_queue);
1054}
1055
1056/**
1057 * xprt_lookup_rqst - find an RPC request corresponding to an XID
1058 * @xprt: transport on which the original request was transmitted
1059 * @xid: RPC XID of incoming reply
1060 *
1061 * Caller holds xprt->queue_lock.
1062 */
1063struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
1064{
1065 struct rpc_rqst *entry;
1066
1067 entry = xprt_request_rb_find(xprt, xid);
1068 if (entry != NULL) {
1069 trace_xprt_lookup_rqst(xprt, xid, 0);
1070 entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
1071 return entry;
1072 }
1073
1074 dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
1075 ntohl(xid));
1076 trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
1077 xprt->stat.bad_xids++;
1078 return NULL;
1079}
1080EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
1081
1082static bool
1083xprt_is_pinned_rqst(struct rpc_rqst *req)
1084{
1085 return atomic_read(&req->rq_pin) != 0;
1086}
1087
1088/**
1089 * xprt_pin_rqst - Pin a request on the transport receive list
1090 * @req: Request to pin
1091 *
1092 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
1093 * so should be holding xprt->queue_lock.
1094 */
1095void xprt_pin_rqst(struct rpc_rqst *req)
1096{
1097 atomic_inc(&req->rq_pin);
1098}
1099EXPORT_SYMBOL_GPL(xprt_pin_rqst);
1100
1101/**
1102 * xprt_unpin_rqst - Unpin a request on the transport receive list
1103 * @req: Request to pin
1104 *
1105 * Caller should be holding xprt->queue_lock.
1106 */
1107void xprt_unpin_rqst(struct rpc_rqst *req)
1108{
1109 if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
1110 atomic_dec(&req->rq_pin);
1111 return;
1112 }
1113 if (atomic_dec_and_test(&req->rq_pin))
1114 wake_up_var(&req->rq_pin);
1115}
1116EXPORT_SYMBOL_GPL(xprt_unpin_rqst);
1117
1118static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
1119{
1120 wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
1121}
1122
1123static bool
1124xprt_request_data_received(struct rpc_task *task)
1125{
1126 return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
1127 READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
1128}
1129
1130static bool
1131xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
1132{
1133 return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
1134 READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
1135}
1136
1137/**
1138 * xprt_request_enqueue_receive - Add an request to the receive queue
1139 * @task: RPC task
1140 *
1141 */
1142int
1143xprt_request_enqueue_receive(struct rpc_task *task)
1144{
1145 struct rpc_rqst *req = task->tk_rqstp;
1146 struct rpc_xprt *xprt = req->rq_xprt;
1147 int ret;
1148
1149 if (!xprt_request_need_enqueue_receive(task, req))
1150 return 0;
1151
1152 ret = xprt_request_prepare(task->tk_rqstp, &req->rq_rcv_buf);
1153 if (ret)
1154 return ret;
1155 spin_lock(&xprt->queue_lock);
1156
1157 /* Update the softirq receive buffer */
1158 memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
1159 sizeof(req->rq_private_buf));
1160
1161 /* Add request to the receive list */
1162 xprt_request_rb_insert(xprt, req);
1163 set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
1164 spin_unlock(&xprt->queue_lock);
1165
1166 /* Turn off autodisconnect */
1167 del_timer_sync(&xprt->timer);
1168 return 0;
1169}
1170
1171/**
1172 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
1173 * @task: RPC task
1174 *
1175 * Caller must hold xprt->queue_lock.
1176 */
1177static void
1178xprt_request_dequeue_receive_locked(struct rpc_task *task)
1179{
1180 struct rpc_rqst *req = task->tk_rqstp;
1181
1182 if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
1183 xprt_request_rb_remove(req->rq_xprt, req);
1184}
1185
1186/**
1187 * xprt_update_rtt - Update RPC RTT statistics
1188 * @task: RPC request that recently completed
1189 *
1190 * Caller holds xprt->queue_lock.
1191 */
1192void xprt_update_rtt(struct rpc_task *task)
1193{
1194 struct rpc_rqst *req = task->tk_rqstp;
1195 struct rpc_rtt *rtt = task->tk_client->cl_rtt;
1196 unsigned int timer = task->tk_msg.rpc_proc->p_timer;
1197 long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));
1198
1199 if (timer) {
1200 if (req->rq_ntrans == 1)
1201 rpc_update_rtt(rtt, timer, m);
1202 rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
1203 }
1204}
1205EXPORT_SYMBOL_GPL(xprt_update_rtt);
1206
1207/**
1208 * xprt_complete_rqst - called when reply processing is complete
1209 * @task: RPC request that recently completed
1210 * @copied: actual number of bytes received from the transport
1211 *
1212 * Caller holds xprt->queue_lock.
1213 */
1214void xprt_complete_rqst(struct rpc_task *task, int copied)
1215{
1216 struct rpc_rqst *req = task->tk_rqstp;
1217 struct rpc_xprt *xprt = req->rq_xprt;
1218
1219 xprt->stat.recvs++;
1220
1221 xdr_free_bvec(&req->rq_rcv_buf);
1222 req->rq_private_buf.bvec = NULL;
1223 req->rq_private_buf.len = copied;
1224 /* Ensure all writes are done before we update */
1225 /* req->rq_reply_bytes_recvd */
1226 smp_wmb();
1227 req->rq_reply_bytes_recvd = copied;
1228 xprt_request_dequeue_receive_locked(task);
1229 rpc_wake_up_queued_task(&xprt->pending, task);
1230}
1231EXPORT_SYMBOL_GPL(xprt_complete_rqst);
1232
1233static void xprt_timer(struct rpc_task *task)
1234{
1235 struct rpc_rqst *req = task->tk_rqstp;
1236 struct rpc_xprt *xprt = req->rq_xprt;
1237
1238 if (task->tk_status != -ETIMEDOUT)
1239 return;
1240
1241 trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
1242 if (!req->rq_reply_bytes_recvd) {
1243 if (xprt->ops->timer)
1244 xprt->ops->timer(xprt, task);
1245 } else
1246 task->tk_status = 0;
1247}
1248
1249/**
1250 * xprt_wait_for_reply_request_def - wait for reply
1251 * @task: pointer to rpc_task
1252 *
1253 * Set a request's retransmit timeout based on the transport's
1254 * default timeout parameters. Used by transports that don't adjust
1255 * the retransmit timeout based on round-trip time estimation,
1256 * and put the task to sleep on the pending queue.
1257 */
1258void xprt_wait_for_reply_request_def(struct rpc_task *task)
1259{
1260 struct rpc_rqst *req = task->tk_rqstp;
1261
1262 rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
1263 xprt_request_timeout(req));
1264}
1265EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);
1266
1267/**
1268 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
1269 * @task: pointer to rpc_task
1270 *
1271 * Set a request's retransmit timeout using the RTT estimator,
1272 * and put the task to sleep on the pending queue.
1273 */
1274void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
1275{
1276 int timer = task->tk_msg.rpc_proc->p_timer;
1277 struct rpc_clnt *clnt = task->tk_client;
1278 struct rpc_rtt *rtt = clnt->cl_rtt;
1279 struct rpc_rqst *req = task->tk_rqstp;
1280 unsigned long max_timeout = clnt->cl_timeout->to_maxval;
1281 unsigned long timeout;
1282
1283 timeout = rpc_calc_rto(rtt, timer);
1284 timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
1285 if (timeout > max_timeout || timeout == 0)
1286 timeout = max_timeout;
1287 rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
1288 jiffies + timeout);
1289}
1290EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);
1291
1292/**
1293 * xprt_request_wait_receive - wait for the reply to an RPC request
1294 * @task: RPC task about to send a request
1295 *
1296 */
1297void xprt_request_wait_receive(struct rpc_task *task)
1298{
1299 struct rpc_rqst *req = task->tk_rqstp;
1300 struct rpc_xprt *xprt = req->rq_xprt;
1301
1302 if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
1303 return;
1304 /*
1305 * Sleep on the pending queue if we're expecting a reply.
1306 * The spinlock ensures atomicity between the test of
1307 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
1308 */
1309 spin_lock(&xprt->queue_lock);
1310 if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
1311 xprt->ops->wait_for_reply_request(task);
1312 /*
1313 * Send an extra queue wakeup call if the
1314 * connection was dropped in case the call to
1315 * rpc_sleep_on() raced.
1316 */
1317 if (xprt_request_retransmit_after_disconnect(task))
1318 rpc_wake_up_queued_task_set_status(&xprt->pending,
1319 task, -ENOTCONN);
1320 }
1321 spin_unlock(&xprt->queue_lock);
1322}
1323
1324static bool
1325xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
1326{
1327 return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
1328}
1329
1330/**
1331 * xprt_request_enqueue_transmit - queue a task for transmission
1332 * @task: pointer to rpc_task
1333 *
1334 * Add a task to the transmission queue.
1335 */
1336void
1337xprt_request_enqueue_transmit(struct rpc_task *task)
1338{
1339 struct rpc_rqst *pos, *req = task->tk_rqstp;
1340 struct rpc_xprt *xprt = req->rq_xprt;
1341 int ret;
1342
1343 if (xprt_request_need_enqueue_transmit(task, req)) {
1344 ret = xprt_request_prepare(task->tk_rqstp, &req->rq_snd_buf);
1345 if (ret) {
1346 task->tk_status = ret;
1347 return;
1348 }
1349 req->rq_bytes_sent = 0;
1350 spin_lock(&xprt->queue_lock);
1351 /*
1352 * Requests that carry congestion control credits are added
1353 * to the head of the list to avoid starvation issues.
1354 */
1355 if (req->rq_cong) {
1356 xprt_clear_congestion_window_wait(xprt);
1357 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1358 if (pos->rq_cong)
1359 continue;
1360 /* Note: req is added _before_ pos */
1361 list_add_tail(&req->rq_xmit, &pos->rq_xmit);
1362 INIT_LIST_HEAD(&req->rq_xmit2);
1363 goto out;
1364 }
1365 } else if (!req->rq_seqno) {
1366 list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1367 if (pos->rq_task->tk_owner != task->tk_owner)
1368 continue;
1369 list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
1370 INIT_LIST_HEAD(&req->rq_xmit);
1371 goto out;
1372 }
1373 }
1374 list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
1375 INIT_LIST_HEAD(&req->rq_xmit2);
1376out:
1377 atomic_long_inc(&xprt->xmit_queuelen);
1378 set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
1379 spin_unlock(&xprt->queue_lock);
1380 }
1381}
1382
1383/**
1384 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
1385 * @task: pointer to rpc_task
1386 *
1387 * Remove a task from the transmission queue
1388 * Caller must hold xprt->queue_lock
1389 */
1390static void
1391xprt_request_dequeue_transmit_locked(struct rpc_task *task)
1392{
1393 struct rpc_rqst *req = task->tk_rqstp;
1394
1395 if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1396 return;
1397 if (!list_empty(&req->rq_xmit)) {
1398 list_del(&req->rq_xmit);
1399 if (!list_empty(&req->rq_xmit2)) {
1400 struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
1401 struct rpc_rqst, rq_xmit2);
1402 list_del(&req->rq_xmit2);
1403 list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
1404 }
1405 } else
1406 list_del(&req->rq_xmit2);
1407 atomic_long_dec(&req->rq_xprt->xmit_queuelen);
1408 xdr_free_bvec(&req->rq_snd_buf);
1409}
1410
1411/**
1412 * xprt_request_dequeue_transmit - remove a task from the transmission queue
1413 * @task: pointer to rpc_task
1414 *
1415 * Remove a task from the transmission queue
1416 */
1417static void
1418xprt_request_dequeue_transmit(struct rpc_task *task)
1419{
1420 struct rpc_rqst *req = task->tk_rqstp;
1421 struct rpc_xprt *xprt = req->rq_xprt;
1422
1423 spin_lock(&xprt->queue_lock);
1424 xprt_request_dequeue_transmit_locked(task);
1425 spin_unlock(&xprt->queue_lock);
1426}
1427
1428/**
1429 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
1430 * @task: pointer to rpc_task
1431 *
1432 * Remove a task from the transmit and receive queues, and ensure that
1433 * it is not pinned by the receive work item.
1434 */
1435void
1436xprt_request_dequeue_xprt(struct rpc_task *task)
1437{
1438 struct rpc_rqst *req = task->tk_rqstp;
1439 struct rpc_xprt *xprt = req->rq_xprt;
1440
1441 if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
1442 test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
1443 xprt_is_pinned_rqst(req)) {
1444 spin_lock(&xprt->queue_lock);
1445 while (xprt_is_pinned_rqst(req)) {
1446 set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1447 spin_unlock(&xprt->queue_lock);
1448 xprt_wait_on_pinned_rqst(req);
1449 spin_lock(&xprt->queue_lock);
1450 clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1451 }
1452 xprt_request_dequeue_transmit_locked(task);
1453 xprt_request_dequeue_receive_locked(task);
1454 spin_unlock(&xprt->queue_lock);
1455 xdr_free_bvec(&req->rq_rcv_buf);
1456 }
1457}
1458
1459/**
1460 * xprt_request_prepare - prepare an encoded request for transport
1461 * @req: pointer to rpc_rqst
1462 * @buf: pointer to send/rcv xdr_buf
1463 *
1464 * Calls into the transport layer to do whatever is needed to prepare
1465 * the request for transmission or receive.
1466 * Returns error, or zero.
1467 */
1468static int
1469xprt_request_prepare(struct rpc_rqst *req, struct xdr_buf *buf)
1470{
1471 struct rpc_xprt *xprt = req->rq_xprt;
1472
1473 if (xprt->ops->prepare_request)
1474 return xprt->ops->prepare_request(req, buf);
1475 return 0;
1476}
1477
1478/**
1479 * xprt_request_need_retransmit - Test if a task needs retransmission
1480 * @task: pointer to rpc_task
1481 *
1482 * Test for whether a connection breakage requires the task to retransmit
1483 */
1484bool
1485xprt_request_need_retransmit(struct rpc_task *task)
1486{
1487 return xprt_request_retransmit_after_disconnect(task);
1488}
1489
1490/**
1491 * xprt_prepare_transmit - reserve the transport before sending a request
1492 * @task: RPC task about to send a request
1493 *
1494 */
1495bool xprt_prepare_transmit(struct rpc_task *task)
1496{
1497 struct rpc_rqst *req = task->tk_rqstp;
1498 struct rpc_xprt *xprt = req->rq_xprt;
1499
1500 if (!xprt_lock_write(xprt, task)) {
1501 /* Race breaker: someone may have transmitted us */
1502 if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1503 rpc_wake_up_queued_task_set_status(&xprt->sending,
1504 task, 0);
1505 return false;
1506
1507 }
1508 if (atomic_read(&xprt->swapper))
1509 /* This will be clear in __rpc_execute */
1510 current->flags |= PF_MEMALLOC;
1511 return true;
1512}
1513
1514void xprt_end_transmit(struct rpc_task *task)
1515{
1516 struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
1517
1518 xprt_inject_disconnect(xprt);
1519 xprt_release_write(xprt, task);
1520}
1521
1522/**
1523 * xprt_request_transmit - send an RPC request on a transport
1524 * @req: pointer to request to transmit
1525 * @snd_task: RPC task that owns the transport lock
1526 *
1527 * This performs the transmission of a single request.
1528 * Note that if the request is not the same as snd_task, then it
1529 * does need to be pinned.
1530 * Returns '0' on success.
1531 */
1532static int
1533xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
1534{
1535 struct rpc_xprt *xprt = req->rq_xprt;
1536 struct rpc_task *task = req->rq_task;
1537 unsigned int connect_cookie;
1538 int is_retrans = RPC_WAS_SENT(task);
1539 int status;
1540
1541 if (!req->rq_bytes_sent) {
1542 if (xprt_request_data_received(task)) {
1543 status = 0;
1544 goto out_dequeue;
1545 }
1546 /* Verify that our message lies in the RPCSEC_GSS window */
1547 if (rpcauth_xmit_need_reencode(task)) {
1548 status = -EBADMSG;
1549 goto out_dequeue;
1550 }
1551 if (RPC_SIGNALLED(task)) {
1552 status = -ERESTARTSYS;
1553 goto out_dequeue;
1554 }
1555 }
1556
1557 /*
1558 * Update req->rq_ntrans before transmitting to avoid races with
1559 * xprt_update_rtt(), which needs to know that it is recording a
1560 * reply to the first transmission.
1561 */
1562 req->rq_ntrans++;
1563
1564 trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
1565 connect_cookie = xprt->connect_cookie;
1566 status = xprt->ops->send_request(req);
1567 if (status != 0) {
1568 req->rq_ntrans--;
1569 trace_xprt_transmit(req, status);
1570 return status;
1571 }
1572
1573 if (is_retrans) {
1574 task->tk_client->cl_stats->rpcretrans++;
1575 trace_xprt_retransmit(req);
1576 }
1577
1578 xprt_inject_disconnect(xprt);
1579
1580 task->tk_flags |= RPC_TASK_SENT;
1581 spin_lock(&xprt->transport_lock);
1582
1583 xprt->stat.sends++;
1584 xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
1585 xprt->stat.bklog_u += xprt->backlog.qlen;
1586 xprt->stat.sending_u += xprt->sending.qlen;
1587 xprt->stat.pending_u += xprt->pending.qlen;
1588 spin_unlock(&xprt->transport_lock);
1589
1590 req->rq_connect_cookie = connect_cookie;
1591out_dequeue:
1592 trace_xprt_transmit(req, status);
1593 xprt_request_dequeue_transmit(task);
1594 rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
1595 return status;
1596}
1597
1598/**
1599 * xprt_transmit - send an RPC request on a transport
1600 * @task: controlling RPC task
1601 *
1602 * Attempts to drain the transmit queue. On exit, either the transport
1603 * signalled an error that needs to be handled before transmission can
1604 * resume, or @task finished transmitting, and detected that it already
1605 * received a reply.
1606 */
1607void
1608xprt_transmit(struct rpc_task *task)
1609{
1610 struct rpc_rqst *next, *req = task->tk_rqstp;
1611 struct rpc_xprt *xprt = req->rq_xprt;
1612 int status;
1613
1614 spin_lock(&xprt->queue_lock);
1615 for (;;) {
1616 next = list_first_entry_or_null(&xprt->xmit_queue,
1617 struct rpc_rqst, rq_xmit);
1618 if (!next)
1619 break;
1620 xprt_pin_rqst(next);
1621 spin_unlock(&xprt->queue_lock);
1622 status = xprt_request_transmit(next, task);
1623 if (status == -EBADMSG && next != req)
1624 status = 0;
1625 spin_lock(&xprt->queue_lock);
1626 xprt_unpin_rqst(next);
1627 if (status < 0) {
1628 if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1629 task->tk_status = status;
1630 break;
1631 }
1632 /* Was @task transmitted, and has it received a reply? */
1633 if (xprt_request_data_received(task) &&
1634 !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1635 break;
1636 cond_resched_lock(&xprt->queue_lock);
1637 }
1638 spin_unlock(&xprt->queue_lock);
1639}
1640
1641static void xprt_complete_request_init(struct rpc_task *task)
1642{
1643 if (task->tk_rqstp)
1644 xprt_request_init(task);
1645}
1646
1647void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
1648{
1649 set_bit(XPRT_CONGESTED, &xprt->state);
1650 rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
1651}
1652EXPORT_SYMBOL_GPL(xprt_add_backlog);
1653
1654static bool __xprt_set_rq(struct rpc_task *task, void *data)
1655{
1656 struct rpc_rqst *req = data;
1657
1658 if (task->tk_rqstp == NULL) {
1659 memset(req, 0, sizeof(*req)); /* mark unused */
1660 task->tk_rqstp = req;
1661 return true;
1662 }
1663 return false;
1664}
1665
1666bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
1667{
1668 if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
1669 clear_bit(XPRT_CONGESTED, &xprt->state);
1670 return false;
1671 }
1672 return true;
1673}
1674EXPORT_SYMBOL_GPL(xprt_wake_up_backlog);

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		xprt_add_backlog(xprt, task);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	++xprt->num_reqs;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(*req), rpc_task_gfp_mask());
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	--xprt->num_reqs;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC: dynamic allocation of request slot "
			"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC: waiting for request slot\n");
		fallthrough;
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);
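
/*
 * Summary of the slot allocation paths above. A task first tries the
 * free list, then a dynamic allocation, and otherwise sleeps on the
 * backlog queue; tk_status reports the outcome:
 *
 *	0	- tk_rqstp points to a usable slot
 *	-ENOMEM	- dynamic allocation failed; retry later
 *	-EAGAIN	- no slot available; the task has been queued
 */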

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_wake_up_backlog(xprt, req) &&
	    !xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;

	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

static DEFINE_IDA(rpc_xprt_ids);

void xprt_cleanup_ids(void)
{
	ida_destroy(&rpc_xprt_ids);
}

static int xprt_alloc_id(struct rpc_xprt *xprt)
{
	int id;

	id = ida_alloc(&rpc_xprt_ids, GFP_KERNEL);
	if (id < 0)
		return id;

	xprt->id = id;
	return 0;
}

static void xprt_free_id(struct rpc_xprt *xprt)
{
	ida_free(&rpc_xprt_ids, xprt->id);
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_alloc_id(xprt);
	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	xprt->max_reqs = max_t(unsigned int, max_alloc, num_prealloc);
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
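
/*
 * A minimal usage sketch (not taken from this file): a transport
 * implementation typically embeds struct rpc_xprt at the start of its
 * private structure and passes the enclosing size here. Assuming the
 * socket transport's struct sock_xprt and illustrative tunables:
 *
 *	struct sock_xprt *new;
 *	struct rpc_xprt *xprt;
 *
 *	xprt = xprt_alloc(args->net, sizeof(*new),
 *			  slot_table_entries, max_slot_table_entries);
 *	if (xprt == NULL)
 *		return ERR_PTR(-ENOMEM);
 *	new = container_of(xprt, struct sock_xprt, xprt);
 */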

void xprt_free(struct rpc_xprt *xprt)
{
	put_net_track(xprt->xprt_net, &xprt->ns_tracker);
	xprt_free_all_slots(xprt);
	xprt_free_id(xprt);
	rpc_sysfs_xprt_destroy(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

static void
xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
{
	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
}

static __be32
xprt_alloc_xid(struct rpc_xprt *xprt)
{
	__be32 xid;

	spin_lock(&xprt->reserve_lock);
	xid = (__force __be32)xprt->xid++;
	spin_unlock(&xprt->reserve_lock);
	return xid;
}

static void
xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = get_random_u32();
}

static void
xprt_request_init(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	req->rq_task = task;
	req->rq_xprt = xprt;
	req->rq_buffer = NULL;
	req->rq_xid = xprt_alloc_xid(xprt);
	xprt_init_connect_cookie(req, xprt);
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_snd_buf.bvec = NULL;
	req->rq_rcv_buf.bvec = NULL;
	req->rq_release_snd_buf = NULL;
	xprt_init_majortimeo(task, req);

	trace_xprt_reserve(req);
}

static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt->ops->alloc_slot(xprt, task);
	if (task->tk_rqstp != NULL)
		xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt_do_reserve(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference from xprt_reserve() is that here the
 * value of the XPRT_CONGESTED flag is ignored.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	xprt_do_reserve(xprt, task);
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	xprt_request_dequeue_xprt(task);
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt_schedule_autodisconnect(xprt);
	spin_unlock(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	task->tk_rqstp = NULL;
	if (likely(!bc_prealloc(req)))
		xprt->ops->free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

#ifdef CONFIG_SUNRPC_BACKCHANNEL
void
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
{
	struct xdr_buf *xbufp = &req->rq_snd_buf;

	task->tk_rqstp = req;
	req->rq_task = task;
	xprt_init_connect_cookie(req, req->rq_xprt);
	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
		xbufp->tail[0].iov_len;
}
#endif

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->queue_lock);

	INIT_LIST_HEAD(&xprt->free);
	xprt->recv_queue = RB_ROOT;
	INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net_track(net, &xprt->ns_tracker, GFP_KERNEL);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	const struct xprt_class *t;

	t = xprt_class_find_by_ident(args->ident);
	if (!t) {
		dprintk("RPC: transport (%d) not supported\n", args->ident);
		return ERR_PTR(-EIO);
	}

	xprt = t->setup(args);
	xprt_class_release(t);

	if (IS_ERR(xprt))
		goto out;
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
	else
		timer_setup(&xprt->timer, NULL, 0);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	trace_xprt_create(xprt);
out:
	return xprt;
}

static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	trace_xprt_destroy(xprt);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);
	/*
	 * Destroy any existing back channel
	 */
	xprt_destroy_backchannel(xprt, UINT_MAX);

	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	/*
	 * xprt_schedule_autodisconnect() can run after XPRT_LOCKED
	 * is cleared. We use ->transport_lock to ensure the mod_timer()
	 * can only run *before* del_timer_sync(), never after.
	 */
	spin_lock(&xprt->transport_lock);
	del_timer_sync(&xprt->timer);
	spin_unlock(&xprt->transport_lock);

	/*
	 * Destroy sockets etc from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);
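
/*
 * Reference counting sketch: xprt_get() succeeds only while the kref
 * is non-zero, so callers that looked the xprt up under RCU must check
 * for NULL; the final xprt_put() invokes xprt_destroy(), which tears
 * the transport down from a workqueue:
 *
 *	xprt = xprt_get(candidate);	(may return NULL)
 *	if (xprt != NULL) {
 *		...
 *		xprt_put(xprt);
 *	}
 */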

void xprt_set_offline_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps)
{
	if (!test_and_set_bit(XPRT_OFFLINE, &xprt->state)) {
		spin_lock(&xps->xps_lock);
		xps->xps_nactive--;
		spin_unlock(&xps->xps_lock);
	}
}

void xprt_set_online_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps)
{
	if (test_and_clear_bit(XPRT_OFFLINE, &xprt->state)) {
		spin_lock(&xps->xps_lock);
		xps->xps_nactive++;
		spin_unlock(&xps->xps_lock);
	}
}

void xprt_delete_locked(struct rpc_xprt *xprt, struct rpc_xprt_switch *xps)
{
	if (test_and_set_bit(XPRT_REMOVE, &xprt->state))
		return;

	xprt_force_disconnect(xprt);
	if (!test_bit(XPRT_CONNECTED, &xprt->state))
		return;

	if (!xprt->sending.qlen && !xprt->pending.qlen &&
	    !xprt->backlog.qlen && !atomic_long_read(&xprt->queuelen))
		rpc_xprt_switch_remove_xprt(xps, xprt, true);
}