Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/net/sunrpc/xprtsock.c
4 *
5 * Client-side transport implementation for sockets.
6 *
7 * TCP callback races fixes (C) 1998 Red Hat
8 * TCP send fixes (C) 1998 Red Hat
9 * TCP NFS related read + write fixes
10 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
11 *
12 * Rewrite of larges part of the code in order to stabilize TCP stuff.
13 * Fix behaviour when socket buffer is full.
14 * (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
15 *
16 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
17 *
18 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
19 * <gilles.quillard@bull.net>
20 */
21
22#include <linux/types.h>
23#include <linux/string.h>
24#include <linux/slab.h>
25#include <linux/module.h>
26#include <linux/capability.h>
27#include <linux/pagemap.h>
28#include <linux/errno.h>
29#include <linux/socket.h>
30#include <linux/in.h>
31#include <linux/net.h>
32#include <linux/mm.h>
33#include <linux/un.h>
34#include <linux/udp.h>
35#include <linux/tcp.h>
36#include <linux/sunrpc/clnt.h>
37#include <linux/sunrpc/addr.h>
38#include <linux/sunrpc/sched.h>
39#include <linux/sunrpc/svcsock.h>
40#include <linux/sunrpc/xprtsock.h>
41#include <linux/file.h>
42#ifdef CONFIG_SUNRPC_BACKCHANNEL
43#include <linux/sunrpc/bc_xprt.h>
44#endif
45
46#include <net/sock.h>
47#include <net/checksum.h>
48#include <net/udp.h>
49#include <net/tcp.h>
50
51#include <trace/events/sunrpc.h>
52
53#include "sunrpc.h"
54
55#define RPC_TCP_READ_CHUNK_SZ (3*512*1024)
56
57static void xs_close(struct rpc_xprt *xprt);
58static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
59 struct socket *sock);
60
61/*
62 * xprtsock tunables
63 */
64static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
65static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
66static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;
67
68static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
69static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
70
71#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
72
73#define XS_TCP_LINGER_TO (15U * HZ)
74static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
75
76/*
77 * We can register our own files under /proc/sys/sunrpc by
78 * calling register_sysctl_table() again. The files in that
79 * directory become the union of all files registered there.
80 *
81 * We simply need to make sure that we don't collide with
82 * someone else's file names!
83 */
84
85static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
86static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
87static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
88static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
89static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
90
91static struct ctl_table_header *sunrpc_table_header;
92
93/*
94 * FIXME: changing the UDP slot table size should also resize the UDP
95 * socket buffers for existing UDP transports
96 */
97static struct ctl_table xs_tunables_table[] = {
98 {
99 .procname = "udp_slot_table_entries",
100 .data = &xprt_udp_slot_table_entries,
101 .maxlen = sizeof(unsigned int),
102 .mode = 0644,
103 .proc_handler = proc_dointvec_minmax,
104 .extra1 = &min_slot_table_size,
105 .extra2 = &max_slot_table_size
106 },
107 {
108 .procname = "tcp_slot_table_entries",
109 .data = &xprt_tcp_slot_table_entries,
110 .maxlen = sizeof(unsigned int),
111 .mode = 0644,
112 .proc_handler = proc_dointvec_minmax,
113 .extra1 = &min_slot_table_size,
114 .extra2 = &max_slot_table_size
115 },
116 {
117 .procname = "tcp_max_slot_table_entries",
118 .data = &xprt_max_tcp_slot_table_entries,
119 .maxlen = sizeof(unsigned int),
120 .mode = 0644,
121 .proc_handler = proc_dointvec_minmax,
122 .extra1 = &min_slot_table_size,
123 .extra2 = &max_tcp_slot_table_limit
124 },
125 {
126 .procname = "min_resvport",
127 .data = &xprt_min_resvport,
128 .maxlen = sizeof(unsigned int),
129 .mode = 0644,
130 .proc_handler = proc_dointvec_minmax,
131 .extra1 = &xprt_min_resvport_limit,
132 .extra2 = &xprt_max_resvport
133 },
134 {
135 .procname = "max_resvport",
136 .data = &xprt_max_resvport,
137 .maxlen = sizeof(unsigned int),
138 .mode = 0644,
139 .proc_handler = proc_dointvec_minmax,
140 .extra1 = &xprt_min_resvport,
141 .extra2 = &xprt_max_resvport_limit
142 },
143 {
144 .procname = "tcp_fin_timeout",
145 .data = &xs_tcp_fin_timeout,
146 .maxlen = sizeof(xs_tcp_fin_timeout),
147 .mode = 0644,
148 .proc_handler = proc_dointvec_jiffies,
149 },
150 { },
151};
152
153static struct ctl_table sunrpc_table[] = {
154 {
155 .procname = "sunrpc",
156 .mode = 0555,
157 .child = xs_tunables_table
158 },
159 { },
160};
161
162#endif
163
164/*
165 * Wait duration for a reply from the RPC portmapper.
166 */
167#define XS_BIND_TO (60U * HZ)
168
169/*
170 * Delay if a UDP socket connect error occurs. This is most likely some
171 * kind of resource problem on the local host.
172 */
173#define XS_UDP_REEST_TO (2U * HZ)
174
175/*
176 * The reestablish timeout allows clients to delay for a bit before attempting
177 * to reconnect to a server that just dropped our connection.
178 *
179 * We implement an exponential backoff when trying to reestablish a TCP
180 * transport connection with the server. Some servers like to drop a TCP
181 * connection when they are overworked, so we start with a short timeout and
182 * increase over time if the server is down or not responding.
183 */
184#define XS_TCP_INIT_REEST_TO (3U * HZ)
185
186/*
187 * TCP idle timeout; client drops the transport socket if it is idle
188 * for this long. Note that we also timeout UDP sockets to prevent
189 * holding port numbers when there is no RPC traffic.
190 */
191#define XS_IDLE_DISC_TO (5U * 60 * HZ)
192
193#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
194# undef RPC_DEBUG_DATA
195# define RPCDBG_FACILITY RPCDBG_TRANS
196#endif
197
198#ifdef RPC_DEBUG_DATA
199static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
200{
201 u8 *buf = (u8 *) packet;
202 int j;
203
204 dprintk("RPC: %s\n", msg);
205 for (j = 0; j < count && j < 128; j += 4) {
206 if (!(j & 31)) {
207 if (j)
208 dprintk("\n");
209 dprintk("0x%04x ", j);
210 }
211 dprintk("%02x%02x%02x%02x ",
212 buf[j], buf[j+1], buf[j+2], buf[j+3]);
213 }
214 dprintk("\n");
215}
216#else
217static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
218{
219 /* NOP */
220}
221#endif
222
223static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
224{
225 return (struct rpc_xprt *) sk->sk_user_data;
226}
227
228static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
229{
230 return (struct sockaddr *) &xprt->addr;
231}
232
233static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
234{
235 return (struct sockaddr_un *) &xprt->addr;
236}
237
238static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
239{
240 return (struct sockaddr_in *) &xprt->addr;
241}
242
243static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
244{
245 return (struct sockaddr_in6 *) &xprt->addr;
246}
247
248static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
249{
250 struct sockaddr *sap = xs_addr(xprt);
251 struct sockaddr_in6 *sin6;
252 struct sockaddr_in *sin;
253 struct sockaddr_un *sun;
254 char buf[128];
255
256 switch (sap->sa_family) {
257 case AF_LOCAL:
258 sun = xs_addr_un(xprt);
259 strlcpy(buf, sun->sun_path, sizeof(buf));
260 xprt->address_strings[RPC_DISPLAY_ADDR] =
261 kstrdup(buf, GFP_KERNEL);
262 break;
263 case AF_INET:
264 (void)rpc_ntop(sap, buf, sizeof(buf));
265 xprt->address_strings[RPC_DISPLAY_ADDR] =
266 kstrdup(buf, GFP_KERNEL);
267 sin = xs_addr_in(xprt);
268 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
269 break;
270 case AF_INET6:
271 (void)rpc_ntop(sap, buf, sizeof(buf));
272 xprt->address_strings[RPC_DISPLAY_ADDR] =
273 kstrdup(buf, GFP_KERNEL);
274 sin6 = xs_addr_in6(xprt);
275 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
276 break;
277 default:
278 BUG();
279 }
280
281 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
282}
283
284static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
285{
286 struct sockaddr *sap = xs_addr(xprt);
287 char buf[128];
288
289 snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
290 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
291
292 snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
293 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
294}
295
296static void xs_format_peer_addresses(struct rpc_xprt *xprt,
297 const char *protocol,
298 const char *netid)
299{
300 xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
301 xprt->address_strings[RPC_DISPLAY_NETID] = netid;
302 xs_format_common_peer_addresses(xprt);
303 xs_format_common_peer_ports(xprt);
304}
305
306static void xs_update_peer_port(struct rpc_xprt *xprt)
307{
308 kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
309 kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
310
311 xs_format_common_peer_ports(xprt);
312}
313
314static void xs_free_peer_addresses(struct rpc_xprt *xprt)
315{
316 unsigned int i;
317
318 for (i = 0; i < RPC_DISPLAY_MAX; i++)
319 switch (i) {
320 case RPC_DISPLAY_PROTO:
321 case RPC_DISPLAY_NETID:
322 continue;
323 default:
324 kfree(xprt->address_strings[i]);
325 }
326}
327
328#define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
329
330static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
331{
332 struct msghdr msg = {
333 .msg_name = addr,
334 .msg_namelen = addrlen,
335 .msg_flags = XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
336 };
337 struct kvec iov = {
338 .iov_base = vec->iov_base + base,
339 .iov_len = vec->iov_len - base,
340 };
341
342 if (iov.iov_len != 0)
343 return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
344 return kernel_sendmsg(sock, &msg, NULL, 0, 0);
345}
346
347static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, bool zerocopy, int *sent_p)
348{
349 ssize_t (*do_sendpage)(struct socket *sock, struct page *page,
350 int offset, size_t size, int flags);
351 struct page **ppage;
352 unsigned int remainder;
353 int err;
354
355 remainder = xdr->page_len - base;
356 base += xdr->page_base;
357 ppage = xdr->pages + (base >> PAGE_SHIFT);
358 base &= ~PAGE_MASK;
359 do_sendpage = sock->ops->sendpage;
360 if (!zerocopy)
361 do_sendpage = sock_no_sendpage;
362 for(;;) {
363 unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
364 int flags = XS_SENDMSG_FLAGS;
365
366 remainder -= len;
367 if (more)
368 flags |= MSG_MORE;
369 if (remainder != 0)
370 flags |= MSG_SENDPAGE_NOTLAST | MSG_MORE;
371 err = do_sendpage(sock, *ppage, base, len, flags);
372 if (remainder == 0 || err != len)
373 break;
374 *sent_p += err;
375 ppage++;
376 base = 0;
377 }
378 if (err > 0) {
379 *sent_p += err;
380 err = 0;
381 }
382 return err;
383}
384
385/**
386 * xs_sendpages - write pages directly to a socket
387 * @sock: socket to send on
388 * @addr: UDP only -- address of destination
389 * @addrlen: UDP only -- length of destination address
390 * @xdr: buffer containing this request
391 * @base: starting position in the buffer
392 * @zerocopy: true if it is safe to use sendpage()
393 * @sent_p: return the total number of bytes successfully queued for sending
394 *
395 */
396static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, bool zerocopy, int *sent_p)
397{
398 unsigned int remainder = xdr->len - base;
399 int err = 0;
400 int sent = 0;
401
402 if (unlikely(!sock))
403 return -ENOTSOCK;
404
405 if (base != 0) {
406 addr = NULL;
407 addrlen = 0;
408 }
409
410 if (base < xdr->head[0].iov_len || addr != NULL) {
411 unsigned int len = xdr->head[0].iov_len - base;
412 remainder -= len;
413 err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
414 if (remainder == 0 || err != len)
415 goto out;
416 *sent_p += err;
417 base = 0;
418 } else
419 base -= xdr->head[0].iov_len;
420
421 if (base < xdr->page_len) {
422 unsigned int len = xdr->page_len - base;
423 remainder -= len;
424 err = xs_send_pagedata(sock, xdr, base, remainder != 0, zerocopy, &sent);
425 *sent_p += sent;
426 if (remainder == 0 || sent != len)
427 goto out;
428 base = 0;
429 } else
430 base -= xdr->page_len;
431
432 if (base >= xdr->tail[0].iov_len)
433 return 0;
434 err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
435out:
436 if (err > 0) {
437 *sent_p += err;
438 err = 0;
439 }
440 return err;
441}
442
443static void xs_nospace_callback(struct rpc_task *task)
444{
445 struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
446
447 transport->inet->sk_write_pending--;
448}
449
450/**
451 * xs_nospace - place task on wait queue if transmit was incomplete
452 * @task: task to put to sleep
453 *
454 */
455static int xs_nospace(struct rpc_task *task)
456{
457 struct rpc_rqst *req = task->tk_rqstp;
458 struct rpc_xprt *xprt = req->rq_xprt;
459 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
460 struct sock *sk = transport->inet;
461 int ret = -EAGAIN;
462
463 dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
464 task->tk_pid, req->rq_slen - req->rq_bytes_sent,
465 req->rq_slen);
466
467 /* Protect against races with write_space */
468 spin_lock_bh(&xprt->transport_lock);
469
470 /* Don't race with disconnect */
471 if (xprt_connected(xprt)) {
472 /* wait for more buffer space */
473 sk->sk_write_pending++;
474 xprt_wait_for_buffer_space(task, xs_nospace_callback);
475 } else
476 ret = -ENOTCONN;
477
478 spin_unlock_bh(&xprt->transport_lock);
479
480 /* Race breaker in case memory is freed before above code is called */
481 if (ret == -EAGAIN) {
482 struct socket_wq *wq;
483
484 rcu_read_lock();
485 wq = rcu_dereference(sk->sk_wq);
486 set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags);
487 rcu_read_unlock();
488
489 sk->sk_write_space(sk);
490 }
491 return ret;
492}
493
494/*
495 * Construct a stream transport record marker in @buf.
496 */
497static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
498{
499 u32 reclen = buf->len - sizeof(rpc_fraghdr);
500 rpc_fraghdr *base = buf->head[0].iov_base;
501 *base = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | reclen);
502}
503
504/**
505 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
506 * @task: RPC task that manages the state of an RPC request
507 *
508 * Return values:
509 * 0: The request has been sent
510 * EAGAIN: The socket was blocked, please call again later to
511 * complete the request
512 * ENOTCONN: Caller needs to invoke connect logic then call again
513 * other: Some other error occured, the request was not sent
514 */
515static int xs_local_send_request(struct rpc_task *task)
516{
517 struct rpc_rqst *req = task->tk_rqstp;
518 struct rpc_xprt *xprt = req->rq_xprt;
519 struct sock_xprt *transport =
520 container_of(xprt, struct sock_xprt, xprt);
521 struct xdr_buf *xdr = &req->rq_snd_buf;
522 int status;
523 int sent = 0;
524
525 xs_encode_stream_record_marker(&req->rq_snd_buf);
526
527 xs_pktdump("packet data:",
528 req->rq_svec->iov_base, req->rq_svec->iov_len);
529
530 req->rq_xtime = ktime_get();
531 status = xs_sendpages(transport->sock, NULL, 0, xdr, req->rq_bytes_sent,
532 true, &sent);
533 dprintk("RPC: %s(%u) = %d\n",
534 __func__, xdr->len - req->rq_bytes_sent, status);
535
536 if (status == -EAGAIN && sock_writeable(transport->inet))
537 status = -ENOBUFS;
538
539 if (likely(sent > 0) || status == 0) {
540 req->rq_bytes_sent += sent;
541 req->rq_xmit_bytes_sent += sent;
542 if (likely(req->rq_bytes_sent >= req->rq_slen)) {
543 req->rq_bytes_sent = 0;
544 return 0;
545 }
546 status = -EAGAIN;
547 }
548
549 switch (status) {
550 case -ENOBUFS:
551 break;
552 case -EAGAIN:
553 status = xs_nospace(task);
554 break;
555 default:
556 dprintk("RPC: sendmsg returned unrecognized error %d\n",
557 -status);
558 /* fall through */
559 case -EPIPE:
560 xs_close(xprt);
561 status = -ENOTCONN;
562 }
563
564 return status;
565}
566
567/**
568 * xs_udp_send_request - write an RPC request to a UDP socket
569 * @task: address of RPC task that manages the state of an RPC request
570 *
571 * Return values:
572 * 0: The request has been sent
573 * EAGAIN: The socket was blocked, please call again later to
574 * complete the request
575 * ENOTCONN: Caller needs to invoke connect logic then call again
576 * other: Some other error occurred, the request was not sent
577 */
578static int xs_udp_send_request(struct rpc_task *task)
579{
580 struct rpc_rqst *req = task->tk_rqstp;
581 struct rpc_xprt *xprt = req->rq_xprt;
582 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
583 struct xdr_buf *xdr = &req->rq_snd_buf;
584 int sent = 0;
585 int status;
586
587 xs_pktdump("packet data:",
588 req->rq_svec->iov_base,
589 req->rq_svec->iov_len);
590
591 if (!xprt_bound(xprt))
592 return -ENOTCONN;
593 req->rq_xtime = ktime_get();
594 status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen,
595 xdr, req->rq_bytes_sent, true, &sent);
596
597 dprintk("RPC: xs_udp_send_request(%u) = %d\n",
598 xdr->len - req->rq_bytes_sent, status);
599
600 /* firewall is blocking us, don't return -EAGAIN or we end up looping */
601 if (status == -EPERM)
602 goto process_status;
603
604 if (status == -EAGAIN && sock_writeable(transport->inet))
605 status = -ENOBUFS;
606
607 if (sent > 0 || status == 0) {
608 req->rq_xmit_bytes_sent += sent;
609 if (sent >= req->rq_slen)
610 return 0;
611 /* Still some bytes left; set up for a retry later. */
612 status = -EAGAIN;
613 }
614
615process_status:
616 switch (status) {
617 case -ENOTSOCK:
618 status = -ENOTCONN;
619 /* Should we call xs_close() here? */
620 break;
621 case -EAGAIN:
622 status = xs_nospace(task);
623 break;
624 case -ENETUNREACH:
625 case -ENOBUFS:
626 case -EPIPE:
627 case -ECONNREFUSED:
628 case -EPERM:
629 /* When the server has died, an ICMP port unreachable message
630 * prompts ECONNREFUSED. */
631 break;
632 default:
633 dprintk("RPC: sendmsg returned unrecognized error %d\n",
634 -status);
635 }
636
637 return status;
638}
639
640/**
641 * xs_tcp_send_request - write an RPC request to a TCP socket
642 * @task: address of RPC task that manages the state of an RPC request
643 *
644 * Return values:
645 * 0: The request has been sent
646 * EAGAIN: The socket was blocked, please call again later to
647 * complete the request
648 * ENOTCONN: Caller needs to invoke connect logic then call again
649 * other: Some other error occurred, the request was not sent
650 *
651 * XXX: In the case of soft timeouts, should we eventually give up
652 * if sendmsg is not able to make progress?
653 */
654static int xs_tcp_send_request(struct rpc_task *task)
655{
656 struct rpc_rqst *req = task->tk_rqstp;
657 struct rpc_xprt *xprt = req->rq_xprt;
658 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
659 struct xdr_buf *xdr = &req->rq_snd_buf;
660 bool zerocopy = true;
661 bool vm_wait = false;
662 int status;
663 int sent;
664
665 xs_encode_stream_record_marker(&req->rq_snd_buf);
666
667 xs_pktdump("packet data:",
668 req->rq_svec->iov_base,
669 req->rq_svec->iov_len);
670 /* Don't use zero copy if this is a resend. If the RPC call
671 * completes while the socket holds a reference to the pages,
672 * then we may end up resending corrupted data.
673 */
674 if (task->tk_flags & RPC_TASK_SENT)
675 zerocopy = false;
676
677 if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state))
678 xs_tcp_set_socket_timeouts(xprt, transport->sock);
679
680 /* Continue transmitting the packet/record. We must be careful
681 * to cope with writespace callbacks arriving _after_ we have
682 * called sendmsg(). */
683 req->rq_xtime = ktime_get();
684 while (1) {
685 sent = 0;
686 status = xs_sendpages(transport->sock, NULL, 0, xdr,
687 req->rq_bytes_sent, zerocopy, &sent);
688
689 dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
690 xdr->len - req->rq_bytes_sent, status);
691
692 /* If we've sent the entire packet, immediately
693 * reset the count of bytes sent. */
694 req->rq_bytes_sent += sent;
695 req->rq_xmit_bytes_sent += sent;
696 if (likely(req->rq_bytes_sent >= req->rq_slen)) {
697 req->rq_bytes_sent = 0;
698 return 0;
699 }
700
701 WARN_ON_ONCE(sent == 0 && status == 0);
702
703 if (status == -EAGAIN ) {
704 /*
705 * Return EAGAIN if we're sure we're hitting the
706 * socket send buffer limits.
707 */
708 if (test_bit(SOCK_NOSPACE, &transport->sock->flags))
709 break;
710 /*
711 * Did we hit a memory allocation failure?
712 */
713 if (sent == 0) {
714 status = -ENOBUFS;
715 if (vm_wait)
716 break;
717 /* Retry, knowing now that we're below the
718 * socket send buffer limit
719 */
720 vm_wait = true;
721 }
722 continue;
723 }
724 if (status < 0)
725 break;
726 vm_wait = false;
727 }
728
729 switch (status) {
730 case -ENOTSOCK:
731 status = -ENOTCONN;
732 /* Should we call xs_close() here? */
733 break;
734 case -EAGAIN:
735 status = xs_nospace(task);
736 break;
737 case -ECONNRESET:
738 case -ECONNREFUSED:
739 case -ENOTCONN:
740 case -EADDRINUSE:
741 case -ENOBUFS:
742 case -EPIPE:
743 break;
744 default:
745 dprintk("RPC: sendmsg returned unrecognized error %d\n",
746 -status);
747 }
748
749 return status;
750}
751
752/**
753 * xs_tcp_release_xprt - clean up after a tcp transmission
754 * @xprt: transport
755 * @task: rpc task
756 *
757 * This cleans up if an error causes us to abort the transmission of a request.
758 * In this case, the socket may need to be reset in order to avoid confusing
759 * the server.
760 */
761static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
762{
763 struct rpc_rqst *req;
764
765 if (task != xprt->snd_task)
766 return;
767 if (task == NULL)
768 goto out_release;
769 req = task->tk_rqstp;
770 if (req == NULL)
771 goto out_release;
772 if (req->rq_bytes_sent == 0)
773 goto out_release;
774 if (req->rq_bytes_sent == req->rq_snd_buf.len)
775 goto out_release;
776 set_bit(XPRT_CLOSE_WAIT, &xprt->state);
777out_release:
778 xprt_release_xprt(xprt, task);
779}
780
781static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
782{
783 transport->old_data_ready = sk->sk_data_ready;
784 transport->old_state_change = sk->sk_state_change;
785 transport->old_write_space = sk->sk_write_space;
786 transport->old_error_report = sk->sk_error_report;
787}
788
789static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
790{
791 sk->sk_data_ready = transport->old_data_ready;
792 sk->sk_state_change = transport->old_state_change;
793 sk->sk_write_space = transport->old_write_space;
794 sk->sk_error_report = transport->old_error_report;
795}
796
797static void xs_sock_reset_state_flags(struct rpc_xprt *xprt)
798{
799 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
800
801 clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
802}
803
804static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
805{
806 smp_mb__before_atomic();
807 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
808 clear_bit(XPRT_CLOSING, &xprt->state);
809 xs_sock_reset_state_flags(xprt);
810 smp_mb__after_atomic();
811}
812
813/**
814 * xs_error_report - callback to handle TCP socket state errors
815 * @sk: socket
816 *
817 * Note: we don't call sock_error() since there may be a rpc_task
818 * using the socket, and so we don't want to clear sk->sk_err.
819 */
820static void xs_error_report(struct sock *sk)
821{
822 struct rpc_xprt *xprt;
823 int err;
824
825 read_lock_bh(&sk->sk_callback_lock);
826 if (!(xprt = xprt_from_sock(sk)))
827 goto out;
828
829 err = -sk->sk_err;
830 if (err == 0)
831 goto out;
832 dprintk("RPC: xs_error_report client %p, error=%d...\n",
833 xprt, -err);
834 trace_rpc_socket_error(xprt, sk->sk_socket, err);
835 xprt_wake_pending_tasks(xprt, err);
836 out:
837 read_unlock_bh(&sk->sk_callback_lock);
838}
839
840static void xs_reset_transport(struct sock_xprt *transport)
841{
842 struct socket *sock = transport->sock;
843 struct sock *sk = transport->inet;
844 struct rpc_xprt *xprt = &transport->xprt;
845
846 if (sk == NULL)
847 return;
848
849 if (atomic_read(&transport->xprt.swapper))
850 sk_clear_memalloc(sk);
851
852 kernel_sock_shutdown(sock, SHUT_RDWR);
853
854 mutex_lock(&transport->recv_mutex);
855 write_lock_bh(&sk->sk_callback_lock);
856 transport->inet = NULL;
857 transport->sock = NULL;
858
859 sk->sk_user_data = NULL;
860
861 xs_restore_old_callbacks(transport, sk);
862 xprt_clear_connected(xprt);
863 write_unlock_bh(&sk->sk_callback_lock);
864 xs_sock_reset_connection_flags(xprt);
865 mutex_unlock(&transport->recv_mutex);
866
867 trace_rpc_socket_close(xprt, sock);
868 sock_release(sock);
869}
870
871/**
872 * xs_close - close a socket
873 * @xprt: transport
874 *
875 * This is used when all requests are complete; ie, no DRC state remains
876 * on the server we want to save.
877 *
878 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
879 * xs_reset_transport() zeroing the socket from underneath a writer.
880 */
881static void xs_close(struct rpc_xprt *xprt)
882{
883 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
884
885 dprintk("RPC: xs_close xprt %p\n", xprt);
886
887 xs_reset_transport(transport);
888 xprt->reestablish_timeout = 0;
889
890 xprt_disconnect_done(xprt);
891}
892
893static void xs_inject_disconnect(struct rpc_xprt *xprt)
894{
895 dprintk("RPC: injecting transport disconnect on xprt=%p\n",
896 xprt);
897 xprt_disconnect_done(xprt);
898}
899
900static void xs_xprt_free(struct rpc_xprt *xprt)
901{
902 xs_free_peer_addresses(xprt);
903 xprt_free(xprt);
904}
905
906/**
907 * xs_destroy - prepare to shutdown a transport
908 * @xprt: doomed transport
909 *
910 */
911static void xs_destroy(struct rpc_xprt *xprt)
912{
913 struct sock_xprt *transport = container_of(xprt,
914 struct sock_xprt, xprt);
915 dprintk("RPC: xs_destroy xprt %p\n", xprt);
916
917 cancel_delayed_work_sync(&transport->connect_worker);
918 xs_close(xprt);
919 cancel_work_sync(&transport->recv_worker);
920 xs_xprt_free(xprt);
921 module_put(THIS_MODULE);
922}
923
924static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
925{
926 struct xdr_skb_reader desc = {
927 .skb = skb,
928 .offset = sizeof(rpc_fraghdr),
929 .count = skb->len - sizeof(rpc_fraghdr),
930 };
931
932 if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
933 return -1;
934 if (desc.count)
935 return -1;
936 return 0;
937}
938
939/**
940 * xs_local_data_read_skb
941 * @xprt: transport
942 * @sk: socket
943 * @skb: skbuff
944 *
945 * Currently this assumes we can read the whole reply in a single gulp.
946 */
947static void xs_local_data_read_skb(struct rpc_xprt *xprt,
948 struct sock *sk,
949 struct sk_buff *skb)
950{
951 struct rpc_task *task;
952 struct rpc_rqst *rovr;
953 int repsize, copied;
954 u32 _xid;
955 __be32 *xp;
956
957 repsize = skb->len - sizeof(rpc_fraghdr);
958 if (repsize < 4) {
959 dprintk("RPC: impossible RPC reply size %d\n", repsize);
960 return;
961 }
962
963 /* Copy the XID from the skb... */
964 xp = skb_header_pointer(skb, sizeof(rpc_fraghdr), sizeof(_xid), &_xid);
965 if (xp == NULL)
966 return;
967
968 /* Look up and lock the request corresponding to the given XID */
969 spin_lock(&xprt->recv_lock);
970 rovr = xprt_lookup_rqst(xprt, *xp);
971 if (!rovr)
972 goto out_unlock;
973 xprt_pin_rqst(rovr);
974 spin_unlock(&xprt->recv_lock);
975 task = rovr->rq_task;
976
977 copied = rovr->rq_private_buf.buflen;
978 if (copied > repsize)
979 copied = repsize;
980
981 if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) {
982 dprintk("RPC: sk_buff copy failed\n");
983 spin_lock(&xprt->recv_lock);
984 goto out_unpin;
985 }
986
987 spin_lock(&xprt->recv_lock);
988 xprt_complete_rqst(task, copied);
989out_unpin:
990 xprt_unpin_rqst(rovr);
991 out_unlock:
992 spin_unlock(&xprt->recv_lock);
993}
994
995static void xs_local_data_receive(struct sock_xprt *transport)
996{
997 struct sk_buff *skb;
998 struct sock *sk;
999 int err;
1000
1001restart:
1002 mutex_lock(&transport->recv_mutex);
1003 sk = transport->inet;
1004 if (sk == NULL)
1005 goto out;
1006 for (;;) {
1007 skb = skb_recv_datagram(sk, 0, 1, &err);
1008 if (skb != NULL) {
1009 xs_local_data_read_skb(&transport->xprt, sk, skb);
1010 skb_free_datagram(sk, skb);
1011 continue;
1012 }
1013 if (!test_and_clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
1014 break;
1015 if (need_resched()) {
1016 mutex_unlock(&transport->recv_mutex);
1017 cond_resched();
1018 goto restart;
1019 }
1020 }
1021out:
1022 mutex_unlock(&transport->recv_mutex);
1023}
1024
1025static void xs_local_data_receive_workfn(struct work_struct *work)
1026{
1027 struct sock_xprt *transport =
1028 container_of(work, struct sock_xprt, recv_worker);
1029 xs_local_data_receive(transport);
1030}
1031
1032/**
1033 * xs_udp_data_read_skb - receive callback for UDP sockets
1034 * @xprt: transport
1035 * @sk: socket
1036 * @skb: skbuff
1037 *
1038 */
1039static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
1040 struct sock *sk,
1041 struct sk_buff *skb)
1042{
1043 struct rpc_task *task;
1044 struct rpc_rqst *rovr;
1045 int repsize, copied;
1046 u32 _xid;
1047 __be32 *xp;
1048
1049 repsize = skb->len;
1050 if (repsize < 4) {
1051 dprintk("RPC: impossible RPC reply size %d!\n", repsize);
1052 return;
1053 }
1054
1055 /* Copy the XID from the skb... */
1056 xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid);
1057 if (xp == NULL)
1058 return;
1059
1060 /* Look up and lock the request corresponding to the given XID */
1061 spin_lock(&xprt->recv_lock);
1062 rovr = xprt_lookup_rqst(xprt, *xp);
1063 if (!rovr)
1064 goto out_unlock;
1065 xprt_pin_rqst(rovr);
1066 xprt_update_rtt(rovr->rq_task);
1067 spin_unlock(&xprt->recv_lock);
1068 task = rovr->rq_task;
1069
1070 if ((copied = rovr->rq_private_buf.buflen) > repsize)
1071 copied = repsize;
1072
1073 /* Suck it into the iovec, verify checksum if not done by hw. */
1074 if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
1075 spin_lock(&xprt->recv_lock);
1076 __UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
1077 goto out_unpin;
1078 }
1079
1080
1081 spin_lock_bh(&xprt->transport_lock);
1082 xprt_adjust_cwnd(xprt, task, copied);
1083 spin_unlock_bh(&xprt->transport_lock);
1084 spin_lock(&xprt->recv_lock);
1085 xprt_complete_rqst(task, copied);
1086 __UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
1087out_unpin:
1088 xprt_unpin_rqst(rovr);
1089 out_unlock:
1090 spin_unlock(&xprt->recv_lock);
1091}
1092
1093static void xs_udp_data_receive(struct sock_xprt *transport)
1094{
1095 struct sk_buff *skb;
1096 struct sock *sk;
1097 int err;
1098
1099restart:
1100 mutex_lock(&transport->recv_mutex);
1101 sk = transport->inet;
1102 if (sk == NULL)
1103 goto out;
1104 for (;;) {
1105 skb = skb_recv_udp(sk, 0, 1, &err);
1106 if (skb != NULL) {
1107 xs_udp_data_read_skb(&transport->xprt, sk, skb);
1108 consume_skb(skb);
1109 continue;
1110 }
1111 if (!test_and_clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
1112 break;
1113 if (need_resched()) {
1114 mutex_unlock(&transport->recv_mutex);
1115 cond_resched();
1116 goto restart;
1117 }
1118 }
1119out:
1120 mutex_unlock(&transport->recv_mutex);
1121}
1122
1123static void xs_udp_data_receive_workfn(struct work_struct *work)
1124{
1125 struct sock_xprt *transport =
1126 container_of(work, struct sock_xprt, recv_worker);
1127 xs_udp_data_receive(transport);
1128}
1129
1130/**
1131 * xs_data_ready - "data ready" callback for UDP sockets
1132 * @sk: socket with data to read
1133 *
1134 */
1135static void xs_data_ready(struct sock *sk)
1136{
1137 struct rpc_xprt *xprt;
1138
1139 read_lock_bh(&sk->sk_callback_lock);
1140 dprintk("RPC: xs_data_ready...\n");
1141 xprt = xprt_from_sock(sk);
1142 if (xprt != NULL) {
1143 struct sock_xprt *transport = container_of(xprt,
1144 struct sock_xprt, xprt);
1145 transport->old_data_ready(sk);
1146 /* Any data means we had a useful conversation, so
1147 * then we don't need to delay the next reconnect
1148 */
1149 if (xprt->reestablish_timeout)
1150 xprt->reestablish_timeout = 0;
1151 if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
1152 queue_work(xprtiod_workqueue, &transport->recv_worker);
1153 }
1154 read_unlock_bh(&sk->sk_callback_lock);
1155}
1156
1157/*
1158 * Helper function to force a TCP close if the server is sending
1159 * junk and/or it has put us in CLOSE_WAIT
1160 */
1161static void xs_tcp_force_close(struct rpc_xprt *xprt)
1162{
1163 xprt_force_disconnect(xprt);
1164}
1165
1166static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
1167{
1168 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1169 size_t len, used;
1170 char *p;
1171
1172 p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
1173 len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
1174 used = xdr_skb_read_bits(desc, p, len);
1175 transport->tcp_offset += used;
1176 if (used != len)
1177 return;
1178
1179 transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
1180 if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
1181 transport->tcp_flags |= TCP_RCV_LAST_FRAG;
1182 else
1183 transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
1184 transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
1185
1186 transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
1187 transport->tcp_offset = 0;
1188
1189 /* Sanity check of the record length */
1190 if (unlikely(transport->tcp_reclen < 8)) {
1191 dprintk("RPC: invalid TCP record fragment length\n");
1192 xs_tcp_force_close(xprt);
1193 return;
1194 }
1195 dprintk("RPC: reading TCP record fragment of length %d\n",
1196 transport->tcp_reclen);
1197}
1198
1199static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
1200{
1201 if (transport->tcp_offset == transport->tcp_reclen) {
1202 transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
1203 transport->tcp_offset = 0;
1204 if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
1205 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1206 transport->tcp_flags |= TCP_RCV_COPY_XID;
1207 transport->tcp_copied = 0;
1208 }
1209 }
1210}
1211
1212static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
1213{
1214 size_t len, used;
1215 char *p;
1216
1217 len = sizeof(transport->tcp_xid) - transport->tcp_offset;
1218 dprintk("RPC: reading XID (%zu bytes)\n", len);
1219 p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
1220 used = xdr_skb_read_bits(desc, p, len);
1221 transport->tcp_offset += used;
1222 if (used != len)
1223 return;
1224 transport->tcp_flags &= ~TCP_RCV_COPY_XID;
1225 transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
1226 transport->tcp_copied = 4;
1227 dprintk("RPC: reading %s XID %08x\n",
1228 (transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
1229 : "request with",
1230 ntohl(transport->tcp_xid));
1231 xs_tcp_check_fraghdr(transport);
1232}
1233
1234static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
1235 struct xdr_skb_reader *desc)
1236{
1237 size_t len, used;
1238 u32 offset;
1239 char *p;
1240
1241 /*
1242 * We want transport->tcp_offset to be 8 at the end of this routine
1243 * (4 bytes for the xid and 4 bytes for the call/reply flag).
1244 * When this function is called for the first time,
1245 * transport->tcp_offset is 4 (after having already read the xid).
1246 */
1247 offset = transport->tcp_offset - sizeof(transport->tcp_xid);
1248 len = sizeof(transport->tcp_calldir) - offset;
1249 dprintk("RPC: reading CALL/REPLY flag (%zu bytes)\n", len);
1250 p = ((char *) &transport->tcp_calldir) + offset;
1251 used = xdr_skb_read_bits(desc, p, len);
1252 transport->tcp_offset += used;
1253 if (used != len)
1254 return;
1255 transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
1256 /*
1257 * We don't yet have the XDR buffer, so we will write the calldir
1258 * out after we get the buffer from the 'struct rpc_rqst'
1259 */
1260 switch (ntohl(transport->tcp_calldir)) {
1261 case RPC_REPLY:
1262 transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
1263 transport->tcp_flags |= TCP_RCV_COPY_DATA;
1264 transport->tcp_flags |= TCP_RPC_REPLY;
1265 break;
1266 case RPC_CALL:
1267 transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
1268 transport->tcp_flags |= TCP_RCV_COPY_DATA;
1269 transport->tcp_flags &= ~TCP_RPC_REPLY;
1270 break;
1271 default:
1272 dprintk("RPC: invalid request message type\n");
1273 xs_tcp_force_close(&transport->xprt);
1274 }
1275 xs_tcp_check_fraghdr(transport);
1276}
1277
1278static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
1279 struct xdr_skb_reader *desc,
1280 struct rpc_rqst *req)
1281{
1282 struct sock_xprt *transport =
1283 container_of(xprt, struct sock_xprt, xprt);
1284 struct xdr_buf *rcvbuf;
1285 size_t len;
1286 ssize_t r;
1287
1288 rcvbuf = &req->rq_private_buf;
1289
1290 if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) {
1291 /*
1292 * Save the RPC direction in the XDR buffer
1293 */
1294 memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
1295 &transport->tcp_calldir,
1296 sizeof(transport->tcp_calldir));
1297 transport->tcp_copied += sizeof(transport->tcp_calldir);
1298 transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
1299 }
1300
1301 len = desc->count;
1302 if (len > transport->tcp_reclen - transport->tcp_offset)
1303 desc->count = transport->tcp_reclen - transport->tcp_offset;
1304 r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
1305 desc, xdr_skb_read_bits);
1306
1307 if (desc->count) {
1308 /* Error when copying to the receive buffer,
1309 * usually because we weren't able to allocate
1310 * additional buffer pages. All we can do now
1311 * is turn off TCP_RCV_COPY_DATA, so the request
1312 * will not receive any additional updates,
1313 * and time out.
1314 * Any remaining data from this record will
1315 * be discarded.
1316 */
1317 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1318 dprintk("RPC: XID %08x truncated request\n",
1319 ntohl(transport->tcp_xid));
1320 dprintk("RPC: xprt = %p, tcp_copied = %lu, "
1321 "tcp_offset = %u, tcp_reclen = %u\n",
1322 xprt, transport->tcp_copied,
1323 transport->tcp_offset, transport->tcp_reclen);
1324 return;
1325 }
1326
1327 transport->tcp_copied += r;
1328 transport->tcp_offset += r;
1329 desc->count = len - r;
1330
1331 dprintk("RPC: XID %08x read %zd bytes\n",
1332 ntohl(transport->tcp_xid), r);
1333 dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, "
1334 "tcp_reclen = %u\n", xprt, transport->tcp_copied,
1335 transport->tcp_offset, transport->tcp_reclen);
1336
1337 if (transport->tcp_copied == req->rq_private_buf.buflen)
1338 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1339 else if (transport->tcp_offset == transport->tcp_reclen) {
1340 if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
1341 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1342 }
1343}
1344
1345/*
1346 * Finds the request corresponding to the RPC xid and invokes the common
1347 * tcp read code to read the data.
1348 */
1349static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
1350 struct xdr_skb_reader *desc)
1351{
1352 struct sock_xprt *transport =
1353 container_of(xprt, struct sock_xprt, xprt);
1354 struct rpc_rqst *req;
1355
1356 dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid));
1357
1358 /* Find and lock the request corresponding to this xid */
1359 spin_lock(&xprt->recv_lock);
1360 req = xprt_lookup_rqst(xprt, transport->tcp_xid);
1361 if (!req) {
1362 dprintk("RPC: XID %08x request not found!\n",
1363 ntohl(transport->tcp_xid));
1364 spin_unlock(&xprt->recv_lock);
1365 return -1;
1366 }
1367 xprt_pin_rqst(req);
1368 spin_unlock(&xprt->recv_lock);
1369
1370 xs_tcp_read_common(xprt, desc, req);
1371
1372 spin_lock(&xprt->recv_lock);
1373 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
1374 xprt_complete_rqst(req->rq_task, transport->tcp_copied);
1375 xprt_unpin_rqst(req);
1376 spin_unlock(&xprt->recv_lock);
1377 return 0;
1378}
1379
1380#if defined(CONFIG_SUNRPC_BACKCHANNEL)
1381/*
1382 * Obtains an rpc_rqst previously allocated and invokes the common
1383 * tcp read code to read the data. The result is placed in the callback
1384 * queue.
1385 * If we're unable to obtain the rpc_rqst we schedule the closing of the
1386 * connection and return -1.
1387 */
1388static int xs_tcp_read_callback(struct rpc_xprt *xprt,
1389 struct xdr_skb_reader *desc)
1390{
1391 struct sock_xprt *transport =
1392 container_of(xprt, struct sock_xprt, xprt);
1393 struct rpc_rqst *req;
1394
1395 /* Look up the request corresponding to the given XID */
1396 req = xprt_lookup_bc_request(xprt, transport->tcp_xid);
1397 if (req == NULL) {
1398 printk(KERN_WARNING "Callback slot table overflowed\n");
1399 xprt_force_disconnect(xprt);
1400 return -1;
1401 }
1402
1403 dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid));
1404 xs_tcp_read_common(xprt, desc, req);
1405
1406 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
1407 xprt_complete_bc_request(req, transport->tcp_copied);
1408
1409 return 0;
1410}
1411
1412static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
1413 struct xdr_skb_reader *desc)
1414{
1415 struct sock_xprt *transport =
1416 container_of(xprt, struct sock_xprt, xprt);
1417
1418 return (transport->tcp_flags & TCP_RPC_REPLY) ?
1419 xs_tcp_read_reply(xprt, desc) :
1420 xs_tcp_read_callback(xprt, desc);
1421}
1422
1423static int xs_tcp_bc_up(struct svc_serv *serv, struct net *net)
1424{
1425 int ret;
1426
1427 ret = svc_create_xprt(serv, "tcp-bc", net, PF_INET, 0,
1428 SVC_SOCK_ANONYMOUS);
1429 if (ret < 0)
1430 return ret;
1431 return 0;
1432}
1433
1434static size_t xs_tcp_bc_maxpayload(struct rpc_xprt *xprt)
1435{
1436 return PAGE_SIZE;
1437}
1438#else
1439static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
1440 struct xdr_skb_reader *desc)
1441{
1442 return xs_tcp_read_reply(xprt, desc);
1443}
1444#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1445
1446/*
1447 * Read data off the transport. This can be either an RPC_CALL or an
1448 * RPC_REPLY. Relay the processing to helper functions.
1449 */
1450static void xs_tcp_read_data(struct rpc_xprt *xprt,
1451 struct xdr_skb_reader *desc)
1452{
1453 struct sock_xprt *transport =
1454 container_of(xprt, struct sock_xprt, xprt);
1455
1456 if (_xs_tcp_read_data(xprt, desc) == 0)
1457 xs_tcp_check_fraghdr(transport);
1458 else {
1459 /*
1460 * The transport_lock protects the request handling.
1461 * There's no need to hold it to update the tcp_flags.
1462 */
1463 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1464 }
1465}
1466
1467static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
1468{
1469 size_t len;
1470
1471 len = transport->tcp_reclen - transport->tcp_offset;
1472 if (len > desc->count)
1473 len = desc->count;
1474 desc->count -= len;
1475 desc->offset += len;
1476 transport->tcp_offset += len;
1477 dprintk("RPC: discarded %zu bytes\n", len);
1478 xs_tcp_check_fraghdr(transport);
1479}
1480
1481static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
1482{
1483 struct rpc_xprt *xprt = rd_desc->arg.data;
1484 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1485 struct xdr_skb_reader desc = {
1486 .skb = skb,
1487 .offset = offset,
1488 .count = len,
1489 };
1490 size_t ret;
1491
1492 dprintk("RPC: xs_tcp_data_recv started\n");
1493 do {
1494 trace_xs_tcp_data_recv(transport);
1495 /* Read in a new fragment marker if necessary */
1496 /* Can we ever really expect to get completely empty fragments? */
1497 if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) {
1498 xs_tcp_read_fraghdr(xprt, &desc);
1499 continue;
1500 }
1501 /* Read in the xid if necessary */
1502 if (transport->tcp_flags & TCP_RCV_COPY_XID) {
1503 xs_tcp_read_xid(transport, &desc);
1504 continue;
1505 }
1506 /* Read in the call/reply flag */
1507 if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
1508 xs_tcp_read_calldir(transport, &desc);
1509 continue;
1510 }
1511 /* Read in the request data */
1512 if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
1513 xs_tcp_read_data(xprt, &desc);
1514 continue;
1515 }
1516 /* Skip over any trailing bytes on short reads */
1517 xs_tcp_read_discard(transport, &desc);
1518 } while (desc.count);
1519 ret = len - desc.count;
1520 if (ret < rd_desc->count)
1521 rd_desc->count -= ret;
1522 else
1523 rd_desc->count = 0;
1524 trace_xs_tcp_data_recv(transport);
1525 dprintk("RPC: xs_tcp_data_recv done\n");
1526 return ret;
1527}
1528
1529static void xs_tcp_data_receive(struct sock_xprt *transport)
1530{
1531 struct rpc_xprt *xprt = &transport->xprt;
1532 struct sock *sk;
1533 read_descriptor_t rd_desc = {
1534 .arg.data = xprt,
1535 };
1536 unsigned long total = 0;
1537 int read = 0;
1538
1539restart:
1540 mutex_lock(&transport->recv_mutex);
1541 sk = transport->inet;
1542 if (sk == NULL)
1543 goto out;
1544
1545 /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
1546 for (;;) {
1547 rd_desc.count = RPC_TCP_READ_CHUNK_SZ;
1548 lock_sock(sk);
1549 read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
1550 if (rd_desc.count != 0 || read < 0) {
1551 clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
1552 release_sock(sk);
1553 break;
1554 }
1555 release_sock(sk);
1556 total += read;
1557 if (need_resched()) {
1558 mutex_unlock(&transport->recv_mutex);
1559 cond_resched();
1560 goto restart;
1561 }
1562 }
1563 if (test_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
1564 queue_work(xprtiod_workqueue, &transport->recv_worker);
1565out:
1566 mutex_unlock(&transport->recv_mutex);
1567 trace_xs_tcp_data_ready(xprt, read, total);
1568}
1569
1570static void xs_tcp_data_receive_workfn(struct work_struct *work)
1571{
1572 struct sock_xprt *transport =
1573 container_of(work, struct sock_xprt, recv_worker);
1574 xs_tcp_data_receive(transport);
1575}
1576
1577/**
1578 * xs_tcp_state_change - callback to handle TCP socket state changes
1579 * @sk: socket whose state has changed
1580 *
1581 */
1582static void xs_tcp_state_change(struct sock *sk)
1583{
1584 struct rpc_xprt *xprt;
1585 struct sock_xprt *transport;
1586
1587 read_lock_bh(&sk->sk_callback_lock);
1588 if (!(xprt = xprt_from_sock(sk)))
1589 goto out;
1590 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
1591 dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
1592 sk->sk_state, xprt_connected(xprt),
1593 sock_flag(sk, SOCK_DEAD),
1594 sock_flag(sk, SOCK_ZAPPED),
1595 sk->sk_shutdown);
1596
1597 transport = container_of(xprt, struct sock_xprt, xprt);
1598 trace_rpc_socket_state_change(xprt, sk->sk_socket);
1599 switch (sk->sk_state) {
1600 case TCP_ESTABLISHED:
1601 spin_lock(&xprt->transport_lock);
1602 if (!xprt_test_and_set_connected(xprt)) {
1603
1604 /* Reset TCP record info */
1605 transport->tcp_offset = 0;
1606 transport->tcp_reclen = 0;
1607 transport->tcp_copied = 0;
1608 transport->tcp_flags =
1609 TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
1610 xprt->connect_cookie++;
1611 clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
1612 xprt_clear_connecting(xprt);
1613
1614 xprt_wake_pending_tasks(xprt, -EAGAIN);
1615 }
1616 spin_unlock(&xprt->transport_lock);
1617 break;
1618 case TCP_FIN_WAIT1:
1619 /* The client initiated a shutdown of the socket */
1620 xprt->connect_cookie++;
1621 xprt->reestablish_timeout = 0;
1622 set_bit(XPRT_CLOSING, &xprt->state);
1623 smp_mb__before_atomic();
1624 clear_bit(XPRT_CONNECTED, &xprt->state);
1625 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1626 smp_mb__after_atomic();
1627 break;
1628 case TCP_CLOSE_WAIT:
1629 /* The server initiated a shutdown of the socket */
1630 xprt->connect_cookie++;
1631 clear_bit(XPRT_CONNECTED, &xprt->state);
1632 xs_tcp_force_close(xprt);
1633 /* fall through */
1634 case TCP_CLOSING:
1635 /*
1636 * If the server closed down the connection, make sure that
1637 * we back off before reconnecting
1638 */
1639 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
1640 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
1641 break;
1642 case TCP_LAST_ACK:
1643 set_bit(XPRT_CLOSING, &xprt->state);
1644 smp_mb__before_atomic();
1645 clear_bit(XPRT_CONNECTED, &xprt->state);
1646 smp_mb__after_atomic();
1647 break;
1648 case TCP_CLOSE:
1649 if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
1650 &transport->sock_state))
1651 xprt_clear_connecting(xprt);
1652 clear_bit(XPRT_CLOSING, &xprt->state);
1653 if (sk->sk_err)
1654 xprt_wake_pending_tasks(xprt, -sk->sk_err);
1655 /* Trigger the socket release */
1656 xs_tcp_force_close(xprt);
1657 }
1658 out:
1659 read_unlock_bh(&sk->sk_callback_lock);
1660}
1661
1662static void xs_write_space(struct sock *sk)
1663{
1664 struct socket_wq *wq;
1665 struct rpc_xprt *xprt;
1666
1667 if (!sk->sk_socket)
1668 return;
1669 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1670
1671 if (unlikely(!(xprt = xprt_from_sock(sk))))
1672 return;
1673 rcu_read_lock();
1674 wq = rcu_dereference(sk->sk_wq);
1675 if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0)
1676 goto out;
1677
1678 xprt_write_space(xprt);
1679out:
1680 rcu_read_unlock();
1681}
1682
1683/**
1684 * xs_udp_write_space - callback invoked when socket buffer space
1685 * becomes available
1686 * @sk: socket whose state has changed
1687 *
1688 * Called when more output buffer space is available for this socket.
1689 * We try not to wake our writers until they can make "significant"
1690 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1691 * with a bunch of small requests.
1692 */
1693static void xs_udp_write_space(struct sock *sk)
1694{
1695 read_lock_bh(&sk->sk_callback_lock);
1696
1697 /* from net/core/sock.c:sock_def_write_space */
1698 if (sock_writeable(sk))
1699 xs_write_space(sk);
1700
1701 read_unlock_bh(&sk->sk_callback_lock);
1702}
1703
1704/**
1705 * xs_tcp_write_space - callback invoked when socket buffer space
1706 * becomes available
1707 * @sk: socket whose state has changed
1708 *
1709 * Called when more output buffer space is available for this socket.
1710 * We try not to wake our writers until they can make "significant"
1711 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1712 * with a bunch of small requests.
1713 */
1714static void xs_tcp_write_space(struct sock *sk)
1715{
1716 read_lock_bh(&sk->sk_callback_lock);
1717
1718 /* from net/core/stream.c:sk_stream_write_space */
1719 if (sk_stream_is_writeable(sk))
1720 xs_write_space(sk);
1721
1722 read_unlock_bh(&sk->sk_callback_lock);
1723}
1724
1725static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
1726{
1727 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1728 struct sock *sk = transport->inet;
1729
1730 if (transport->rcvsize) {
1731 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
1732 sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
1733 }
1734 if (transport->sndsize) {
1735 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1736 sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
1737 sk->sk_write_space(sk);
1738 }
1739}
1740
1741/**
1742 * xs_udp_set_buffer_size - set send and receive limits
1743 * @xprt: generic transport
1744 * @sndsize: requested size of send buffer, in bytes
1745 * @rcvsize: requested size of receive buffer, in bytes
1746 *
1747 * Set socket send and receive buffer size limits.
1748 */
1749static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
1750{
1751 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1752
1753 transport->sndsize = 0;
1754 if (sndsize)
1755 transport->sndsize = sndsize + 1024;
1756 transport->rcvsize = 0;
1757 if (rcvsize)
1758 transport->rcvsize = rcvsize + 1024;
1759
1760 xs_udp_do_set_buffer_size(xprt);
1761}
1762
1763/**
1764 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
1765 * @task: task that timed out
1766 *
1767 * Adjust the congestion window after a retransmit timeout has occurred.
1768 */
1769static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
1770{
1771 spin_lock_bh(&xprt->transport_lock);
1772 xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
1773 spin_unlock_bh(&xprt->transport_lock);
1774}
1775
1776static unsigned short xs_get_random_port(void)
1777{
1778 unsigned short range = xprt_max_resvport - xprt_min_resvport + 1;
1779 unsigned short rand = (unsigned short) prandom_u32() % range;
1780 return rand + xprt_min_resvport;
1781}
1782
1783/**
1784 * xs_set_reuseaddr_port - set the socket's port and address reuse options
1785 * @sock: socket
1786 *
1787 * Note that this function has to be called on all sockets that share the
1788 * same port, and it must be called before binding.
1789 */
1790static void xs_sock_set_reuseport(struct socket *sock)
1791{
1792 int opt = 1;
1793
1794 kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEPORT,
1795 (char *)&opt, sizeof(opt));
1796}
1797
1798static unsigned short xs_sock_getport(struct socket *sock)
1799{
1800 struct sockaddr_storage buf;
1801 unsigned short port = 0;
1802
1803 if (kernel_getsockname(sock, (struct sockaddr *)&buf) < 0)
1804 goto out;
1805 switch (buf.ss_family) {
1806 case AF_INET6:
1807 port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port);
1808 break;
1809 case AF_INET:
1810 port = ntohs(((struct sockaddr_in *)&buf)->sin_port);
1811 }
1812out:
1813 return port;
1814}
1815
1816/**
1817 * xs_set_port - reset the port number in the remote endpoint address
1818 * @xprt: generic transport
1819 * @port: new port number
1820 *
1821 */
1822static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
1823{
1824 dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);
1825
1826 rpc_set_port(xs_addr(xprt), port);
1827 xs_update_peer_port(xprt);
1828}
1829
1830static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
1831{
1832 if (transport->srcport == 0)
1833 transport->srcport = xs_sock_getport(sock);
1834}
1835
1836static unsigned short xs_get_srcport(struct sock_xprt *transport)
1837{
1838 unsigned short port = transport->srcport;
1839
1840 if (port == 0 && transport->xprt.resvport)
1841 port = xs_get_random_port();
1842 return port;
1843}
1844
1845static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
1846{
1847 if (transport->srcport != 0)
1848 transport->srcport = 0;
1849 if (!transport->xprt.resvport)
1850 return 0;
1851 if (port <= xprt_min_resvport || port > xprt_max_resvport)
1852 return xprt_max_resvport;
1853 return --port;
1854}
1855static int xs_bind(struct sock_xprt *transport, struct socket *sock)
1856{
1857 struct sockaddr_storage myaddr;
1858 int err, nloop = 0;
1859 unsigned short port = xs_get_srcport(transport);
1860 unsigned short last;
1861
1862 /*
1863 * If we are asking for any ephemeral port (i.e. port == 0 &&
1864 * transport->xprt.resvport == 0), don't bind. Let the local
1865 * port selection happen implicitly when the socket is used
1866 * (for example at connect time).
1867 *
1868 * This ensures that we can continue to establish TCP
1869 * connections even when all local ephemeral ports are already
1870 * a part of some TCP connection. This makes no difference
1871 * for UDP sockets, but also doens't harm them.
1872 *
1873 * If we're asking for any reserved port (i.e. port == 0 &&
1874 * transport->xprt.resvport == 1) xs_get_srcport above will
1875 * ensure that port is non-zero and we will bind as needed.
1876 */
1877 if (port == 0)
1878 return 0;
1879
1880 memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
1881 do {
1882 rpc_set_port((struct sockaddr *)&myaddr, port);
1883 err = kernel_bind(sock, (struct sockaddr *)&myaddr,
1884 transport->xprt.addrlen);
1885 if (err == 0) {
1886 transport->srcport = port;
1887 break;
1888 }
1889 last = port;
1890 port = xs_next_srcport(transport, port);
1891 if (port > last)
1892 nloop++;
1893 } while (err == -EADDRINUSE && nloop != 2);
1894
1895 if (myaddr.ss_family == AF_INET)
1896 dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__,
1897 &((struct sockaddr_in *)&myaddr)->sin_addr,
1898 port, err ? "failed" : "ok", err);
1899 else
1900 dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__,
1901 &((struct sockaddr_in6 *)&myaddr)->sin6_addr,
1902 port, err ? "failed" : "ok", err);
1903 return err;
1904}
1905
1906/*
1907 * We don't support autobind on AF_LOCAL sockets
1908 */
1909static void xs_local_rpcbind(struct rpc_task *task)
1910{
1911 xprt_set_bound(task->tk_xprt);
1912}
1913
1914static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
1915{
1916}
1917
1918#ifdef CONFIG_DEBUG_LOCK_ALLOC
1919static struct lock_class_key xs_key[2];
1920static struct lock_class_key xs_slock_key[2];
1921
1922static inline void xs_reclassify_socketu(struct socket *sock)
1923{
1924 struct sock *sk = sock->sk;
1925
1926 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
1927 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
1928}
1929
1930static inline void xs_reclassify_socket4(struct socket *sock)
1931{
1932 struct sock *sk = sock->sk;
1933
1934 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
1935 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
1936}
1937
1938static inline void xs_reclassify_socket6(struct socket *sock)
1939{
1940 struct sock *sk = sock->sk;
1941
1942 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
1943 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
1944}
1945
1946static inline void xs_reclassify_socket(int family, struct socket *sock)
1947{
1948 if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk)))
1949 return;
1950
1951 switch (family) {
1952 case AF_LOCAL:
1953 xs_reclassify_socketu(sock);
1954 break;
1955 case AF_INET:
1956 xs_reclassify_socket4(sock);
1957 break;
1958 case AF_INET6:
1959 xs_reclassify_socket6(sock);
1960 break;
1961 }
1962}
1963#else
1964static inline void xs_reclassify_socket(int family, struct socket *sock)
1965{
1966}
1967#endif
1968
1969static void xs_dummy_setup_socket(struct work_struct *work)
1970{
1971}
1972
1973static struct socket *xs_create_sock(struct rpc_xprt *xprt,
1974 struct sock_xprt *transport, int family, int type,
1975 int protocol, bool reuseport)
1976{
1977 struct socket *sock;
1978 int err;
1979
1980 err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
1981 if (err < 0) {
1982 dprintk("RPC: can't create %d transport socket (%d).\n",
1983 protocol, -err);
1984 goto out;
1985 }
1986 xs_reclassify_socket(family, sock);
1987
1988 if (reuseport)
1989 xs_sock_set_reuseport(sock);
1990
1991 err = xs_bind(transport, sock);
1992 if (err) {
1993 sock_release(sock);
1994 goto out;
1995 }
1996
1997 return sock;
1998out:
1999 return ERR_PTR(err);
2000}
2001
2002static int xs_local_finish_connecting(struct rpc_xprt *xprt,
2003 struct socket *sock)
2004{
2005 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
2006 xprt);
2007
2008 if (!transport->inet) {
2009 struct sock *sk = sock->sk;
2010
2011 write_lock_bh(&sk->sk_callback_lock);
2012
2013 xs_save_old_callbacks(transport, sk);
2014
2015 sk->sk_user_data = xprt;
2016 sk->sk_data_ready = xs_data_ready;
2017 sk->sk_write_space = xs_udp_write_space;
2018 sock_set_flag(sk, SOCK_FASYNC);
2019 sk->sk_error_report = xs_error_report;
2020 sk->sk_allocation = GFP_NOIO;
2021
2022 xprt_clear_connected(xprt);
2023
2024 /* Reset to new socket */
2025 transport->sock = sock;
2026 transport->inet = sk;
2027
2028 write_unlock_bh(&sk->sk_callback_lock);
2029 }
2030
2031 /* Tell the socket layer to start connecting... */
2032 xprt->stat.connect_count++;
2033 xprt->stat.connect_start = jiffies;
2034 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
2035}
2036
2037/**
2038 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
2039 * @transport: socket transport to connect
2040 */
2041static int xs_local_setup_socket(struct sock_xprt *transport)
2042{
2043 struct rpc_xprt *xprt = &transport->xprt;
2044 struct socket *sock;
2045 int status = -EIO;
2046
2047 status = __sock_create(xprt->xprt_net, AF_LOCAL,
2048 SOCK_STREAM, 0, &sock, 1);
2049 if (status < 0) {
2050 dprintk("RPC: can't create AF_LOCAL "
2051 "transport socket (%d).\n", -status);
2052 goto out;
2053 }
2054 xs_reclassify_socket(AF_LOCAL, sock);
2055
2056 dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n",
2057 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
2058
2059 status = xs_local_finish_connecting(xprt, sock);
2060 trace_rpc_socket_connect(xprt, sock, status);
2061 switch (status) {
2062 case 0:
2063 dprintk("RPC: xprt %p connected to %s\n",
2064 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
2065 xprt_set_connected(xprt);
2066 case -ENOBUFS:
2067 break;
2068 case -ENOENT:
2069 dprintk("RPC: xprt %p: socket %s does not exist\n",
2070 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
2071 break;
2072 case -ECONNREFUSED:
2073 dprintk("RPC: xprt %p: connection refused for %s\n",
2074 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
2075 break;
2076 default:
2077 printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
2078 __func__, -status,
2079 xprt->address_strings[RPC_DISPLAY_ADDR]);
2080 }
2081
2082out:
2083 xprt_clear_connecting(xprt);
2084 xprt_wake_pending_tasks(xprt, status);
2085 return status;
2086}
2087
2088static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2089{
2090 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2091 int ret;
2092
2093 if (RPC_IS_ASYNC(task)) {
2094 /*
2095 * We want the AF_LOCAL connect to be resolved in the
2096 * filesystem namespace of the process making the rpc
2097 * call. Thus we connect synchronously.
2098 *
2099 * If we want to support asynchronous AF_LOCAL calls,
2100 * we'll need to figure out how to pass a namespace to
2101 * connect.
2102 */
2103 rpc_exit(task, -ENOTCONN);
2104 return;
2105 }
2106 ret = xs_local_setup_socket(transport);
2107 if (ret && !RPC_IS_SOFTCONN(task))
2108 msleep_interruptible(15000);
2109}
2110
2111#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
2112/*
2113 * Note that this should be called with XPRT_LOCKED held (or when we otherwise
2114 * know that we have exclusive access to the socket), to guard against
2115 * races with xs_reset_transport.
2116 */
2117static void xs_set_memalloc(struct rpc_xprt *xprt)
2118{
2119 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
2120 xprt);
2121
2122 /*
2123 * If there's no sock, then we have nothing to set. The
2124 * reconnecting process will get it for us.
2125 */
2126 if (!transport->inet)
2127 return;
2128 if (atomic_read(&xprt->swapper))
2129 sk_set_memalloc(transport->inet);
2130}
2131
2132/**
2133 * xs_enable_swap - Tag this transport as being used for swap.
2134 * @xprt: transport to tag
2135 *
2136 * Take a reference to this transport on behalf of the rpc_clnt, and
2137 * optionally mark it for swapping if it wasn't already.
2138 */
2139static int
2140xs_enable_swap(struct rpc_xprt *xprt)
2141{
2142 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
2143
2144 if (atomic_inc_return(&xprt->swapper) != 1)
2145 return 0;
2146 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
2147 return -ERESTARTSYS;
2148 if (xs->inet)
2149 sk_set_memalloc(xs->inet);
2150 xprt_release_xprt(xprt, NULL);
2151 return 0;
2152}
2153
2154/**
2155 * xs_disable_swap - Untag this transport as being used for swap.
2156 * @xprt: transport to tag
2157 *
2158 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the
2159 * swapper refcount goes to 0, untag the socket as a memalloc socket.
2160 */
2161static void
2162xs_disable_swap(struct rpc_xprt *xprt)
2163{
2164 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
2165
2166 if (!atomic_dec_and_test(&xprt->swapper))
2167 return;
2168 if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
2169 return;
2170 if (xs->inet)
2171 sk_clear_memalloc(xs->inet);
2172 xprt_release_xprt(xprt, NULL);
2173}
2174#else
2175static void xs_set_memalloc(struct rpc_xprt *xprt)
2176{
2177}
2178
2179static int
2180xs_enable_swap(struct rpc_xprt *xprt)
2181{
2182 return -EINVAL;
2183}
2184
2185static void
2186xs_disable_swap(struct rpc_xprt *xprt)
2187{
2188}
2189#endif
2190
2191static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2192{
2193 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2194
2195 if (!transport->inet) {
2196 struct sock *sk = sock->sk;
2197
2198 write_lock_bh(&sk->sk_callback_lock);
2199
2200 xs_save_old_callbacks(transport, sk);
2201
2202 sk->sk_user_data = xprt;
2203 sk->sk_data_ready = xs_data_ready;
2204 sk->sk_write_space = xs_udp_write_space;
2205 sock_set_flag(sk, SOCK_FASYNC);
2206 sk->sk_allocation = GFP_NOIO;
2207
2208 xprt_set_connected(xprt);
2209
2210 /* Reset to new socket */
2211 transport->sock = sock;
2212 transport->inet = sk;
2213
2214 xs_set_memalloc(xprt);
2215
2216 write_unlock_bh(&sk->sk_callback_lock);
2217 }
2218 xs_udp_do_set_buffer_size(xprt);
2219
2220 xprt->stat.connect_start = jiffies;
2221}
2222
2223static void xs_udp_setup_socket(struct work_struct *work)
2224{
2225 struct sock_xprt *transport =
2226 container_of(work, struct sock_xprt, connect_worker.work);
2227 struct rpc_xprt *xprt = &transport->xprt;
2228 struct socket *sock;
2229 int status = -EIO;
2230
2231 sock = xs_create_sock(xprt, transport,
2232 xs_addr(xprt)->sa_family, SOCK_DGRAM,
2233 IPPROTO_UDP, false);
2234 if (IS_ERR(sock))
2235 goto out;
2236
2237 dprintk("RPC: worker connecting xprt %p via %s to "
2238 "%s (port %s)\n", xprt,
2239 xprt->address_strings[RPC_DISPLAY_PROTO],
2240 xprt->address_strings[RPC_DISPLAY_ADDR],
2241 xprt->address_strings[RPC_DISPLAY_PORT]);
2242
2243 xs_udp_finish_connecting(xprt, sock);
2244 trace_rpc_socket_connect(xprt, sock, 0);
2245 status = 0;
2246out:
2247 xprt_unlock_connect(xprt, transport);
2248 xprt_clear_connecting(xprt);
2249 xprt_wake_pending_tasks(xprt, status);
2250}
2251
2252/**
2253 * xs_tcp_shutdown - gracefully shut down a TCP socket
2254 * @xprt: transport
2255 *
2256 * Initiates a graceful shutdown of the TCP socket by calling the
2257 * equivalent of shutdown(SHUT_RDWR);
2258 */
2259static void xs_tcp_shutdown(struct rpc_xprt *xprt)
2260{
2261 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2262 struct socket *sock = transport->sock;
2263 int skst = transport->inet ? transport->inet->sk_state : TCP_CLOSE;
2264
2265 if (sock == NULL)
2266 return;
2267 switch (skst) {
2268 default:
2269 kernel_sock_shutdown(sock, SHUT_RDWR);
2270 trace_rpc_socket_shutdown(xprt, sock);
2271 break;
2272 case TCP_CLOSE:
2273 case TCP_TIME_WAIT:
2274 xs_reset_transport(transport);
2275 }
2276}
2277
2278static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
2279 struct socket *sock)
2280{
2281 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2282 unsigned int keepidle;
2283 unsigned int keepcnt;
2284 unsigned int opt_on = 1;
2285 unsigned int timeo;
2286
2287 spin_lock_bh(&xprt->transport_lock);
2288 keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ);
2289 keepcnt = xprt->timeout->to_retries + 1;
2290 timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
2291 (xprt->timeout->to_retries + 1);
2292 clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
2293 spin_unlock_bh(&xprt->transport_lock);
2294
2295 /* TCP Keepalive options */
2296 kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
2297 (char *)&opt_on, sizeof(opt_on));
2298 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
2299 (char *)&keepidle, sizeof(keepidle));
2300 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
2301 (char *)&keepidle, sizeof(keepidle));
2302 kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
2303 (char *)&keepcnt, sizeof(keepcnt));
2304
2305 /* TCP user timeout (see RFC5482) */
2306 kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT,
2307 (char *)&timeo, sizeof(timeo));
2308}
2309
2310static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt,
2311 unsigned long connect_timeout,
2312 unsigned long reconnect_timeout)
2313{
2314 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2315 struct rpc_timeout to;
2316 unsigned long initval;
2317
2318 spin_lock_bh(&xprt->transport_lock);
2319 if (reconnect_timeout < xprt->max_reconnect_timeout)
2320 xprt->max_reconnect_timeout = reconnect_timeout;
2321 if (connect_timeout < xprt->connect_timeout) {
2322 memcpy(&to, xprt->timeout, sizeof(to));
2323 initval = DIV_ROUND_UP(connect_timeout, to.to_retries + 1);
2324 /* Arbitrary lower limit */
2325 if (initval < XS_TCP_INIT_REEST_TO << 1)
2326 initval = XS_TCP_INIT_REEST_TO << 1;
2327 to.to_initval = initval;
2328 to.to_maxval = initval;
2329 memcpy(&transport->tcp_timeout, &to,
2330 sizeof(transport->tcp_timeout));
2331 xprt->timeout = &transport->tcp_timeout;
2332 xprt->connect_timeout = connect_timeout;
2333 }
2334 set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
2335 spin_unlock_bh(&xprt->transport_lock);
2336}
2337
2338static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2339{
2340 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2341 int ret = -ENOTCONN;
2342
2343 if (!transport->inet) {
2344 struct sock *sk = sock->sk;
2345 unsigned int addr_pref = IPV6_PREFER_SRC_PUBLIC;
2346
2347 /* Avoid temporary address, they are bad for long-lived
2348 * connections such as NFS mounts.
2349 * RFC4941, section 3.6 suggests that:
2350 * Individual applications, which have specific
2351 * knowledge about the normal duration of connections,
2352 * MAY override this as appropriate.
2353 */
2354 kernel_setsockopt(sock, SOL_IPV6, IPV6_ADDR_PREFERENCES,
2355 (char *)&addr_pref, sizeof(addr_pref));
2356
2357 xs_tcp_set_socket_timeouts(xprt, sock);
2358
2359 write_lock_bh(&sk->sk_callback_lock);
2360
2361 xs_save_old_callbacks(transport, sk);
2362
2363 sk->sk_user_data = xprt;
2364 sk->sk_data_ready = xs_data_ready;
2365 sk->sk_state_change = xs_tcp_state_change;
2366 sk->sk_write_space = xs_tcp_write_space;
2367 sock_set_flag(sk, SOCK_FASYNC);
2368 sk->sk_error_report = xs_error_report;
2369 sk->sk_allocation = GFP_NOIO;
2370
2371 /* socket options */
2372 sock_reset_flag(sk, SOCK_LINGER);
2373 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
2374
2375 xprt_clear_connected(xprt);
2376
2377 /* Reset to new socket */
2378 transport->sock = sock;
2379 transport->inet = sk;
2380
2381 write_unlock_bh(&sk->sk_callback_lock);
2382 }
2383
2384 if (!xprt_bound(xprt))
2385 goto out;
2386
2387 xs_set_memalloc(xprt);
2388
2389 /* Tell the socket layer to start connecting... */
2390 xprt->stat.connect_count++;
2391 xprt->stat.connect_start = jiffies;
2392 set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
2393 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
2394 switch (ret) {
2395 case 0:
2396 xs_set_srcport(transport, sock);
2397 /* fall through */
2398 case -EINPROGRESS:
2399 /* SYN_SENT! */
2400 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2401 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2402 break;
2403 case -EADDRNOTAVAIL:
2404 /* Source port number is unavailable. Try a new one! */
2405 transport->srcport = 0;
2406 }
2407out:
2408 return ret;
2409}
2410
2411/**
2412 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
2413 *
2414 * Invoked by a work queue tasklet.
2415 */
2416static void xs_tcp_setup_socket(struct work_struct *work)
2417{
2418 struct sock_xprt *transport =
2419 container_of(work, struct sock_xprt, connect_worker.work);
2420 struct socket *sock = transport->sock;
2421 struct rpc_xprt *xprt = &transport->xprt;
2422 int status = -EIO;
2423
2424 if (!sock) {
2425 sock = xs_create_sock(xprt, transport,
2426 xs_addr(xprt)->sa_family, SOCK_STREAM,
2427 IPPROTO_TCP, true);
2428 if (IS_ERR(sock)) {
2429 status = PTR_ERR(sock);
2430 goto out;
2431 }
2432 }
2433
2434 dprintk("RPC: worker connecting xprt %p via %s to "
2435 "%s (port %s)\n", xprt,
2436 xprt->address_strings[RPC_DISPLAY_PROTO],
2437 xprt->address_strings[RPC_DISPLAY_ADDR],
2438 xprt->address_strings[RPC_DISPLAY_PORT]);
2439
2440 status = xs_tcp_finish_connecting(xprt, sock);
2441 trace_rpc_socket_connect(xprt, sock, status);
2442 dprintk("RPC: %p connect status %d connected %d sock state %d\n",
2443 xprt, -status, xprt_connected(xprt),
2444 sock->sk->sk_state);
2445 switch (status) {
2446 default:
2447 printk("%s: connect returned unhandled error %d\n",
2448 __func__, status);
2449 /* fall through */
2450 case -EADDRNOTAVAIL:
2451 /* We're probably in TIME_WAIT. Get rid of existing socket,
2452 * and retry
2453 */
2454 xs_tcp_force_close(xprt);
2455 break;
2456 case 0:
2457 case -EINPROGRESS:
2458 case -EALREADY:
2459 xprt_unlock_connect(xprt, transport);
2460 return;
2461 case -EINVAL:
2462 /* Happens, for instance, if the user specified a link
2463 * local IPv6 address without a scope-id.
2464 */
2465 case -ECONNREFUSED:
2466 case -ECONNRESET:
2467 case -ENETDOWN:
2468 case -ENETUNREACH:
2469 case -EHOSTUNREACH:
2470 case -EADDRINUSE:
2471 case -ENOBUFS:
2472 /*
2473 * xs_tcp_force_close() wakes tasks with -EIO.
2474 * We need to wake them first to ensure the
2475 * correct error code.
2476 */
2477 xprt_wake_pending_tasks(xprt, status);
2478 xs_tcp_force_close(xprt);
2479 goto out;
2480 }
2481 status = -EAGAIN;
2482out:
2483 xprt_unlock_connect(xprt, transport);
2484 xprt_clear_connecting(xprt);
2485 xprt_wake_pending_tasks(xprt, status);
2486}
2487
2488static unsigned long xs_reconnect_delay(const struct rpc_xprt *xprt)
2489{
2490 unsigned long start, now = jiffies;
2491
2492 start = xprt->stat.connect_start + xprt->reestablish_timeout;
2493 if (time_after(start, now))
2494 return start - now;
2495 return 0;
2496}
2497
2498static void xs_reconnect_backoff(struct rpc_xprt *xprt)
2499{
2500 xprt->reestablish_timeout <<= 1;
2501 if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
2502 xprt->reestablish_timeout = xprt->max_reconnect_timeout;
2503 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2504 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2505}
2506
2507/**
2508 * xs_connect - connect a socket to a remote endpoint
2509 * @xprt: pointer to transport structure
2510 * @task: address of RPC task that manages state of connect request
2511 *
2512 * TCP: If the remote end dropped the connection, delay reconnecting.
2513 *
2514 * UDP socket connects are synchronous, but we use a work queue anyway
2515 * to guarantee that even unprivileged user processes can set up a
2516 * socket on a privileged port.
2517 *
2518 * If a UDP socket connect fails, the delay behavior here prevents
2519 * retry floods (hard mounts).
2520 */
2521static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2522{
2523 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2524 unsigned long delay = 0;
2525
2526 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
2527
2528 if (transport->sock != NULL) {
2529 dprintk("RPC: xs_connect delayed xprt %p for %lu "
2530 "seconds\n",
2531 xprt, xprt->reestablish_timeout / HZ);
2532
2533 /* Start by resetting any existing state */
2534 xs_reset_transport(transport);
2535
2536 delay = xs_reconnect_delay(xprt);
2537 xs_reconnect_backoff(xprt);
2538
2539 } else
2540 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
2541
2542 queue_delayed_work(xprtiod_workqueue,
2543 &transport->connect_worker,
2544 delay);
2545}
2546
2547/**
2548 * xs_local_print_stats - display AF_LOCAL socket-specifc stats
2549 * @xprt: rpc_xprt struct containing statistics
2550 * @seq: output file
2551 *
2552 */
2553static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2554{
2555 long idle_time = 0;
2556
2557 if (xprt_connected(xprt))
2558 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2559
2560 seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
2561 "%llu %llu %lu %llu %llu\n",
2562 xprt->stat.bind_count,
2563 xprt->stat.connect_count,
2564 xprt->stat.connect_time,
2565 idle_time,
2566 xprt->stat.sends,
2567 xprt->stat.recvs,
2568 xprt->stat.bad_xids,
2569 xprt->stat.req_u,
2570 xprt->stat.bklog_u,
2571 xprt->stat.max_slots,
2572 xprt->stat.sending_u,
2573 xprt->stat.pending_u);
2574}
2575
2576/**
2577 * xs_udp_print_stats - display UDP socket-specifc stats
2578 * @xprt: rpc_xprt struct containing statistics
2579 * @seq: output file
2580 *
2581 */
2582static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2583{
2584 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2585
2586 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu "
2587 "%lu %llu %llu\n",
2588 transport->srcport,
2589 xprt->stat.bind_count,
2590 xprt->stat.sends,
2591 xprt->stat.recvs,
2592 xprt->stat.bad_xids,
2593 xprt->stat.req_u,
2594 xprt->stat.bklog_u,
2595 xprt->stat.max_slots,
2596 xprt->stat.sending_u,
2597 xprt->stat.pending_u);
2598}
2599
2600/**
2601 * xs_tcp_print_stats - display TCP socket-specifc stats
2602 * @xprt: rpc_xprt struct containing statistics
2603 * @seq: output file
2604 *
2605 */
2606static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2607{
2608 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2609 long idle_time = 0;
2610
2611 if (xprt_connected(xprt))
2612 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2613
2614 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu "
2615 "%llu %llu %lu %llu %llu\n",
2616 transport->srcport,
2617 xprt->stat.bind_count,
2618 xprt->stat.connect_count,
2619 xprt->stat.connect_time,
2620 idle_time,
2621 xprt->stat.sends,
2622 xprt->stat.recvs,
2623 xprt->stat.bad_xids,
2624 xprt->stat.req_u,
2625 xprt->stat.bklog_u,
2626 xprt->stat.max_slots,
2627 xprt->stat.sending_u,
2628 xprt->stat.pending_u);
2629}
2630
2631/*
2632 * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
2633 * we allocate pages instead doing a kmalloc like rpc_malloc is because we want
2634 * to use the server side send routines.
2635 */
2636static int bc_malloc(struct rpc_task *task)
2637{
2638 struct rpc_rqst *rqst = task->tk_rqstp;
2639 size_t size = rqst->rq_callsize;
2640 struct page *page;
2641 struct rpc_buffer *buf;
2642
2643 if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) {
2644 WARN_ONCE(1, "xprtsock: large bc buffer request (size %zu)\n",
2645 size);
2646 return -EINVAL;
2647 }
2648
2649 page = alloc_page(GFP_KERNEL);
2650 if (!page)
2651 return -ENOMEM;
2652
2653 buf = page_address(page);
2654 buf->len = PAGE_SIZE;
2655
2656 rqst->rq_buffer = buf->data;
2657 rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
2658 return 0;
2659}
2660
2661/*
2662 * Free the space allocated in the bc_alloc routine
2663 */
2664static void bc_free(struct rpc_task *task)
2665{
2666 void *buffer = task->tk_rqstp->rq_buffer;
2667 struct rpc_buffer *buf;
2668
2669 buf = container_of(buffer, struct rpc_buffer, data);
2670 free_page((unsigned long)buf);
2671}
2672
2673/*
2674 * Use the svc_sock to send the callback. Must be called with svsk->sk_mutex
2675 * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
2676 */
2677static int bc_sendto(struct rpc_rqst *req)
2678{
2679 int len;
2680 struct xdr_buf *xbufp = &req->rq_snd_buf;
2681 struct rpc_xprt *xprt = req->rq_xprt;
2682 struct sock_xprt *transport =
2683 container_of(xprt, struct sock_xprt, xprt);
2684 struct socket *sock = transport->sock;
2685 unsigned long headoff;
2686 unsigned long tailoff;
2687
2688 xs_encode_stream_record_marker(xbufp);
2689
2690 tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
2691 headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
2692 len = svc_send_common(sock, xbufp,
2693 virt_to_page(xbufp->head[0].iov_base), headoff,
2694 xbufp->tail[0].iov_base, tailoff);
2695
2696 if (len != xbufp->len) {
2697 printk(KERN_NOTICE "Error sending entire callback!\n");
2698 len = -EAGAIN;
2699 }
2700
2701 return len;
2702}
2703
2704/*
2705 * The send routine. Borrows from svc_send
2706 */
2707static int bc_send_request(struct rpc_task *task)
2708{
2709 struct rpc_rqst *req = task->tk_rqstp;
2710 struct svc_xprt *xprt;
2711 int len;
2712
2713 dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
2714 /*
2715 * Get the server socket associated with this callback xprt
2716 */
2717 xprt = req->rq_xprt->bc_xprt;
2718
2719 /*
2720 * Grab the mutex to serialize data as the connection is shared
2721 * with the fore channel
2722 */
2723 if (!mutex_trylock(&xprt->xpt_mutex)) {
2724 rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
2725 if (!mutex_trylock(&xprt->xpt_mutex))
2726 return -EAGAIN;
2727 rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task);
2728 }
2729 if (test_bit(XPT_DEAD, &xprt->xpt_flags))
2730 len = -ENOTCONN;
2731 else
2732 len = bc_sendto(req);
2733 mutex_unlock(&xprt->xpt_mutex);
2734
2735 if (len > 0)
2736 len = 0;
2737
2738 return len;
2739}
2740
2741/*
2742 * The close routine. Since this is client initiated, we do nothing
2743 */
2744
2745static void bc_close(struct rpc_xprt *xprt)
2746{
2747}
2748
2749/*
2750 * The xprt destroy routine. Again, because this connection is client
2751 * initiated, we do nothing
2752 */
2753
2754static void bc_destroy(struct rpc_xprt *xprt)
2755{
2756 dprintk("RPC: bc_destroy xprt %p\n", xprt);
2757
2758 xs_xprt_free(xprt);
2759 module_put(THIS_MODULE);
2760}
2761
2762static const struct rpc_xprt_ops xs_local_ops = {
2763 .reserve_xprt = xprt_reserve_xprt,
2764 .release_xprt = xs_tcp_release_xprt,
2765 .alloc_slot = xprt_alloc_slot,
2766 .rpcbind = xs_local_rpcbind,
2767 .set_port = xs_local_set_port,
2768 .connect = xs_local_connect,
2769 .buf_alloc = rpc_malloc,
2770 .buf_free = rpc_free,
2771 .send_request = xs_local_send_request,
2772 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2773 .close = xs_close,
2774 .destroy = xs_destroy,
2775 .print_stats = xs_local_print_stats,
2776 .enable_swap = xs_enable_swap,
2777 .disable_swap = xs_disable_swap,
2778};
2779
2780static const struct rpc_xprt_ops xs_udp_ops = {
2781 .set_buffer_size = xs_udp_set_buffer_size,
2782 .reserve_xprt = xprt_reserve_xprt_cong,
2783 .release_xprt = xprt_release_xprt_cong,
2784 .alloc_slot = xprt_alloc_slot,
2785 .rpcbind = rpcb_getport_async,
2786 .set_port = xs_set_port,
2787 .connect = xs_connect,
2788 .buf_alloc = rpc_malloc,
2789 .buf_free = rpc_free,
2790 .send_request = xs_udp_send_request,
2791 .set_retrans_timeout = xprt_set_retrans_timeout_rtt,
2792 .timer = xs_udp_timer,
2793 .release_request = xprt_release_rqst_cong,
2794 .close = xs_close,
2795 .destroy = xs_destroy,
2796 .print_stats = xs_udp_print_stats,
2797 .enable_swap = xs_enable_swap,
2798 .disable_swap = xs_disable_swap,
2799 .inject_disconnect = xs_inject_disconnect,
2800};
2801
2802static const struct rpc_xprt_ops xs_tcp_ops = {
2803 .reserve_xprt = xprt_reserve_xprt,
2804 .release_xprt = xs_tcp_release_xprt,
2805 .alloc_slot = xprt_lock_and_alloc_slot,
2806 .rpcbind = rpcb_getport_async,
2807 .set_port = xs_set_port,
2808 .connect = xs_connect,
2809 .buf_alloc = rpc_malloc,
2810 .buf_free = rpc_free,
2811 .send_request = xs_tcp_send_request,
2812 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2813 .close = xs_tcp_shutdown,
2814 .destroy = xs_destroy,
2815 .set_connect_timeout = xs_tcp_set_connect_timeout,
2816 .print_stats = xs_tcp_print_stats,
2817 .enable_swap = xs_enable_swap,
2818 .disable_swap = xs_disable_swap,
2819 .inject_disconnect = xs_inject_disconnect,
2820#ifdef CONFIG_SUNRPC_BACKCHANNEL
2821 .bc_setup = xprt_setup_bc,
2822 .bc_up = xs_tcp_bc_up,
2823 .bc_maxpayload = xs_tcp_bc_maxpayload,
2824 .bc_free_rqst = xprt_free_bc_rqst,
2825 .bc_destroy = xprt_destroy_bc,
2826#endif
2827};
2828
2829/*
2830 * The rpc_xprt_ops for the server backchannel
2831 */
2832
2833static const struct rpc_xprt_ops bc_tcp_ops = {
2834 .reserve_xprt = xprt_reserve_xprt,
2835 .release_xprt = xprt_release_xprt,
2836 .alloc_slot = xprt_alloc_slot,
2837 .buf_alloc = bc_malloc,
2838 .buf_free = bc_free,
2839 .send_request = bc_send_request,
2840 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2841 .close = bc_close,
2842 .destroy = bc_destroy,
2843 .print_stats = xs_tcp_print_stats,
2844 .enable_swap = xs_enable_swap,
2845 .disable_swap = xs_disable_swap,
2846 .inject_disconnect = xs_inject_disconnect,
2847};
2848
2849static int xs_init_anyaddr(const int family, struct sockaddr *sap)
2850{
2851 static const struct sockaddr_in sin = {
2852 .sin_family = AF_INET,
2853 .sin_addr.s_addr = htonl(INADDR_ANY),
2854 };
2855 static const struct sockaddr_in6 sin6 = {
2856 .sin6_family = AF_INET6,
2857 .sin6_addr = IN6ADDR_ANY_INIT,
2858 };
2859
2860 switch (family) {
2861 case AF_LOCAL:
2862 break;
2863 case AF_INET:
2864 memcpy(sap, &sin, sizeof(sin));
2865 break;
2866 case AF_INET6:
2867 memcpy(sap, &sin6, sizeof(sin6));
2868 break;
2869 default:
2870 dprintk("RPC: %s: Bad address family\n", __func__);
2871 return -EAFNOSUPPORT;
2872 }
2873 return 0;
2874}
2875
2876static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2877 unsigned int slot_table_size,
2878 unsigned int max_slot_table_size)
2879{
2880 struct rpc_xprt *xprt;
2881 struct sock_xprt *new;
2882
2883 if (args->addrlen > sizeof(xprt->addr)) {
2884 dprintk("RPC: xs_setup_xprt: address too large\n");
2885 return ERR_PTR(-EBADF);
2886 }
2887
2888 xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size,
2889 max_slot_table_size);
2890 if (xprt == NULL) {
2891 dprintk("RPC: xs_setup_xprt: couldn't allocate "
2892 "rpc_xprt\n");
2893 return ERR_PTR(-ENOMEM);
2894 }
2895
2896 new = container_of(xprt, struct sock_xprt, xprt);
2897 mutex_init(&new->recv_mutex);
2898 memcpy(&xprt->addr, args->dstaddr, args->addrlen);
2899 xprt->addrlen = args->addrlen;
2900 if (args->srcaddr)
2901 memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
2902 else {
2903 int err;
2904 err = xs_init_anyaddr(args->dstaddr->sa_family,
2905 (struct sockaddr *)&new->srcaddr);
2906 if (err != 0) {
2907 xprt_free(xprt);
2908 return ERR_PTR(err);
2909 }
2910 }
2911
2912 return xprt;
2913}
2914
2915static const struct rpc_timeout xs_local_default_timeout = {
2916 .to_initval = 10 * HZ,
2917 .to_maxval = 10 * HZ,
2918 .to_retries = 2,
2919};
2920
2921/**
2922 * xs_setup_local - Set up transport to use an AF_LOCAL socket
2923 * @args: rpc transport creation arguments
2924 *
2925 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
2926 */
2927static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
2928{
2929 struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
2930 struct sock_xprt *transport;
2931 struct rpc_xprt *xprt;
2932 struct rpc_xprt *ret;
2933
2934 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2935 xprt_max_tcp_slot_table_entries);
2936 if (IS_ERR(xprt))
2937 return xprt;
2938 transport = container_of(xprt, struct sock_xprt, xprt);
2939
2940 xprt->prot = 0;
2941 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
2942 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2943
2944 xprt->bind_timeout = XS_BIND_TO;
2945 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2946 xprt->idle_timeout = XS_IDLE_DISC_TO;
2947
2948 xprt->ops = &xs_local_ops;
2949 xprt->timeout = &xs_local_default_timeout;
2950
2951 INIT_WORK(&transport->recv_worker, xs_local_data_receive_workfn);
2952 INIT_DELAYED_WORK(&transport->connect_worker,
2953 xs_dummy_setup_socket);
2954
2955 switch (sun->sun_family) {
2956 case AF_LOCAL:
2957 if (sun->sun_path[0] != '/') {
2958 dprintk("RPC: bad AF_LOCAL address: %s\n",
2959 sun->sun_path);
2960 ret = ERR_PTR(-EINVAL);
2961 goto out_err;
2962 }
2963 xprt_set_bound(xprt);
2964 xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
2965 ret = ERR_PTR(xs_local_setup_socket(transport));
2966 if (ret)
2967 goto out_err;
2968 break;
2969 default:
2970 ret = ERR_PTR(-EAFNOSUPPORT);
2971 goto out_err;
2972 }
2973
2974 dprintk("RPC: set up xprt to %s via AF_LOCAL\n",
2975 xprt->address_strings[RPC_DISPLAY_ADDR]);
2976
2977 if (try_module_get(THIS_MODULE))
2978 return xprt;
2979 ret = ERR_PTR(-EINVAL);
2980out_err:
2981 xs_xprt_free(xprt);
2982 return ret;
2983}
2984
2985static const struct rpc_timeout xs_udp_default_timeout = {
2986 .to_initval = 5 * HZ,
2987 .to_maxval = 30 * HZ,
2988 .to_increment = 5 * HZ,
2989 .to_retries = 5,
2990};
2991
2992/**
2993 * xs_setup_udp - Set up transport to use a UDP socket
2994 * @args: rpc transport creation arguments
2995 *
2996 */
2997static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2998{
2999 struct sockaddr *addr = args->dstaddr;
3000 struct rpc_xprt *xprt;
3001 struct sock_xprt *transport;
3002 struct rpc_xprt *ret;
3003
3004 xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries,
3005 xprt_udp_slot_table_entries);
3006 if (IS_ERR(xprt))
3007 return xprt;
3008 transport = container_of(xprt, struct sock_xprt, xprt);
3009
3010 xprt->prot = IPPROTO_UDP;
3011 xprt->tsh_size = 0;
3012 /* XXX: header size can vary due to auth type, IPv6, etc. */
3013 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
3014
3015 xprt->bind_timeout = XS_BIND_TO;
3016 xprt->reestablish_timeout = XS_UDP_REEST_TO;
3017 xprt->idle_timeout = XS_IDLE_DISC_TO;
3018
3019 xprt->ops = &xs_udp_ops;
3020
3021 xprt->timeout = &xs_udp_default_timeout;
3022
3023 INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn);
3024 INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket);
3025
3026 switch (addr->sa_family) {
3027 case AF_INET:
3028 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
3029 xprt_set_bound(xprt);
3030
3031 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
3032 break;
3033 case AF_INET6:
3034 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
3035 xprt_set_bound(xprt);
3036
3037 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
3038 break;
3039 default:
3040 ret = ERR_PTR(-EAFNOSUPPORT);
3041 goto out_err;
3042 }
3043
3044 if (xprt_bound(xprt))
3045 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
3046 xprt->address_strings[RPC_DISPLAY_ADDR],
3047 xprt->address_strings[RPC_DISPLAY_PORT],
3048 xprt->address_strings[RPC_DISPLAY_PROTO]);
3049 else
3050 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
3051 xprt->address_strings[RPC_DISPLAY_ADDR],
3052 xprt->address_strings[RPC_DISPLAY_PROTO]);
3053
3054 if (try_module_get(THIS_MODULE))
3055 return xprt;
3056 ret = ERR_PTR(-EINVAL);
3057out_err:
3058 xs_xprt_free(xprt);
3059 return ret;
3060}
3061
3062static const struct rpc_timeout xs_tcp_default_timeout = {
3063 .to_initval = 60 * HZ,
3064 .to_maxval = 60 * HZ,
3065 .to_retries = 2,
3066};
3067
3068/**
3069 * xs_setup_tcp - Set up transport to use a TCP socket
3070 * @args: rpc transport creation arguments
3071 *
3072 */
3073static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
3074{
3075 struct sockaddr *addr = args->dstaddr;
3076 struct rpc_xprt *xprt;
3077 struct sock_xprt *transport;
3078 struct rpc_xprt *ret;
3079 unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;
3080
3081 if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
3082 max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;
3083
3084 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
3085 max_slot_table_size);
3086 if (IS_ERR(xprt))
3087 return xprt;
3088 transport = container_of(xprt, struct sock_xprt, xprt);
3089
3090 xprt->prot = IPPROTO_TCP;
3091 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
3092 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
3093
3094 xprt->bind_timeout = XS_BIND_TO;
3095 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
3096 xprt->idle_timeout = XS_IDLE_DISC_TO;
3097
3098 xprt->ops = &xs_tcp_ops;
3099 xprt->timeout = &xs_tcp_default_timeout;
3100
3101 xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
3102 xprt->connect_timeout = xprt->timeout->to_initval *
3103 (xprt->timeout->to_retries + 1);
3104
3105 INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn);
3106 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);
3107
3108 switch (addr->sa_family) {
3109 case AF_INET:
3110 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
3111 xprt_set_bound(xprt);
3112
3113 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
3114 break;
3115 case AF_INET6:
3116 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
3117 xprt_set_bound(xprt);
3118
3119 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
3120 break;
3121 default:
3122 ret = ERR_PTR(-EAFNOSUPPORT);
3123 goto out_err;
3124 }
3125
3126 if (xprt_bound(xprt))
3127 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
3128 xprt->address_strings[RPC_DISPLAY_ADDR],
3129 xprt->address_strings[RPC_DISPLAY_PORT],
3130 xprt->address_strings[RPC_DISPLAY_PROTO]);
3131 else
3132 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
3133 xprt->address_strings[RPC_DISPLAY_ADDR],
3134 xprt->address_strings[RPC_DISPLAY_PROTO]);
3135
3136 if (try_module_get(THIS_MODULE))
3137 return xprt;
3138 ret = ERR_PTR(-EINVAL);
3139out_err:
3140 xs_xprt_free(xprt);
3141 return ret;
3142}
3143
3144/**
3145 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
3146 * @args: rpc transport creation arguments
3147 *
3148 */
3149static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
3150{
3151 struct sockaddr *addr = args->dstaddr;
3152 struct rpc_xprt *xprt;
3153 struct sock_xprt *transport;
3154 struct svc_sock *bc_sock;
3155 struct rpc_xprt *ret;
3156
3157 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
3158 xprt_tcp_slot_table_entries);
3159 if (IS_ERR(xprt))
3160 return xprt;
3161 transport = container_of(xprt, struct sock_xprt, xprt);
3162
3163 xprt->prot = IPPROTO_TCP;
3164 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
3165 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
3166 xprt->timeout = &xs_tcp_default_timeout;
3167
3168 /* backchannel */
3169 xprt_set_bound(xprt);
3170 xprt->bind_timeout = 0;
3171 xprt->reestablish_timeout = 0;
3172 xprt->idle_timeout = 0;
3173
3174 xprt->ops = &bc_tcp_ops;
3175
3176 switch (addr->sa_family) {
3177 case AF_INET:
3178 xs_format_peer_addresses(xprt, "tcp",
3179 RPCBIND_NETID_TCP);
3180 break;
3181 case AF_INET6:
3182 xs_format_peer_addresses(xprt, "tcp",
3183 RPCBIND_NETID_TCP6);
3184 break;
3185 default:
3186 ret = ERR_PTR(-EAFNOSUPPORT);
3187 goto out_err;
3188 }
3189
3190 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
3191 xprt->address_strings[RPC_DISPLAY_ADDR],
3192 xprt->address_strings[RPC_DISPLAY_PORT],
3193 xprt->address_strings[RPC_DISPLAY_PROTO]);
3194
3195 /*
3196 * Once we've associated a backchannel xprt with a connection,
3197 * we want to keep it around as long as the connection lasts,
3198 * in case we need to start using it for a backchannel again;
3199 * this reference won't be dropped until bc_xprt is destroyed.
3200 */
3201 xprt_get(xprt);
3202 args->bc_xprt->xpt_bc_xprt = xprt;
3203 xprt->bc_xprt = args->bc_xprt;
3204 bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
3205 transport->sock = bc_sock->sk_sock;
3206 transport->inet = bc_sock->sk_sk;
3207
3208 /*
3209 * Since we don't want connections for the backchannel, we set
3210 * the xprt status to connected
3211 */
3212 xprt_set_connected(xprt);
3213
3214 if (try_module_get(THIS_MODULE))
3215 return xprt;
3216
3217 args->bc_xprt->xpt_bc_xprt = NULL;
3218 args->bc_xprt->xpt_bc_xps = NULL;
3219 xprt_put(xprt);
3220 ret = ERR_PTR(-EINVAL);
3221out_err:
3222 xs_xprt_free(xprt);
3223 return ret;
3224}
3225
3226static struct xprt_class xs_local_transport = {
3227 .list = LIST_HEAD_INIT(xs_local_transport.list),
3228 .name = "named UNIX socket",
3229 .owner = THIS_MODULE,
3230 .ident = XPRT_TRANSPORT_LOCAL,
3231 .setup = xs_setup_local,
3232};
3233
3234static struct xprt_class xs_udp_transport = {
3235 .list = LIST_HEAD_INIT(xs_udp_transport.list),
3236 .name = "udp",
3237 .owner = THIS_MODULE,
3238 .ident = XPRT_TRANSPORT_UDP,
3239 .setup = xs_setup_udp,
3240};
3241
3242static struct xprt_class xs_tcp_transport = {
3243 .list = LIST_HEAD_INIT(xs_tcp_transport.list),
3244 .name = "tcp",
3245 .owner = THIS_MODULE,
3246 .ident = XPRT_TRANSPORT_TCP,
3247 .setup = xs_setup_tcp,
3248};
3249
3250static struct xprt_class xs_bc_tcp_transport = {
3251 .list = LIST_HEAD_INIT(xs_bc_tcp_transport.list),
3252 .name = "tcp NFSv4.1 backchannel",
3253 .owner = THIS_MODULE,
3254 .ident = XPRT_TRANSPORT_BC_TCP,
3255 .setup = xs_setup_bc_tcp,
3256};
3257
3258/**
3259 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
3260 *
3261 */
3262int init_socket_xprt(void)
3263{
3264#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
3265 if (!sunrpc_table_header)
3266 sunrpc_table_header = register_sysctl_table(sunrpc_table);
3267#endif
3268
3269 xprt_register_transport(&xs_local_transport);
3270 xprt_register_transport(&xs_udp_transport);
3271 xprt_register_transport(&xs_tcp_transport);
3272 xprt_register_transport(&xs_bc_tcp_transport);
3273
3274 return 0;
3275}
3276
3277/**
3278 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
3279 *
3280 */
3281void cleanup_socket_xprt(void)
3282{
3283#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
3284 if (sunrpc_table_header) {
3285 unregister_sysctl_table(sunrpc_table_header);
3286 sunrpc_table_header = NULL;
3287 }
3288#endif
3289
3290 xprt_unregister_transport(&xs_local_transport);
3291 xprt_unregister_transport(&xs_udp_transport);
3292 xprt_unregister_transport(&xs_tcp_transport);
3293 xprt_unregister_transport(&xs_bc_tcp_transport);
3294}
3295
3296static int param_set_uint_minmax(const char *val,
3297 const struct kernel_param *kp,
3298 unsigned int min, unsigned int max)
3299{
3300 unsigned int num;
3301 int ret;
3302
3303 if (!val)
3304 return -EINVAL;
3305 ret = kstrtouint(val, 0, &num);
3306 if (ret)
3307 return ret;
3308 if (num < min || num > max)
3309 return -EINVAL;
3310 *((unsigned int *)kp->arg) = num;
3311 return 0;
3312}
3313
3314static int param_set_portnr(const char *val, const struct kernel_param *kp)
3315{
3316 if (kp->arg == &xprt_min_resvport)
3317 return param_set_uint_minmax(val, kp,
3318 RPC_MIN_RESVPORT,
3319 xprt_max_resvport);
3320 return param_set_uint_minmax(val, kp,
3321 xprt_min_resvport,
3322 RPC_MAX_RESVPORT);
3323}
3324
3325static const struct kernel_param_ops param_ops_portnr = {
3326 .set = param_set_portnr,
3327 .get = param_get_uint,
3328};
3329
3330#define param_check_portnr(name, p) \
3331 __param_check(name, p, unsigned int);
3332
3333module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
3334module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
3335
3336static int param_set_slot_table_size(const char *val,
3337 const struct kernel_param *kp)
3338{
3339 return param_set_uint_minmax(val, kp,
3340 RPC_MIN_SLOT_TABLE,
3341 RPC_MAX_SLOT_TABLE);
3342}
3343
3344static const struct kernel_param_ops param_ops_slot_table_size = {
3345 .set = param_set_slot_table_size,
3346 .get = param_get_uint,
3347};
3348
3349#define param_check_slot_table_size(name, p) \
3350 __param_check(name, p, unsigned int);
3351
3352static int param_set_max_slot_table_size(const char *val,
3353 const struct kernel_param *kp)
3354{
3355 return param_set_uint_minmax(val, kp,
3356 RPC_MIN_SLOT_TABLE,
3357 RPC_MAX_SLOT_TABLE_LIMIT);
3358}
3359
3360static const struct kernel_param_ops param_ops_max_slot_table_size = {
3361 .set = param_set_max_slot_table_size,
3362 .get = param_get_uint,
3363};
3364
3365#define param_check_max_slot_table_size(name, p) \
3366 __param_check(name, p, unsigned int);
3367
3368module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
3369 slot_table_size, 0644);
3370module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries,
3371 max_slot_table_size, 0644);
3372module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
3373 slot_table_size, 0644);
3374
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/net/sunrpc/xprtsock.c
4 *
5 * Client-side transport implementation for sockets.
6 *
7 * TCP callback races fixes (C) 1998 Red Hat
8 * TCP send fixes (C) 1998 Red Hat
9 * TCP NFS related read + write fixes
10 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
11 *
12 * Rewrite of larges part of the code in order to stabilize TCP stuff.
13 * Fix behaviour when socket buffer is full.
14 * (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
15 *
16 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
17 *
18 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
19 * <gilles.quillard@bull.net>
20 */
21
22#include <linux/types.h>
23#include <linux/string.h>
24#include <linux/slab.h>
25#include <linux/module.h>
26#include <linux/capability.h>
27#include <linux/pagemap.h>
28#include <linux/errno.h>
29#include <linux/socket.h>
30#include <linux/in.h>
31#include <linux/net.h>
32#include <linux/mm.h>
33#include <linux/un.h>
34#include <linux/udp.h>
35#include <linux/tcp.h>
36#include <linux/sunrpc/clnt.h>
37#include <linux/sunrpc/addr.h>
38#include <linux/sunrpc/sched.h>
39#include <linux/sunrpc/svcsock.h>
40#include <linux/sunrpc/xprtsock.h>
41#include <linux/file.h>
42#ifdef CONFIG_SUNRPC_BACKCHANNEL
43#include <linux/sunrpc/bc_xprt.h>
44#endif
45
46#include <net/sock.h>
47#include <net/checksum.h>
48#include <net/udp.h>
49#include <net/tcp.h>
50#include <linux/bvec.h>
51#include <linux/highmem.h>
52#include <linux/uio.h>
53#include <linux/sched/mm.h>
54
55#include <trace/events/sunrpc.h>
56
57#include "socklib.h"
58#include "sunrpc.h"
59
60static void xs_close(struct rpc_xprt *xprt);
61static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock);
62static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
63 struct socket *sock);
64
65/*
66 * xprtsock tunables
67 */
68static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
69static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
70static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;
71
72static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
73static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
74
75#define XS_TCP_LINGER_TO (15U * HZ)
76static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
77
78/*
79 * We can register our own files under /proc/sys/sunrpc by
80 * calling register_sysctl_table() again. The files in that
81 * directory become the union of all files registered there.
82 *
83 * We simply need to make sure that we don't collide with
84 * someone else's file names!
85 */
86
87static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
88static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
89static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
90static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
91static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
92
93static struct ctl_table_header *sunrpc_table_header;
94
95static struct xprt_class xs_local_transport;
96static struct xprt_class xs_udp_transport;
97static struct xprt_class xs_tcp_transport;
98static struct xprt_class xs_bc_tcp_transport;
99
100/*
101 * FIXME: changing the UDP slot table size should also resize the UDP
102 * socket buffers for existing UDP transports
103 */
104static struct ctl_table xs_tunables_table[] = {
105 {
106 .procname = "udp_slot_table_entries",
107 .data = &xprt_udp_slot_table_entries,
108 .maxlen = sizeof(unsigned int),
109 .mode = 0644,
110 .proc_handler = proc_dointvec_minmax,
111 .extra1 = &min_slot_table_size,
112 .extra2 = &max_slot_table_size
113 },
114 {
115 .procname = "tcp_slot_table_entries",
116 .data = &xprt_tcp_slot_table_entries,
117 .maxlen = sizeof(unsigned int),
118 .mode = 0644,
119 .proc_handler = proc_dointvec_minmax,
120 .extra1 = &min_slot_table_size,
121 .extra2 = &max_slot_table_size
122 },
123 {
124 .procname = "tcp_max_slot_table_entries",
125 .data = &xprt_max_tcp_slot_table_entries,
126 .maxlen = sizeof(unsigned int),
127 .mode = 0644,
128 .proc_handler = proc_dointvec_minmax,
129 .extra1 = &min_slot_table_size,
130 .extra2 = &max_tcp_slot_table_limit
131 },
132 {
133 .procname = "min_resvport",
134 .data = &xprt_min_resvport,
135 .maxlen = sizeof(unsigned int),
136 .mode = 0644,
137 .proc_handler = proc_dointvec_minmax,
138 .extra1 = &xprt_min_resvport_limit,
139 .extra2 = &xprt_max_resvport_limit
140 },
141 {
142 .procname = "max_resvport",
143 .data = &xprt_max_resvport,
144 .maxlen = sizeof(unsigned int),
145 .mode = 0644,
146 .proc_handler = proc_dointvec_minmax,
147 .extra1 = &xprt_min_resvport_limit,
148 .extra2 = &xprt_max_resvport_limit
149 },
150 {
151 .procname = "tcp_fin_timeout",
152 .data = &xs_tcp_fin_timeout,
153 .maxlen = sizeof(xs_tcp_fin_timeout),
154 .mode = 0644,
155 .proc_handler = proc_dointvec_jiffies,
156 },
157 { },
158};
159
160static struct ctl_table sunrpc_table[] = {
161 {
162 .procname = "sunrpc",
163 .mode = 0555,
164 .child = xs_tunables_table
165 },
166 { },
167};
168
169/*
170 * Wait duration for a reply from the RPC portmapper.
171 */
172#define XS_BIND_TO (60U * HZ)
173
174/*
175 * Delay if a UDP socket connect error occurs. This is most likely some
176 * kind of resource problem on the local host.
177 */
178#define XS_UDP_REEST_TO (2U * HZ)
179
180/*
181 * The reestablish timeout allows clients to delay for a bit before attempting
182 * to reconnect to a server that just dropped our connection.
183 *
184 * We implement an exponential backoff when trying to reestablish a TCP
185 * transport connection with the server. Some servers like to drop a TCP
186 * connection when they are overworked, so we start with a short timeout and
187 * increase over time if the server is down or not responding.
188 */
189#define XS_TCP_INIT_REEST_TO (3U * HZ)
190
191/*
192 * TCP idle timeout; client drops the transport socket if it is idle
193 * for this long. Note that we also timeout UDP sockets to prevent
194 * holding port numbers when there is no RPC traffic.
195 */
196#define XS_IDLE_DISC_TO (5U * 60 * HZ)
197
198#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
199# undef RPC_DEBUG_DATA
200# define RPCDBG_FACILITY RPCDBG_TRANS
201#endif
202
203#ifdef RPC_DEBUG_DATA
204static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
205{
206 u8 *buf = (u8 *) packet;
207 int j;
208
209 dprintk("RPC: %s\n", msg);
210 for (j = 0; j < count && j < 128; j += 4) {
211 if (!(j & 31)) {
212 if (j)
213 dprintk("\n");
214 dprintk("0x%04x ", j);
215 }
216 dprintk("%02x%02x%02x%02x ",
217 buf[j], buf[j+1], buf[j+2], buf[j+3]);
218 }
219 dprintk("\n");
220}
221#else
222static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
223{
224 /* NOP */
225}
226#endif
227
228static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
229{
230 return (struct rpc_xprt *) sk->sk_user_data;
231}
232
233static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
234{
235 return (struct sockaddr *) &xprt->addr;
236}
237
238static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
239{
240 return (struct sockaddr_un *) &xprt->addr;
241}
242
243static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
244{
245 return (struct sockaddr_in *) &xprt->addr;
246}
247
248static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
249{
250 return (struct sockaddr_in6 *) &xprt->addr;
251}
252
253static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
254{
255 struct sockaddr *sap = xs_addr(xprt);
256 struct sockaddr_in6 *sin6;
257 struct sockaddr_in *sin;
258 struct sockaddr_un *sun;
259 char buf[128];
260
261 switch (sap->sa_family) {
262 case AF_LOCAL:
263 sun = xs_addr_un(xprt);
264 strscpy(buf, sun->sun_path, sizeof(buf));
265 xprt->address_strings[RPC_DISPLAY_ADDR] =
266 kstrdup(buf, GFP_KERNEL);
267 break;
268 case AF_INET:
269 (void)rpc_ntop(sap, buf, sizeof(buf));
270 xprt->address_strings[RPC_DISPLAY_ADDR] =
271 kstrdup(buf, GFP_KERNEL);
272 sin = xs_addr_in(xprt);
273 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
274 break;
275 case AF_INET6:
276 (void)rpc_ntop(sap, buf, sizeof(buf));
277 xprt->address_strings[RPC_DISPLAY_ADDR] =
278 kstrdup(buf, GFP_KERNEL);
279 sin6 = xs_addr_in6(xprt);
280 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
281 break;
282 default:
283 BUG();
284 }
285
286 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
287}
288
289static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
290{
291 struct sockaddr *sap = xs_addr(xprt);
292 char buf[128];
293
294 snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
295 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
296
297 snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
298 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
299}
300
301static void xs_format_peer_addresses(struct rpc_xprt *xprt,
302 const char *protocol,
303 const char *netid)
304{
305 xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
306 xprt->address_strings[RPC_DISPLAY_NETID] = netid;
307 xs_format_common_peer_addresses(xprt);
308 xs_format_common_peer_ports(xprt);
309}
310
311static void xs_update_peer_port(struct rpc_xprt *xprt)
312{
313 kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
314 kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
315
316 xs_format_common_peer_ports(xprt);
317}
318
319static void xs_free_peer_addresses(struct rpc_xprt *xprt)
320{
321 unsigned int i;
322
323 for (i = 0; i < RPC_DISPLAY_MAX; i++)
324 switch (i) {
325 case RPC_DISPLAY_PROTO:
326 case RPC_DISPLAY_NETID:
327 continue;
328 default:
329 kfree(xprt->address_strings[i]);
330 }
331}
332
333static size_t
334xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
335{
336 size_t i,n;
337
338 if (!want || !(buf->flags & XDRBUF_SPARSE_PAGES))
339 return want;
340 n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT;
341 for (i = 0; i < n; i++) {
342 if (buf->pages[i])
343 continue;
344 buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp);
345 if (!buf->pages[i]) {
346 i *= PAGE_SIZE;
347 return i > buf->page_base ? i - buf->page_base : 0;
348 }
349 }
350 return want;
351}
352
353static ssize_t
354xs_sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags, size_t seek)
355{
356 ssize_t ret;
357 if (seek != 0)
358 iov_iter_advance(&msg->msg_iter, seek);
359 ret = sock_recvmsg(sock, msg, flags);
360 return ret > 0 ? ret + seek : ret;
361}
362
363static ssize_t
364xs_read_kvec(struct socket *sock, struct msghdr *msg, int flags,
365 struct kvec *kvec, size_t count, size_t seek)
366{
367 iov_iter_kvec(&msg->msg_iter, ITER_DEST, kvec, 1, count);
368 return xs_sock_recvmsg(sock, msg, flags, seek);
369}
370
371static ssize_t
372xs_read_bvec(struct socket *sock, struct msghdr *msg, int flags,
373 struct bio_vec *bvec, unsigned long nr, size_t count,
374 size_t seek)
375{
376 iov_iter_bvec(&msg->msg_iter, ITER_DEST, bvec, nr, count);
377 return xs_sock_recvmsg(sock, msg, flags, seek);
378}
379
380static ssize_t
381xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
382 size_t count)
383{
384 iov_iter_discard(&msg->msg_iter, ITER_DEST, count);
385 return sock_recvmsg(sock, msg, flags);
386}
387
388#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
389static void
390xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
391{
392 struct bvec_iter bi = {
393 .bi_size = count,
394 };
395 struct bio_vec bv;
396
397 bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
398 for_each_bvec(bv, bvec, bi, bi)
399 flush_dcache_page(bv.bv_page);
400}
401#else
402static inline void
403xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
404{
405}
406#endif
407
408static ssize_t
409xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
410 struct xdr_buf *buf, size_t count, size_t seek, size_t *read)
411{
412 size_t want, seek_init = seek, offset = 0;
413 ssize_t ret;
414
415 want = min_t(size_t, count, buf->head[0].iov_len);
416 if (seek < want) {
417 ret = xs_read_kvec(sock, msg, flags, &buf->head[0], want, seek);
418 if (ret <= 0)
419 goto sock_err;
420 offset += ret;
421 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
422 goto out;
423 if (ret != want)
424 goto out;
425 seek = 0;
426 } else {
427 seek -= want;
428 offset += want;
429 }
430
431 want = xs_alloc_sparse_pages(
432 buf, min_t(size_t, count - offset, buf->page_len),
433 GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
434 if (seek < want) {
435 ret = xs_read_bvec(sock, msg, flags, buf->bvec,
436 xdr_buf_pagecount(buf),
437 want + buf->page_base,
438 seek + buf->page_base);
439 if (ret <= 0)
440 goto sock_err;
441 xs_flush_bvec(buf->bvec, ret, seek + buf->page_base);
442 ret -= buf->page_base;
443 offset += ret;
444 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
445 goto out;
446 if (ret != want)
447 goto out;
448 seek = 0;
449 } else {
450 seek -= want;
451 offset += want;
452 }
453
454 want = min_t(size_t, count - offset, buf->tail[0].iov_len);
455 if (seek < want) {
456 ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek);
457 if (ret <= 0)
458 goto sock_err;
459 offset += ret;
460 if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
461 goto out;
462 if (ret != want)
463 goto out;
464 } else if (offset < seek_init)
465 offset = seek_init;
466 ret = -EMSGSIZE;
467out:
468 *read = offset - seek_init;
469 return ret;
470sock_err:
471 offset += seek;
472 goto out;
473}
474
475static void
476xs_read_header(struct sock_xprt *transport, struct xdr_buf *buf)
477{
478 if (!transport->recv.copied) {
479 if (buf->head[0].iov_len >= transport->recv.offset)
480 memcpy(buf->head[0].iov_base,
481 &transport->recv.xid,
482 transport->recv.offset);
483 transport->recv.copied = transport->recv.offset;
484 }
485}
486
487static bool
488xs_read_stream_request_done(struct sock_xprt *transport)
489{
490 return transport->recv.fraghdr & cpu_to_be32(RPC_LAST_STREAM_FRAGMENT);
491}
492
493static void
494xs_read_stream_check_eor(struct sock_xprt *transport,
495 struct msghdr *msg)
496{
497 if (xs_read_stream_request_done(transport))
498 msg->msg_flags |= MSG_EOR;
499}
500
501static ssize_t
502xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
503 int flags, struct rpc_rqst *req)
504{
505 struct xdr_buf *buf = &req->rq_private_buf;
506 size_t want, read;
507 ssize_t ret;
508
509 xs_read_header(transport, buf);
510
511 want = transport->recv.len - transport->recv.offset;
512 if (want != 0) {
513 ret = xs_read_xdr_buf(transport->sock, msg, flags, buf,
514 transport->recv.copied + want,
515 transport->recv.copied,
516 &read);
517 transport->recv.offset += read;
518 transport->recv.copied += read;
519 }
520
521 if (transport->recv.offset == transport->recv.len)
522 xs_read_stream_check_eor(transport, msg);
523
524 if (want == 0)
525 return 0;
526
527 switch (ret) {
528 default:
529 break;
530 case -EFAULT:
531 case -EMSGSIZE:
532 msg->msg_flags |= MSG_TRUNC;
533 return read;
534 case 0:
535 return -ESHUTDOWN;
536 }
537 return ret < 0 ? ret : read;
538}
539
540static size_t
541xs_read_stream_headersize(bool isfrag)
542{
543 if (isfrag)
544 return sizeof(__be32);
545 return 3 * sizeof(__be32);
546}
547
548static ssize_t
549xs_read_stream_header(struct sock_xprt *transport, struct msghdr *msg,
550 int flags, size_t want, size_t seek)
551{
552 struct kvec kvec = {
553 .iov_base = &transport->recv.fraghdr,
554 .iov_len = want,
555 };
556 return xs_read_kvec(transport->sock, msg, flags, &kvec, want, seek);
557}
558
559#if defined(CONFIG_SUNRPC_BACKCHANNEL)
560static ssize_t
561xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
562{
563 struct rpc_xprt *xprt = &transport->xprt;
564 struct rpc_rqst *req;
565 ssize_t ret;
566
567 /* Is this transport associated with the backchannel? */
568 if (!xprt->bc_serv)
569 return -ESHUTDOWN;
570
571 /* Look up and lock the request corresponding to the given XID */
572 req = xprt_lookup_bc_request(xprt, transport->recv.xid);
573 if (!req) {
574 printk(KERN_WARNING "Callback slot table overflowed\n");
575 return -ESHUTDOWN;
576 }
577 if (transport->recv.copied && !req->rq_private_buf.len)
578 return -ESHUTDOWN;
579
580 ret = xs_read_stream_request(transport, msg, flags, req);
581 if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
582 xprt_complete_bc_request(req, transport->recv.copied);
583 else
584 req->rq_private_buf.len = transport->recv.copied;
585
586 return ret;
587}
588#else /* CONFIG_SUNRPC_BACKCHANNEL */
589static ssize_t
590xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
591{
592 return -ESHUTDOWN;
593}
594#endif /* CONFIG_SUNRPC_BACKCHANNEL */
595
596static ssize_t
597xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags)
598{
599 struct rpc_xprt *xprt = &transport->xprt;
600 struct rpc_rqst *req;
601 ssize_t ret = 0;
602
603 /* Look up and lock the request corresponding to the given XID */
604 spin_lock(&xprt->queue_lock);
605 req = xprt_lookup_rqst(xprt, transport->recv.xid);
606 if (!req || (transport->recv.copied && !req->rq_private_buf.len)) {
607 msg->msg_flags |= MSG_TRUNC;
608 goto out;
609 }
610 xprt_pin_rqst(req);
611 spin_unlock(&xprt->queue_lock);
612
613 ret = xs_read_stream_request(transport, msg, flags, req);
614
615 spin_lock(&xprt->queue_lock);
616 if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
617 xprt_complete_rqst(req->rq_task, transport->recv.copied);
618 else
619 req->rq_private_buf.len = transport->recv.copied;
620 xprt_unpin_rqst(req);
621out:
622 spin_unlock(&xprt->queue_lock);
623 return ret;
624}
625
626static ssize_t
627xs_read_stream(struct sock_xprt *transport, int flags)
628{
629 struct msghdr msg = { 0 };
630 size_t want, read = 0;
631 ssize_t ret = 0;
632
633 if (transport->recv.len == 0) {
634 want = xs_read_stream_headersize(transport->recv.copied != 0);
635 ret = xs_read_stream_header(transport, &msg, flags, want,
636 transport->recv.offset);
637 if (ret <= 0)
638 goto out_err;
639 transport->recv.offset = ret;
640 if (transport->recv.offset != want)
641 return transport->recv.offset;
642 transport->recv.len = be32_to_cpu(transport->recv.fraghdr) &
643 RPC_FRAGMENT_SIZE_MASK;
644 transport->recv.offset -= sizeof(transport->recv.fraghdr);
645 read = ret;
646 }
647
648 switch (be32_to_cpu(transport->recv.calldir)) {
649 default:
650 msg.msg_flags |= MSG_TRUNC;
651 break;
652 case RPC_CALL:
653 ret = xs_read_stream_call(transport, &msg, flags);
654 break;
655 case RPC_REPLY:
656 ret = xs_read_stream_reply(transport, &msg, flags);
657 }
658 if (msg.msg_flags & MSG_TRUNC) {
659 transport->recv.calldir = cpu_to_be32(-1);
660 transport->recv.copied = -1;
661 }
662 if (ret < 0)
663 goto out_err;
664 read += ret;
665 if (transport->recv.offset < transport->recv.len) {
666 if (!(msg.msg_flags & MSG_TRUNC))
667 return read;
668 msg.msg_flags = 0;
669 ret = xs_read_discard(transport->sock, &msg, flags,
670 transport->recv.len - transport->recv.offset);
671 if (ret <= 0)
672 goto out_err;
673 transport->recv.offset += ret;
674 read += ret;
675 if (transport->recv.offset != transport->recv.len)
676 return read;
677 }
678 if (xs_read_stream_request_done(transport)) {
679 trace_xs_stream_read_request(transport);
680 transport->recv.copied = 0;
681 }
682 transport->recv.offset = 0;
683 transport->recv.len = 0;
684 return read;
685out_err:
686 return ret != 0 ? ret : -ESHUTDOWN;
687}
688
689static __poll_t xs_poll_socket(struct sock_xprt *transport)
690{
691 return transport->sock->ops->poll(transport->file, transport->sock,
692 NULL);
693}
694
695static bool xs_poll_socket_readable(struct sock_xprt *transport)
696{
697 __poll_t events = xs_poll_socket(transport);
698
699 return (events & (EPOLLIN | EPOLLRDNORM)) && !(events & EPOLLRDHUP);
700}
701
702static void xs_poll_check_readable(struct sock_xprt *transport)
703{
704
705 clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
706 if (!xs_poll_socket_readable(transport))
707 return;
708 if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
709 queue_work(xprtiod_workqueue, &transport->recv_worker);
710}
711
712static void xs_stream_data_receive(struct sock_xprt *transport)
713{
714 size_t read = 0;
715 ssize_t ret = 0;
716
717 mutex_lock(&transport->recv_mutex);
718 if (transport->sock == NULL)
719 goto out;
720 for (;;) {
721 ret = xs_read_stream(transport, MSG_DONTWAIT);
722 if (ret < 0)
723 break;
724 read += ret;
725 cond_resched();
726 }
727 if (ret == -ESHUTDOWN)
728 kernel_sock_shutdown(transport->sock, SHUT_RDWR);
729 else
730 xs_poll_check_readable(transport);
731out:
732 mutex_unlock(&transport->recv_mutex);
733 trace_xs_stream_read_data(&transport->xprt, ret, read);
734}
735
736static void xs_stream_data_receive_workfn(struct work_struct *work)
737{
738 struct sock_xprt *transport =
739 container_of(work, struct sock_xprt, recv_worker);
740 unsigned int pflags = memalloc_nofs_save();
741
742 xs_stream_data_receive(transport);
743 memalloc_nofs_restore(pflags);
744}
745
746static void
747xs_stream_reset_connect(struct sock_xprt *transport)
748{
749 transport->recv.offset = 0;
750 transport->recv.len = 0;
751 transport->recv.copied = 0;
752 transport->xmit.offset = 0;
753}
754
755static void
756xs_stream_start_connect(struct sock_xprt *transport)
757{
758 transport->xprt.stat.connect_count++;
759 transport->xprt.stat.connect_start = jiffies;
760}
761
762#define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
763
764/**
765 * xs_nospace - handle transmit was incomplete
766 * @req: pointer to RPC request
767 * @transport: pointer to struct sock_xprt
768 *
769 */
770static int xs_nospace(struct rpc_rqst *req, struct sock_xprt *transport)
771{
772 struct rpc_xprt *xprt = &transport->xprt;
773 struct sock *sk = transport->inet;
774 int ret = -EAGAIN;
775
776 trace_rpc_socket_nospace(req, transport);
777
778 /* Protect against races with write_space */
779 spin_lock(&xprt->transport_lock);
780
781 /* Don't race with disconnect */
782 if (xprt_connected(xprt)) {
783 /* wait for more buffer space */
784 set_bit(XPRT_SOCK_NOSPACE, &transport->sock_state);
785 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
786 sk->sk_write_pending++;
787 xprt_wait_for_buffer_space(xprt);
788 } else
789 ret = -ENOTCONN;
790
791 spin_unlock(&xprt->transport_lock);
792 return ret;
793}
794
795static int xs_sock_nospace(struct rpc_rqst *req)
796{
797 struct sock_xprt *transport =
798 container_of(req->rq_xprt, struct sock_xprt, xprt);
799 struct sock *sk = transport->inet;
800 int ret = -EAGAIN;
801
802 lock_sock(sk);
803 if (!sock_writeable(sk))
804 ret = xs_nospace(req, transport);
805 release_sock(sk);
806 return ret;
807}
808
809static int xs_stream_nospace(struct rpc_rqst *req, bool vm_wait)
810{
811 struct sock_xprt *transport =
812 container_of(req->rq_xprt, struct sock_xprt, xprt);
813 struct sock *sk = transport->inet;
814 int ret = -EAGAIN;
815
816 if (vm_wait)
817 return -ENOBUFS;
818 lock_sock(sk);
819 if (!sk_stream_memory_free(sk))
820 ret = xs_nospace(req, transport);
821 release_sock(sk);
822 return ret;
823}
824
825static int xs_stream_prepare_request(struct rpc_rqst *req, struct xdr_buf *buf)
826{
827 return xdr_alloc_bvec(buf, rpc_task_gfp_mask());
828}
829
830/*
831 * Determine if the previous message in the stream was aborted before it
832 * could complete transmission.
833 */
834static bool
835xs_send_request_was_aborted(struct sock_xprt *transport, struct rpc_rqst *req)
836{
837 return transport->xmit.offset != 0 && req->rq_bytes_sent == 0;
838}
839
840/*
841 * Return the stream record marker field for a record of length < 2^31-1
842 */
843static rpc_fraghdr
844xs_stream_record_marker(struct xdr_buf *xdr)
845{
846 if (!xdr->len)
847 return 0;
848 return cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | (u32)xdr->len);
849}
850
851/**
852 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
853 * @req: pointer to RPC request
854 *
855 * Return values:
856 * 0: The request has been sent
857 * EAGAIN: The socket was blocked, please call again later to
858 * complete the request
859 * ENOTCONN: Caller needs to invoke connect logic then call again
860 * other: Some other error occurred, the request was not sent
861 */
862static int xs_local_send_request(struct rpc_rqst *req)
863{
864 struct rpc_xprt *xprt = req->rq_xprt;
865 struct sock_xprt *transport =
866 container_of(xprt, struct sock_xprt, xprt);
867 struct xdr_buf *xdr = &req->rq_snd_buf;
868 rpc_fraghdr rm = xs_stream_record_marker(xdr);
869 unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
870 struct msghdr msg = {
871 .msg_flags = XS_SENDMSG_FLAGS,
872 };
873 bool vm_wait;
874 unsigned int sent;
875 int status;
876
877 /* Close the stream if the previous transmission was incomplete */
878 if (xs_send_request_was_aborted(transport, req)) {
879 xprt_force_disconnect(xprt);
880 return -ENOTCONN;
881 }
882
883 xs_pktdump("packet data:",
884 req->rq_svec->iov_base, req->rq_svec->iov_len);
885
886 vm_wait = sk_stream_is_writeable(transport->inet) ? true : false;
887
888 req->rq_xtime = ktime_get();
889 status = xprt_sock_sendmsg(transport->sock, &msg, xdr,
890 transport->xmit.offset, rm, &sent);
891 dprintk("RPC: %s(%u) = %d\n",
892 __func__, xdr->len - transport->xmit.offset, status);
893
894 if (likely(sent > 0) || status == 0) {
895 transport->xmit.offset += sent;
896 req->rq_bytes_sent = transport->xmit.offset;
897 if (likely(req->rq_bytes_sent >= msglen)) {
898 req->rq_xmit_bytes_sent += transport->xmit.offset;
899 transport->xmit.offset = 0;
900 return 0;
901 }
902 status = -EAGAIN;
903 vm_wait = false;
904 }
905
906 switch (status) {
907 case -EAGAIN:
908 status = xs_stream_nospace(req, vm_wait);
909 break;
910 default:
911 dprintk("RPC: sendmsg returned unrecognized error %d\n",
912 -status);
913 fallthrough;
914 case -EPIPE:
915 xprt_force_disconnect(xprt);
916 status = -ENOTCONN;
917 }
918
919 return status;
920}
921
922/**
923 * xs_udp_send_request - write an RPC request to a UDP socket
924 * @req: pointer to RPC request
925 *
926 * Return values:
927 * 0: The request has been sent
928 * EAGAIN: The socket was blocked, please call again later to
929 * complete the request
930 * ENOTCONN: Caller needs to invoke connect logic then call again
931 * other: Some other error occurred, the request was not sent
932 */
933static int xs_udp_send_request(struct rpc_rqst *req)
934{
935 struct rpc_xprt *xprt = req->rq_xprt;
936 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
937 struct xdr_buf *xdr = &req->rq_snd_buf;
938 struct msghdr msg = {
939 .msg_name = xs_addr(xprt),
940 .msg_namelen = xprt->addrlen,
941 .msg_flags = XS_SENDMSG_FLAGS,
942 };
943 unsigned int sent;
944 int status;
945
946 xs_pktdump("packet data:",
947 req->rq_svec->iov_base,
948 req->rq_svec->iov_len);
949
950 if (!xprt_bound(xprt))
951 return -ENOTCONN;
952
953 if (!xprt_request_get_cong(xprt, req))
954 return -EBADSLT;
955
956 status = xdr_alloc_bvec(xdr, rpc_task_gfp_mask());
957 if (status < 0)
958 return status;
959 req->rq_xtime = ktime_get();
960 status = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, 0, &sent);
961
962 dprintk("RPC: xs_udp_send_request(%u) = %d\n",
963 xdr->len, status);
964
965 /* firewall is blocking us, don't return -EAGAIN or we end up looping */
966 if (status == -EPERM)
967 goto process_status;
968
969 if (status == -EAGAIN && sock_writeable(transport->inet))
970 status = -ENOBUFS;
971
972 if (sent > 0 || status == 0) {
973 req->rq_xmit_bytes_sent += sent;
974 if (sent >= req->rq_slen)
975 return 0;
976 /* Still some bytes left; set up for a retry later. */
977 status = -EAGAIN;
978 }
979
980process_status:
981 switch (status) {
982 case -ENOTSOCK:
983 status = -ENOTCONN;
984 /* Should we call xs_close() here? */
985 break;
986 case -EAGAIN:
987 status = xs_sock_nospace(req);
988 break;
989 case -ENETUNREACH:
990 case -ENOBUFS:
991 case -EPIPE:
992 case -ECONNREFUSED:
993 case -EPERM:
994 /* When the server has died, an ICMP port unreachable message
995 * prompts ECONNREFUSED. */
996 break;
997 default:
998 dprintk("RPC: sendmsg returned unrecognized error %d\n",
999 -status);
1000 }
1001
1002 return status;
1003}
1004
1005/**
1006 * xs_tcp_send_request - write an RPC request to a TCP socket
1007 * @req: pointer to RPC request
1008 *
1009 * Return values:
1010 * 0: The request has been sent
1011 * EAGAIN: The socket was blocked, please call again later to
1012 * complete the request
1013 * ENOTCONN: Caller needs to invoke connect logic then call again
1014 * other: Some other error occurred, the request was not sent
1015 *
1016 * XXX: In the case of soft timeouts, should we eventually give up
1017 * if sendmsg is not able to make progress?
1018 */
1019static int xs_tcp_send_request(struct rpc_rqst *req)
1020{
1021 struct rpc_xprt *xprt = req->rq_xprt;
1022 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1023 struct xdr_buf *xdr = &req->rq_snd_buf;
1024 rpc_fraghdr rm = xs_stream_record_marker(xdr);
1025 unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
1026 struct msghdr msg = {
1027 .msg_flags = XS_SENDMSG_FLAGS,
1028 };
1029 bool vm_wait;
1030 unsigned int sent;
1031 int status;
1032
1033 /* Close the stream if the previous transmission was incomplete */
1034 if (xs_send_request_was_aborted(transport, req)) {
1035 if (transport->sock != NULL)
1036 kernel_sock_shutdown(transport->sock, SHUT_RDWR);
1037 return -ENOTCONN;
1038 }
1039 if (!transport->inet)
1040 return -ENOTCONN;
1041
1042 xs_pktdump("packet data:",
1043 req->rq_svec->iov_base,
1044 req->rq_svec->iov_len);
1045
1046 if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state))
1047 xs_tcp_set_socket_timeouts(xprt, transport->sock);
1048
1049 xs_set_srcport(transport, transport->sock);
1050
1051 /* Continue transmitting the packet/record. We must be careful
1052 * to cope with writespace callbacks arriving _after_ we have
1053 * called sendmsg(). */
1054 req->rq_xtime = ktime_get();
1055 tcp_sock_set_cork(transport->inet, true);
1056
1057 vm_wait = sk_stream_is_writeable(transport->inet) ? true : false;
1058
1059 do {
1060 status = xprt_sock_sendmsg(transport->sock, &msg, xdr,
1061 transport->xmit.offset, rm, &sent);
1062
1063 dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
1064 xdr->len - transport->xmit.offset, status);
1065
1066 /* If we've sent the entire packet, immediately
1067 * reset the count of bytes sent. */
1068 transport->xmit.offset += sent;
1069 req->rq_bytes_sent = transport->xmit.offset;
1070 if (likely(req->rq_bytes_sent >= msglen)) {
1071 req->rq_xmit_bytes_sent += transport->xmit.offset;
1072 transport->xmit.offset = 0;
1073 if (atomic_long_read(&xprt->xmit_queuelen) == 1)
1074 tcp_sock_set_cork(transport->inet, false);
1075 return 0;
1076 }
1077
1078 WARN_ON_ONCE(sent == 0 && status == 0);
1079
1080 if (sent > 0)
1081 vm_wait = false;
1082
1083 } while (status == 0);
1084
1085 switch (status) {
1086 case -ENOTSOCK:
1087 status = -ENOTCONN;
1088 /* Should we call xs_close() here? */
1089 break;
1090 case -EAGAIN:
1091 status = xs_stream_nospace(req, vm_wait);
1092 break;
1093 case -ECONNRESET:
1094 case -ECONNREFUSED:
1095 case -ENOTCONN:
1096 case -EADDRINUSE:
1097 case -ENOBUFS:
1098 case -EPIPE:
1099 break;
1100 default:
1101 dprintk("RPC: sendmsg returned unrecognized error %d\n",
1102 -status);
1103 }
1104
1105 return status;
1106}
1107
1108static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
1109{
1110 transport->old_data_ready = sk->sk_data_ready;
1111 transport->old_state_change = sk->sk_state_change;
1112 transport->old_write_space = sk->sk_write_space;
1113 transport->old_error_report = sk->sk_error_report;
1114}
1115
1116static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
1117{
1118 sk->sk_data_ready = transport->old_data_ready;
1119 sk->sk_state_change = transport->old_state_change;
1120 sk->sk_write_space = transport->old_write_space;
1121 sk->sk_error_report = transport->old_error_report;
1122}
1123
1124static void xs_sock_reset_state_flags(struct rpc_xprt *xprt)
1125{
1126 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1127
1128 clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
1129 clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state);
1130 clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state);
1131 clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state);
1132 clear_bit(XPRT_SOCK_NOSPACE, &transport->sock_state);
1133}
1134
1135static void xs_run_error_worker(struct sock_xprt *transport, unsigned int nr)
1136{
1137 set_bit(nr, &transport->sock_state);
1138 queue_work(xprtiod_workqueue, &transport->error_worker);
1139}
1140
1141static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
1142{
1143 xprt->connect_cookie++;
1144 smp_mb__before_atomic();
1145 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1146 clear_bit(XPRT_CLOSING, &xprt->state);
1147 xs_sock_reset_state_flags(xprt);
1148 smp_mb__after_atomic();
1149}
1150
1151/**
1152 * xs_error_report - callback to handle TCP socket state errors
1153 * @sk: socket
1154 *
1155 * Note: we don't call sock_error() since there may be a rpc_task
1156 * using the socket, and so we don't want to clear sk->sk_err.
1157 */
1158static void xs_error_report(struct sock *sk)
1159{
1160 struct sock_xprt *transport;
1161 struct rpc_xprt *xprt;
1162
1163 if (!(xprt = xprt_from_sock(sk)))
1164 return;
1165
1166 transport = container_of(xprt, struct sock_xprt, xprt);
1167 transport->xprt_err = -sk->sk_err;
1168 if (transport->xprt_err == 0)
1169 return;
1170 dprintk("RPC: xs_error_report client %p, error=%d...\n",
1171 xprt, -transport->xprt_err);
1172 trace_rpc_socket_error(xprt, sk->sk_socket, transport->xprt_err);
1173
1174 /* barrier ensures xprt_err is set before XPRT_SOCK_WAKE_ERROR */
1175 smp_mb__before_atomic();
1176 xs_run_error_worker(transport, XPRT_SOCK_WAKE_ERROR);
1177}
1178
1179static void xs_reset_transport(struct sock_xprt *transport)
1180{
1181 struct socket *sock = transport->sock;
1182 struct sock *sk = transport->inet;
1183 struct rpc_xprt *xprt = &transport->xprt;
1184 struct file *filp = transport->file;
1185
1186 if (sk == NULL)
1187 return;
1188 /*
1189 * Make sure we're calling this in a context from which it is safe
1190 * to call __fput_sync(). In practice that means rpciod and the
1191 * system workqueue.
1192 */
1193 if (!(current->flags & PF_WQ_WORKER)) {
1194 WARN_ON_ONCE(1);
1195 set_bit(XPRT_CLOSE_WAIT, &xprt->state);
1196 return;
1197 }
1198
1199 if (atomic_read(&transport->xprt.swapper))
1200 sk_clear_memalloc(sk);
1201
1202 kernel_sock_shutdown(sock, SHUT_RDWR);
1203
1204 mutex_lock(&transport->recv_mutex);
1205 lock_sock(sk);
1206 transport->inet = NULL;
1207 transport->sock = NULL;
1208 transport->file = NULL;
1209
1210 sk->sk_user_data = NULL;
1211
1212 xs_restore_old_callbacks(transport, sk);
1213 xprt_clear_connected(xprt);
1214 xs_sock_reset_connection_flags(xprt);
1215 /* Reset stream record info */
1216 xs_stream_reset_connect(transport);
1217 release_sock(sk);
1218 mutex_unlock(&transport->recv_mutex);
1219
1220 trace_rpc_socket_close(xprt, sock);
1221 __fput_sync(filp);
1222
1223 xprt_disconnect_done(xprt);
1224}
1225
1226/**
1227 * xs_close - close a socket
1228 * @xprt: transport
1229 *
1230 * This is used when all requests are complete; ie, no DRC state remains
1231 * on the server we want to save.
1232 *
1233 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
1234 * xs_reset_transport() zeroing the socket from underneath a writer.
1235 */
1236static void xs_close(struct rpc_xprt *xprt)
1237{
1238 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1239
1240 dprintk("RPC: xs_close xprt %p\n", xprt);
1241
1242 xs_reset_transport(transport);
1243 xprt->reestablish_timeout = 0;
1244}
1245
1246static void xs_inject_disconnect(struct rpc_xprt *xprt)
1247{
1248 dprintk("RPC: injecting transport disconnect on xprt=%p\n",
1249 xprt);
1250 xprt_disconnect_done(xprt);
1251}
1252
1253static void xs_xprt_free(struct rpc_xprt *xprt)
1254{
1255 xs_free_peer_addresses(xprt);
1256 xprt_free(xprt);
1257}
1258
1259/**
1260 * xs_destroy - prepare to shutdown a transport
1261 * @xprt: doomed transport
1262 *
1263 */
1264static void xs_destroy(struct rpc_xprt *xprt)
1265{
1266 struct sock_xprt *transport = container_of(xprt,
1267 struct sock_xprt, xprt);
1268 dprintk("RPC: xs_destroy xprt %p\n", xprt);
1269
1270 cancel_delayed_work_sync(&transport->connect_worker);
1271 xs_close(xprt);
1272 cancel_work_sync(&transport->recv_worker);
1273 cancel_work_sync(&transport->error_worker);
1274 xs_xprt_free(xprt);
1275 module_put(THIS_MODULE);
1276}
1277
1278/**
1279 * xs_udp_data_read_skb - receive callback for UDP sockets
1280 * @xprt: transport
1281 * @sk: socket
1282 * @skb: skbuff
1283 *
1284 */
1285static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
1286 struct sock *sk,
1287 struct sk_buff *skb)
1288{
1289 struct rpc_task *task;
1290 struct rpc_rqst *rovr;
1291 int repsize, copied;
1292 u32 _xid;
1293 __be32 *xp;
1294
1295 repsize = skb->len;
1296 if (repsize < 4) {
1297 dprintk("RPC: impossible RPC reply size %d!\n", repsize);
1298 return;
1299 }
1300
1301 /* Copy the XID from the skb... */
1302 xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid);
1303 if (xp == NULL)
1304 return;
1305
1306 /* Look up and lock the request corresponding to the given XID */
1307 spin_lock(&xprt->queue_lock);
1308 rovr = xprt_lookup_rqst(xprt, *xp);
1309 if (!rovr)
1310 goto out_unlock;
1311 xprt_pin_rqst(rovr);
1312 xprt_update_rtt(rovr->rq_task);
1313 spin_unlock(&xprt->queue_lock);
1314 task = rovr->rq_task;
1315
1316 if ((copied = rovr->rq_private_buf.buflen) > repsize)
1317 copied = repsize;
1318
1319 /* Suck it into the iovec, verify checksum if not done by hw. */
1320 if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
1321 spin_lock(&xprt->queue_lock);
1322 __UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
1323 goto out_unpin;
1324 }
1325
1326
1327 spin_lock(&xprt->transport_lock);
1328 xprt_adjust_cwnd(xprt, task, copied);
1329 spin_unlock(&xprt->transport_lock);
1330 spin_lock(&xprt->queue_lock);
1331 xprt_complete_rqst(task, copied);
1332 __UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
1333out_unpin:
1334 xprt_unpin_rqst(rovr);
1335 out_unlock:
1336 spin_unlock(&xprt->queue_lock);
1337}
1338
1339static void xs_udp_data_receive(struct sock_xprt *transport)
1340{
1341 struct sk_buff *skb;
1342 struct sock *sk;
1343 int err;
1344
1345 mutex_lock(&transport->recv_mutex);
1346 sk = transport->inet;
1347 if (sk == NULL)
1348 goto out;
1349 for (;;) {
1350 skb = skb_recv_udp(sk, MSG_DONTWAIT, &err);
1351 if (skb == NULL)
1352 break;
1353 xs_udp_data_read_skb(&transport->xprt, sk, skb);
1354 consume_skb(skb);
1355 cond_resched();
1356 }
1357 xs_poll_check_readable(transport);
1358out:
1359 mutex_unlock(&transport->recv_mutex);
1360}
1361
1362static void xs_udp_data_receive_workfn(struct work_struct *work)
1363{
1364 struct sock_xprt *transport =
1365 container_of(work, struct sock_xprt, recv_worker);
1366 unsigned int pflags = memalloc_nofs_save();
1367
1368 xs_udp_data_receive(transport);
1369 memalloc_nofs_restore(pflags);
1370}
1371
1372/**
1373 * xs_data_ready - "data ready" callback for sockets
1374 * @sk: socket with data to read
1375 *
1376 */
1377static void xs_data_ready(struct sock *sk)
1378{
1379 struct rpc_xprt *xprt;
1380
1381 xprt = xprt_from_sock(sk);
1382 if (xprt != NULL) {
1383 struct sock_xprt *transport = container_of(xprt,
1384 struct sock_xprt, xprt);
1385
1386 trace_xs_data_ready(xprt);
1387
1388 transport->old_data_ready(sk);
1389 /* Any data means we had a useful conversation, so
1390 * then we don't need to delay the next reconnect
1391 */
1392 if (xprt->reestablish_timeout)
1393 xprt->reestablish_timeout = 0;
1394 if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
1395 queue_work(xprtiod_workqueue, &transport->recv_worker);
1396 }
1397}
1398
1399/*
1400 * Helper function to force a TCP close if the server is sending
1401 * junk and/or it has put us in CLOSE_WAIT
1402 */
1403static void xs_tcp_force_close(struct rpc_xprt *xprt)
1404{
1405 xprt_force_disconnect(xprt);
1406}
1407
1408#if defined(CONFIG_SUNRPC_BACKCHANNEL)
1409static size_t xs_tcp_bc_maxpayload(struct rpc_xprt *xprt)
1410{
1411 return PAGE_SIZE;
1412}
1413#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1414
1415/**
1416 * xs_local_state_change - callback to handle AF_LOCAL socket state changes
1417 * @sk: socket whose state has changed
1418 *
1419 */
1420static void xs_local_state_change(struct sock *sk)
1421{
1422 struct rpc_xprt *xprt;
1423 struct sock_xprt *transport;
1424
1425 if (!(xprt = xprt_from_sock(sk)))
1426 return;
1427 transport = container_of(xprt, struct sock_xprt, xprt);
1428 if (sk->sk_shutdown & SHUTDOWN_MASK) {
1429 clear_bit(XPRT_CONNECTED, &xprt->state);
1430 /* Trigger the socket release */
1431 xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
1432 }
1433}
1434
1435/**
1436 * xs_tcp_state_change - callback to handle TCP socket state changes
1437 * @sk: socket whose state has changed
1438 *
1439 */
1440static void xs_tcp_state_change(struct sock *sk)
1441{
1442 struct rpc_xprt *xprt;
1443 struct sock_xprt *transport;
1444
1445 if (!(xprt = xprt_from_sock(sk)))
1446 return;
1447 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
1448 dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
1449 sk->sk_state, xprt_connected(xprt),
1450 sock_flag(sk, SOCK_DEAD),
1451 sock_flag(sk, SOCK_ZAPPED),
1452 sk->sk_shutdown);
1453
1454 transport = container_of(xprt, struct sock_xprt, xprt);
1455 trace_rpc_socket_state_change(xprt, sk->sk_socket);
1456 switch (sk->sk_state) {
1457 case TCP_ESTABLISHED:
1458 if (!xprt_test_and_set_connected(xprt)) {
1459 xprt->connect_cookie++;
1460 clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
1461 xprt_clear_connecting(xprt);
1462
1463 xprt->stat.connect_count++;
1464 xprt->stat.connect_time += (long)jiffies -
1465 xprt->stat.connect_start;
1466 xs_run_error_worker(transport, XPRT_SOCK_WAKE_PENDING);
1467 }
1468 break;
1469 case TCP_FIN_WAIT1:
1470 /* The client initiated a shutdown of the socket */
1471 xprt->connect_cookie++;
1472 xprt->reestablish_timeout = 0;
1473 set_bit(XPRT_CLOSING, &xprt->state);
1474 smp_mb__before_atomic();
1475 clear_bit(XPRT_CONNECTED, &xprt->state);
1476 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1477 smp_mb__after_atomic();
1478 break;
1479 case TCP_CLOSE_WAIT:
1480 /* The server initiated a shutdown of the socket */
1481 xprt->connect_cookie++;
1482 clear_bit(XPRT_CONNECTED, &xprt->state);
1483 xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
1484 fallthrough;
1485 case TCP_CLOSING:
1486 /*
1487 * If the server closed down the connection, make sure that
1488 * we back off before reconnecting
1489 */
1490 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
1491 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
1492 break;
1493 case TCP_LAST_ACK:
1494 set_bit(XPRT_CLOSING, &xprt->state);
1495 smp_mb__before_atomic();
1496 clear_bit(XPRT_CONNECTED, &xprt->state);
1497 smp_mb__after_atomic();
1498 break;
1499 case TCP_CLOSE:
1500 if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
1501 &transport->sock_state))
1502 xprt_clear_connecting(xprt);
1503 clear_bit(XPRT_CLOSING, &xprt->state);
1504 /* Trigger the socket release */
1505 xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
1506 }
1507}
1508
1509static void xs_write_space(struct sock *sk)
1510{
1511 struct sock_xprt *transport;
1512 struct rpc_xprt *xprt;
1513
1514 if (!sk->sk_socket)
1515 return;
1516 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1517
1518 if (unlikely(!(xprt = xprt_from_sock(sk))))
1519 return;
1520 transport = container_of(xprt, struct sock_xprt, xprt);
1521 if (!test_and_clear_bit(XPRT_SOCK_NOSPACE, &transport->sock_state))
1522 return;
1523 xs_run_error_worker(transport, XPRT_SOCK_WAKE_WRITE);
1524 sk->sk_write_pending--;
1525}
1526
1527/**
1528 * xs_udp_write_space - callback invoked when socket buffer space
1529 * becomes available
1530 * @sk: socket whose state has changed
1531 *
1532 * Called when more output buffer space is available for this socket.
1533 * We try not to wake our writers until they can make "significant"
1534 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1535 * with a bunch of small requests.
1536 */
1537static void xs_udp_write_space(struct sock *sk)
1538{
1539 /* from net/core/sock.c:sock_def_write_space */
1540 if (sock_writeable(sk))
1541 xs_write_space(sk);
1542}
1543
1544/**
1545 * xs_tcp_write_space - callback invoked when socket buffer space
1546 * becomes available
1547 * @sk: socket whose state has changed
1548 *
1549 * Called when more output buffer space is available for this socket.
1550 * We try not to wake our writers until they can make "significant"
1551 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1552 * with a bunch of small requests.
1553 */
1554static void xs_tcp_write_space(struct sock *sk)
1555{
1556 /* from net/core/stream.c:sk_stream_write_space */
1557 if (sk_stream_is_writeable(sk))
1558 xs_write_space(sk);
1559}
1560
1561static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
1562{
1563 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1564 struct sock *sk = transport->inet;
1565
1566 if (transport->rcvsize) {
1567 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
1568 sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
1569 }
1570 if (transport->sndsize) {
1571 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1572 sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
1573 sk->sk_write_space(sk);
1574 }
1575}
1576
1577/**
1578 * xs_udp_set_buffer_size - set send and receive limits
1579 * @xprt: generic transport
1580 * @sndsize: requested size of send buffer, in bytes
1581 * @rcvsize: requested size of receive buffer, in bytes
1582 *
1583 * Set socket send and receive buffer size limits.
1584 */
1585static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
1586{
1587 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1588
1589 transport->sndsize = 0;
1590 if (sndsize)
1591 transport->sndsize = sndsize + 1024;
1592 transport->rcvsize = 0;
1593 if (rcvsize)
1594 transport->rcvsize = rcvsize + 1024;
1595
1596 xs_udp_do_set_buffer_size(xprt);
1597}
1598
1599/**
1600 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
1601 * @xprt: controlling transport
1602 * @task: task that timed out
1603 *
1604 * Adjust the congestion window after a retransmit timeout has occurred.
1605 */
1606static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
1607{
1608 spin_lock(&xprt->transport_lock);
1609 xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
1610 spin_unlock(&xprt->transport_lock);
1611}
1612
1613static int xs_get_random_port(void)
1614{
1615 unsigned short min = xprt_min_resvport, max = xprt_max_resvport;
1616 unsigned short range;
1617 unsigned short rand;
1618
1619 if (max < min)
1620 return -EADDRINUSE;
1621 range = max - min + 1;
1622 rand = get_random_u32_below(range);
1623 return rand + min;
1624}
1625
1626static unsigned short xs_sock_getport(struct socket *sock)
1627{
1628 struct sockaddr_storage buf;
1629 unsigned short port = 0;
1630
1631 if (kernel_getsockname(sock, (struct sockaddr *)&buf) < 0)
1632 goto out;
1633 switch (buf.ss_family) {
1634 case AF_INET6:
1635 port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port);
1636 break;
1637 case AF_INET:
1638 port = ntohs(((struct sockaddr_in *)&buf)->sin_port);
1639 }
1640out:
1641 return port;
1642}
1643
1644/**
1645 * xs_set_port - reset the port number in the remote endpoint address
1646 * @xprt: generic transport
1647 * @port: new port number
1648 *
1649 */
1650static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
1651{
1652 dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);
1653
1654 rpc_set_port(xs_addr(xprt), port);
1655 xs_update_peer_port(xprt);
1656}
1657
1658static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
1659{
1660 if (transport->srcport == 0 && transport->xprt.reuseport)
1661 transport->srcport = xs_sock_getport(sock);
1662}
1663
1664static int xs_get_srcport(struct sock_xprt *transport)
1665{
1666 int port = transport->srcport;
1667
1668 if (port == 0 && transport->xprt.resvport)
1669 port = xs_get_random_port();
1670 return port;
1671}
1672
1673static unsigned short xs_sock_srcport(struct rpc_xprt *xprt)
1674{
1675 struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt);
1676 unsigned short ret = 0;
1677 mutex_lock(&sock->recv_mutex);
1678 if (sock->sock)
1679 ret = xs_sock_getport(sock->sock);
1680 mutex_unlock(&sock->recv_mutex);
1681 return ret;
1682}
1683
1684static int xs_sock_srcaddr(struct rpc_xprt *xprt, char *buf, size_t buflen)
1685{
1686 struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt);
1687 union {
1688 struct sockaddr sa;
1689 struct sockaddr_storage st;
1690 } saddr;
1691 int ret = -ENOTCONN;
1692
1693 mutex_lock(&sock->recv_mutex);
1694 if (sock->sock) {
1695 ret = kernel_getsockname(sock->sock, &saddr.sa);
1696 if (ret >= 0)
1697 ret = snprintf(buf, buflen, "%pISc", &saddr.sa);
1698 }
1699 mutex_unlock(&sock->recv_mutex);
1700 return ret;
1701}
1702
1703static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
1704{
1705 if (transport->srcport != 0)
1706 transport->srcport = 0;
1707 if (!transport->xprt.resvport)
1708 return 0;
1709 if (port <= xprt_min_resvport || port > xprt_max_resvport)
1710 return xprt_max_resvport;
1711 return --port;
1712}
1713static int xs_bind(struct sock_xprt *transport, struct socket *sock)
1714{
1715 struct sockaddr_storage myaddr;
1716 int err, nloop = 0;
1717 int port = xs_get_srcport(transport);
1718 unsigned short last;
1719
1720 /*
1721 * If we are asking for any ephemeral port (i.e. port == 0 &&
1722 * transport->xprt.resvport == 0), don't bind. Let the local
1723 * port selection happen implicitly when the socket is used
1724 * (for example at connect time).
1725 *
1726 * This ensures that we can continue to establish TCP
1727 * connections even when all local ephemeral ports are already
1728 * a part of some TCP connection. This makes no difference
1729 * for UDP sockets, but also doesn't harm them.
1730 *
1731 * If we're asking for any reserved port (i.e. port == 0 &&
1732 * transport->xprt.resvport == 1) xs_get_srcport above will
1733 * ensure that port is non-zero and we will bind as needed.
1734 */
1735 if (port <= 0)
1736 return port;
1737
1738 memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
1739 do {
1740 rpc_set_port((struct sockaddr *)&myaddr, port);
1741 err = kernel_bind(sock, (struct sockaddr *)&myaddr,
1742 transport->xprt.addrlen);
1743 if (err == 0) {
1744 if (transport->xprt.reuseport)
1745 transport->srcport = port;
1746 break;
1747 }
1748 last = port;
1749 port = xs_next_srcport(transport, port);
1750 if (port > last)
1751 nloop++;
1752 } while (err == -EADDRINUSE && nloop != 2);
1753
1754 if (myaddr.ss_family == AF_INET)
1755 dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__,
1756 &((struct sockaddr_in *)&myaddr)->sin_addr,
1757 port, err ? "failed" : "ok", err);
1758 else
1759 dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__,
1760 &((struct sockaddr_in6 *)&myaddr)->sin6_addr,
1761 port, err ? "failed" : "ok", err);
1762 return err;
1763}
1764
1765/*
1766 * We don't support autobind on AF_LOCAL sockets
1767 */
1768static void xs_local_rpcbind(struct rpc_task *task)
1769{
1770 xprt_set_bound(task->tk_xprt);
1771}
1772
1773static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
1774{
1775}
1776
1777#ifdef CONFIG_DEBUG_LOCK_ALLOC
1778static struct lock_class_key xs_key[3];
1779static struct lock_class_key xs_slock_key[3];
1780
1781static inline void xs_reclassify_socketu(struct socket *sock)
1782{
1783 struct sock *sk = sock->sk;
1784
1785 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
1786 &xs_slock_key[0], "sk_lock-AF_LOCAL-RPC", &xs_key[0]);
1787}
1788
1789static inline void xs_reclassify_socket4(struct socket *sock)
1790{
1791 struct sock *sk = sock->sk;
1792
1793 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
1794 &xs_slock_key[1], "sk_lock-AF_INET-RPC", &xs_key[1]);
1795}
1796
1797static inline void xs_reclassify_socket6(struct socket *sock)
1798{
1799 struct sock *sk = sock->sk;
1800
1801 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
1802 &xs_slock_key[2], "sk_lock-AF_INET6-RPC", &xs_key[2]);
1803}
1804
1805static inline void xs_reclassify_socket(int family, struct socket *sock)
1806{
1807 if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk)))
1808 return;
1809
1810 switch (family) {
1811 case AF_LOCAL:
1812 xs_reclassify_socketu(sock);
1813 break;
1814 case AF_INET:
1815 xs_reclassify_socket4(sock);
1816 break;
1817 case AF_INET6:
1818 xs_reclassify_socket6(sock);
1819 break;
1820 }
1821}
1822#else
1823static inline void xs_reclassify_socket(int family, struct socket *sock)
1824{
1825}
1826#endif
1827
1828static void xs_dummy_setup_socket(struct work_struct *work)
1829{
1830}
1831
1832static struct socket *xs_create_sock(struct rpc_xprt *xprt,
1833 struct sock_xprt *transport, int family, int type,
1834 int protocol, bool reuseport)
1835{
1836 struct file *filp;
1837 struct socket *sock;
1838 int err;
1839
1840 err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
1841 if (err < 0) {
1842 dprintk("RPC: can't create %d transport socket (%d).\n",
1843 protocol, -err);
1844 goto out;
1845 }
1846 xs_reclassify_socket(family, sock);
1847
1848 if (reuseport)
1849 sock_set_reuseport(sock->sk);
1850
1851 err = xs_bind(transport, sock);
1852 if (err) {
1853 sock_release(sock);
1854 goto out;
1855 }
1856
1857 filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
1858 if (IS_ERR(filp))
1859 return ERR_CAST(filp);
1860 transport->file = filp;
1861
1862 return sock;
1863out:
1864 return ERR_PTR(err);
1865}
1866
1867static int xs_local_finish_connecting(struct rpc_xprt *xprt,
1868 struct socket *sock)
1869{
1870 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1871 xprt);
1872
1873 if (!transport->inet) {
1874 struct sock *sk = sock->sk;
1875
1876 lock_sock(sk);
1877
1878 xs_save_old_callbacks(transport, sk);
1879
1880 sk->sk_user_data = xprt;
1881 sk->sk_data_ready = xs_data_ready;
1882 sk->sk_write_space = xs_udp_write_space;
1883 sk->sk_state_change = xs_local_state_change;
1884 sk->sk_error_report = xs_error_report;
1885 sk->sk_use_task_frag = false;
1886
1887 xprt_clear_connected(xprt);
1888
1889 /* Reset to new socket */
1890 transport->sock = sock;
1891 transport->inet = sk;
1892
1893 release_sock(sk);
1894 }
1895
1896 xs_stream_start_connect(transport);
1897
1898 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
1899}
1900
1901/**
1902 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
1903 * @transport: socket transport to connect
1904 */
1905static int xs_local_setup_socket(struct sock_xprt *transport)
1906{
1907 struct rpc_xprt *xprt = &transport->xprt;
1908 struct file *filp;
1909 struct socket *sock;
1910 int status;
1911
1912 status = __sock_create(xprt->xprt_net, AF_LOCAL,
1913 SOCK_STREAM, 0, &sock, 1);
1914 if (status < 0) {
1915 dprintk("RPC: can't create AF_LOCAL "
1916 "transport socket (%d).\n", -status);
1917 goto out;
1918 }
1919 xs_reclassify_socket(AF_LOCAL, sock);
1920
1921 filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
1922 if (IS_ERR(filp)) {
1923 status = PTR_ERR(filp);
1924 goto out;
1925 }
1926 transport->file = filp;
1927
1928 dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n",
1929 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1930
1931 status = xs_local_finish_connecting(xprt, sock);
1932 trace_rpc_socket_connect(xprt, sock, status);
1933 switch (status) {
1934 case 0:
1935 dprintk("RPC: xprt %p connected to %s\n",
1936 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1937 xprt->stat.connect_count++;
1938 xprt->stat.connect_time += (long)jiffies -
1939 xprt->stat.connect_start;
1940 xprt_set_connected(xprt);
1941 break;
1942 case -ENOBUFS:
1943 break;
1944 case -ENOENT:
1945 dprintk("RPC: xprt %p: socket %s does not exist\n",
1946 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1947 break;
1948 case -ECONNREFUSED:
1949 dprintk("RPC: xprt %p: connection refused for %s\n",
1950 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1951 break;
1952 default:
1953 printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
1954 __func__, -status,
1955 xprt->address_strings[RPC_DISPLAY_ADDR]);
1956 }
1957
1958out:
1959 xprt_clear_connecting(xprt);
1960 xprt_wake_pending_tasks(xprt, status);
1961 return status;
1962}
1963
1964static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
1965{
1966 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1967 int ret;
1968
1969 if (transport->file)
1970 goto force_disconnect;
1971
1972 if (RPC_IS_ASYNC(task)) {
1973 /*
1974 * We want the AF_LOCAL connect to be resolved in the
1975 * filesystem namespace of the process making the rpc
1976 * call. Thus we connect synchronously.
1977 *
1978 * If we want to support asynchronous AF_LOCAL calls,
1979 * we'll need to figure out how to pass a namespace to
1980 * connect.
1981 */
1982 rpc_task_set_rpc_status(task, -ENOTCONN);
1983 goto out_wake;
1984 }
1985 ret = xs_local_setup_socket(transport);
1986 if (ret && !RPC_IS_SOFTCONN(task))
1987 msleep_interruptible(15000);
1988 return;
1989force_disconnect:
1990 xprt_force_disconnect(xprt);
1991out_wake:
1992 xprt_clear_connecting(xprt);
1993 xprt_wake_pending_tasks(xprt, -ENOTCONN);
1994}
1995
1996#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
1997/*
1998 * Note that this should be called with XPRT_LOCKED held, or recv_mutex
1999 * held, or when we otherwise know that we have exclusive access to the
2000 * socket, to guard against races with xs_reset_transport.
2001 */
2002static void xs_set_memalloc(struct rpc_xprt *xprt)
2003{
2004 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
2005 xprt);
2006
2007 /*
2008 * If there's no sock, then we have nothing to set. The
2009 * reconnecting process will get it for us.
2010 */
2011 if (!transport->inet)
2012 return;
2013 if (atomic_read(&xprt->swapper))
2014 sk_set_memalloc(transport->inet);
2015}
2016
2017/**
2018 * xs_enable_swap - Tag this transport as being used for swap.
2019 * @xprt: transport to tag
2020 *
2021 * Take a reference to this transport on behalf of the rpc_clnt, and
2022 * optionally mark it for swapping if it wasn't already.
2023 */
2024static int
2025xs_enable_swap(struct rpc_xprt *xprt)
2026{
2027 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
2028
2029 mutex_lock(&xs->recv_mutex);
2030 if (atomic_inc_return(&xprt->swapper) == 1 &&
2031 xs->inet)
2032 sk_set_memalloc(xs->inet);
2033 mutex_unlock(&xs->recv_mutex);
2034 return 0;
2035}
2036
2037/**
2038 * xs_disable_swap - Untag this transport as being used for swap.
2039 * @xprt: transport to tag
2040 *
2041 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the
2042 * swapper refcount goes to 0, untag the socket as a memalloc socket.
2043 */
2044static void
2045xs_disable_swap(struct rpc_xprt *xprt)
2046{
2047 struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
2048
2049 mutex_lock(&xs->recv_mutex);
2050 if (atomic_dec_and_test(&xprt->swapper) &&
2051 xs->inet)
2052 sk_clear_memalloc(xs->inet);
2053 mutex_unlock(&xs->recv_mutex);
2054}
2055#else
2056static void xs_set_memalloc(struct rpc_xprt *xprt)
2057{
2058}
2059
2060static int
2061xs_enable_swap(struct rpc_xprt *xprt)
2062{
2063 return -EINVAL;
2064}
2065
2066static void
2067xs_disable_swap(struct rpc_xprt *xprt)
2068{
2069}
2070#endif
2071
2072static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2073{
2074 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2075
2076 if (!transport->inet) {
2077 struct sock *sk = sock->sk;
2078
2079 lock_sock(sk);
2080
2081 xs_save_old_callbacks(transport, sk);
2082
2083 sk->sk_user_data = xprt;
2084 sk->sk_data_ready = xs_data_ready;
2085 sk->sk_write_space = xs_udp_write_space;
2086 sk->sk_use_task_frag = false;
2087
2088 xprt_set_connected(xprt);
2089
2090 /* Reset to new socket */
2091 transport->sock = sock;
2092 transport->inet = sk;
2093
2094 xs_set_memalloc(xprt);
2095
2096 release_sock(sk);
2097 }
2098 xs_udp_do_set_buffer_size(xprt);
2099
2100 xprt->stat.connect_start = jiffies;
2101}
2102
2103static void xs_udp_setup_socket(struct work_struct *work)
2104{
2105 struct sock_xprt *transport =
2106 container_of(work, struct sock_xprt, connect_worker.work);
2107 struct rpc_xprt *xprt = &transport->xprt;
2108 struct socket *sock;
2109 int status = -EIO;
2110 unsigned int pflags = current->flags;
2111
2112 if (atomic_read(&xprt->swapper))
2113 current->flags |= PF_MEMALLOC;
2114 sock = xs_create_sock(xprt, transport,
2115 xs_addr(xprt)->sa_family, SOCK_DGRAM,
2116 IPPROTO_UDP, false);
2117 if (IS_ERR(sock))
2118 goto out;
2119
2120 dprintk("RPC: worker connecting xprt %p via %s to "
2121 "%s (port %s)\n", xprt,
2122 xprt->address_strings[RPC_DISPLAY_PROTO],
2123 xprt->address_strings[RPC_DISPLAY_ADDR],
2124 xprt->address_strings[RPC_DISPLAY_PORT]);
2125
2126 xs_udp_finish_connecting(xprt, sock);
2127 trace_rpc_socket_connect(xprt, sock, 0);
2128 status = 0;
2129out:
2130 xprt_clear_connecting(xprt);
2131 xprt_unlock_connect(xprt, transport);
2132 xprt_wake_pending_tasks(xprt, status);
2133 current_restore_flags(pflags, PF_MEMALLOC);
2134}
2135
2136/**
2137 * xs_tcp_shutdown - gracefully shut down a TCP socket
2138 * @xprt: transport
2139 *
2140 * Initiates a graceful shutdown of the TCP socket by calling the
2141 * equivalent of shutdown(SHUT_RDWR);
2142 */
2143static void xs_tcp_shutdown(struct rpc_xprt *xprt)
2144{
2145 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2146 struct socket *sock = transport->sock;
2147 int skst = transport->inet ? transport->inet->sk_state : TCP_CLOSE;
2148
2149 if (sock == NULL)
2150 return;
2151 if (!xprt->reuseport) {
2152 xs_close(xprt);
2153 return;
2154 }
2155 switch (skst) {
2156 case TCP_FIN_WAIT1:
2157 case TCP_FIN_WAIT2:
2158 break;
2159 case TCP_ESTABLISHED:
2160 case TCP_CLOSE_WAIT:
2161 kernel_sock_shutdown(sock, SHUT_RDWR);
2162 trace_rpc_socket_shutdown(xprt, sock);
2163 break;
2164 default:
2165 xs_reset_transport(transport);
2166 }
2167}
2168
2169static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
2170 struct socket *sock)
2171{
2172 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2173 unsigned int keepidle;
2174 unsigned int keepcnt;
2175 unsigned int timeo;
2176
2177 spin_lock(&xprt->transport_lock);
2178 keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ);
2179 keepcnt = xprt->timeout->to_retries + 1;
2180 timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
2181 (xprt->timeout->to_retries + 1);
2182 clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
2183 spin_unlock(&xprt->transport_lock);
2184
2185 /* TCP Keepalive options */
2186 sock_set_keepalive(sock->sk);
2187 tcp_sock_set_keepidle(sock->sk, keepidle);
2188 tcp_sock_set_keepintvl(sock->sk, keepidle);
2189 tcp_sock_set_keepcnt(sock->sk, keepcnt);
2190
2191 /* TCP user timeout (see RFC5482) */
2192 tcp_sock_set_user_timeout(sock->sk, timeo);
2193}
2194
2195static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt,
2196 unsigned long connect_timeout,
2197 unsigned long reconnect_timeout)
2198{
2199 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2200 struct rpc_timeout to;
2201 unsigned long initval;
2202
2203 spin_lock(&xprt->transport_lock);
2204 if (reconnect_timeout < xprt->max_reconnect_timeout)
2205 xprt->max_reconnect_timeout = reconnect_timeout;
2206 if (connect_timeout < xprt->connect_timeout) {
2207 memcpy(&to, xprt->timeout, sizeof(to));
2208 initval = DIV_ROUND_UP(connect_timeout, to.to_retries + 1);
2209 /* Arbitrary lower limit */
2210 if (initval < XS_TCP_INIT_REEST_TO << 1)
2211 initval = XS_TCP_INIT_REEST_TO << 1;
2212 to.to_initval = initval;
2213 to.to_maxval = initval;
2214 memcpy(&transport->tcp_timeout, &to,
2215 sizeof(transport->tcp_timeout));
2216 xprt->timeout = &transport->tcp_timeout;
2217 xprt->connect_timeout = connect_timeout;
2218 }
2219 set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
2220 spin_unlock(&xprt->transport_lock);
2221}
2222
2223static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2224{
2225 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2226
2227 if (!transport->inet) {
2228 struct sock *sk = sock->sk;
2229
2230 /* Avoid temporary address, they are bad for long-lived
2231 * connections such as NFS mounts.
2232 * RFC4941, section 3.6 suggests that:
2233 * Individual applications, which have specific
2234 * knowledge about the normal duration of connections,
2235 * MAY override this as appropriate.
2236 */
2237 if (xs_addr(xprt)->sa_family == PF_INET6) {
2238 ip6_sock_set_addr_preferences(sk,
2239 IPV6_PREFER_SRC_PUBLIC);
2240 }
2241
2242 xs_tcp_set_socket_timeouts(xprt, sock);
2243 tcp_sock_set_nodelay(sk);
2244
2245 lock_sock(sk);
2246
2247 xs_save_old_callbacks(transport, sk);
2248
2249 sk->sk_user_data = xprt;
2250 sk->sk_data_ready = xs_data_ready;
2251 sk->sk_state_change = xs_tcp_state_change;
2252 sk->sk_write_space = xs_tcp_write_space;
2253 sk->sk_error_report = xs_error_report;
2254 sk->sk_use_task_frag = false;
2255
2256 /* socket options */
2257 sock_reset_flag(sk, SOCK_LINGER);
2258
2259 xprt_clear_connected(xprt);
2260
2261 /* Reset to new socket */
2262 transport->sock = sock;
2263 transport->inet = sk;
2264
2265 release_sock(sk);
2266 }
2267
2268 if (!xprt_bound(xprt))
2269 return -ENOTCONN;
2270
2271 xs_set_memalloc(xprt);
2272
2273 xs_stream_start_connect(transport);
2274
2275 /* Tell the socket layer to start connecting... */
2276 set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
2277 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
2278}
2279
2280/**
2281 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
2282 * @work: queued work item
2283 *
2284 * Invoked by a work queue tasklet.
2285 */
2286static void xs_tcp_setup_socket(struct work_struct *work)
2287{
2288 struct sock_xprt *transport =
2289 container_of(work, struct sock_xprt, connect_worker.work);
2290 struct socket *sock = transport->sock;
2291 struct rpc_xprt *xprt = &transport->xprt;
2292 int status;
2293 unsigned int pflags = current->flags;
2294
2295 if (atomic_read(&xprt->swapper))
2296 current->flags |= PF_MEMALLOC;
2297
2298 if (xprt_connected(xprt))
2299 goto out;
2300 if (test_and_clear_bit(XPRT_SOCK_CONNECT_SENT,
2301 &transport->sock_state) ||
2302 !sock) {
2303 xs_reset_transport(transport);
2304 sock = xs_create_sock(xprt, transport, xs_addr(xprt)->sa_family,
2305 SOCK_STREAM, IPPROTO_TCP, true);
2306 if (IS_ERR(sock)) {
2307 xprt_wake_pending_tasks(xprt, PTR_ERR(sock));
2308 goto out;
2309 }
2310 }
2311
2312 dprintk("RPC: worker connecting xprt %p via %s to "
2313 "%s (port %s)\n", xprt,
2314 xprt->address_strings[RPC_DISPLAY_PROTO],
2315 xprt->address_strings[RPC_DISPLAY_ADDR],
2316 xprt->address_strings[RPC_DISPLAY_PORT]);
2317
2318 status = xs_tcp_finish_connecting(xprt, sock);
2319 trace_rpc_socket_connect(xprt, sock, status);
2320 dprintk("RPC: %p connect status %d connected %d sock state %d\n",
2321 xprt, -status, xprt_connected(xprt),
2322 sock->sk->sk_state);
2323 switch (status) {
2324 case 0:
2325 case -EINPROGRESS:
2326 /* SYN_SENT! */
2327 set_bit(XPRT_SOCK_CONNECT_SENT, &transport->sock_state);
2328 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2329 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2330 fallthrough;
2331 case -EALREADY:
2332 goto out_unlock;
2333 case -EADDRNOTAVAIL:
2334 /* Source port number is unavailable. Try a new one! */
2335 transport->srcport = 0;
2336 status = -EAGAIN;
2337 break;
2338 case -EINVAL:
2339 /* Happens, for instance, if the user specified a link
2340 * local IPv6 address without a scope-id.
2341 */
2342 case -ECONNREFUSED:
2343 case -ECONNRESET:
2344 case -ENETDOWN:
2345 case -ENETUNREACH:
2346 case -EHOSTUNREACH:
2347 case -EADDRINUSE:
2348 case -ENOBUFS:
2349 break;
2350 default:
2351 printk("%s: connect returned unhandled error %d\n",
2352 __func__, status);
2353 status = -EAGAIN;
2354 }
2355
2356 /* xs_tcp_force_close() wakes tasks with a fixed error code.
2357 * We need to wake them first to ensure the correct error code.
2358 */
2359 xprt_wake_pending_tasks(xprt, status);
2360 xs_tcp_force_close(xprt);
2361out:
2362 xprt_clear_connecting(xprt);
2363out_unlock:
2364 xprt_unlock_connect(xprt, transport);
2365 current_restore_flags(pflags, PF_MEMALLOC);
2366}
2367
2368/**
2369 * xs_connect - connect a socket to a remote endpoint
2370 * @xprt: pointer to transport structure
2371 * @task: address of RPC task that manages state of connect request
2372 *
2373 * TCP: If the remote end dropped the connection, delay reconnecting.
2374 *
2375 * UDP socket connects are synchronous, but we use a work queue anyway
2376 * to guarantee that even unprivileged user processes can set up a
2377 * socket on a privileged port.
2378 *
2379 * If a UDP socket connect fails, the delay behavior here prevents
2380 * retry floods (hard mounts).
2381 */
2382static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2383{
2384 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2385 unsigned long delay = 0;
2386
2387 WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
2388
2389 if (transport->sock != NULL) {
2390 dprintk("RPC: xs_connect delayed xprt %p for %lu "
2391 "seconds\n", xprt, xprt->reestablish_timeout / HZ);
2392
2393 delay = xprt_reconnect_delay(xprt);
2394 xprt_reconnect_backoff(xprt, XS_TCP_INIT_REEST_TO);
2395
2396 } else
2397 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
2398
2399 queue_delayed_work(xprtiod_workqueue,
2400 &transport->connect_worker,
2401 delay);
2402}
2403
2404static void xs_wake_disconnect(struct sock_xprt *transport)
2405{
2406 if (test_and_clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state))
2407 xs_tcp_force_close(&transport->xprt);
2408}
2409
2410static void xs_wake_write(struct sock_xprt *transport)
2411{
2412 if (test_and_clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state))
2413 xprt_write_space(&transport->xprt);
2414}
2415
2416static void xs_wake_error(struct sock_xprt *transport)
2417{
2418 int sockerr;
2419
2420 if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
2421 return;
2422 mutex_lock(&transport->recv_mutex);
2423 if (transport->sock == NULL)
2424 goto out;
2425 if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
2426 goto out;
2427 sockerr = xchg(&transport->xprt_err, 0);
2428 if (sockerr < 0)
2429 xprt_wake_pending_tasks(&transport->xprt, sockerr);
2430out:
2431 mutex_unlock(&transport->recv_mutex);
2432}
2433
2434static void xs_wake_pending(struct sock_xprt *transport)
2435{
2436 if (test_and_clear_bit(XPRT_SOCK_WAKE_PENDING, &transport->sock_state))
2437 xprt_wake_pending_tasks(&transport->xprt, -EAGAIN);
2438}
2439
2440static void xs_error_handle(struct work_struct *work)
2441{
2442 struct sock_xprt *transport = container_of(work,
2443 struct sock_xprt, error_worker);
2444
2445 xs_wake_disconnect(transport);
2446 xs_wake_write(transport);
2447 xs_wake_error(transport);
2448 xs_wake_pending(transport);
2449}
2450
2451/**
2452 * xs_local_print_stats - display AF_LOCAL socket-specific stats
2453 * @xprt: rpc_xprt struct containing statistics
2454 * @seq: output file
2455 *
2456 */
2457static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2458{
2459 long idle_time = 0;
2460
2461 if (xprt_connected(xprt))
2462 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2463
2464 seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
2465 "%llu %llu %lu %llu %llu\n",
2466 xprt->stat.bind_count,
2467 xprt->stat.connect_count,
2468 xprt->stat.connect_time / HZ,
2469 idle_time,
2470 xprt->stat.sends,
2471 xprt->stat.recvs,
2472 xprt->stat.bad_xids,
2473 xprt->stat.req_u,
2474 xprt->stat.bklog_u,
2475 xprt->stat.max_slots,
2476 xprt->stat.sending_u,
2477 xprt->stat.pending_u);
2478}
2479
2480/**
2481 * xs_udp_print_stats - display UDP socket-specific stats
2482 * @xprt: rpc_xprt struct containing statistics
2483 * @seq: output file
2484 *
2485 */
2486static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2487{
2488 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2489
2490 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu "
2491 "%lu %llu %llu\n",
2492 transport->srcport,
2493 xprt->stat.bind_count,
2494 xprt->stat.sends,
2495 xprt->stat.recvs,
2496 xprt->stat.bad_xids,
2497 xprt->stat.req_u,
2498 xprt->stat.bklog_u,
2499 xprt->stat.max_slots,
2500 xprt->stat.sending_u,
2501 xprt->stat.pending_u);
2502}
2503
2504/**
2505 * xs_tcp_print_stats - display TCP socket-specific stats
2506 * @xprt: rpc_xprt struct containing statistics
2507 * @seq: output file
2508 *
2509 */
2510static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2511{
2512 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2513 long idle_time = 0;
2514
2515 if (xprt_connected(xprt))
2516 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2517
2518 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu "
2519 "%llu %llu %lu %llu %llu\n",
2520 transport->srcport,
2521 xprt->stat.bind_count,
2522 xprt->stat.connect_count,
2523 xprt->stat.connect_time / HZ,
2524 idle_time,
2525 xprt->stat.sends,
2526 xprt->stat.recvs,
2527 xprt->stat.bad_xids,
2528 xprt->stat.req_u,
2529 xprt->stat.bklog_u,
2530 xprt->stat.max_slots,
2531 xprt->stat.sending_u,
2532 xprt->stat.pending_u);
2533}
2534
2535/*
2536 * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
2537 * we allocate pages instead doing a kmalloc like rpc_malloc is because we want
2538 * to use the server side send routines.
2539 */
2540static int bc_malloc(struct rpc_task *task)
2541{
2542 struct rpc_rqst *rqst = task->tk_rqstp;
2543 size_t size = rqst->rq_callsize;
2544 struct page *page;
2545 struct rpc_buffer *buf;
2546
2547 if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) {
2548 WARN_ONCE(1, "xprtsock: large bc buffer request (size %zu)\n",
2549 size);
2550 return -EINVAL;
2551 }
2552
2553 page = alloc_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
2554 if (!page)
2555 return -ENOMEM;
2556
2557 buf = page_address(page);
2558 buf->len = PAGE_SIZE;
2559
2560 rqst->rq_buffer = buf->data;
2561 rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
2562 return 0;
2563}
2564
2565/*
2566 * Free the space allocated in the bc_alloc routine
2567 */
2568static void bc_free(struct rpc_task *task)
2569{
2570 void *buffer = task->tk_rqstp->rq_buffer;
2571 struct rpc_buffer *buf;
2572
2573 buf = container_of(buffer, struct rpc_buffer, data);
2574 free_page((unsigned long)buf);
2575}
2576
2577static int bc_sendto(struct rpc_rqst *req)
2578{
2579 struct xdr_buf *xdr = &req->rq_snd_buf;
2580 struct sock_xprt *transport =
2581 container_of(req->rq_xprt, struct sock_xprt, xprt);
2582 struct msghdr msg = {
2583 .msg_flags = 0,
2584 };
2585 rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT |
2586 (u32)xdr->len);
2587 unsigned int sent = 0;
2588 int err;
2589
2590 req->rq_xtime = ktime_get();
2591 err = xdr_alloc_bvec(xdr, rpc_task_gfp_mask());
2592 if (err < 0)
2593 return err;
2594 err = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, marker, &sent);
2595 xdr_free_bvec(xdr);
2596 if (err < 0 || sent != (xdr->len + sizeof(marker)))
2597 return -EAGAIN;
2598 return sent;
2599}
2600
2601/**
2602 * bc_send_request - Send a backchannel Call on a TCP socket
2603 * @req: rpc_rqst containing Call message to be sent
2604 *
2605 * xpt_mutex ensures @rqstp's whole message is written to the socket
2606 * without interruption.
2607 *
2608 * Return values:
2609 * %0 if the message was sent successfully
2610 * %ENOTCONN if the message was not sent
2611 */
2612static int bc_send_request(struct rpc_rqst *req)
2613{
2614 struct svc_xprt *xprt;
2615 int len;
2616
2617 /*
2618 * Get the server socket associated with this callback xprt
2619 */
2620 xprt = req->rq_xprt->bc_xprt;
2621
2622 /*
2623 * Grab the mutex to serialize data as the connection is shared
2624 * with the fore channel
2625 */
2626 mutex_lock(&xprt->xpt_mutex);
2627 if (test_bit(XPT_DEAD, &xprt->xpt_flags))
2628 len = -ENOTCONN;
2629 else
2630 len = bc_sendto(req);
2631 mutex_unlock(&xprt->xpt_mutex);
2632
2633 if (len > 0)
2634 len = 0;
2635
2636 return len;
2637}
2638
2639/*
2640 * The close routine. Since this is client initiated, we do nothing
2641 */
2642
2643static void bc_close(struct rpc_xprt *xprt)
2644{
2645 xprt_disconnect_done(xprt);
2646}
2647
2648/*
2649 * The xprt destroy routine. Again, because this connection is client
2650 * initiated, we do nothing
2651 */
2652
2653static void bc_destroy(struct rpc_xprt *xprt)
2654{
2655 dprintk("RPC: bc_destroy xprt %p\n", xprt);
2656
2657 xs_xprt_free(xprt);
2658 module_put(THIS_MODULE);
2659}
2660
2661static const struct rpc_xprt_ops xs_local_ops = {
2662 .reserve_xprt = xprt_reserve_xprt,
2663 .release_xprt = xprt_release_xprt,
2664 .alloc_slot = xprt_alloc_slot,
2665 .free_slot = xprt_free_slot,
2666 .rpcbind = xs_local_rpcbind,
2667 .set_port = xs_local_set_port,
2668 .connect = xs_local_connect,
2669 .buf_alloc = rpc_malloc,
2670 .buf_free = rpc_free,
2671 .prepare_request = xs_stream_prepare_request,
2672 .send_request = xs_local_send_request,
2673 .wait_for_reply_request = xprt_wait_for_reply_request_def,
2674 .close = xs_close,
2675 .destroy = xs_destroy,
2676 .print_stats = xs_local_print_stats,
2677 .enable_swap = xs_enable_swap,
2678 .disable_swap = xs_disable_swap,
2679};
2680
2681static const struct rpc_xprt_ops xs_udp_ops = {
2682 .set_buffer_size = xs_udp_set_buffer_size,
2683 .reserve_xprt = xprt_reserve_xprt_cong,
2684 .release_xprt = xprt_release_xprt_cong,
2685 .alloc_slot = xprt_alloc_slot,
2686 .free_slot = xprt_free_slot,
2687 .rpcbind = rpcb_getport_async,
2688 .set_port = xs_set_port,
2689 .connect = xs_connect,
2690 .get_srcaddr = xs_sock_srcaddr,
2691 .get_srcport = xs_sock_srcport,
2692 .buf_alloc = rpc_malloc,
2693 .buf_free = rpc_free,
2694 .send_request = xs_udp_send_request,
2695 .wait_for_reply_request = xprt_wait_for_reply_request_rtt,
2696 .timer = xs_udp_timer,
2697 .release_request = xprt_release_rqst_cong,
2698 .close = xs_close,
2699 .destroy = xs_destroy,
2700 .print_stats = xs_udp_print_stats,
2701 .enable_swap = xs_enable_swap,
2702 .disable_swap = xs_disable_swap,
2703 .inject_disconnect = xs_inject_disconnect,
2704};
2705
2706static const struct rpc_xprt_ops xs_tcp_ops = {
2707 .reserve_xprt = xprt_reserve_xprt,
2708 .release_xprt = xprt_release_xprt,
2709 .alloc_slot = xprt_alloc_slot,
2710 .free_slot = xprt_free_slot,
2711 .rpcbind = rpcb_getport_async,
2712 .set_port = xs_set_port,
2713 .connect = xs_connect,
2714 .get_srcaddr = xs_sock_srcaddr,
2715 .get_srcport = xs_sock_srcport,
2716 .buf_alloc = rpc_malloc,
2717 .buf_free = rpc_free,
2718 .prepare_request = xs_stream_prepare_request,
2719 .send_request = xs_tcp_send_request,
2720 .wait_for_reply_request = xprt_wait_for_reply_request_def,
2721 .close = xs_tcp_shutdown,
2722 .destroy = xs_destroy,
2723 .set_connect_timeout = xs_tcp_set_connect_timeout,
2724 .print_stats = xs_tcp_print_stats,
2725 .enable_swap = xs_enable_swap,
2726 .disable_swap = xs_disable_swap,
2727 .inject_disconnect = xs_inject_disconnect,
2728#ifdef CONFIG_SUNRPC_BACKCHANNEL
2729 .bc_setup = xprt_setup_bc,
2730 .bc_maxpayload = xs_tcp_bc_maxpayload,
2731 .bc_num_slots = xprt_bc_max_slots,
2732 .bc_free_rqst = xprt_free_bc_rqst,
2733 .bc_destroy = xprt_destroy_bc,
2734#endif
2735};
2736
2737/*
2738 * The rpc_xprt_ops for the server backchannel
2739 */
2740
2741static const struct rpc_xprt_ops bc_tcp_ops = {
2742 .reserve_xprt = xprt_reserve_xprt,
2743 .release_xprt = xprt_release_xprt,
2744 .alloc_slot = xprt_alloc_slot,
2745 .free_slot = xprt_free_slot,
2746 .buf_alloc = bc_malloc,
2747 .buf_free = bc_free,
2748 .send_request = bc_send_request,
2749 .wait_for_reply_request = xprt_wait_for_reply_request_def,
2750 .close = bc_close,
2751 .destroy = bc_destroy,
2752 .print_stats = xs_tcp_print_stats,
2753 .enable_swap = xs_enable_swap,
2754 .disable_swap = xs_disable_swap,
2755 .inject_disconnect = xs_inject_disconnect,
2756};
2757
2758static int xs_init_anyaddr(const int family, struct sockaddr *sap)
2759{
2760 static const struct sockaddr_in sin = {
2761 .sin_family = AF_INET,
2762 .sin_addr.s_addr = htonl(INADDR_ANY),
2763 };
2764 static const struct sockaddr_in6 sin6 = {
2765 .sin6_family = AF_INET6,
2766 .sin6_addr = IN6ADDR_ANY_INIT,
2767 };
2768
2769 switch (family) {
2770 case AF_LOCAL:
2771 break;
2772 case AF_INET:
2773 memcpy(sap, &sin, sizeof(sin));
2774 break;
2775 case AF_INET6:
2776 memcpy(sap, &sin6, sizeof(sin6));
2777 break;
2778 default:
2779 dprintk("RPC: %s: Bad address family\n", __func__);
2780 return -EAFNOSUPPORT;
2781 }
2782 return 0;
2783}
2784
2785static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2786 unsigned int slot_table_size,
2787 unsigned int max_slot_table_size)
2788{
2789 struct rpc_xprt *xprt;
2790 struct sock_xprt *new;
2791
2792 if (args->addrlen > sizeof(xprt->addr)) {
2793 dprintk("RPC: xs_setup_xprt: address too large\n");
2794 return ERR_PTR(-EBADF);
2795 }
2796
2797 xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size,
2798 max_slot_table_size);
2799 if (xprt == NULL) {
2800 dprintk("RPC: xs_setup_xprt: couldn't allocate "
2801 "rpc_xprt\n");
2802 return ERR_PTR(-ENOMEM);
2803 }
2804
2805 new = container_of(xprt, struct sock_xprt, xprt);
2806 mutex_init(&new->recv_mutex);
2807 memcpy(&xprt->addr, args->dstaddr, args->addrlen);
2808 xprt->addrlen = args->addrlen;
2809 if (args->srcaddr)
2810 memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
2811 else {
2812 int err;
2813 err = xs_init_anyaddr(args->dstaddr->sa_family,
2814 (struct sockaddr *)&new->srcaddr);
2815 if (err != 0) {
2816 xprt_free(xprt);
2817 return ERR_PTR(err);
2818 }
2819 }
2820
2821 return xprt;
2822}
2823
2824static const struct rpc_timeout xs_local_default_timeout = {
2825 .to_initval = 10 * HZ,
2826 .to_maxval = 10 * HZ,
2827 .to_retries = 2,
2828};
2829
2830/**
2831 * xs_setup_local - Set up transport to use an AF_LOCAL socket
2832 * @args: rpc transport creation arguments
2833 *
2834 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
2835 */
2836static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
2837{
2838 struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
2839 struct sock_xprt *transport;
2840 struct rpc_xprt *xprt;
2841 struct rpc_xprt *ret;
2842
2843 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2844 xprt_max_tcp_slot_table_entries);
2845 if (IS_ERR(xprt))
2846 return xprt;
2847 transport = container_of(xprt, struct sock_xprt, xprt);
2848
2849 xprt->prot = 0;
2850 xprt->xprt_class = &xs_local_transport;
2851 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2852
2853 xprt->bind_timeout = XS_BIND_TO;
2854 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2855 xprt->idle_timeout = XS_IDLE_DISC_TO;
2856
2857 xprt->ops = &xs_local_ops;
2858 xprt->timeout = &xs_local_default_timeout;
2859
2860 INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
2861 INIT_WORK(&transport->error_worker, xs_error_handle);
2862 INIT_DELAYED_WORK(&transport->connect_worker, xs_dummy_setup_socket);
2863
2864 switch (sun->sun_family) {
2865 case AF_LOCAL:
2866 if (sun->sun_path[0] != '/') {
2867 dprintk("RPC: bad AF_LOCAL address: %s\n",
2868 sun->sun_path);
2869 ret = ERR_PTR(-EINVAL);
2870 goto out_err;
2871 }
2872 xprt_set_bound(xprt);
2873 xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
2874 break;
2875 default:
2876 ret = ERR_PTR(-EAFNOSUPPORT);
2877 goto out_err;
2878 }
2879
2880 dprintk("RPC: set up xprt to %s via AF_LOCAL\n",
2881 xprt->address_strings[RPC_DISPLAY_ADDR]);
2882
2883 if (try_module_get(THIS_MODULE))
2884 return xprt;
2885 ret = ERR_PTR(-EINVAL);
2886out_err:
2887 xs_xprt_free(xprt);
2888 return ret;
2889}
2890
2891static const struct rpc_timeout xs_udp_default_timeout = {
2892 .to_initval = 5 * HZ,
2893 .to_maxval = 30 * HZ,
2894 .to_increment = 5 * HZ,
2895 .to_retries = 5,
2896};
2897
2898/**
2899 * xs_setup_udp - Set up transport to use a UDP socket
2900 * @args: rpc transport creation arguments
2901 *
2902 */
2903static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2904{
2905 struct sockaddr *addr = args->dstaddr;
2906 struct rpc_xprt *xprt;
2907 struct sock_xprt *transport;
2908 struct rpc_xprt *ret;
2909
2910 xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries,
2911 xprt_udp_slot_table_entries);
2912 if (IS_ERR(xprt))
2913 return xprt;
2914 transport = container_of(xprt, struct sock_xprt, xprt);
2915
2916 xprt->prot = IPPROTO_UDP;
2917 xprt->xprt_class = &xs_udp_transport;
2918 /* XXX: header size can vary due to auth type, IPv6, etc. */
2919 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
2920
2921 xprt->bind_timeout = XS_BIND_TO;
2922 xprt->reestablish_timeout = XS_UDP_REEST_TO;
2923 xprt->idle_timeout = XS_IDLE_DISC_TO;
2924
2925 xprt->ops = &xs_udp_ops;
2926
2927 xprt->timeout = &xs_udp_default_timeout;
2928
2929 INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn);
2930 INIT_WORK(&transport->error_worker, xs_error_handle);
2931 INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket);
2932
2933 switch (addr->sa_family) {
2934 case AF_INET:
2935 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
2936 xprt_set_bound(xprt);
2937
2938 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
2939 break;
2940 case AF_INET6:
2941 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
2942 xprt_set_bound(xprt);
2943
2944 xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
2945 break;
2946 default:
2947 ret = ERR_PTR(-EAFNOSUPPORT);
2948 goto out_err;
2949 }
2950
2951 if (xprt_bound(xprt))
2952 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
2953 xprt->address_strings[RPC_DISPLAY_ADDR],
2954 xprt->address_strings[RPC_DISPLAY_PORT],
2955 xprt->address_strings[RPC_DISPLAY_PROTO]);
2956 else
2957 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
2958 xprt->address_strings[RPC_DISPLAY_ADDR],
2959 xprt->address_strings[RPC_DISPLAY_PROTO]);
2960
2961 if (try_module_get(THIS_MODULE))
2962 return xprt;
2963 ret = ERR_PTR(-EINVAL);
2964out_err:
2965 xs_xprt_free(xprt);
2966 return ret;
2967}
2968
2969static const struct rpc_timeout xs_tcp_default_timeout = {
2970 .to_initval = 60 * HZ,
2971 .to_maxval = 60 * HZ,
2972 .to_retries = 2,
2973};
2974
2975/**
2976 * xs_setup_tcp - Set up transport to use a TCP socket
2977 * @args: rpc transport creation arguments
2978 *
2979 */
2980static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
2981{
2982 struct sockaddr *addr = args->dstaddr;
2983 struct rpc_xprt *xprt;
2984 struct sock_xprt *transport;
2985 struct rpc_xprt *ret;
2986 unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;
2987
2988 if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
2989 max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;
2990
2991 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2992 max_slot_table_size);
2993 if (IS_ERR(xprt))
2994 return xprt;
2995 transport = container_of(xprt, struct sock_xprt, xprt);
2996
2997 xprt->prot = IPPROTO_TCP;
2998 xprt->xprt_class = &xs_tcp_transport;
2999 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
3000
3001 xprt->bind_timeout = XS_BIND_TO;
3002 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
3003 xprt->idle_timeout = XS_IDLE_DISC_TO;
3004
3005 xprt->ops = &xs_tcp_ops;
3006 xprt->timeout = &xs_tcp_default_timeout;
3007
3008 xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
3009 xprt->connect_timeout = xprt->timeout->to_initval *
3010 (xprt->timeout->to_retries + 1);
3011
3012 INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
3013 INIT_WORK(&transport->error_worker, xs_error_handle);
3014 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);
3015
3016 switch (addr->sa_family) {
3017 case AF_INET:
3018 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
3019 xprt_set_bound(xprt);
3020
3021 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
3022 break;
3023 case AF_INET6:
3024 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
3025 xprt_set_bound(xprt);
3026
3027 xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
3028 break;
3029 default:
3030 ret = ERR_PTR(-EAFNOSUPPORT);
3031 goto out_err;
3032 }
3033
3034 if (xprt_bound(xprt))
3035 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
3036 xprt->address_strings[RPC_DISPLAY_ADDR],
3037 xprt->address_strings[RPC_DISPLAY_PORT],
3038 xprt->address_strings[RPC_DISPLAY_PROTO]);
3039 else
3040 dprintk("RPC: set up xprt to %s (autobind) via %s\n",
3041 xprt->address_strings[RPC_DISPLAY_ADDR],
3042 xprt->address_strings[RPC_DISPLAY_PROTO]);
3043
3044 if (try_module_get(THIS_MODULE))
3045 return xprt;
3046 ret = ERR_PTR(-EINVAL);
3047out_err:
3048 xs_xprt_free(xprt);
3049 return ret;
3050}
3051
3052/**
3053 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
3054 * @args: rpc transport creation arguments
3055 *
3056 */
3057static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
3058{
3059 struct sockaddr *addr = args->dstaddr;
3060 struct rpc_xprt *xprt;
3061 struct sock_xprt *transport;
3062 struct svc_sock *bc_sock;
3063 struct rpc_xprt *ret;
3064
3065 xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
3066 xprt_tcp_slot_table_entries);
3067 if (IS_ERR(xprt))
3068 return xprt;
3069 transport = container_of(xprt, struct sock_xprt, xprt);
3070
3071 xprt->prot = IPPROTO_TCP;
3072 xprt->xprt_class = &xs_bc_tcp_transport;
3073 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
3074 xprt->timeout = &xs_tcp_default_timeout;
3075
3076 /* backchannel */
3077 xprt_set_bound(xprt);
3078 xprt->bind_timeout = 0;
3079 xprt->reestablish_timeout = 0;
3080 xprt->idle_timeout = 0;
3081
3082 xprt->ops = &bc_tcp_ops;
3083
3084 switch (addr->sa_family) {
3085 case AF_INET:
3086 xs_format_peer_addresses(xprt, "tcp",
3087 RPCBIND_NETID_TCP);
3088 break;
3089 case AF_INET6:
3090 xs_format_peer_addresses(xprt, "tcp",
3091 RPCBIND_NETID_TCP6);
3092 break;
3093 default:
3094 ret = ERR_PTR(-EAFNOSUPPORT);
3095 goto out_err;
3096 }
3097
3098 dprintk("RPC: set up xprt to %s (port %s) via %s\n",
3099 xprt->address_strings[RPC_DISPLAY_ADDR],
3100 xprt->address_strings[RPC_DISPLAY_PORT],
3101 xprt->address_strings[RPC_DISPLAY_PROTO]);
3102
3103 /*
3104 * Once we've associated a backchannel xprt with a connection,
3105 * we want to keep it around as long as the connection lasts,
3106 * in case we need to start using it for a backchannel again;
3107 * this reference won't be dropped until bc_xprt is destroyed.
3108 */
3109 xprt_get(xprt);
3110 args->bc_xprt->xpt_bc_xprt = xprt;
3111 xprt->bc_xprt = args->bc_xprt;
3112 bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
3113 transport->sock = bc_sock->sk_sock;
3114 transport->inet = bc_sock->sk_sk;
3115
3116 /*
3117 * Since we don't want connections for the backchannel, we set
3118 * the xprt status to connected
3119 */
3120 xprt_set_connected(xprt);
3121
3122 if (try_module_get(THIS_MODULE))
3123 return xprt;
3124
3125 args->bc_xprt->xpt_bc_xprt = NULL;
3126 args->bc_xprt->xpt_bc_xps = NULL;
3127 xprt_put(xprt);
3128 ret = ERR_PTR(-EINVAL);
3129out_err:
3130 xs_xprt_free(xprt);
3131 return ret;
3132}
3133
3134static struct xprt_class xs_local_transport = {
3135 .list = LIST_HEAD_INIT(xs_local_transport.list),
3136 .name = "named UNIX socket",
3137 .owner = THIS_MODULE,
3138 .ident = XPRT_TRANSPORT_LOCAL,
3139 .setup = xs_setup_local,
3140 .netid = { "" },
3141};
3142
3143static struct xprt_class xs_udp_transport = {
3144 .list = LIST_HEAD_INIT(xs_udp_transport.list),
3145 .name = "udp",
3146 .owner = THIS_MODULE,
3147 .ident = XPRT_TRANSPORT_UDP,
3148 .setup = xs_setup_udp,
3149 .netid = { "udp", "udp6", "" },
3150};
3151
3152static struct xprt_class xs_tcp_transport = {
3153 .list = LIST_HEAD_INIT(xs_tcp_transport.list),
3154 .name = "tcp",
3155 .owner = THIS_MODULE,
3156 .ident = XPRT_TRANSPORT_TCP,
3157 .setup = xs_setup_tcp,
3158 .netid = { "tcp", "tcp6", "" },
3159};
3160
3161static struct xprt_class xs_bc_tcp_transport = {
3162 .list = LIST_HEAD_INIT(xs_bc_tcp_transport.list),
3163 .name = "tcp NFSv4.1 backchannel",
3164 .owner = THIS_MODULE,
3165 .ident = XPRT_TRANSPORT_BC_TCP,
3166 .setup = xs_setup_bc_tcp,
3167 .netid = { "" },
3168};
3169
3170/**
3171 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
3172 *
3173 */
3174int init_socket_xprt(void)
3175{
3176 if (!sunrpc_table_header)
3177 sunrpc_table_header = register_sysctl_table(sunrpc_table);
3178
3179 xprt_register_transport(&xs_local_transport);
3180 xprt_register_transport(&xs_udp_transport);
3181 xprt_register_transport(&xs_tcp_transport);
3182 xprt_register_transport(&xs_bc_tcp_transport);
3183
3184 return 0;
3185}
3186
3187/**
3188 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
3189 *
3190 */
3191void cleanup_socket_xprt(void)
3192{
3193 if (sunrpc_table_header) {
3194 unregister_sysctl_table(sunrpc_table_header);
3195 sunrpc_table_header = NULL;
3196 }
3197
3198 xprt_unregister_transport(&xs_local_transport);
3199 xprt_unregister_transport(&xs_udp_transport);
3200 xprt_unregister_transport(&xs_tcp_transport);
3201 xprt_unregister_transport(&xs_bc_tcp_transport);
3202}
3203
3204static int param_set_portnr(const char *val, const struct kernel_param *kp)
3205{
3206 return param_set_uint_minmax(val, kp,
3207 RPC_MIN_RESVPORT,
3208 RPC_MAX_RESVPORT);
3209}
3210
3211static const struct kernel_param_ops param_ops_portnr = {
3212 .set = param_set_portnr,
3213 .get = param_get_uint,
3214};
3215
3216#define param_check_portnr(name, p) \
3217 __param_check(name, p, unsigned int);
3218
3219module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
3220module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
3221
3222static int param_set_slot_table_size(const char *val,
3223 const struct kernel_param *kp)
3224{
3225 return param_set_uint_minmax(val, kp,
3226 RPC_MIN_SLOT_TABLE,
3227 RPC_MAX_SLOT_TABLE);
3228}
3229
3230static const struct kernel_param_ops param_ops_slot_table_size = {
3231 .set = param_set_slot_table_size,
3232 .get = param_get_uint,
3233};
3234
3235#define param_check_slot_table_size(name, p) \
3236 __param_check(name, p, unsigned int);
3237
3238static int param_set_max_slot_table_size(const char *val,
3239 const struct kernel_param *kp)
3240{
3241 return param_set_uint_minmax(val, kp,
3242 RPC_MIN_SLOT_TABLE,
3243 RPC_MAX_SLOT_TABLE_LIMIT);
3244}
3245
3246static const struct kernel_param_ops param_ops_max_slot_table_size = {
3247 .set = param_set_max_slot_table_size,
3248 .get = param_get_uint,
3249};
3250
3251#define param_check_max_slot_table_size(name, p) \
3252 __param_check(name, p, unsigned int);
3253
3254module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
3255 slot_table_size, 0644);
3256module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries,
3257 max_slot_table_size, 0644);
3258module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
3259 slot_table_size, 0644);