// SPDX-License-Identifier: GPL-2.0
/*
 * linux/net/sunrpc/xprtsock.c
 *
 * Client-side transport implementation for sockets.
 *
 * TCP callback races fixes (C) 1998 Red Hat
 * TCP send fixes (C) 1998 Red Hat
 * TCP NFS related read + write fixes
 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 *
 * Rewrite of large parts of the code in order to stabilize TCP stuff.
 * Fix behaviour when socket buffer is full.
 *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
 *
 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
 *   <gilles.quillard@bull.net>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/mm.h>
#include <linux/un.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/xprtsock.h>
#include <linux/file.h>
#ifdef CONFIG_SUNRPC_BACKCHANNEL
#include <linux/sunrpc/bc_xprt.h>
#endif

#include <net/sock.h>
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/uio.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

static void xs_close(struct rpc_xprt *xprt);
static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
		struct socket *sock);

/*
 * xprtsock tunables
 */
static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;

static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;

#define XS_TCP_LINGER_TO	(15U * HZ)
static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;

/*
 * We can register our own files under /proc/sys/sunrpc by
 * calling register_sysctl_table() again.  The files in that
 * directory become the union of all files registered there.
 *
 * We simply need to make sure that we don't collide with
 * someone else's file names!
 */

static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;

static struct ctl_table_header *sunrpc_table_header;

/*
 * FIXME: changing the UDP slot table size should also resize the UDP
 *        socket buffers for existing UDP transports
 */
static struct ctl_table xs_tunables_table[] = {
	{
		.procname	= "udp_slot_table_entries",
		.data		= &xprt_udp_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "tcp_slot_table_entries",
		.data		= &xprt_tcp_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_slot_table_size
	},
	{
		.procname	= "tcp_max_slot_table_entries",
		.data		= &xprt_max_tcp_slot_table_entries,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_slot_table_size,
		.extra2		= &max_tcp_slot_table_limit
	},
	{
		.procname	= "min_resvport",
		.data		= &xprt_min_resvport,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &xprt_min_resvport_limit,
		.extra2		= &xprt_max_resvport_limit
	},
	{
		.procname	= "max_resvport",
		.data		= &xprt_max_resvport,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &xprt_min_resvport_limit,
		.extra2		= &xprt_max_resvport_limit
	},
	{
		.procname	= "tcp_fin_timeout",
		.data		= &xs_tcp_fin_timeout,
		.maxlen		= sizeof(xs_tcp_fin_timeout),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ },
};

static struct ctl_table sunrpc_table[] = {
	{
		.procname	= "sunrpc",
		.mode		= 0555,
		.child		= xs_tunables_table
	},
	{ },
};

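/*
 * Illustrative only: once sunrpc_table is registered, the tunables above
 * surface as files such as /proc/sys/sunrpc/udp_slot_table_entries,
 * /proc/sys/sunrpc/min_resvport and /proc/sys/sunrpc/tcp_fin_timeout,
 * assuming the usual /proc/sys mount.
 */
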
/*
 * Wait duration for a reply from the RPC portmapper.
 */
#define XS_BIND_TO		(60U * HZ)

/*
 * Delay if a UDP socket connect error occurs.  This is most likely some
 * kind of resource problem on the local host.
 */
#define XS_UDP_REEST_TO		(2U * HZ)

/*
 * The reestablish timeout allows clients to delay for a bit before attempting
 * to reconnect to a server that just dropped our connection.
 *
 * We implement an exponential backoff when trying to reestablish a TCP
 * transport connection with the server.  Some servers like to drop a TCP
 * connection when they are overworked, so we start with a short timeout and
 * increase over time if the server is down or not responding.
 */
#define XS_TCP_INIT_REEST_TO	(3U * HZ)

/*
 * TCP idle timeout; client drops the transport socket if it is idle
 * for this long.  Note that we also timeout UDP sockets to prevent
 * holding port numbers when there is no RPC traffic.
 */
#define XS_IDLE_DISC_TO		(5U * 60 * HZ)

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# undef  RPC_DEBUG_DATA
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

#ifdef RPC_DEBUG_DATA
static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	u8 *buf = (u8 *) packet;
	int j;

	dprintk("RPC: %s\n", msg);
	for (j = 0; j < count && j < 128; j += 4) {
		if (!(j & 31)) {
			if (j)
				dprintk("\n");
			dprintk("0x%04x ", j);
		}
		dprintk("%02x%02x%02x%02x ",
			buf[j], buf[j+1], buf[j+2], buf[j+3]);
	}
	dprintk("\n");
}
#else
static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
{
	/* NOP */
}
#endif

static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
{
	return (struct rpc_xprt *) sk->sk_user_data;
}

static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
{
	return (struct sockaddr *) &xprt->addr;
}

static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
{
	return (struct sockaddr_un *) &xprt->addr;
}

static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
{
	return (struct sockaddr_in *) &xprt->addr;
}

static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
{
	return (struct sockaddr_in6 *) &xprt->addr;
}

static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = xs_addr(xprt);
	struct sockaddr_in6 *sin6;
	struct sockaddr_in *sin;
	struct sockaddr_un *sun;
	char buf[128];

	switch (sap->sa_family) {
	case AF_LOCAL:
		sun = xs_addr_un(xprt);
		strlcpy(buf, sun->sun_path, sizeof(buf));
		xprt->address_strings[RPC_DISPLAY_ADDR] =
						kstrdup(buf, GFP_KERNEL);
		break;
	case AF_INET:
		(void)rpc_ntop(sap, buf, sizeof(buf));
		xprt->address_strings[RPC_DISPLAY_ADDR] =
						kstrdup(buf, GFP_KERNEL);
		sin = xs_addr_in(xprt);
		snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
		break;
	case AF_INET6:
		(void)rpc_ntop(sap, buf, sizeof(buf));
		xprt->address_strings[RPC_DISPLAY_ADDR] =
						kstrdup(buf, GFP_KERNEL);
		sin6 = xs_addr_in6(xprt);
		snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
		break;
	default:
		BUG();
	}

	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
}

static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
{
	struct sockaddr *sap = xs_addr(xprt);
	char buf[128];

	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);

	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
}

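/*
 * A worked example for the two helpers above (illustrative): for an
 * AF_INET peer at 192.0.2.1, port 2049, they produce
 * RPC_DISPLAY_ADDR "192.0.2.1", RPC_DISPLAY_HEX_ADDR "c0000201",
 * RPC_DISPLAY_PORT "2049" and RPC_DISPLAY_HEX_PORT " 801".
 */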
static void xs_format_peer_addresses(struct rpc_xprt *xprt,
				     const char *protocol,
				     const char *netid)
{
	xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
	xprt->address_strings[RPC_DISPLAY_NETID] = netid;
	xs_format_common_peer_addresses(xprt);
	xs_format_common_peer_ports(xprt);
}

static void xs_update_peer_port(struct rpc_xprt *xprt)
{
	kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
	kfree(xprt->address_strings[RPC_DISPLAY_PORT]);

	xs_format_common_peer_ports(xprt);
}

static void xs_free_peer_addresses(struct rpc_xprt *xprt)
{
	unsigned int i;

	for (i = 0; i < RPC_DISPLAY_MAX; i++)
		switch (i) {
		case RPC_DISPLAY_PROTO:
		case RPC_DISPLAY_NETID:
			continue;
		default:
			kfree(xprt->address_strings[i]);
		}
}

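/*
 * For XDRBUF_SPARSE_PAGES receive buffers the page array may contain
 * holes; fill them with freshly allocated pages before reading into
 * them.  On allocation failure, return how many bytes of @want can
 * still be received into the pages that do exist (possibly zero), so
 * the caller can make partial progress.
 */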
static size_t
xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
{
	size_t i, n;

	if (!want || !(buf->flags & XDRBUF_SPARSE_PAGES))
		return want;
	n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < n; i++) {
		if (buf->pages[i])
			continue;
		buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp);
		if (!buf->pages[i]) {
			i *= PAGE_SIZE;
			return i > buf->page_base ? i - buf->page_base : 0;
		}
	}
	return want;
}

static ssize_t
xs_sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags, size_t seek)
{
	ssize_t ret;
	if (seek != 0)
		iov_iter_advance(&msg->msg_iter, seek);
	ret = sock_recvmsg(sock, msg, flags);
	return ret > 0 ? ret + seek : ret;
}

static ssize_t
xs_read_kvec(struct socket *sock, struct msghdr *msg, int flags,
		struct kvec *kvec, size_t count, size_t seek)
{
	iov_iter_kvec(&msg->msg_iter, READ, kvec, 1, count);
	return xs_sock_recvmsg(sock, msg, flags, seek);
}

static ssize_t
xs_read_bvec(struct socket *sock, struct msghdr *msg, int flags,
		struct bio_vec *bvec, unsigned long nr, size_t count,
		size_t seek)
{
	iov_iter_bvec(&msg->msg_iter, READ, bvec, nr, count);
	return xs_sock_recvmsg(sock, msg, flags, seek);
}

static ssize_t
xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
		size_t count)
{
	iov_iter_discard(&msg->msg_iter, READ, count);
	return sock_recvmsg(sock, msg, flags);
}

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
static void
xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
{
	struct bvec_iter bi = {
		.bi_size = count,
	};
	struct bio_vec bv;

	bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
	for_each_bvec(bv, bvec, bi, bi)
		flush_dcache_page(bv.bv_page);
}
#else
static inline void
xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
{
}
#endif

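/*
 * Receive into @buf's head kvec, page vector and tail kvec, in that
 * order.  @seek says how many bytes of the buffer were filled by
 * previous calls and must be skipped; @count bounds the total.  *@read
 * returns the number of new bytes received; -EMSGSIZE means @buf was
 * exhausted before @count bytes arrived.
 */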
static ssize_t
xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
		struct xdr_buf *buf, size_t count, size_t seek, size_t *read)
{
	size_t want, seek_init = seek, offset = 0;
	ssize_t ret;

	want = min_t(size_t, count, buf->head[0].iov_len);
	if (seek < want) {
		ret = xs_read_kvec(sock, msg, flags, &buf->head[0], want, seek);
		if (ret <= 0)
			goto sock_err;
		offset += ret;
		if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
			goto out;
		if (ret != want)
			goto out;
		seek = 0;
	} else {
		seek -= want;
		offset += want;
	}

	want = xs_alloc_sparse_pages(buf,
			min_t(size_t, count - offset, buf->page_len),
			GFP_KERNEL);
	if (seek < want) {
		ret = xs_read_bvec(sock, msg, flags, buf->bvec,
				xdr_buf_pagecount(buf),
				want + buf->page_base,
				seek + buf->page_base);
		if (ret <= 0)
			goto sock_err;
		xs_flush_bvec(buf->bvec, ret, seek + buf->page_base);
		offset += ret - buf->page_base;
		if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
			goto out;
		if (ret != want)
			goto out;
		seek = 0;
	} else {
		seek -= want;
		offset += want;
	}

	want = min_t(size_t, count - offset, buf->tail[0].iov_len);
	if (seek < want) {
		ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek);
		if (ret <= 0)
			goto sock_err;
		offset += ret;
		if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
			goto out;
		if (ret != want)
			goto out;
	} else if (offset < seek_init)
		offset = seek_init;
	ret = -EMSGSIZE;
out:
	*read = offset - seek_init;
	return ret;
sock_err:
	offset += seek;
	goto out;
}

static void
xs_read_header(struct sock_xprt *transport, struct xdr_buf *buf)
{
	if (!transport->recv.copied) {
		if (buf->head[0].iov_len >= transport->recv.offset)
			memcpy(buf->head[0].iov_base,
					&transport->recv.xid,
					transport->recv.offset);
		transport->recv.copied = transport->recv.offset;
	}
}

static bool
xs_read_stream_request_done(struct sock_xprt *transport)
{
	return transport->recv.fraghdr & cpu_to_be32(RPC_LAST_STREAM_FRAGMENT);
}

static void
xs_read_stream_check_eor(struct sock_xprt *transport,
		struct msghdr *msg)
{
	if (xs_read_stream_request_done(transport))
		msg->msg_flags |= MSG_EOR;
}

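/*
 * Pull the body of the current record into @req's receive buffer.
 * recv.offset tracks how much of the record has been consumed from the
 * wire; recv.copied tracks how much has landed in the request, and the
 * two advance together here.
 */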
static ssize_t
xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
		int flags, struct rpc_rqst *req)
{
	struct xdr_buf *buf = &req->rq_private_buf;
	size_t want, uninitialized_var(read);
	ssize_t uninitialized_var(ret);

	xs_read_header(transport, buf);

	want = transport->recv.len - transport->recv.offset;
	if (want != 0) {
		ret = xs_read_xdr_buf(transport->sock, msg, flags, buf,
				transport->recv.copied + want,
				transport->recv.copied,
				&read);
		transport->recv.offset += read;
		transport->recv.copied += read;
	}

	if (transport->recv.offset == transport->recv.len)
		xs_read_stream_check_eor(transport, msg);

	if (want == 0)
		return 0;

	switch (ret) {
	default:
		break;
	case -EFAULT:
	case -EMSGSIZE:
		msg->msg_flags |= MSG_TRUNC;
		return read;
	case 0:
		return -ESHUTDOWN;
	}
	return ret < 0 ? ret : read;
}

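/*
 * Size of the per-record header we read before dispatching: the first
 * fragment of a record carries the 4-byte record marker plus the XID
 * and call direction (3 x __be32); continuation fragments carry only
 * the marker.  This relies on recv.fraghdr, recv.xid and recv.calldir
 * being laid out contiguously in struct sock_xprt, since
 * xs_read_stream_header() below reads all of them through a single
 * kvec aimed at recv.fraghdr.
 */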
static size_t
xs_read_stream_headersize(bool isfrag)
{
	if (isfrag)
		return sizeof(__be32);
	return 3 * sizeof(__be32);
}

static ssize_t
xs_read_stream_header(struct sock_xprt *transport, struct msghdr *msg,
		int flags, size_t want, size_t seek)
{
	struct kvec kvec = {
		.iov_base = &transport->recv.fraghdr,
		.iov_len = want,
	};
	return xs_read_kvec(transport->sock, msg, flags, &kvec, want, seek);
}

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static ssize_t
xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
{
	struct rpc_xprt *xprt = &transport->xprt;
	struct rpc_rqst *req;
	ssize_t ret;

	/* Look up and lock the request corresponding to the given XID */
	req = xprt_lookup_bc_request(xprt, transport->recv.xid);
	if (!req) {
		printk(KERN_WARNING "Callback slot table overflowed\n");
		return -ESHUTDOWN;
	}
	if (transport->recv.copied && !req->rq_private_buf.len)
		return -ESHUTDOWN;

	ret = xs_read_stream_request(transport, msg, flags, req);
	if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
		xprt_complete_bc_request(req, transport->recv.copied);
	else
		req->rq_private_buf.len = transport->recv.copied;

	return ret;
}
#else /* CONFIG_SUNRPC_BACKCHANNEL */
static ssize_t
xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
{
	return -ESHUTDOWN;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

static ssize_t
xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags)
{
	struct rpc_xprt *xprt = &transport->xprt;
	struct rpc_rqst *req;
	ssize_t ret = 0;

	/* Look up and lock the request corresponding to the given XID */
	spin_lock(&xprt->queue_lock);
	req = xprt_lookup_rqst(xprt, transport->recv.xid);
	if (!req || (transport->recv.copied && !req->rq_private_buf.len)) {
		msg->msg_flags |= MSG_TRUNC;
		goto out;
	}
	xprt_pin_rqst(req);
	spin_unlock(&xprt->queue_lock);

	ret = xs_read_stream_request(transport, msg, flags, req);

	spin_lock(&xprt->queue_lock);
	if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
		xprt_complete_rqst(req->rq_task, transport->recv.copied);
	else
		req->rq_private_buf.len = transport->recv.copied;
	xprt_unpin_rqst(req);
out:
	spin_unlock(&xprt->queue_lock);
	return ret;
}

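/*
 * Advance the record state machine by one step: read the record header
 * if we are at the start of a record, dispatch the payload to the
 * backchannel (RPC_CALL) or reply (RPC_REPLY) path, discard anything
 * we could not place, and reset the recv.* bookkeeping once a fragment
 * has been fully consumed.
 */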
static ssize_t
xs_read_stream(struct sock_xprt *transport, int flags)
{
	struct msghdr msg = { 0 };
	size_t want, read = 0;
	ssize_t ret = 0;

	if (transport->recv.len == 0) {
		want = xs_read_stream_headersize(transport->recv.copied != 0);
		ret = xs_read_stream_header(transport, &msg, flags, want,
				transport->recv.offset);
		if (ret <= 0)
			goto out_err;
		transport->recv.offset = ret;
		if (transport->recv.offset != want)
			return transport->recv.offset;
		transport->recv.len = be32_to_cpu(transport->recv.fraghdr) &
			RPC_FRAGMENT_SIZE_MASK;
		transport->recv.offset -= sizeof(transport->recv.fraghdr);
		read = ret;
	}

	switch (be32_to_cpu(transport->recv.calldir)) {
	default:
		msg.msg_flags |= MSG_TRUNC;
		break;
	case RPC_CALL:
		ret = xs_read_stream_call(transport, &msg, flags);
		break;
	case RPC_REPLY:
		ret = xs_read_stream_reply(transport, &msg, flags);
	}
	if (msg.msg_flags & MSG_TRUNC) {
		transport->recv.calldir = cpu_to_be32(-1);
		transport->recv.copied = -1;
	}
	if (ret < 0)
		goto out_err;
	read += ret;
	if (transport->recv.offset < transport->recv.len) {
		if (!(msg.msg_flags & MSG_TRUNC))
			return read;
		msg.msg_flags = 0;
		ret = xs_read_discard(transport->sock, &msg, flags,
				transport->recv.len - transport->recv.offset);
		if (ret <= 0)
			goto out_err;
		transport->recv.offset += ret;
		read += ret;
		if (transport->recv.offset != transport->recv.len)
			return read;
	}
	if (xs_read_stream_request_done(transport)) {
		trace_xs_stream_read_request(transport);
		transport->recv.copied = 0;
	}
	transport->recv.offset = 0;
	transport->recv.len = 0;
	return read;
out_err:
	return ret != 0 ? ret : -ESHUTDOWN;
}

static __poll_t xs_poll_socket(struct sock_xprt *transport)
{
	return transport->sock->ops->poll(transport->file, transport->sock,
			NULL);
}

static bool xs_poll_socket_readable(struct sock_xprt *transport)
{
	__poll_t events = xs_poll_socket(transport);

	return (events & (EPOLLIN | EPOLLRDNORM)) && !(events & EPOLLRDHUP);
}

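/*
 * Re-arm receive processing: clear XPRT_SOCK_DATA_READY, then poll the
 * socket again.  If it is still readable, re-set the bit and requeue
 * the receive worker, which closes the race with data that arrived
 * after the worker drained the socket but before the bit was cleared.
 */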
static void xs_poll_check_readable(struct sock_xprt *transport)
{
	clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
	if (!xs_poll_socket_readable(transport))
		return;
	if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
		queue_work(xprtiod_workqueue, &transport->recv_worker);
}

static void xs_stream_data_receive(struct sock_xprt *transport)
{
	size_t read = 0;
	ssize_t ret = 0;

	mutex_lock(&transport->recv_mutex);
	if (transport->sock == NULL)
		goto out;
	for (;;) {
		ret = xs_read_stream(transport, MSG_DONTWAIT);
		if (ret < 0)
			break;
		read += ret;
		cond_resched();
	}
	if (ret == -ESHUTDOWN)
		kernel_sock_shutdown(transport->sock, SHUT_RDWR);
	else
		xs_poll_check_readable(transport);
out:
	mutex_unlock(&transport->recv_mutex);
	trace_xs_stream_read_data(&transport->xprt, ret, read);
}

static void xs_stream_data_receive_workfn(struct work_struct *work)
{
	struct sock_xprt *transport =
		container_of(work, struct sock_xprt, recv_worker);
	unsigned int pflags = memalloc_nofs_save();

	xs_stream_data_receive(transport);
	memalloc_nofs_restore(pflags);
}

static void
xs_stream_reset_connect(struct sock_xprt *transport)
{
	transport->recv.offset = 0;
	transport->recv.len = 0;
	transport->recv.copied = 0;
	transport->xmit.offset = 0;
}

static void
xs_stream_start_connect(struct sock_xprt *transport)
{
	transport->xprt.stat.connect_count++;
	transport->xprt.stat.connect_start = jiffies;
}

#define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)

static int xs_sendmsg(struct socket *sock, struct msghdr *msg, size_t seek)
{
	if (seek)
		iov_iter_advance(&msg->msg_iter, seek);
	return sock_sendmsg(sock, msg);
}

static int xs_send_kvec(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t seek)
{
	iov_iter_kvec(&msg->msg_iter, WRITE, vec, 1, vec->iov_len);
	return xs_sendmsg(sock, msg, seek);
}

static int xs_send_pagedata(struct socket *sock, struct msghdr *msg, struct xdr_buf *xdr, size_t base)
{
	int err;

	err = xdr_alloc_bvec(xdr, GFP_KERNEL);
	if (err < 0)
		return err;

	iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec,
			xdr_buf_pagecount(xdr),
			xdr->page_len + xdr->page_base);
	return xs_sendmsg(sock, msg, base + xdr->page_base);
}

#define xs_record_marker_len() sizeof(rpc_fraghdr)

/* Common case:
 *  - stream transport
 *  - sending from byte 0 of the message
 *  - the message is wholly contained in @xdr's head iovec
 */
static int xs_send_rm_and_kvec(struct socket *sock, struct msghdr *msg,
		rpc_fraghdr marker, struct kvec *vec, size_t base)
{
	struct kvec iov[2] = {
		[0] = {
			.iov_base	= &marker,
			.iov_len	= sizeof(marker)
		},
		[1] = *vec,
	};
	size_t len = iov[0].iov_len + iov[1].iov_len;

	iov_iter_kvec(&msg->msg_iter, WRITE, iov, 2, len);
	return xs_sendmsg(sock, msg, base);
}

/**
 * xs_sendpages - write pages directly to a socket
 * @sock: socket to send on
 * @addr: UDP only -- address of destination
 * @addrlen: UDP only -- length of destination address
 * @xdr: buffer containing this request
 * @base: starting position in the buffer
 * @rm: stream record marker field
 * @sent_p: return the total number of bytes successfully queued for sending
 *
 */
static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, rpc_fraghdr rm, int *sent_p)
{
	struct msghdr msg = {
		.msg_name = addr,
		.msg_namelen = addrlen,
		.msg_flags = XS_SENDMSG_FLAGS | MSG_MORE,
	};
	unsigned int rmsize = rm ? sizeof(rm) : 0;
	unsigned int remainder = rmsize + xdr->len - base;
	unsigned int want;
	int err = 0;

	if (unlikely(!sock))
		return -ENOTSOCK;

	want = xdr->head[0].iov_len + rmsize;
	if (base < want) {
		unsigned int len = want - base;
		remainder -= len;
		if (remainder == 0)
			msg.msg_flags &= ~MSG_MORE;
		if (rmsize)
			err = xs_send_rm_and_kvec(sock, &msg, rm,
					&xdr->head[0], base);
		else
			err = xs_send_kvec(sock, &msg, &xdr->head[0], base);
		if (remainder == 0 || err != len)
			goto out;
		*sent_p += err;
		base = 0;
	} else
		base -= want;

	if (base < xdr->page_len) {
		unsigned int len = xdr->page_len - base;
		remainder -= len;
		if (remainder == 0)
			msg.msg_flags &= ~MSG_MORE;
		err = xs_send_pagedata(sock, &msg, xdr, base);
		if (remainder == 0 || err != len)
			goto out;
		*sent_p += err;
		base = 0;
	} else
		base -= xdr->page_len;

	if (base >= xdr->tail[0].iov_len)
		return 0;
	msg.msg_flags &= ~MSG_MORE;
	err = xs_send_kvec(sock, &msg, &xdr->tail[0], base);
out:
	if (err > 0) {
		*sent_p += err;
		err = 0;
	}
	return err;
}

/**
 * xs_nospace - handle transmit was incomplete
 * @req: pointer to RPC request
 *
 */
static int xs_nospace(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct sock *sk = transport->inet;
	int ret = -EAGAIN;

	dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
			req->rq_task->tk_pid,
			req->rq_slen - transport->xmit.offset,
			req->rq_slen);

	/* Protect against races with write_space */
	spin_lock(&xprt->transport_lock);

	/* Don't race with disconnect */
	if (xprt_connected(xprt)) {
		/* wait for more buffer space */
		sk->sk_write_pending++;
		xprt_wait_for_buffer_space(xprt);
	} else
		ret = -ENOTCONN;

	spin_unlock(&xprt->transport_lock);

	/* Race breaker in case memory is freed before above code is called */
	if (ret == -EAGAIN) {
		struct socket_wq *wq;

		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags);
		rcu_read_unlock();

		sk->sk_write_space(sk);
	}
	return ret;
}

static void
xs_stream_prepare_request(struct rpc_rqst *req)
{
	xdr_free_bvec(&req->rq_rcv_buf);
	req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_KERNEL);
}

/*
 * Determine if the previous message in the stream was aborted before it
 * could complete transmission.
 */
static bool
xs_send_request_was_aborted(struct sock_xprt *transport, struct rpc_rqst *req)
{
	return transport->xmit.offset != 0 && req->rq_bytes_sent == 0;
}

/*
 * Return the stream record marker field for a record of length < 2^31-1
 */
static rpc_fraghdr
xs_stream_record_marker(struct xdr_buf *xdr)
{
	if (!xdr->len)
		return 0;
	return cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | (u32)xdr->len);
}
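
/*
 * Worked example (illustrative): for xdr->len == 0x100 the marker is
 * cpu_to_be32(0x80000100) -- the top bit flags the last fragment of
 * the record, and the low 31 bits carry the fragment length, per RPC
 * record marking.
 */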

/**
 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
 * @req: pointer to RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 */
static int xs_local_send_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport =
				container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	rpc_fraghdr rm = xs_stream_record_marker(xdr);
	unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
	int status;
	int sent = 0;

	/* Close the stream if the previous transmission was incomplete */
	if (xs_send_request_was_aborted(transport, req)) {
		xs_close(xprt);
		return -ENOTCONN;
	}

	xs_pktdump("packet data:",
			req->rq_svec->iov_base, req->rq_svec->iov_len);

	req->rq_xtime = ktime_get();
	status = xs_sendpages(transport->sock, NULL, 0, xdr,
			transport->xmit.offset, rm, &sent);
	dprintk("RPC: %s(%u) = %d\n",
			__func__, xdr->len - transport->xmit.offset, status);

	if (status == -EAGAIN && sock_writeable(transport->inet))
		status = -ENOBUFS;

	if (likely(sent > 0) || status == 0) {
		transport->xmit.offset += sent;
		req->rq_bytes_sent = transport->xmit.offset;
		if (likely(req->rq_bytes_sent >= msglen)) {
			req->rq_xmit_bytes_sent += transport->xmit.offset;
			transport->xmit.offset = 0;
			return 0;
		}
		status = -EAGAIN;
	}

	switch (status) {
	case -ENOBUFS:
		break;
	case -EAGAIN:
		status = xs_nospace(req);
		break;
	default:
		dprintk("RPC: sendmsg returned unrecognized error %d\n",
			-status);
		/* fall through */
	case -EPIPE:
		xs_close(xprt);
		status = -ENOTCONN;
	}

	return status;
}

/**
 * xs_udp_send_request - write an RPC request to a UDP socket
 * @req: pointer to RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 */
static int xs_udp_send_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	int sent = 0;
	int status;

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	if (!xprt_bound(xprt))
		return -ENOTCONN;

	if (!xprt_request_get_cong(xprt, req))
		return -EBADSLT;

	req->rq_xtime = ktime_get();
	status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen,
			      xdr, 0, 0, &sent);

	dprintk("RPC: xs_udp_send_request(%u) = %d\n",
			xdr->len, status);

	/* firewall is blocking us, don't return -EAGAIN or we end up looping */
	if (status == -EPERM)
		goto process_status;

	if (status == -EAGAIN && sock_writeable(transport->inet))
		status = -ENOBUFS;

	if (sent > 0 || status == 0) {
		req->rq_xmit_bytes_sent += sent;
		if (sent >= req->rq_slen)
			return 0;
		/* Still some bytes left; set up for a retry later. */
		status = -EAGAIN;
	}

process_status:
	switch (status) {
	case -ENOTSOCK:
		status = -ENOTCONN;
		/* Should we call xs_close() here? */
		break;
	case -EAGAIN:
		status = xs_nospace(req);
		break;
	case -ENETUNREACH:
	case -ENOBUFS:
	case -EPIPE:
	case -ECONNREFUSED:
	case -EPERM:
		/* When the server has died, an ICMP port unreachable message
		 * prompts ECONNREFUSED. */
		break;
	default:
		dprintk("RPC: sendmsg returned unrecognized error %d\n",
			-status);
	}

	return status;
}

/**
 * xs_tcp_send_request - write an RPC request to a TCP socket
 * @req: pointer to RPC request
 *
 * Return values:
 *        0:	The request has been sent
 *   EAGAIN:	The socket was blocked, please call again later to
 *		complete the request
 * ENOTCONN:	Caller needs to invoke connect logic then call again
 *    other:	Some other error occurred, the request was not sent
 *
 * XXX: In the case of soft timeouts, should we eventually give up
 *	if sendmsg is not able to make progress?
 */
static int xs_tcp_send_request(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct xdr_buf *xdr = &req->rq_snd_buf;
	rpc_fraghdr rm = xs_stream_record_marker(xdr);
	unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
	bool vm_wait = false;
	int status;
	int sent;

	/* Close the stream if the previous transmission was incomplete */
	if (xs_send_request_was_aborted(transport, req)) {
		if (transport->sock != NULL)
			kernel_sock_shutdown(transport->sock, SHUT_RDWR);
		return -ENOTCONN;
	}

	xs_pktdump("packet data:",
				req->rq_svec->iov_base,
				req->rq_svec->iov_len);

	if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state))
		xs_tcp_set_socket_timeouts(xprt, transport->sock);

	/* Continue transmitting the packet/record.  We must be careful
	 * to cope with writespace callbacks arriving _after_ we have
	 * called sendmsg(). */
	req->rq_xtime = ktime_get();
	while (1) {
		sent = 0;
		status = xs_sendpages(transport->sock, NULL, 0, xdr,
				      transport->xmit.offset, rm, &sent);

		dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
				xdr->len - transport->xmit.offset, status);

		/* If we've sent the entire packet, immediately
		 * reset the count of bytes sent. */
		transport->xmit.offset += sent;
		req->rq_bytes_sent = transport->xmit.offset;
		if (likely(req->rq_bytes_sent >= msglen)) {
			req->rq_xmit_bytes_sent += transport->xmit.offset;
			transport->xmit.offset = 0;
			return 0;
		}

		WARN_ON_ONCE(sent == 0 && status == 0);

		if (status == -EAGAIN) {
			/*
			 * Return EAGAIN if we're sure we're hitting the
			 * socket send buffer limits.
			 */
			if (test_bit(SOCK_NOSPACE, &transport->sock->flags))
				break;
			/*
			 * Did we hit a memory allocation failure?
			 */
			if (sent == 0) {
				status = -ENOBUFS;
				if (vm_wait)
					break;
				/* Retry, knowing now that we're below the
				 * socket send buffer limit
				 */
				vm_wait = true;
			}
			continue;
		}
		if (status < 0)
			break;
		vm_wait = false;
	}

	switch (status) {
	case -ENOTSOCK:
		status = -ENOTCONN;
		/* Should we call xs_close() here? */
		break;
	case -EAGAIN:
		status = xs_nospace(req);
		break;
	case -ECONNRESET:
	case -ECONNREFUSED:
	case -ENOTCONN:
	case -EADDRINUSE:
	case -ENOBUFS:
	case -EPIPE:
		break;
	default:
		dprintk("RPC: sendmsg returned unrecognized error %d\n",
			-status);
	}

	return status;
}

static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
{
	transport->old_data_ready = sk->sk_data_ready;
	transport->old_state_change = sk->sk_state_change;
	transport->old_write_space = sk->sk_write_space;
	transport->old_error_report = sk->sk_error_report;
}

static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
{
	sk->sk_data_ready = transport->old_data_ready;
	sk->sk_state_change = transport->old_state_change;
	sk->sk_write_space = transport->old_write_space;
	sk->sk_error_report = transport->old_error_report;
}

static void xs_sock_reset_state_flags(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
	clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state);
	clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state);
	clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state);
}

static void xs_run_error_worker(struct sock_xprt *transport, unsigned int nr)
{
	set_bit(nr, &transport->sock_state);
	queue_work(xprtiod_workqueue, &transport->error_worker);
}

static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
{
	smp_mb__before_atomic();
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	clear_bit(XPRT_CLOSING, &xprt->state);
	xs_sock_reset_state_flags(xprt);
	smp_mb__after_atomic();
}

/**
 * xs_error_report - callback to handle TCP socket state errors
 * @sk: socket
 *
 * Note: we don't call sock_error() since there may be a rpc_task
 * using the socket, and so we don't want to clear sk->sk_err.
 */
static void xs_error_report(struct sock *sk)
{
	struct sock_xprt *transport;
	struct rpc_xprt *xprt;

	read_lock_bh(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;

	transport = container_of(xprt, struct sock_xprt, xprt);
	transport->xprt_err = -sk->sk_err;
	if (transport->xprt_err == 0)
		goto out;
	dprintk("RPC: xs_error_report client %p, error=%d...\n",
			xprt, -transport->xprt_err);
	trace_rpc_socket_error(xprt, sk->sk_socket, transport->xprt_err);

	/* barrier ensures xprt_err is set before XPRT_SOCK_WAKE_ERROR */
	smp_mb__before_atomic();
	xs_run_error_worker(transport, XPRT_SOCK_WAKE_ERROR);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static void xs_reset_transport(struct sock_xprt *transport)
{
	struct socket *sock = transport->sock;
	struct sock *sk = transport->inet;
	struct rpc_xprt *xprt = &transport->xprt;
	struct file *filp = transport->file;

	if (sk == NULL)
		return;

	if (atomic_read(&transport->xprt.swapper))
		sk_clear_memalloc(sk);

	kernel_sock_shutdown(sock, SHUT_RDWR);

	mutex_lock(&transport->recv_mutex);
	write_lock_bh(&sk->sk_callback_lock);
	transport->inet = NULL;
	transport->sock = NULL;
	transport->file = NULL;

	sk->sk_user_data = NULL;

	xs_restore_old_callbacks(transport, sk);
	xprt_clear_connected(xprt);
	write_unlock_bh(&sk->sk_callback_lock);
	xs_sock_reset_connection_flags(xprt);
	/* Reset stream record info */
	xs_stream_reset_connect(transport);
	mutex_unlock(&transport->recv_mutex);

	trace_rpc_socket_close(xprt, sock);
	fput(filp);

	xprt_disconnect_done(xprt);
}

/**
 * xs_close - close a socket
 * @xprt: transport
 *
 * This is used when all requests are complete; i.e., no DRC state remains
 * on the server we want to save.
 *
 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
 * xs_reset_transport() zeroing the socket from underneath a writer.
 */
static void xs_close(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	dprintk("RPC: xs_close xprt %p\n", xprt);

	xs_reset_transport(transport);
	xprt->reestablish_timeout = 0;
}

static void xs_inject_disconnect(struct rpc_xprt *xprt)
{
	dprintk("RPC: injecting transport disconnect on xprt=%p\n",
		xprt);
	xprt_disconnect_done(xprt);
}

static void xs_xprt_free(struct rpc_xprt *xprt)
{
	xs_free_peer_addresses(xprt);
	xprt_free(xprt);
}

/**
 * xs_destroy - prepare to shutdown a transport
 * @xprt: doomed transport
 *
 */
static void xs_destroy(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt,
			struct sock_xprt, xprt);
	dprintk("RPC: xs_destroy xprt %p\n", xprt);

	cancel_delayed_work_sync(&transport->connect_worker);
	xs_close(xprt);
	cancel_work_sync(&transport->recv_worker);
	cancel_work_sync(&transport->error_worker);
	xs_xprt_free(xprt);
	module_put(THIS_MODULE);
}

/**
 * xs_udp_data_read_skb - receive callback for UDP sockets
 * @xprt: transport
 * @sk: socket
 * @skb: skbuff
 *
 */
static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
		struct sock *sk,
		struct sk_buff *skb)
{
	struct rpc_task *task;
	struct rpc_rqst *rovr;
	int repsize, copied;
	u32 _xid;
	__be32 *xp;

	repsize = skb->len;
	if (repsize < 4) {
		dprintk("RPC: impossible RPC reply size %d!\n", repsize);
		return;
	}

	/* Copy the XID from the skb... */
	xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid);
	if (xp == NULL)
		return;

	/* Look up and lock the request corresponding to the given XID */
	spin_lock(&xprt->queue_lock);
	rovr = xprt_lookup_rqst(xprt, *xp);
	if (!rovr)
		goto out_unlock;
	xprt_pin_rqst(rovr);
	xprt_update_rtt(rovr->rq_task);
	spin_unlock(&xprt->queue_lock);
	task = rovr->rq_task;

	if ((copied = rovr->rq_private_buf.buflen) > repsize)
		copied = repsize;

	/* Suck it into the iovec, verify checksum if not done by hw. */
	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
		spin_lock(&xprt->queue_lock);
		__UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
		goto out_unpin;
	}

	spin_lock(&xprt->transport_lock);
	xprt_adjust_cwnd(xprt, task, copied);
	spin_unlock(&xprt->transport_lock);
	spin_lock(&xprt->queue_lock);
	xprt_complete_rqst(task, copied);
	__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
out_unpin:
	xprt_unpin_rqst(rovr);
out_unlock:
	spin_unlock(&xprt->queue_lock);
}

static void xs_udp_data_receive(struct sock_xprt *transport)
{
	struct sk_buff *skb;
	struct sock *sk;
	int err;

	mutex_lock(&transport->recv_mutex);
	sk = transport->inet;
	if (sk == NULL)
		goto out;
	for (;;) {
		skb = skb_recv_udp(sk, 0, 1, &err);
		if (skb == NULL)
			break;
		xs_udp_data_read_skb(&transport->xprt, sk, skb);
		consume_skb(skb);
		cond_resched();
	}
	xs_poll_check_readable(transport);
out:
	mutex_unlock(&transport->recv_mutex);
}

static void xs_udp_data_receive_workfn(struct work_struct *work)
{
	struct sock_xprt *transport =
		container_of(work, struct sock_xprt, recv_worker);
	unsigned int pflags = memalloc_nofs_save();

	xs_udp_data_receive(transport);
	memalloc_nofs_restore(pflags);
}

/**
 * xs_data_ready - "data ready" callback for UDP sockets
 * @sk: socket with data to read
 *
 */
static void xs_data_ready(struct sock *sk)
{
	struct rpc_xprt *xprt;

	read_lock_bh(&sk->sk_callback_lock);
	dprintk("RPC: xs_data_ready...\n");
	xprt = xprt_from_sock(sk);
	if (xprt != NULL) {
		struct sock_xprt *transport = container_of(xprt,
				struct sock_xprt, xprt);
		transport->old_data_ready(sk);
		/* Any data means we had a useful conversation, so
		 * then we don't need to delay the next reconnect
		 */
		if (xprt->reestablish_timeout)
			xprt->reestablish_timeout = 0;
		if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
			queue_work(xprtiod_workqueue, &transport->recv_worker);
	}
	read_unlock_bh(&sk->sk_callback_lock);
}

/*
 * Helper function to force a TCP close if the server is sending
 * junk and/or it has put us in CLOSE_WAIT
 */
static void xs_tcp_force_close(struct rpc_xprt *xprt)
{
	xprt_force_disconnect(xprt);
}

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static size_t xs_tcp_bc_maxpayload(struct rpc_xprt *xprt)
{
	return PAGE_SIZE;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

/**
 * xs_tcp_state_change - callback to handle TCP socket state changes
 * @sk: socket whose state has changed
 *
 */
static void xs_tcp_state_change(struct sock *sk)
{
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;

	read_lock_bh(&sk->sk_callback_lock);
	if (!(xprt = xprt_from_sock(sk)))
		goto out;
	dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
	dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
			sk->sk_state, xprt_connected(xprt),
			sock_flag(sk, SOCK_DEAD),
			sock_flag(sk, SOCK_ZAPPED),
			sk->sk_shutdown);

	transport = container_of(xprt, struct sock_xprt, xprt);
	trace_rpc_socket_state_change(xprt, sk->sk_socket);
	switch (sk->sk_state) {
	case TCP_ESTABLISHED:
		if (!xprt_test_and_set_connected(xprt)) {
			xprt->connect_cookie++;
			clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
			xprt_clear_connecting(xprt);

			xprt->stat.connect_count++;
			xprt->stat.connect_time += (long)jiffies -
						   xprt->stat.connect_start;
			xs_run_error_worker(transport, XPRT_SOCK_WAKE_PENDING);
		}
		break;
	case TCP_FIN_WAIT1:
		/* The client initiated a shutdown of the socket */
		xprt->connect_cookie++;
		xprt->reestablish_timeout = 0;
		set_bit(XPRT_CLOSING, &xprt->state);
		smp_mb__before_atomic();
		clear_bit(XPRT_CONNECTED, &xprt->state);
		clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
		smp_mb__after_atomic();
		break;
	case TCP_CLOSE_WAIT:
		/* The server initiated a shutdown of the socket */
		xprt->connect_cookie++;
		clear_bit(XPRT_CONNECTED, &xprt->state);
		xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
		/* fall through */
	case TCP_CLOSING:
		/*
		 * If the server closed down the connection, make sure that
		 * we back off before reconnecting
		 */
		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
		break;
	case TCP_LAST_ACK:
		set_bit(XPRT_CLOSING, &xprt->state);
		smp_mb__before_atomic();
		clear_bit(XPRT_CONNECTED, &xprt->state);
		smp_mb__after_atomic();
		break;
	case TCP_CLOSE:
		if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
					&transport->sock_state))
			xprt_clear_connecting(xprt);
		clear_bit(XPRT_CLOSING, &xprt->state);
		/* Trigger the socket release */
		xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static void xs_write_space(struct sock *sk)
{
	struct socket_wq *wq;
	struct sock_xprt *transport;
	struct rpc_xprt *xprt;

	if (!sk->sk_socket)
		return;
	clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

	if (unlikely(!(xprt = xprt_from_sock(sk))))
		return;
	transport = container_of(xprt, struct sock_xprt, xprt);
	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0)
		goto out;

	xs_run_error_worker(transport, XPRT_SOCK_WAKE_WRITE);
	sk->sk_write_pending--;
out:
	rcu_read_unlock();
}

/**
 * xs_udp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_udp_write_space(struct sock *sk)
{
	read_lock_bh(&sk->sk_callback_lock);

	/* from net/core/sock.c:sock_def_write_space */
	if (sock_writeable(sk))
		xs_write_space(sk);

	read_unlock_bh(&sk->sk_callback_lock);
}

/**
 * xs_tcp_write_space - callback invoked when socket buffer space
 *                      becomes available
 * @sk: socket whose state has changed
 *
 * Called when more output buffer space is available for this socket.
 * We try not to wake our writers until they can make "significant"
 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
 * with a bunch of small requests.
 */
static void xs_tcp_write_space(struct sock *sk)
{
	read_lock_bh(&sk->sk_callback_lock);

	/* from net/core/stream.c:sk_stream_write_space */
	if (sk_stream_is_writeable(sk))
		xs_write_space(sk);

	read_unlock_bh(&sk->sk_callback_lock);
}

static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct sock *sk = transport->inet;

	if (transport->rcvsize) {
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
	}
	if (transport->sndsize) {
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
		sk->sk_write_space(sk);
	}
}

/**
 * xs_udp_set_buffer_size - set send and receive limits
 * @xprt: generic transport
 * @sndsize: requested size of send buffer, in bytes
 * @rcvsize: requested size of receive buffer, in bytes
 *
 * Set socket send and receive buffer size limits.
 */
static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	transport->sndsize = 0;
	if (sndsize)
		transport->sndsize = sndsize + 1024;
	transport->rcvsize = 0;
	if (rcvsize)
		transport->rcvsize = rcvsize + 1024;

	xs_udp_do_set_buffer_size(xprt);
}

/**
 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
 * @xprt: controlling transport
 * @task: task that timed out
 *
 * Adjust the congestion window after a retransmit timeout has occurred.
 */
static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock(&xprt->transport_lock);
	xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
	spin_unlock(&xprt->transport_lock);
}

static int xs_get_random_port(void)
{
	unsigned short min = xprt_min_resvport, max = xprt_max_resvport;
	unsigned short range;
	unsigned short rand;

	if (max < min)
		return -EADDRINUSE;
	range = max - min + 1;
	rand = (unsigned short) prandom_u32() % range;
	return rand + min;
}
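
/*
 * Illustrative only: with the usual compiled-in defaults of
 * RPC_DEF_MIN_RESVPORT = 665 and RPC_DEF_MAX_RESVPORT = 1023 (and no
 * sysctl overrides), the range above spans 359 ports and the result
 * lies in [665, 1023].
 */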

/**
 * xs_sock_set_reuseport - set the socket's port and address reuse options
 * @sock: socket
 *
 * Note that this function has to be called on all sockets that share the
 * same port, and it must be called before binding.
 */
static void xs_sock_set_reuseport(struct socket *sock)
{
	int opt = 1;

	kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEPORT,
			(char *)&opt, sizeof(opt));
}

static unsigned short xs_sock_getport(struct socket *sock)
{
	struct sockaddr_storage buf;
	unsigned short port = 0;

	if (kernel_getsockname(sock, (struct sockaddr *)&buf) < 0)
		goto out;
	switch (buf.ss_family) {
	case AF_INET6:
		port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port);
		break;
	case AF_INET:
		port = ntohs(((struct sockaddr_in *)&buf)->sin_port);
	}
out:
	return port;
}

/**
 * xs_set_port - reset the port number in the remote endpoint address
 * @xprt: generic transport
 * @port: new port number
 *
 */
static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
{
	dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);

	rpc_set_port(xs_addr(xprt), port);
	xs_update_peer_port(xprt);
}

static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
{
	if (transport->srcport == 0)
		transport->srcport = xs_sock_getport(sock);
}

static int xs_get_srcport(struct sock_xprt *transport)
{
	int port = transport->srcport;

	if (port == 0 && transport->xprt.resvport)
		port = xs_get_random_port();
	return port;
}

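/*
 * Walk downward through the reserved-port range looking for a port
 * that will bind.  When the search starts outside the range or falls
 * off the bottom, wrap around to xprt_max_resvport; xs_bind() below
 * gives up after the scan has wrapped twice.
 */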
1768static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
1769{
1770 if (transport->srcport != 0)
1771 transport->srcport = 0;
1772 if (!transport->xprt.resvport)
1773 return 0;
1774 if (port <= xprt_min_resvport || port > xprt_max_resvport)
1775 return xprt_max_resvport;
1776 return --port;
1777}
1778static int xs_bind(struct sock_xprt *transport, struct socket *sock)
1779{
1780 struct sockaddr_storage myaddr;
1781 int err, nloop = 0;
1782 int port = xs_get_srcport(transport);
1783 unsigned short last;
1784
1785 /*
1786 * If we are asking for any ephemeral port (i.e. port == 0 &&
1787 * transport->xprt.resvport == 0), don't bind. Let the local
1788 * port selection happen implicitly when the socket is used
1789 * (for example at connect time).
1790 *
1791 * This ensures that we can continue to establish TCP
1792 * connections even when all local ephemeral ports are already
1793 * a part of some TCP connection. This makes no difference
1794 * for UDP sockets, but also doens't harm them.
1795 *
1796 * If we're asking for any reserved port (i.e. port == 0 &&
1797 * transport->xprt.resvport == 1) xs_get_srcport above will
1798 * ensure that port is non-zero and we will bind as needed.
1799 */
1800 if (port <= 0)
1801 return port;
1802
1803 memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
1804 do {
1805 rpc_set_port((struct sockaddr *)&myaddr, port);
1806 err = kernel_bind(sock, (struct sockaddr *)&myaddr,
1807 transport->xprt.addrlen);
1808 if (err == 0) {
1809 transport->srcport = port;
1810 break;
1811 }
1812 last = port;
1813 port = xs_next_srcport(transport, port);
1814 if (port > last)
1815 nloop++;
1816 } while (err == -EADDRINUSE && nloop != 2);
1817
1818 if (myaddr.ss_family == AF_INET)
1819 dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__,
1820 &((struct sockaddr_in *)&myaddr)->sin_addr,
1821 port, err ? "failed" : "ok", err);
1822 else
1823 dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__,
1824 &((struct sockaddr_in6 *)&myaddr)->sin6_addr,
1825 port, err ? "failed" : "ok", err);
1826 return err;
1827}
1828
1829/*
1830 * We don't support autobind on AF_LOCAL sockets
1831 */
1832static void xs_local_rpcbind(struct rpc_task *task)
1833{
1834 xprt_set_bound(task->tk_xprt);
1835}
1836
1837static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
1838{
1839}
1840
1841#ifdef CONFIG_DEBUG_LOCK_ALLOC
1842static struct lock_class_key xs_key[2];
1843static struct lock_class_key xs_slock_key[2];
1844
1845static inline void xs_reclassify_socketu(struct socket *sock)
1846{
1847 struct sock *sk = sock->sk;
1848
1849 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
1850 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
1851}
1852
1853static inline void xs_reclassify_socket4(struct socket *sock)
1854{
1855 struct sock *sk = sock->sk;
1856
1857 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
1858 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
1859}
1860
1861static inline void xs_reclassify_socket6(struct socket *sock)
1862{
1863 struct sock *sk = sock->sk;
1864
1865 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
1866 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
1867}
1868
1869static inline void xs_reclassify_socket(int family, struct socket *sock)
1870{
1871 if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk)))
1872 return;
1873
1874 switch (family) {
1875 case AF_LOCAL:
1876 xs_reclassify_socketu(sock);
1877 break;
1878 case AF_INET:
1879 xs_reclassify_socket4(sock);
1880 break;
1881 case AF_INET6:
1882 xs_reclassify_socket6(sock);
1883 break;
1884 }
1885}
1886#else
1887static inline void xs_reclassify_socket(int family, struct socket *sock)
1888{
1889}
1890#endif
1891
1892static void xs_dummy_setup_socket(struct work_struct *work)
1893{
1894}
1895
1896static struct socket *xs_create_sock(struct rpc_xprt *xprt,
1897 struct sock_xprt *transport, int family, int type,
1898 int protocol, bool reuseport)
1899{
1900 struct file *filp;
1901 struct socket *sock;
1902 int err;
1903
1904 err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
1905 if (err < 0) {
1906 dprintk("RPC: can't create %d transport socket (%d).\n",
1907 protocol, -err);
1908 goto out;
1909 }
1910 xs_reclassify_socket(family, sock);
1911
1912 if (reuseport)
1913 xs_sock_set_reuseport(sock);
1914
1915 err = xs_bind(transport, sock);
1916 if (err) {
1917 sock_release(sock);
1918 goto out;
1919 }
1920
	filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
	if (IS_ERR(filp))
		return ERR_CAST(filp);
	transport->file = filp;

	return sock;
out:
	return ERR_PTR(err);
}

static int xs_local_finish_connecting(struct rpc_xprt *xprt,
				      struct socket *sock)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
									xprt);

	if (!transport->inet) {
		struct sock *sk = sock->sk;

		write_lock_bh(&sk->sk_callback_lock);

		xs_save_old_callbacks(transport, sk);

		sk->sk_user_data = xprt;
		sk->sk_data_ready = xs_data_ready;
		sk->sk_write_space = xs_udp_write_space;
		sock_set_flag(sk, SOCK_FASYNC);
		sk->sk_error_report = xs_error_report;

		xprt_clear_connected(xprt);

		/* Reset to new socket */
		transport->sock = sock;
		transport->inet = sk;

		write_unlock_bh(&sk->sk_callback_lock);
	}

	xs_stream_start_connect(transport);

	return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
}

/**
 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
 * @transport: socket transport to connect
 */
static int xs_local_setup_socket(struct sock_xprt *transport)
{
	struct rpc_xprt *xprt = &transport->xprt;
	struct file *filp;
	struct socket *sock;
	int status = -EIO;

	status = __sock_create(xprt->xprt_net, AF_LOCAL,
					SOCK_STREAM, 0, &sock, 1);
	if (status < 0) {
		dprintk("RPC: can't create AF_LOCAL "
			"transport socket (%d).\n", -status);
		goto out;
	}
	xs_reclassify_socket(AF_LOCAL, sock);

	filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
	if (IS_ERR(filp)) {
		status = PTR_ERR(filp);
		goto out;
	}
	transport->file = filp;

	dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n",
			xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);

	status = xs_local_finish_connecting(xprt, sock);
	trace_rpc_socket_connect(xprt, sock, status);
	switch (status) {
	case 0:
		dprintk("RPC: xprt %p connected to %s\n",
				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
		xprt->stat.connect_count++;
		xprt->stat.connect_time += (long)jiffies -
					   xprt->stat.connect_start;
		xprt_set_connected(xprt);
		/* fall through */
	case -ENOBUFS:
		break;
	case -ENOENT:
		dprintk("RPC: xprt %p: socket %s does not exist\n",
				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
		break;
	case -ECONNREFUSED:
		dprintk("RPC: xprt %p: connection refused for %s\n",
				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
		break;
	default:
		printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
				__func__, -status,
				xprt->address_strings[RPC_DISPLAY_ADDR]);
	}

out:
	xprt_clear_connecting(xprt);
	xprt_wake_pending_tasks(xprt, status);
	return status;
}

static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	int ret;

	if (RPC_IS_ASYNC(task)) {
		/*
		 * We want the AF_LOCAL connect to be resolved in the
		 * filesystem namespace of the process making the rpc
		 * call.  Thus we connect synchronously.
		 *
		 * If we want to support asynchronous AF_LOCAL calls,
		 * we'll need to figure out how to pass a namespace to
		 * connect.
		 */
		task->tk_rpc_status = -ENOTCONN;
		rpc_exit(task, -ENOTCONN);
		return;
	}
	ret = xs_local_setup_socket(transport);
	if (ret && !RPC_IS_SOFTCONN(task))
		msleep_interruptible(15000);
}

#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
/*
 * Note that this should be called with XPRT_LOCKED held (or when we otherwise
 * know that we have exclusive access to the socket), to guard against
 * races with xs_reset_transport.
 */
static void xs_set_memalloc(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
			xprt);

	/*
	 * If there's no sock, then we have nothing to set. The
	 * reconnecting process will get it for us.
	 */
	if (!transport->inet)
		return;
	if (atomic_read(&xprt->swapper))
		sk_set_memalloc(transport->inet);
}

/**
 * xs_enable_swap - Tag this transport as being used for swap.
 * @xprt: transport to tag
 *
 * Take a reference to this transport on behalf of the rpc_clnt, and
 * optionally mark it for swapping if it wasn't already.
 */
static int
xs_enable_swap(struct rpc_xprt *xprt)
{
	struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);

	if (atomic_inc_return(&xprt->swapper) != 1)
		return 0;
	if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
		return -ERESTARTSYS;
	if (xs->inet)
		sk_set_memalloc(xs->inet);
	xprt_release_xprt(xprt, NULL);
	return 0;
}

/**
 * xs_disable_swap - Untag this transport as being used for swap.
 * @xprt: transport to untag
 *
 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the
 * swapper refcount goes to 0, untag the socket as a memalloc socket.
 */
static void
xs_disable_swap(struct rpc_xprt *xprt)
{
	struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);

	if (!atomic_dec_and_test(&xprt->swapper))
		return;
	if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
		return;
	if (xs->inet)
		sk_clear_memalloc(xs->inet);
	xprt_release_xprt(xprt, NULL);
}
#else
static void xs_set_memalloc(struct rpc_xprt *xprt)
{
}

static int
xs_enable_swap(struct rpc_xprt *xprt)
{
	return -EINVAL;
}

static void
xs_disable_swap(struct rpc_xprt *xprt)
{
}
#endif
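
/*
 * Illustrative sketch (not part of this file): higher-level RPC client
 * code reaches these handlers through the transport ops table, roughly:
 *
 *	if (xprt->ops->enable_swap)
 *		err = xprt->ops->enable_swap(xprt);
 *	...
 *	xprt->ops->disable_swap(xprt);
 *
 * The real callers live in the generic rpc_clnt/xprt code (the
 * swap-over-NFS activation path); the above is only a sketch.
 */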

static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	if (!transport->inet) {
		struct sock *sk = sock->sk;

		write_lock_bh(&sk->sk_callback_lock);

		xs_save_old_callbacks(transport, sk);

		sk->sk_user_data = xprt;
		sk->sk_data_ready = xs_data_ready;
		sk->sk_write_space = xs_udp_write_space;
		sock_set_flag(sk, SOCK_FASYNC);

		xprt_set_connected(xprt);

		/* Reset to new socket */
		transport->sock = sock;
		transport->inet = sk;

		xs_set_memalloc(xprt);

		write_unlock_bh(&sk->sk_callback_lock);
	}
	xs_udp_do_set_buffer_size(xprt);

	xprt->stat.connect_start = jiffies;
}

static void xs_udp_setup_socket(struct work_struct *work)
{
	struct sock_xprt *transport =
		container_of(work, struct sock_xprt, connect_worker.work);
	struct rpc_xprt *xprt = &transport->xprt;
	struct socket *sock;
	int status = -EIO;

	sock = xs_create_sock(xprt, transport,
			xs_addr(xprt)->sa_family, SOCK_DGRAM,
			IPPROTO_UDP, false);
	if (IS_ERR(sock))
		goto out;

	dprintk("RPC: worker connecting xprt %p via %s to "
				"%s (port %s)\n", xprt,
			xprt->address_strings[RPC_DISPLAY_PROTO],
			xprt->address_strings[RPC_DISPLAY_ADDR],
			xprt->address_strings[RPC_DISPLAY_PORT]);

	xs_udp_finish_connecting(xprt, sock);
	trace_rpc_socket_connect(xprt, sock, 0);
	status = 0;
out:
	xprt_clear_connecting(xprt);
	xprt_unlock_connect(xprt, transport);
	xprt_wake_pending_tasks(xprt, status);
}

/**
 * xs_tcp_shutdown - gracefully shut down a TCP socket
 * @xprt: transport
 *
 * Initiates a graceful shutdown of the TCP socket by calling the
 * equivalent of shutdown(SHUT_RDWR);
 */
static void xs_tcp_shutdown(struct rpc_xprt *xprt)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct socket *sock = transport->sock;
	int skst = transport->inet ? transport->inet->sk_state : TCP_CLOSE;

	if (sock == NULL)
		return;
	switch (skst) {
	default:
		kernel_sock_shutdown(sock, SHUT_RDWR);
		trace_rpc_socket_shutdown(xprt, sock);
		break;
	case TCP_CLOSE:
	case TCP_TIME_WAIT:
		xs_reset_transport(transport);
	}
}

static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
		struct socket *sock)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	unsigned int keepidle;
	unsigned int keepcnt;
	unsigned int opt_on = 1;
	unsigned int timeo;

	spin_lock(&xprt->transport_lock);
	keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ);
	keepcnt = xprt->timeout->to_retries + 1;
	timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
		(xprt->timeout->to_retries + 1);
	clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
	spin_unlock(&xprt->transport_lock);

	/* TCP Keepalive options */
	kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
			(char *)&opt_on, sizeof(opt_on));
	kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
			(char *)&keepidle, sizeof(keepidle));
	kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
			(char *)&keepidle, sizeof(keepidle));
	kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
			(char *)&keepcnt, sizeof(keepcnt));

	/* TCP user timeout (see RFC5482) */
	kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT,
			(char *)&timeo, sizeof(timeo));
}
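
/*
 * Worked example (sketch): with the default TCP timeout below
 * (to_initval = 60s, to_retries = 2), the code above programs:
 *
 *	keepidle  = 60	(first keepalive probe after 60s of idle time)
 *	keepintvl = 60	(then one probe every 60s)
 *	keepcnt   = 3	(give up after 3 unanswered probes)
 *	timeo     = 180000 ms that transmitted data may stay unacked
 *		    before the connection is dropped
 */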

static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt,
		unsigned long connect_timeout,
		unsigned long reconnect_timeout)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	struct rpc_timeout to;
	unsigned long initval;

	spin_lock(&xprt->transport_lock);
	if (reconnect_timeout < xprt->max_reconnect_timeout)
		xprt->max_reconnect_timeout = reconnect_timeout;
	if (connect_timeout < xprt->connect_timeout) {
		memcpy(&to, xprt->timeout, sizeof(to));
		initval = DIV_ROUND_UP(connect_timeout, to.to_retries + 1);
		/* Arbitrary lower limit */
		if (initval < XS_TCP_INIT_REEST_TO << 1)
			initval = XS_TCP_INIT_REEST_TO << 1;
		to.to_initval = initval;
		to.to_maxval = initval;
		memcpy(&transport->tcp_timeout, &to,
			sizeof(transport->tcp_timeout));
		xprt->timeout = &transport->tcp_timeout;
		xprt->connect_timeout = connect_timeout;
	}
	set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
	spin_unlock(&xprt->transport_lock);
}
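
/*
 * Worked example (sketch): lowering connect_timeout to 90s on a
 * transport using the default TCP timeout (to_retries = 2) gives
 * initval = DIV_ROUND_UP(90s, 3) = 30s, so each of the three connect
 * attempts gets a 30s window; values below twice XS_TCP_INIT_REEST_TO
 * are clamped to that floor.
 */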

static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	int ret = -ENOTCONN;

	if (!transport->inet) {
		struct sock *sk = sock->sk;
		unsigned int addr_pref = IPV6_PREFER_SRC_PUBLIC;

		/* Avoid temporary addresses, they are bad for long-lived
		 * connections such as NFS mounts.
		 * RFC4941, section 3.6 suggests that:
		 *    Individual applications, which have specific
		 *    knowledge about the normal duration of connections,
		 *    MAY override this as appropriate.
		 */
		kernel_setsockopt(sock, SOL_IPV6, IPV6_ADDR_PREFERENCES,
				(char *)&addr_pref, sizeof(addr_pref));

		xs_tcp_set_socket_timeouts(xprt, sock);

		write_lock_bh(&sk->sk_callback_lock);

		xs_save_old_callbacks(transport, sk);

		sk->sk_user_data = xprt;
		sk->sk_data_ready = xs_data_ready;
		sk->sk_state_change = xs_tcp_state_change;
		sk->sk_write_space = xs_tcp_write_space;
		sock_set_flag(sk, SOCK_FASYNC);
		sk->sk_error_report = xs_error_report;

		/* socket options */
		sock_reset_flag(sk, SOCK_LINGER);
		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;

		xprt_clear_connected(xprt);

		/* Reset to new socket */
		transport->sock = sock;
		transport->inet = sk;

		write_unlock_bh(&sk->sk_callback_lock);
	}

	if (!xprt_bound(xprt))
		goto out;

	xs_set_memalloc(xprt);

	xs_stream_start_connect(transport);

	/* Tell the socket layer to start connecting... */
	set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
	ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
	switch (ret) {
	case 0:
		xs_set_srcport(transport, sock);
		/* fall through */
	case -EINPROGRESS:
		/* SYN_SENT! */
		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
		break;
	case -EADDRNOTAVAIL:
		/* Source port number is unavailable. Try a new one! */
		transport->srcport = 0;
	}
out:
	return ret;
}

/**
 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
 * @work: queued work item
 *
 * Invoked from the transport's connect worker (workqueue context).
 */
static void xs_tcp_setup_socket(struct work_struct *work)
{
	struct sock_xprt *transport =
		container_of(work, struct sock_xprt, connect_worker.work);
	struct socket *sock = transport->sock;
	struct rpc_xprt *xprt = &transport->xprt;
	int status = -EIO;

	if (!sock) {
		sock = xs_create_sock(xprt, transport,
				xs_addr(xprt)->sa_family, SOCK_STREAM,
				IPPROTO_TCP, true);
		if (IS_ERR(sock)) {
			status = PTR_ERR(sock);
			goto out;
		}
	}

	dprintk("RPC: worker connecting xprt %p via %s to "
				"%s (port %s)\n", xprt,
			xprt->address_strings[RPC_DISPLAY_PROTO],
			xprt->address_strings[RPC_DISPLAY_ADDR],
			xprt->address_strings[RPC_DISPLAY_PORT]);

	status = xs_tcp_finish_connecting(xprt, sock);
	trace_rpc_socket_connect(xprt, sock, status);
	dprintk("RPC: %p connect status %d connected %d sock state %d\n",
			xprt, -status, xprt_connected(xprt),
			sock->sk->sk_state);
	switch (status) {
	default:
		printk("%s: connect returned unhandled error %d\n",
			__func__, status);
		/* fall through */
	case -EADDRNOTAVAIL:
		/* We're probably in TIME_WAIT. Get rid of existing socket,
		 * and retry
		 */
		xs_tcp_force_close(xprt);
		break;
	case 0:
	case -EINPROGRESS:
	case -EALREADY:
		xprt_unlock_connect(xprt, transport);
		return;
	case -EINVAL:
		/* Happens, for instance, if the user specified a link
		 * local IPv6 address without a scope-id.
		 */
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ENETDOWN:
	case -ENETUNREACH:
	case -EHOSTUNREACH:
	case -EADDRINUSE:
	case -ENOBUFS:
		/*
		 * xs_tcp_force_close() wakes tasks with -EIO.
		 * We need to wake them first to ensure the
		 * correct error code.
		 */
		xprt_wake_pending_tasks(xprt, status);
		xs_tcp_force_close(xprt);
		goto out;
	}
	status = -EAGAIN;
out:
	xprt_clear_connecting(xprt);
	xprt_unlock_connect(xprt, transport);
	xprt_wake_pending_tasks(xprt, status);
}

/**
 * xs_connect - connect a socket to a remote endpoint
 * @xprt: pointer to transport structure
 * @task: address of RPC task that manages state of connect request
 *
 * TCP: If the remote end dropped the connection, delay reconnecting.
 *
 * UDP socket connects are synchronous, but we use a work queue anyway
 * to guarantee that even unprivileged user processes can set up a
 * socket on a privileged port.
 *
 * If a UDP socket connect fails, the delay behavior here prevents
 * retry floods (hard mounts).
 */
static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	unsigned long delay = 0;

	WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));

	if (transport->sock != NULL) {
		dprintk("RPC: xs_connect delayed xprt %p for %lu "
				"seconds\n",
				xprt, xprt->reestablish_timeout / HZ);

		/* Start by resetting any existing state */
		xs_reset_transport(transport);

		delay = xprt_reconnect_delay(xprt);
		xprt_reconnect_backoff(xprt, XS_TCP_INIT_REEST_TO);

	} else
		dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);

	queue_delayed_work(xprtiod_workqueue,
			&transport->connect_worker,
			delay);
}
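
/*
 * Reconnect backoff, roughly (sketch; the helpers live in xprt.c):
 * the first reattempt is delayed by the current reestablish_timeout
 * (initially XS_TCP_INIT_REEST_TO), which is then increased on each
 * pass, capped at the transport's max_reconnect_timeout.
 */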

static void xs_wake_disconnect(struct sock_xprt *transport)
{
	if (test_and_clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state))
		xs_tcp_force_close(&transport->xprt);
}

static void xs_wake_write(struct sock_xprt *transport)
{
	if (test_and_clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state))
		xprt_write_space(&transport->xprt);
}

static void xs_wake_error(struct sock_xprt *transport)
{
	int sockerr;

	if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
		return;
	mutex_lock(&transport->recv_mutex);
	if (transport->sock == NULL)
		goto out;
	if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
		goto out;
	sockerr = xchg(&transport->xprt_err, 0);
	if (sockerr < 0)
		xprt_wake_pending_tasks(&transport->xprt, sockerr);
out:
	mutex_unlock(&transport->recv_mutex);
}

static void xs_wake_pending(struct sock_xprt *transport)
{
	if (test_and_clear_bit(XPRT_SOCK_WAKE_PENDING, &transport->sock_state))
		xprt_wake_pending_tasks(&transport->xprt, -EAGAIN);
}

static void xs_error_handle(struct work_struct *work)
{
	struct sock_xprt *transport = container_of(work,
			struct sock_xprt, error_worker);

	xs_wake_disconnect(transport);
	xs_wake_write(transport);
	xs_wake_error(transport);
	xs_wake_pending(transport);
}

/**
 * xs_local_print_stats - display AF_LOCAL socket-specific stats
 * @xprt: rpc_xprt struct containing statistics
 * @seq: output file
 *
 */
static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
	long idle_time = 0;

	if (xprt_connected(xprt))
		idle_time = (long)(jiffies - xprt->last_used) / HZ;

	seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
			"%llu %llu %lu %llu %llu\n",
			xprt->stat.bind_count,
			xprt->stat.connect_count,
			xprt->stat.connect_time / HZ,
			idle_time,
			xprt->stat.sends,
			xprt->stat.recvs,
			xprt->stat.bad_xids,
			xprt->stat.req_u,
			xprt->stat.bklog_u,
			xprt->stat.max_slots,
			xprt->stat.sending_u,
			xprt->stat.pending_u);
}

/**
 * xs_udp_print_stats - display UDP socket-specific stats
 * @xprt: rpc_xprt struct containing statistics
 * @seq: output file
 *
 */
static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);

	seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu "
			"%lu %llu %llu\n",
			transport->srcport,
			xprt->stat.bind_count,
			xprt->stat.sends,
			xprt->stat.recvs,
			xprt->stat.bad_xids,
			xprt->stat.req_u,
			xprt->stat.bklog_u,
			xprt->stat.max_slots,
			xprt->stat.sending_u,
			xprt->stat.pending_u);
}

/**
 * xs_tcp_print_stats - display TCP socket-specific stats
 * @xprt: rpc_xprt struct containing statistics
 * @seq: output file
 *
 */
static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
{
	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
	long idle_time = 0;

	if (xprt_connected(xprt))
		idle_time = (long)(jiffies - xprt->last_used) / HZ;

	seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu "
			"%llu %llu %lu %llu %llu\n",
			transport->srcport,
			xprt->stat.bind_count,
			xprt->stat.connect_count,
			xprt->stat.connect_time / HZ,
			idle_time,
			xprt->stat.sends,
			xprt->stat.recvs,
			xprt->stat.bad_xids,
			xprt->stat.req_u,
			xprt->stat.bklog_u,
			xprt->stat.max_slots,
			xprt->stat.sending_u,
			xprt->stat.pending_u);
}
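
/*
 * These "xprt:" lines surface in userspace through /proc/self/mountstats
 * for NFS mounts (one line per transport), e.g. for TCP:
 *
 *	xprt:	tcp 892 0 1 0 3 341 341 0 682 0 1 0 0
 *
 * (illustrative values; field order matches the seq_printf above:
 * source port, bind/connect counts, connect time, idle time,
 * sends/recvs, bad XIDs, then request/backlog/slot/queue counters)
 */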

/*
 * Allocate a bunch of pages for a scratch buffer for the rpc code. We
 * allocate pages here, instead of kmalloc'ing as rpc_malloc does, because
 * we want to be able to use the server-side send routines.
 */
static int bc_malloc(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	size_t size = rqst->rq_callsize;
	struct page *page;
	struct rpc_buffer *buf;

	if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) {
		WARN_ONCE(1, "xprtsock: large bc buffer request (size %zu)\n",
			  size);
		return -EINVAL;
	}

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	buf = page_address(page);
	buf->len = PAGE_SIZE;

	rqst->rq_buffer = buf->data;
	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
	return 0;
}
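
/*
 * Resulting page layout (sketch):
 *
 *	+-------------------+---------------------+------------------+
 *	| struct rpc_buffer | call buffer         | reply buffer     |
 *	| (len header)      | (rq_callsize bytes) | (rest of page)   |
 *	+-------------------+---------------------+------------------+
 *	^ page_address()    ^ rq_buffer           ^ rq_rbuffer
 */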

/*
 * Free the space allocated in the bc_malloc routine
 */
static void bc_free(struct rpc_task *task)
{
	void *buffer = task->tk_rqstp->rq_buffer;
	struct rpc_buffer *buf;

	buf = container_of(buffer, struct rpc_buffer, data);
	free_page((unsigned long)buf);
}

/*
 * Use the svc_sock to send the callback. Must be called with svsk->sk_mutex
 * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
 */
static int bc_sendto(struct rpc_rqst *req)
{
	int len;
	struct xdr_buf *xbufp = &req->rq_snd_buf;
	struct sock_xprt *transport =
			container_of(req->rq_xprt, struct sock_xprt, xprt);
	unsigned long headoff;
	unsigned long tailoff;
	struct page *tailpage;
	struct msghdr msg = {
		.msg_flags	= MSG_MORE
	};
	rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT |
					 (u32)xbufp->len);
	struct kvec iov = {
		.iov_base	= &marker,
		.iov_len	= sizeof(marker),
	};

	len = kernel_sendmsg(transport->sock, &msg, &iov, 1, iov.iov_len);
	if (len != iov.iov_len)
		return -EAGAIN;

	tailpage = NULL;
	if (xbufp->tail[0].iov_len)
		tailpage = virt_to_page(xbufp->tail[0].iov_base);
	tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
	headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
	len = svc_send_common(transport->sock, xbufp,
			      virt_to_page(xbufp->head[0].iov_base), headoff,
			      tailpage, tailoff);
	if (len != xbufp->len)
		return -EAGAIN;
	return len;
}
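
/*
 * Record marker example: the 4-byte header sent above sets the high
 * bit (RPC_LAST_STREAM_FRAGMENT) and carries the fragment length in
 * the low 31 bits, so a 100-byte reply is preceded by the big-endian
 * word 0x80000064.
 */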

/*
 * The send routine. Borrows from svc_send
 */
static int bc_send_request(struct rpc_rqst *req)
{
	struct svc_xprt *xprt;
	int len;

	dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
	/*
	 * Get the server socket associated with this callback xprt
	 */
	xprt = req->rq_xprt->bc_xprt;

	/*
	 * Grab the mutex to serialize data as the connection is shared
	 * with the fore channel
	 */
	mutex_lock(&xprt->xpt_mutex);
	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
		len = -ENOTCONN;
	else
		len = bc_sendto(req);
	mutex_unlock(&xprt->xpt_mutex);

	if (len > 0)
		len = 0;

	return len;
}

/*
 * The close routine. Since this is client initiated, we do nothing
 */

static void bc_close(struct rpc_xprt *xprt)
{
}

/*
 * The xprt destroy routine. Again, because this connection is client
 * initiated, we do nothing
 */

static void bc_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC: bc_destroy xprt %p\n", xprt);

	xs_xprt_free(xprt);
	module_put(THIS_MODULE);
}

static const struct rpc_xprt_ops xs_local_ops = {
	.reserve_xprt		= xprt_reserve_xprt,
	.release_xprt		= xprt_release_xprt,
	.alloc_slot		= xprt_alloc_slot,
	.free_slot		= xprt_free_slot,
	.rpcbind		= xs_local_rpcbind,
	.set_port		= xs_local_set_port,
	.connect		= xs_local_connect,
	.buf_alloc		= rpc_malloc,
	.buf_free		= rpc_free,
	.prepare_request	= xs_stream_prepare_request,
	.send_request		= xs_local_send_request,
	.wait_for_reply_request	= xprt_wait_for_reply_request_def,
	.close			= xs_close,
	.destroy		= xs_destroy,
	.print_stats		= xs_local_print_stats,
	.enable_swap		= xs_enable_swap,
	.disable_swap		= xs_disable_swap,
};

static const struct rpc_xprt_ops xs_udp_ops = {
	.set_buffer_size	= xs_udp_set_buffer_size,
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong,
	.alloc_slot		= xprt_alloc_slot,
	.free_slot		= xprt_free_slot,
	.rpcbind		= rpcb_getport_async,
	.set_port		= xs_set_port,
	.connect		= xs_connect,
	.buf_alloc		= rpc_malloc,
	.buf_free		= rpc_free,
	.send_request		= xs_udp_send_request,
	.wait_for_reply_request	= xprt_wait_for_reply_request_rtt,
	.timer			= xs_udp_timer,
	.release_request	= xprt_release_rqst_cong,
	.close			= xs_close,
	.destroy		= xs_destroy,
	.print_stats		= xs_udp_print_stats,
	.enable_swap		= xs_enable_swap,
	.disable_swap		= xs_disable_swap,
	.inject_disconnect	= xs_inject_disconnect,
};

static const struct rpc_xprt_ops xs_tcp_ops = {
	.reserve_xprt		= xprt_reserve_xprt,
	.release_xprt		= xprt_release_xprt,
	.alloc_slot		= xprt_alloc_slot,
	.free_slot		= xprt_free_slot,
	.rpcbind		= rpcb_getport_async,
	.set_port		= xs_set_port,
	.connect		= xs_connect,
	.buf_alloc		= rpc_malloc,
	.buf_free		= rpc_free,
	.prepare_request	= xs_stream_prepare_request,
	.send_request		= xs_tcp_send_request,
	.wait_for_reply_request	= xprt_wait_for_reply_request_def,
	.close			= xs_tcp_shutdown,
	.destroy		= xs_destroy,
	.set_connect_timeout	= xs_tcp_set_connect_timeout,
	.print_stats		= xs_tcp_print_stats,
	.enable_swap		= xs_enable_swap,
	.disable_swap		= xs_disable_swap,
	.inject_disconnect	= xs_inject_disconnect,
#ifdef CONFIG_SUNRPC_BACKCHANNEL
	.bc_setup		= xprt_setup_bc,
	.bc_maxpayload		= xs_tcp_bc_maxpayload,
	.bc_num_slots		= xprt_bc_max_slots,
	.bc_free_rqst		= xprt_free_bc_rqst,
	.bc_destroy		= xprt_destroy_bc,
#endif
};

/*
 * The rpc_xprt_ops for the server backchannel
 */

static const struct rpc_xprt_ops bc_tcp_ops = {
	.reserve_xprt		= xprt_reserve_xprt,
	.release_xprt		= xprt_release_xprt,
	.alloc_slot		= xprt_alloc_slot,
	.free_slot		= xprt_free_slot,
	.buf_alloc		= bc_malloc,
	.buf_free		= bc_free,
	.send_request		= bc_send_request,
	.wait_for_reply_request	= xprt_wait_for_reply_request_def,
	.close			= bc_close,
	.destroy		= bc_destroy,
	.print_stats		= xs_tcp_print_stats,
	.enable_swap		= xs_enable_swap,
	.disable_swap		= xs_disable_swap,
	.inject_disconnect	= xs_inject_disconnect,
};

static int xs_init_anyaddr(const int family, struct sockaddr *sap)
{
	static const struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
	};
	static const struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
	};

	switch (family) {
	case AF_LOCAL:
		break;
	case AF_INET:
		memcpy(sap, &sin, sizeof(sin));
		break;
	case AF_INET6:
		memcpy(sap, &sin6, sizeof(sin6));
		break;
	default:
		dprintk("RPC: %s: Bad address family\n", __func__);
		return -EAFNOSUPPORT;
	}
	return 0;
}

static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
				      unsigned int slot_table_size,
				      unsigned int max_slot_table_size)
{
	struct rpc_xprt *xprt;
	struct sock_xprt *new;

	if (args->addrlen > sizeof(xprt->addr)) {
		dprintk("RPC: xs_setup_xprt: address too large\n");
		return ERR_PTR(-EBADF);
	}

	xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size,
			max_slot_table_size);
	if (xprt == NULL) {
		dprintk("RPC: xs_setup_xprt: couldn't allocate "
				"rpc_xprt\n");
		return ERR_PTR(-ENOMEM);
	}

	new = container_of(xprt, struct sock_xprt, xprt);
	mutex_init(&new->recv_mutex);
	memcpy(&xprt->addr, args->dstaddr, args->addrlen);
	xprt->addrlen = args->addrlen;
	if (args->srcaddr)
		memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
	else {
		int err;
		err = xs_init_anyaddr(args->dstaddr->sa_family,
					(struct sockaddr *)&new->srcaddr);
		if (err != 0) {
			xprt_free(xprt);
			return ERR_PTR(err);
		}
	}

	return xprt;
}

static const struct rpc_timeout xs_local_default_timeout = {
	.to_initval = 10 * HZ,
	.to_maxval = 10 * HZ,
	.to_retries = 2,
};

/**
 * xs_setup_local - Set up transport to use an AF_LOCAL socket
 * @args: rpc transport creation arguments
 *
 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
 */
static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
{
	struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
	struct sock_xprt *transport;
	struct rpc_xprt *xprt;
	struct rpc_xprt *ret;

	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
			xprt_max_tcp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = 0;
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;

	xprt->bind_timeout = XS_BIND_TO;
	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

	xprt->ops = &xs_local_ops;
	xprt->timeout = &xs_local_default_timeout;

	INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
	INIT_WORK(&transport->error_worker, xs_error_handle);
	INIT_DELAYED_WORK(&transport->connect_worker, xs_dummy_setup_socket);

	switch (sun->sun_family) {
	case AF_LOCAL:
		if (sun->sun_path[0] != '/') {
			dprintk("RPC: bad AF_LOCAL address: %s\n",
					sun->sun_path);
			ret = ERR_PTR(-EINVAL);
			goto out_err;
		}
		xprt_set_bound(xprt);
		xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
		ret = ERR_PTR(xs_local_setup_socket(transport));
		if (ret)
			goto out_err;
		break;
	default:
		ret = ERR_PTR(-EAFNOSUPPORT);
		goto out_err;
	}

	dprintk("RPC: set up xprt to %s via AF_LOCAL\n",
			xprt->address_strings[RPC_DISPLAY_ADDR]);

	if (try_module_get(THIS_MODULE))
		return xprt;
	ret = ERR_PTR(-EINVAL);
out_err:
	xs_xprt_free(xprt);
	return ret;
}

static const struct rpc_timeout xs_udp_default_timeout = {
	.to_initval = 5 * HZ,
	.to_maxval = 30 * HZ,
	.to_increment = 5 * HZ,
	.to_retries = 5,
};

/**
 * xs_setup_udp - Set up transport to use a UDP socket
 * @args: rpc transport creation arguments
 *
 */
static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
{
	struct sockaddr *addr = args->dstaddr;
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;
	struct rpc_xprt *ret;

	xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries,
			xprt_udp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = IPPROTO_UDP;
	/* XXX: header size can vary due to auth type, IPv6, etc. */
	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);

	xprt->bind_timeout = XS_BIND_TO;
	xprt->reestablish_timeout = XS_UDP_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

	xprt->ops = &xs_udp_ops;

	xprt->timeout = &xs_udp_default_timeout;

	INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn);
	INIT_WORK(&transport->error_worker, xs_error_handle);
	INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket);

	switch (addr->sa_family) {
	case AF_INET:
		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
			xprt_set_bound(xprt);

		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
		break;
	case AF_INET6:
		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
			xprt_set_bound(xprt);

		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
		break;
	default:
		ret = ERR_PTR(-EAFNOSUPPORT);
		goto out_err;
	}

	if (xprt_bound(xprt))
		dprintk("RPC: set up xprt to %s (port %s) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PORT],
				xprt->address_strings[RPC_DISPLAY_PROTO]);
	else
		dprintk("RPC: set up xprt to %s (autobind) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PROTO]);

	if (try_module_get(THIS_MODULE))
		return xprt;
	ret = ERR_PTR(-EINVAL);
out_err:
	xs_xprt_free(xprt);
	return ret;
}

static const struct rpc_timeout xs_tcp_default_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
	.to_retries = 2,
};

/**
 * xs_setup_tcp - Set up transport to use a TCP socket
 * @args: rpc transport creation arguments
 *
 */
static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
{
	struct sockaddr *addr = args->dstaddr;
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;
	struct rpc_xprt *ret;
	unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;

	if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
		max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;

	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
			max_slot_table_size);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = IPPROTO_TCP;
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;

	xprt->bind_timeout = XS_BIND_TO;
	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

	xprt->ops = &xs_tcp_ops;
	xprt->timeout = &xs_tcp_default_timeout;

	xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
	xprt->connect_timeout = xprt->timeout->to_initval *
		(xprt->timeout->to_retries + 1);

	INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
	INIT_WORK(&transport->error_worker, xs_error_handle);
	INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);

	switch (addr->sa_family) {
	case AF_INET:
		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
			xprt_set_bound(xprt);

		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
		break;
	case AF_INET6:
		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
			xprt_set_bound(xprt);

		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
		break;
	default:
		ret = ERR_PTR(-EAFNOSUPPORT);
		goto out_err;
	}

	if (xprt_bound(xprt))
		dprintk("RPC: set up xprt to %s (port %s) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PORT],
				xprt->address_strings[RPC_DISPLAY_PROTO]);
	else
		dprintk("RPC: set up xprt to %s (autobind) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PROTO]);

	if (try_module_get(THIS_MODULE))
		return xprt;
	ret = ERR_PTR(-EINVAL);
out_err:
	xs_xprt_free(xprt);
	return ret;
}

/**
 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
 * @args: rpc transport creation arguments
 *
 */
static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
{
	struct sockaddr *addr = args->dstaddr;
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;
	struct svc_sock *bc_sock;
	struct rpc_xprt *ret;

	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
			xprt_tcp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = IPPROTO_TCP;
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
	xprt->timeout = &xs_tcp_default_timeout;

	/* backchannel */
	xprt_set_bound(xprt);
	xprt->bind_timeout = 0;
	xprt->reestablish_timeout = 0;
	xprt->idle_timeout = 0;

	xprt->ops = &bc_tcp_ops;

	switch (addr->sa_family) {
	case AF_INET:
		xs_format_peer_addresses(xprt, "tcp",
					 RPCBIND_NETID_TCP);
		break;
	case AF_INET6:
		xs_format_peer_addresses(xprt, "tcp",
					 RPCBIND_NETID_TCP6);
		break;
	default:
		ret = ERR_PTR(-EAFNOSUPPORT);
		goto out_err;
	}

	dprintk("RPC: set up xprt to %s (port %s) via %s\n",
			xprt->address_strings[RPC_DISPLAY_ADDR],
			xprt->address_strings[RPC_DISPLAY_PORT],
			xprt->address_strings[RPC_DISPLAY_PROTO]);

	/*
	 * Once we've associated a backchannel xprt with a connection,
	 * we want to keep it around as long as the connection lasts,
	 * in case we need to start using it for a backchannel again;
	 * this reference won't be dropped until bc_xprt is destroyed.
	 */
	xprt_get(xprt);
	args->bc_xprt->xpt_bc_xprt = xprt;
	xprt->bc_xprt = args->bc_xprt;
	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
	transport->sock = bc_sock->sk_sock;
	transport->inet = bc_sock->sk_sk;

	/*
	 * Since we don't want connections for the backchannel, we set
	 * the xprt status to connected
	 */
	xprt_set_connected(xprt);

	if (try_module_get(THIS_MODULE))
		return xprt;

	args->bc_xprt->xpt_bc_xprt = NULL;
	args->bc_xprt->xpt_bc_xps = NULL;
	xprt_put(xprt);
	ret = ERR_PTR(-EINVAL);
out_err:
	xs_xprt_free(xprt);
	return ret;
}

static struct xprt_class	xs_local_transport = {
	.list		= LIST_HEAD_INIT(xs_local_transport.list),
	.name		= "named UNIX socket",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_LOCAL,
	.setup		= xs_setup_local,
};

static struct xprt_class	xs_udp_transport = {
	.list		= LIST_HEAD_INIT(xs_udp_transport.list),
	.name		= "udp",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_UDP,
	.setup		= xs_setup_udp,
};

static struct xprt_class	xs_tcp_transport = {
	.list		= LIST_HEAD_INIT(xs_tcp_transport.list),
	.name		= "tcp",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_TCP,
	.setup		= xs_setup_tcp,
};

static struct xprt_class	xs_bc_tcp_transport = {
	.list		= LIST_HEAD_INIT(xs_bc_tcp_transport.list),
	.name		= "tcp NFSv4.1 backchannel",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_BC_TCP,
	.setup		= xs_setup_bc_tcp,
};

/**
 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
 *
 */
int init_socket_xprt(void)
{
	if (!sunrpc_table_header)
		sunrpc_table_header = register_sysctl_table(sunrpc_table);

	xprt_register_transport(&xs_local_transport);
	xprt_register_transport(&xs_udp_transport);
	xprt_register_transport(&xs_tcp_transport);
	xprt_register_transport(&xs_bc_tcp_transport);

	return 0;
}

/**
 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
 *
 */
void cleanup_socket_xprt(void)
{
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);
		sunrpc_table_header = NULL;
	}

	xprt_unregister_transport(&xs_local_transport);
	xprt_unregister_transport(&xs_udp_transport);
	xprt_unregister_transport(&xs_tcp_transport);
	xprt_unregister_transport(&xs_bc_tcp_transport);
}

static int param_set_uint_minmax(const char *val,
		const struct kernel_param *kp,
		unsigned int min, unsigned int max)
{
	unsigned int num;
	int ret;

	if (!val)
		return -EINVAL;
	ret = kstrtouint(val, 0, &num);
	if (ret)
		return ret;
	if (num < min || num > max)
		return -EINVAL;
	*((unsigned int *)kp->arg) = num;
	return 0;
}

static int param_set_portnr(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp,
			RPC_MIN_RESVPORT,
			RPC_MAX_RESVPORT);
}

static const struct kernel_param_ops param_ops_portnr = {
	.set = param_set_portnr,
	.get = param_get_uint,
};

#define param_check_portnr(name, p) \
	__param_check(name, p, unsigned int);

module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
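
/*
 * Usage note (sketch): because these parameters are mode 0644 they can
 * be changed at runtime, e.g.
 *
 *	echo 800 > /sys/module/sunrpc/parameters/min_resvport
 *
 * or set at boot/module-load time with "sunrpc.min_resvport=800".
 */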

static int param_set_slot_table_size(const char *val,
				     const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp,
			RPC_MIN_SLOT_TABLE,
			RPC_MAX_SLOT_TABLE);
}

static const struct kernel_param_ops param_ops_slot_table_size = {
	.set = param_set_slot_table_size,
	.get = param_get_uint,
};

#define param_check_slot_table_size(name, p) \
	__param_check(name, p, unsigned int);

static int param_set_max_slot_table_size(const char *val,
					 const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp,
			RPC_MIN_SLOT_TABLE,
			RPC_MAX_SLOT_TABLE_LIMIT);
}

static const struct kernel_param_ops param_ops_max_slot_table_size = {
	.set = param_set_max_slot_table_size,
	.get = param_get_uint,
};

#define param_check_max_slot_table_size(name, p) \
	__param_check(name, p, unsigned int);

module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
		   slot_table_size, 0644);
module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries,
		   max_slot_table_size, 0644);
module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
		   slot_table_size, 0644);
1/*
2 * linux/net/sunrpc/xprtsock.c
3 *
4 * Client-side transport implementation for sockets.
5 *
6 * TCP callback races fixes (C) 1998 Red Hat
7 * TCP send fixes (C) 1998 Red Hat
8 * TCP NFS related read + write fixes
9 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
10 *
11 * Rewrite of larges part of the code in order to stabilize TCP stuff.
12 * Fix behaviour when socket buffer is full.
13 * (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
14 *
15 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
16 *
17 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
18 * <gilles.quillard@bull.net>
19 */
20
21#include <linux/types.h>
22#include <linux/string.h>
23#include <linux/slab.h>
24#include <linux/module.h>
25#include <linux/capability.h>
26#include <linux/pagemap.h>
27#include <linux/errno.h>
28#include <linux/socket.h>
29#include <linux/in.h>
30#include <linux/net.h>
31#include <linux/mm.h>
32#include <linux/un.h>
33#include <linux/udp.h>
34#include <linux/tcp.h>
35#include <linux/sunrpc/clnt.h>
36#include <linux/sunrpc/sched.h>
37#include <linux/sunrpc/svcsock.h>
38#include <linux/sunrpc/xprtsock.h>
39#include <linux/file.h>
40#ifdef CONFIG_SUNRPC_BACKCHANNEL
41#include <linux/sunrpc/bc_xprt.h>
42#endif
43
44#include <net/sock.h>
45#include <net/checksum.h>
46#include <net/udp.h>
47#include <net/tcp.h>
48
49#include "sunrpc.h"
50
51static void xs_close(struct rpc_xprt *xprt);
52
53/*
54 * xprtsock tunables
55 */
56static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
57static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
58static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;
59
60static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
61static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
62
63#define XS_TCP_LINGER_TO (15U * HZ)
64static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
65
66/*
67 * We can register our own files under /proc/sys/sunrpc by
68 * calling register_sysctl_table() again. The files in that
69 * directory become the union of all files registered there.
70 *
71 * We simply need to make sure that we don't collide with
72 * someone else's file names!
73 */
74
75#ifdef RPC_DEBUG
76
77static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
78static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
79static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
80static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
81static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
82
83static struct ctl_table_header *sunrpc_table_header;
84
85/*
86 * FIXME: changing the UDP slot table size should also resize the UDP
87 * socket buffers for existing UDP transports
88 */
89static ctl_table xs_tunables_table[] = {
90 {
91 .procname = "udp_slot_table_entries",
92 .data = &xprt_udp_slot_table_entries,
93 .maxlen = sizeof(unsigned int),
94 .mode = 0644,
95 .proc_handler = proc_dointvec_minmax,
96 .extra1 = &min_slot_table_size,
97 .extra2 = &max_slot_table_size
98 },
99 {
100 .procname = "tcp_slot_table_entries",
101 .data = &xprt_tcp_slot_table_entries,
102 .maxlen = sizeof(unsigned int),
103 .mode = 0644,
104 .proc_handler = proc_dointvec_minmax,
105 .extra1 = &min_slot_table_size,
106 .extra2 = &max_slot_table_size
107 },
108 {
109 .procname = "tcp_max_slot_table_entries",
110 .data = &xprt_max_tcp_slot_table_entries,
111 .maxlen = sizeof(unsigned int),
112 .mode = 0644,
113 .proc_handler = proc_dointvec_minmax,
114 .extra1 = &min_slot_table_size,
115 .extra2 = &max_tcp_slot_table_limit
116 },
117 {
118 .procname = "min_resvport",
119 .data = &xprt_min_resvport,
120 .maxlen = sizeof(unsigned int),
121 .mode = 0644,
122 .proc_handler = proc_dointvec_minmax,
123 .extra1 = &xprt_min_resvport_limit,
124 .extra2 = &xprt_max_resvport_limit
125 },
126 {
127 .procname = "max_resvport",
128 .data = &xprt_max_resvport,
129 .maxlen = sizeof(unsigned int),
130 .mode = 0644,
131 .proc_handler = proc_dointvec_minmax,
132 .extra1 = &xprt_min_resvport_limit,
133 .extra2 = &xprt_max_resvport_limit
134 },
135 {
136 .procname = "tcp_fin_timeout",
137 .data = &xs_tcp_fin_timeout,
138 .maxlen = sizeof(xs_tcp_fin_timeout),
139 .mode = 0644,
140 .proc_handler = proc_dointvec_jiffies,
141 },
142 { },
143};
144
145static ctl_table sunrpc_table[] = {
146 {
147 .procname = "sunrpc",
148 .mode = 0555,
149 .child = xs_tunables_table
150 },
151 { },
152};
153
154#endif
155
156/*
157 * Wait duration for a reply from the RPC portmapper.
158 */
159#define XS_BIND_TO (60U * HZ)
160
161/*
162 * Delay if a UDP socket connect error occurs. This is most likely some
163 * kind of resource problem on the local host.
164 */
165#define XS_UDP_REEST_TO (2U * HZ)
166
167/*
168 * The reestablish timeout allows clients to delay for a bit before attempting
169 * to reconnect to a server that just dropped our connection.
170 *
171 * We implement an exponential backoff when trying to reestablish a TCP
172 * transport connection with the server. Some servers like to drop a TCP
173 * connection when they are overworked, so we start with a short timeout and
174 * increase over time if the server is down or not responding.
175 */
176#define XS_TCP_INIT_REEST_TO (3U * HZ)
177#define XS_TCP_MAX_REEST_TO (5U * 60 * HZ)
178
179/*
180 * TCP idle timeout; client drops the transport socket if it is idle
181 * for this long. Note that we also timeout UDP sockets to prevent
182 * holding port numbers when there is no RPC traffic.
183 */
184#define XS_IDLE_DISC_TO (5U * 60 * HZ)
185
186#ifdef RPC_DEBUG
187# undef RPC_DEBUG_DATA
188# define RPCDBG_FACILITY RPCDBG_TRANS
189#endif
190
191#ifdef RPC_DEBUG_DATA
192static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
193{
194 u8 *buf = (u8 *) packet;
195 int j;
196
197 dprintk("RPC: %s\n", msg);
198 for (j = 0; j < count && j < 128; j += 4) {
199 if (!(j & 31)) {
200 if (j)
201 dprintk("\n");
202 dprintk("0x%04x ", j);
203 }
204 dprintk("%02x%02x%02x%02x ",
205 buf[j], buf[j+1], buf[j+2], buf[j+3]);
206 }
207 dprintk("\n");
208}
209#else
210static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
211{
212 /* NOP */
213}
214#endif
215
216struct sock_xprt {
217 struct rpc_xprt xprt;
218
219 /*
220 * Network layer
221 */
222 struct socket * sock;
223 struct sock * inet;
224
225 /*
226 * State of TCP reply receive
227 */
228 __be32 tcp_fraghdr,
229 tcp_xid,
230 tcp_calldir;
231
232 u32 tcp_offset,
233 tcp_reclen;
234
235 unsigned long tcp_copied,
236 tcp_flags;
237
238 /*
239 * Connection of transports
240 */
241 struct delayed_work connect_worker;
242 struct sockaddr_storage srcaddr;
243 unsigned short srcport;
244
245 /*
246 * UDP socket buffer size parameters
247 */
248 size_t rcvsize,
249 sndsize;
250
251 /*
252 * Saved socket callback addresses
253 */
254 void (*old_data_ready)(struct sock *, int);
255 void (*old_state_change)(struct sock *);
256 void (*old_write_space)(struct sock *);
257 void (*old_error_report)(struct sock *);
258};
259
260/*
261 * TCP receive state flags
262 */
263#define TCP_RCV_LAST_FRAG (1UL << 0)
264#define TCP_RCV_COPY_FRAGHDR (1UL << 1)
265#define TCP_RCV_COPY_XID (1UL << 2)
266#define TCP_RCV_COPY_DATA (1UL << 3)
267#define TCP_RCV_READ_CALLDIR (1UL << 4)
268#define TCP_RCV_COPY_CALLDIR (1UL << 5)
269
270/*
271 * TCP RPC flags
272 */
273#define TCP_RPC_REPLY (1UL << 6)
274
275static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
276{
277 return (struct sockaddr *) &xprt->addr;
278}
279
280static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
281{
282 return (struct sockaddr_un *) &xprt->addr;
283}
284
285static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
286{
287 return (struct sockaddr_in *) &xprt->addr;
288}
289
290static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
291{
292 return (struct sockaddr_in6 *) &xprt->addr;
293}
294
295static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
296{
297 struct sockaddr *sap = xs_addr(xprt);
298 struct sockaddr_in6 *sin6;
299 struct sockaddr_in *sin;
300 struct sockaddr_un *sun;
301 char buf[128];
302
303 switch (sap->sa_family) {
304 case AF_LOCAL:
305 sun = xs_addr_un(xprt);
306 strlcpy(buf, sun->sun_path, sizeof(buf));
307 xprt->address_strings[RPC_DISPLAY_ADDR] =
308 kstrdup(buf, GFP_KERNEL);
309 break;
310 case AF_INET:
311 (void)rpc_ntop(sap, buf, sizeof(buf));
312 xprt->address_strings[RPC_DISPLAY_ADDR] =
313 kstrdup(buf, GFP_KERNEL);
314 sin = xs_addr_in(xprt);
315 snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
316 break;
317 case AF_INET6:
318 (void)rpc_ntop(sap, buf, sizeof(buf));
319 xprt->address_strings[RPC_DISPLAY_ADDR] =
320 kstrdup(buf, GFP_KERNEL);
321 sin6 = xs_addr_in6(xprt);
322 snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
323 break;
324 default:
325 BUG();
326 }
327
328 xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
329}
330
331static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
332{
333 struct sockaddr *sap = xs_addr(xprt);
334 char buf[128];
335
336 snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
337 xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
338
339 snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
340 xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
341}
342
343static void xs_format_peer_addresses(struct rpc_xprt *xprt,
344 const char *protocol,
345 const char *netid)
346{
347 xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
348 xprt->address_strings[RPC_DISPLAY_NETID] = netid;
349 xs_format_common_peer_addresses(xprt);
350 xs_format_common_peer_ports(xprt);
351}
352
353static void xs_update_peer_port(struct rpc_xprt *xprt)
354{
355 kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
356 kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
357
358 xs_format_common_peer_ports(xprt);
359}
360
361static void xs_free_peer_addresses(struct rpc_xprt *xprt)
362{
363 unsigned int i;
364
365 for (i = 0; i < RPC_DISPLAY_MAX; i++)
366 switch (i) {
367 case RPC_DISPLAY_PROTO:
368 case RPC_DISPLAY_NETID:
369 continue;
370 default:
371 kfree(xprt->address_strings[i]);
372 }
373}
374
375#define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
376
377static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
378{
379 struct msghdr msg = {
380 .msg_name = addr,
381 .msg_namelen = addrlen,
382 .msg_flags = XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
383 };
384 struct kvec iov = {
385 .iov_base = vec->iov_base + base,
386 .iov_len = vec->iov_len - base,
387 };
388
389 if (iov.iov_len != 0)
390 return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
391 return kernel_sendmsg(sock, &msg, NULL, 0, 0);
392}
393
394static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more)
395{
396 struct page **ppage;
397 unsigned int remainder;
398 int err, sent = 0;
399
400 remainder = xdr->page_len - base;
401 base += xdr->page_base;
402 ppage = xdr->pages + (base >> PAGE_SHIFT);
403 base &= ~PAGE_MASK;
404 for(;;) {
405 unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
406 int flags = XS_SENDMSG_FLAGS;
407
408 remainder -= len;
409 if (remainder != 0 || more)
410 flags |= MSG_MORE;
411 err = sock->ops->sendpage(sock, *ppage, base, len, flags);
412 if (remainder == 0 || err != len)
413 break;
414 sent += err;
415 ppage++;
416 base = 0;
417 }
418 if (sent == 0)
419 return err;
420 if (err > 0)
421 sent += err;
422 return sent;
423}
424
425/**
426 * xs_sendpages - write pages directly to a socket
427 * @sock: socket to send on
428 * @addr: UDP only -- address of destination
429 * @addrlen: UDP only -- length of destination address
430 * @xdr: buffer containing this request
431 * @base: starting position in the buffer
432 *
433 */
434static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
435{
436 unsigned int remainder = xdr->len - base;
437 int err, sent = 0;
438
439 if (unlikely(!sock))
440 return -ENOTSOCK;
441
442 clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
443 if (base != 0) {
444 addr = NULL;
445 addrlen = 0;
446 }
447
448 if (base < xdr->head[0].iov_len || addr != NULL) {
449 unsigned int len = xdr->head[0].iov_len - base;
450 remainder -= len;
451 err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
452 if (remainder == 0 || err != len)
453 goto out;
454 sent += err;
455 base = 0;
456 } else
457 base -= xdr->head[0].iov_len;
458
459 if (base < xdr->page_len) {
460 unsigned int len = xdr->page_len - base;
461 remainder -= len;
462 err = xs_send_pagedata(sock, xdr, base, remainder != 0);
463 if (remainder == 0 || err != len)
464 goto out;
465 sent += err;
466 base = 0;
467 } else
468 base -= xdr->page_len;
469
470 if (base >= xdr->tail[0].iov_len)
471 return sent;
472 err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
473out:
474 if (sent == 0)
475 return err;
476 if (err > 0)
477 sent += err;
478 return sent;
479}
480
481static void xs_nospace_callback(struct rpc_task *task)
482{
483 struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
484
485 transport->inet->sk_write_pending--;
486 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
487}
488
489/**
490 * xs_nospace - place task on wait queue if transmit was incomplete
491 * @task: task to put to sleep
492 *
493 */
494static int xs_nospace(struct rpc_task *task)
495{
496 struct rpc_rqst *req = task->tk_rqstp;
497 struct rpc_xprt *xprt = req->rq_xprt;
498 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
499 int ret = -EAGAIN;
500
501 dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
502 task->tk_pid, req->rq_slen - req->rq_bytes_sent,
503 req->rq_slen);
504
505 /* Protect against races with write_space */
506 spin_lock_bh(&xprt->transport_lock);
507
508 /* Don't race with disconnect */
509 if (xprt_connected(xprt)) {
510 if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
511 /*
512 * Notify TCP that we're limited by the application
513 * window size
514 */
515 set_bit(SOCK_NOSPACE, &transport->sock->flags);
516 transport->inet->sk_write_pending++;
517 /* ...and wait for more buffer space */
518 xprt_wait_for_buffer_space(task, xs_nospace_callback);
519 }
520 } else {
521 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
522 ret = -ENOTCONN;
523 }
524
525 spin_unlock_bh(&xprt->transport_lock);
526 return ret;
527}
528
529/*
530 * Construct a stream transport record marker in @buf.
531 */
532static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
533{
534 u32 reclen = buf->len - sizeof(rpc_fraghdr);
535 rpc_fraghdr *base = buf->head[0].iov_base;
536 *base = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | reclen);
537}
538
539/**
540 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
541 * @task: RPC task that manages the state of an RPC request
542 *
543 * Return values:
544 * 0: The request has been sent
545 * EAGAIN: The socket was blocked, please call again later to
546 * complete the request
547 * ENOTCONN: Caller needs to invoke connect logic then call again
548 * other: Some other error occured, the request was not sent
549 */
550static int xs_local_send_request(struct rpc_task *task)
551{
552 struct rpc_rqst *req = task->tk_rqstp;
553 struct rpc_xprt *xprt = req->rq_xprt;
554 struct sock_xprt *transport =
555 container_of(xprt, struct sock_xprt, xprt);
556 struct xdr_buf *xdr = &req->rq_snd_buf;
557 int status;
558
559 xs_encode_stream_record_marker(&req->rq_snd_buf);
560
561 xs_pktdump("packet data:",
562 req->rq_svec->iov_base, req->rq_svec->iov_len);
563
564 status = xs_sendpages(transport->sock, NULL, 0,
565 xdr, req->rq_bytes_sent);
566 dprintk("RPC: %s(%u) = %d\n",
567 __func__, xdr->len - req->rq_bytes_sent, status);
568 if (likely(status >= 0)) {
569 req->rq_bytes_sent += status;
570 req->rq_xmit_bytes_sent += status;
571 if (likely(req->rq_bytes_sent >= req->rq_slen)) {
572 req->rq_bytes_sent = 0;
573 return 0;
574 }
575 status = -EAGAIN;
576 }
577
578 switch (status) {
579 case -EAGAIN:
580 status = xs_nospace(task);
581 break;
582 default:
583 dprintk("RPC: sendmsg returned unrecognized error %d\n",
584 -status);
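		/* fall through */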
585 case -EPIPE:
586 xs_close(xprt);
587 status = -ENOTCONN;
588 }
589
590 return status;
591}
592
593/**
594 * xs_udp_send_request - write an RPC request to a UDP socket
595 * @task: address of RPC task that manages the state of an RPC request
596 *
597 * Return values:
598 * 0: The request has been sent
599 * EAGAIN: The socket was blocked, please call again later to
600 * complete the request
601 * ENOTCONN: Caller needs to invoke connect logic then call again
602 * other: Some other error occurred, the request was not sent
603 */
604static int xs_udp_send_request(struct rpc_task *task)
605{
606 struct rpc_rqst *req = task->tk_rqstp;
607 struct rpc_xprt *xprt = req->rq_xprt;
608 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
609 struct xdr_buf *xdr = &req->rq_snd_buf;
610 int status;
611
612 xs_pktdump("packet data:",
613 req->rq_svec->iov_base,
614 req->rq_svec->iov_len);
615
616 if (!xprt_bound(xprt))
617 return -ENOTCONN;
618 status = xs_sendpages(transport->sock,
619 xs_addr(xprt),
620 xprt->addrlen, xdr,
621 req->rq_bytes_sent);
622
623 dprintk("RPC: xs_udp_send_request(%u) = %d\n",
624 xdr->len - req->rq_bytes_sent, status);
625
626 if (status >= 0) {
627 req->rq_xmit_bytes_sent += status;
628 if (status >= req->rq_slen)
629 return 0;
630 /* Still some bytes left; set up for a retry later. */
631 status = -EAGAIN;
632 }
633
634 switch (status) {
635 case -ENOTSOCK:
636 status = -ENOTCONN;
637 /* Should we call xs_close() here? */
638 break;
639 case -EAGAIN:
640 status = xs_nospace(task);
641 break;
642 default:
643 dprintk("RPC: sendmsg returned unrecognized error %d\n",
644 -status);
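		/* fall through */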
645 case -ENETUNREACH:
646 case -EPIPE:
647 case -ECONNREFUSED:
648 /* When the server has died, an ICMP port unreachable message
649 * prompts ECONNREFUSED. */
650 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
651 }
652
653 return status;
654}
655
656/**
657 * xs_tcp_shutdown - gracefully shut down a TCP socket
658 * @xprt: transport
659 *
660 * Initiates a graceful shutdown of the TCP socket by calling the
 * equivalent of shutdown(SHUT_WR).
662 */
663static void xs_tcp_shutdown(struct rpc_xprt *xprt)
664{
665 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
666 struct socket *sock = transport->sock;
667
668 if (sock != NULL)
669 kernel_sock_shutdown(sock, SHUT_WR);
670}
671
672/**
673 * xs_tcp_send_request - write an RPC request to a TCP socket
674 * @task: address of RPC task that manages the state of an RPC request
675 *
676 * Return values:
677 * 0: The request has been sent
678 * EAGAIN: The socket was blocked, please call again later to
679 * complete the request
680 * ENOTCONN: Caller needs to invoke connect logic then call again
681 * other: Some other error occurred, the request was not sent
682 *
683 * XXX: In the case of soft timeouts, should we eventually give up
684 * if sendmsg is not able to make progress?
685 */
686static int xs_tcp_send_request(struct rpc_task *task)
687{
688 struct rpc_rqst *req = task->tk_rqstp;
689 struct rpc_xprt *xprt = req->rq_xprt;
690 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
691 struct xdr_buf *xdr = &req->rq_snd_buf;
692 int status;
693
694 xs_encode_stream_record_marker(&req->rq_snd_buf);
695
696 xs_pktdump("packet data:",
697 req->rq_svec->iov_base,
698 req->rq_svec->iov_len);
699
700 /* Continue transmitting the packet/record. We must be careful
701 * to cope with writespace callbacks arriving _after_ we have
702 * called sendmsg(). */
703 while (1) {
704 status = xs_sendpages(transport->sock,
705 NULL, 0, xdr, req->rq_bytes_sent);
706
707 dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
708 xdr->len - req->rq_bytes_sent, status);
709
710 if (unlikely(status < 0))
711 break;
712
713 /* If we've sent the entire packet, immediately
714 * reset the count of bytes sent. */
715 req->rq_bytes_sent += status;
716 req->rq_xmit_bytes_sent += status;
717 if (likely(req->rq_bytes_sent >= req->rq_slen)) {
718 req->rq_bytes_sent = 0;
719 return 0;
720 }
721
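		/* A zero-byte send means we made no progress: back off
		 * and wait for the socket to report write space. */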
722 if (status != 0)
723 continue;
724 status = -EAGAIN;
725 break;
726 }
727
728 switch (status) {
729 case -ENOTSOCK:
730 status = -ENOTCONN;
731 /* Should we call xs_close() here? */
732 break;
733 case -EAGAIN:
734 status = xs_nospace(task);
735 break;
736 default:
737 dprintk("RPC: sendmsg returned unrecognized error %d\n",
738 -status);
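		/* fall through */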
739 case -ECONNRESET:
740 case -EPIPE:
741 xs_tcp_shutdown(xprt);
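		/* fall through */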
742 case -ECONNREFUSED:
743 case -ENOTCONN:
744 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
745 }
746
747 return status;
748}
749
750/**
751 * xs_tcp_release_xprt - clean up after a tcp transmission
752 * @xprt: transport
753 * @task: rpc task
754 *
755 * This cleans up if an error causes us to abort the transmission of a request.
756 * In this case, the socket may need to be reset in order to avoid confusing
757 * the server.
758 */
759static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
760{
761 struct rpc_rqst *req;
762
763 if (task != xprt->snd_task)
764 return;
765 if (task == NULL)
766 goto out_release;
767 req = task->tk_rqstp;
768 if (req == NULL)
769 goto out_release;
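	/*
	 * A request that is neither untouched nor fully transmitted
	 * was cut off mid-record; schedule a connection reset below
	 * so the server isn't left waiting for the rest of the record.
	 */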
770 if (req->rq_bytes_sent == 0)
771 goto out_release;
772 if (req->rq_bytes_sent == req->rq_snd_buf.len)
773 goto out_release;
774 set_bit(XPRT_CLOSE_WAIT, &task->tk_xprt->state);
775out_release:
776 xprt_release_xprt(xprt, task);
777}
778
779static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
780{
781 transport->old_data_ready = sk->sk_data_ready;
782 transport->old_state_change = sk->sk_state_change;
783 transport->old_write_space = sk->sk_write_space;
784 transport->old_error_report = sk->sk_error_report;
785}
786
787static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
788{
789 sk->sk_data_ready = transport->old_data_ready;
790 sk->sk_state_change = transport->old_state_change;
791 sk->sk_write_space = transport->old_write_space;
792 sk->sk_error_report = transport->old_error_report;
793}
794
795static void xs_reset_transport(struct sock_xprt *transport)
796{
797 struct socket *sock = transport->sock;
798 struct sock *sk = transport->inet;
799
800 if (sk == NULL)
801 return;
802
803 transport->srcport = 0;
804
805 write_lock_bh(&sk->sk_callback_lock);
806 transport->inet = NULL;
807 transport->sock = NULL;
808
809 sk->sk_user_data = NULL;
810
811 xs_restore_old_callbacks(transport, sk);
812 write_unlock_bh(&sk->sk_callback_lock);
813
814 sk->sk_no_check = 0;
815
816 sock_release(sock);
817}
818
819/**
820 * xs_close - close a socket
821 * @xprt: transport
822 *
 * This is used when all requests are complete; i.e., no DRC state that
 * we want to preserve remains on the server.
825 *
826 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
827 * xs_reset_transport() zeroing the socket from underneath a writer.
828 */
829static void xs_close(struct rpc_xprt *xprt)
830{
831 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
832
833 dprintk("RPC: xs_close xprt %p\n", xprt);
834
835 xs_reset_transport(transport);
836 xprt->reestablish_timeout = 0;
837
838 smp_mb__before_clear_bit();
839 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
840 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
841 clear_bit(XPRT_CLOSING, &xprt->state);
842 smp_mb__after_clear_bit();
843 xprt_disconnect_done(xprt);
844}
845
846static void xs_tcp_close(struct rpc_xprt *xprt)
847{
848 if (test_and_clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state))
849 xs_close(xprt);
850 else
851 xs_tcp_shutdown(xprt);
852}
853
854/**
855 * xs_destroy - prepare to shutdown a transport
856 * @xprt: doomed transport
857 *
858 */
859static void xs_destroy(struct rpc_xprt *xprt)
860{
861 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
862
863 dprintk("RPC: xs_destroy xprt %p\n", xprt);
864
865 cancel_delayed_work_sync(&transport->connect_worker);
866
867 xs_close(xprt);
868 xs_free_peer_addresses(xprt);
869 xprt_free(xprt);
870 module_put(THIS_MODULE);
871}
872
873static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
874{
875 return (struct rpc_xprt *) sk->sk_user_data;
876}
877
878static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
879{
880 struct xdr_skb_reader desc = {
881 .skb = skb,
882 .offset = sizeof(rpc_fraghdr),
883 .count = skb->len - sizeof(rpc_fraghdr),
884 };
885
886 if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
887 return -1;
888 if (desc.count)
889 return -1;
890 return 0;
891}
892
893/**
894 * xs_local_data_ready - "data ready" callback for AF_LOCAL sockets
895 * @sk: socket with data to read
896 * @len: how much data to read
897 *
898 * Currently this assumes we can read the whole reply in a single gulp.
899 */
900static void xs_local_data_ready(struct sock *sk, int len)
901{
902 struct rpc_task *task;
903 struct rpc_xprt *xprt;
904 struct rpc_rqst *rovr;
905 struct sk_buff *skb;
906 int err, repsize, copied;
907 u32 _xid;
908 __be32 *xp;
909
910 read_lock_bh(&sk->sk_callback_lock);
911 dprintk("RPC: %s...\n", __func__);
912 xprt = xprt_from_sock(sk);
913 if (xprt == NULL)
914 goto out;
915
916 skb = skb_recv_datagram(sk, 0, 1, &err);
917 if (skb == NULL)
918 goto out;
919
920 if (xprt->shutdown)
921 goto dropit;
922
923 repsize = skb->len - sizeof(rpc_fraghdr);
924 if (repsize < 4) {
925 dprintk("RPC: impossible RPC reply size %d\n", repsize);
926 goto dropit;
927 }
928
929 /* Copy the XID from the skb... */
930 xp = skb_header_pointer(skb, sizeof(rpc_fraghdr), sizeof(_xid), &_xid);
931 if (xp == NULL)
932 goto dropit;
933
934 /* Look up and lock the request corresponding to the given XID */
935 spin_lock(&xprt->transport_lock);
936 rovr = xprt_lookup_rqst(xprt, *xp);
937 if (!rovr)
938 goto out_unlock;
939 task = rovr->rq_task;
940
941 copied = rovr->rq_private_buf.buflen;
942 if (copied > repsize)
943 copied = repsize;
944
945 if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) {
946 dprintk("RPC: sk_buff copy failed\n");
947 goto out_unlock;
948 }
949
950 xprt_complete_rqst(task, copied);
951
952 out_unlock:
953 spin_unlock(&xprt->transport_lock);
954 dropit:
955 skb_free_datagram(sk, skb);
956 out:
957 read_unlock_bh(&sk->sk_callback_lock);
958}
959
960/**
961 * xs_udp_data_ready - "data ready" callback for UDP sockets
962 * @sk: socket with data to read
963 * @len: how much data to read
964 *
965 */
966static void xs_udp_data_ready(struct sock *sk, int len)
967{
968 struct rpc_task *task;
969 struct rpc_xprt *xprt;
970 struct rpc_rqst *rovr;
971 struct sk_buff *skb;
972 int err, repsize, copied;
973 u32 _xid;
974 __be32 *xp;
975
976 read_lock_bh(&sk->sk_callback_lock);
977 dprintk("RPC: xs_udp_data_ready...\n");
978 if (!(xprt = xprt_from_sock(sk)))
979 goto out;
980
981 if ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL)
982 goto out;
983
984 if (xprt->shutdown)
985 goto dropit;
986
987 repsize = skb->len - sizeof(struct udphdr);
988 if (repsize < 4) {
989 dprintk("RPC: impossible RPC reply size %d!\n", repsize);
990 goto dropit;
991 }
992
993 /* Copy the XID from the skb... */
994 xp = skb_header_pointer(skb, sizeof(struct udphdr),
995 sizeof(_xid), &_xid);
996 if (xp == NULL)
997 goto dropit;
998
999 /* Look up and lock the request corresponding to the given XID */
1000 spin_lock(&xprt->transport_lock);
1001 rovr = xprt_lookup_rqst(xprt, *xp);
1002 if (!rovr)
1003 goto out_unlock;
1004 task = rovr->rq_task;
1005
1006 if ((copied = rovr->rq_private_buf.buflen) > repsize)
1007 copied = repsize;
1008
1009 /* Suck it into the iovec, verify checksum if not done by hw. */
1010 if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
1011 UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS);
1012 goto out_unlock;
1013 }
1014
1015 UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
1016
1017 /* Something worked... */
1018 dst_confirm(skb_dst(skb));
1019
1020 xprt_adjust_cwnd(task, copied);
1021 xprt_complete_rqst(task, copied);
1022
1023 out_unlock:
1024 spin_unlock(&xprt->transport_lock);
1025 dropit:
1026 skb_free_datagram(sk, skb);
1027 out:
1028 read_unlock_bh(&sk->sk_callback_lock);
1029}
1030
1031static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
1032{
1033 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1034 size_t len, used;
1035 char *p;
1036
1037 p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
1038 len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
1039 used = xdr_skb_read_bits(desc, p, len);
1040 transport->tcp_offset += used;
1041 if (used != len)
1042 return;
1043
1044 transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
1045 if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
1046 transport->tcp_flags |= TCP_RCV_LAST_FRAG;
1047 else
1048 transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
1049 transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
1050
1051 transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
1052 transport->tcp_offset = 0;
1053
1054 /* Sanity check of the record length */
1055 if (unlikely(transport->tcp_reclen < 8)) {
1056 dprintk("RPC: invalid TCP record fragment length\n");
1057 xprt_force_disconnect(xprt);
1058 return;
1059 }
1060 dprintk("RPC: reading TCP record fragment of length %d\n",
1061 transport->tcp_reclen);
1062}
1063
1064static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
1065{
1066 if (transport->tcp_offset == transport->tcp_reclen) {
1067 transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
1068 transport->tcp_offset = 0;
1069 if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
1070 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1071 transport->tcp_flags |= TCP_RCV_COPY_XID;
1072 transport->tcp_copied = 0;
1073 }
1074 }
1075}
1076
1077static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
1078{
1079 size_t len, used;
1080 char *p;
1081
1082 len = sizeof(transport->tcp_xid) - transport->tcp_offset;
1083 dprintk("RPC: reading XID (%Zu bytes)\n", len);
1084 p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
1085 used = xdr_skb_read_bits(desc, p, len);
1086 transport->tcp_offset += used;
1087 if (used != len)
1088 return;
1089 transport->tcp_flags &= ~TCP_RCV_COPY_XID;
1090 transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
1091 transport->tcp_copied = 4;
1092 dprintk("RPC: reading %s XID %08x\n",
1093 (transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
1094 : "request with",
1095 ntohl(transport->tcp_xid));
1096 xs_tcp_check_fraghdr(transport);
1097}
1098
1099static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
1100 struct xdr_skb_reader *desc)
1101{
1102 size_t len, used;
1103 u32 offset;
1104 char *p;
1105
1106 /*
1107 * We want transport->tcp_offset to be 8 at the end of this routine
1108 * (4 bytes for the xid and 4 bytes for the call/reply flag).
1109 * When this function is called for the first time,
1110 * transport->tcp_offset is 4 (after having already read the xid).
1111 */
1112 offset = transport->tcp_offset - sizeof(transport->tcp_xid);
1113 len = sizeof(transport->tcp_calldir) - offset;
1114 dprintk("RPC: reading CALL/REPLY flag (%Zu bytes)\n", len);
1115 p = ((char *) &transport->tcp_calldir) + offset;
1116 used = xdr_skb_read_bits(desc, p, len);
1117 transport->tcp_offset += used;
1118 if (used != len)
1119 return;
1120 transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
1121 /*
1122 * We don't yet have the XDR buffer, so we will write the calldir
1123 * out after we get the buffer from the 'struct rpc_rqst'
1124 */
1125 switch (ntohl(transport->tcp_calldir)) {
1126 case RPC_REPLY:
1127 transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
1128 transport->tcp_flags |= TCP_RCV_COPY_DATA;
1129 transport->tcp_flags |= TCP_RPC_REPLY;
1130 break;
1131 case RPC_CALL:
1132 transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
1133 transport->tcp_flags |= TCP_RCV_COPY_DATA;
1134 transport->tcp_flags &= ~TCP_RPC_REPLY;
1135 break;
1136 default:
1137 dprintk("RPC: invalid request message type\n");
1138 xprt_force_disconnect(&transport->xprt);
1139 }
1140 xs_tcp_check_fraghdr(transport);
1141}
1142
1143static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
1144 struct xdr_skb_reader *desc,
1145 struct rpc_rqst *req)
1146{
1147 struct sock_xprt *transport =
1148 container_of(xprt, struct sock_xprt, xprt);
1149 struct xdr_buf *rcvbuf;
1150 size_t len;
1151 ssize_t r;
1152
1153 rcvbuf = &req->rq_private_buf;
1154
1155 if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) {
1156 /*
1157 * Save the RPC direction in the XDR buffer
1158 */
1159 memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
1160 &transport->tcp_calldir,
1161 sizeof(transport->tcp_calldir));
1162 transport->tcp_copied += sizeof(transport->tcp_calldir);
1163 transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
1164 }
1165
1166 len = desc->count;
1167 if (len > transport->tcp_reclen - transport->tcp_offset) {
1168 struct xdr_skb_reader my_desc;
1169
1170 len = transport->tcp_reclen - transport->tcp_offset;
1171 memcpy(&my_desc, desc, sizeof(my_desc));
1172 my_desc.count = len;
1173 r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
1174 &my_desc, xdr_skb_read_bits);
1175 desc->count -= r;
1176 desc->offset += r;
1177 } else
1178 r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
1179 desc, xdr_skb_read_bits);
1180
1181 if (r > 0) {
1182 transport->tcp_copied += r;
1183 transport->tcp_offset += r;
1184 }
1185 if (r != len) {
1186 /* Error when copying to the receive buffer,
1187 * usually because we weren't able to allocate
1188 * additional buffer pages. All we can do now
1189 * is turn off TCP_RCV_COPY_DATA, so the request
1190 * will not receive any additional updates,
1191 * and time out.
1192 * Any remaining data from this record will
1193 * be discarded.
1194 */
1195 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1196 dprintk("RPC: XID %08x truncated request\n",
1197 ntohl(transport->tcp_xid));
1198 dprintk("RPC: xprt = %p, tcp_copied = %lu, "
1199 "tcp_offset = %u, tcp_reclen = %u\n",
1200 xprt, transport->tcp_copied,
1201 transport->tcp_offset, transport->tcp_reclen);
1202 return;
1203 }
1204
1205 dprintk("RPC: XID %08x read %Zd bytes\n",
1206 ntohl(transport->tcp_xid), r);
1207 dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, "
1208 "tcp_reclen = %u\n", xprt, transport->tcp_copied,
1209 transport->tcp_offset, transport->tcp_reclen);
1210
1211 if (transport->tcp_copied == req->rq_private_buf.buflen)
1212 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1213 else if (transport->tcp_offset == transport->tcp_reclen) {
1214 if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
1215 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1216 }
1217}
1218
1219/*
1220 * Finds the request corresponding to the RPC xid and invokes the common
1221 * tcp read code to read the data.
1222 */
1223static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
1224 struct xdr_skb_reader *desc)
1225{
1226 struct sock_xprt *transport =
1227 container_of(xprt, struct sock_xprt, xprt);
1228 struct rpc_rqst *req;
1229
1230 dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid));
1231
1232 /* Find and lock the request corresponding to this xid */
1233 spin_lock(&xprt->transport_lock);
1234 req = xprt_lookup_rqst(xprt, transport->tcp_xid);
1235 if (!req) {
1236 dprintk("RPC: XID %08x request not found!\n",
1237 ntohl(transport->tcp_xid));
1238 spin_unlock(&xprt->transport_lock);
1239 return -1;
1240 }
1241
1242 xs_tcp_read_common(xprt, desc, req);
1243
1244 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
1245 xprt_complete_rqst(req->rq_task, transport->tcp_copied);
1246
1247 spin_unlock(&xprt->transport_lock);
1248 return 0;
1249}
1250
1251#if defined(CONFIG_SUNRPC_BACKCHANNEL)
1252/*
1253 * Obtains an rpc_rqst previously allocated and invokes the common
1254 * tcp read code to read the data. The result is placed in the callback
1255 * queue.
1256 * If we're unable to obtain the rpc_rqst we schedule the closing of the
1257 * connection and return -1.
1258 */
1259static inline int xs_tcp_read_callback(struct rpc_xprt *xprt,
1260 struct xdr_skb_reader *desc)
1261{
1262 struct sock_xprt *transport =
1263 container_of(xprt, struct sock_xprt, xprt);
1264 struct rpc_rqst *req;
1265
1266 req = xprt_alloc_bc_request(xprt);
1267 if (req == NULL) {
1268 printk(KERN_WARNING "Callback slot table overflowed\n");
1269 xprt_force_disconnect(xprt);
1270 return -1;
1271 }
1272
1273 req->rq_xid = transport->tcp_xid;
1274 dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid));
1275 xs_tcp_read_common(xprt, desc, req);
1276
1277 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) {
1278 struct svc_serv *bc_serv = xprt->bc_serv;
1279
1280 /*
1281 * Add callback request to callback list. The callback
1282 * service sleeps on the sv_cb_waitq waiting for new
		 * requests. Wake it up after enqueuing the request.
1285 */
1286 dprintk("RPC: add callback request to list\n");
1287 spin_lock(&bc_serv->sv_cb_lock);
1288 list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
1289 spin_unlock(&bc_serv->sv_cb_lock);
1290 wake_up(&bc_serv->sv_cb_waitq);
1291 }
1292
1293 req->rq_private_buf.len = transport->tcp_copied;
1294
1295 return 0;
1296}
1297
1298static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
1299 struct xdr_skb_reader *desc)
1300{
1301 struct sock_xprt *transport =
1302 container_of(xprt, struct sock_xprt, xprt);
1303
1304 return (transport->tcp_flags & TCP_RPC_REPLY) ?
1305 xs_tcp_read_reply(xprt, desc) :
1306 xs_tcp_read_callback(xprt, desc);
1307}
1308#else
1309static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
1310 struct xdr_skb_reader *desc)
1311{
1312 return xs_tcp_read_reply(xprt, desc);
1313}
1314#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1315
1316/*
1317 * Read data off the transport. This can be either an RPC_CALL or an
1318 * RPC_REPLY. Relay the processing to helper functions.
1319 */
1320static void xs_tcp_read_data(struct rpc_xprt *xprt,
1321 struct xdr_skb_reader *desc)
1322{
1323 struct sock_xprt *transport =
1324 container_of(xprt, struct sock_xprt, xprt);
1325
1326 if (_xs_tcp_read_data(xprt, desc) == 0)
1327 xs_tcp_check_fraghdr(transport);
1328 else {
1329 /*
1330 * The transport_lock protects the request handling.
1331 * There's no need to hold it to update the tcp_flags.
1332 */
1333 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1334 }
1335}
1336
1337static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
1338{
1339 size_t len;
1340
1341 len = transport->tcp_reclen - transport->tcp_offset;
1342 if (len > desc->count)
1343 len = desc->count;
1344 desc->count -= len;
1345 desc->offset += len;
1346 transport->tcp_offset += len;
1347 dprintk("RPC: discarded %Zu bytes\n", len);
1348 xs_tcp_check_fraghdr(transport);
1349}
1350
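/*
 * Receive state machine: each pass through the loop below consumes
 * whichever piece of the TCP record is expected next -- fragment
 * header, then XID, then call direction, then payload -- and falls
 * back to discarding any trailing bytes of the record.
 */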
1351static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
1352{
1353 struct rpc_xprt *xprt = rd_desc->arg.data;
1354 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1355 struct xdr_skb_reader desc = {
1356 .skb = skb,
1357 .offset = offset,
1358 .count = len,
1359 };
1360
1361 dprintk("RPC: xs_tcp_data_recv started\n");
1362 do {
1363 /* Read in a new fragment marker if necessary */
1364 /* Can we ever really expect to get completely empty fragments? */
1365 if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) {
1366 xs_tcp_read_fraghdr(xprt, &desc);
1367 continue;
1368 }
1369 /* Read in the xid if necessary */
1370 if (transport->tcp_flags & TCP_RCV_COPY_XID) {
1371 xs_tcp_read_xid(transport, &desc);
1372 continue;
1373 }
1374 /* Read in the call/reply flag */
1375 if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
1376 xs_tcp_read_calldir(transport, &desc);
1377 continue;
1378 }
1379 /* Read in the request data */
1380 if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
1381 xs_tcp_read_data(xprt, &desc);
1382 continue;
1383 }
1384 /* Skip over any trailing bytes on short reads */
1385 xs_tcp_read_discard(transport, &desc);
1386 } while (desc.count);
1387 dprintk("RPC: xs_tcp_data_recv done\n");
1388 return len - desc.count;
1389}
1390
1391/**
1392 * xs_tcp_data_ready - "data ready" callback for TCP sockets
1393 * @sk: socket with data to read
1394 * @bytes: how much data to read
1395 *
1396 */
1397static void xs_tcp_data_ready(struct sock *sk, int bytes)
1398{
1399 struct rpc_xprt *xprt;
1400 read_descriptor_t rd_desc;
1401 int read;
1402
1403 dprintk("RPC: xs_tcp_data_ready...\n");
1404
1405 read_lock_bh(&sk->sk_callback_lock);
1406 if (!(xprt = xprt_from_sock(sk)))
1407 goto out;
1408 if (xprt->shutdown)
1409 goto out;
1410
	/* Any data means we had a useful conversation, so
	 * we don't need to delay the next reconnect.
	 */
1414 if (xprt->reestablish_timeout)
1415 xprt->reestablish_timeout = 0;
1416
1417 /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
1418 rd_desc.arg.data = xprt;
1419 do {
1420 rd_desc.count = 65536;
1421 read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
1422 } while (read > 0);
1423out:
1424 read_unlock_bh(&sk->sk_callback_lock);
1425}
1426
1427/*
1428 * Do the equivalent of linger/linger2 handling for dealing with
1429 * broken servers that don't close the socket in a timely
1430 * fashion
1431 */
1432static void xs_tcp_schedule_linger_timeout(struct rpc_xprt *xprt,
1433 unsigned long timeout)
1434{
1435 struct sock_xprt *transport;
1436
1437 if (xprt_test_and_set_connecting(xprt))
1438 return;
1439 set_bit(XPRT_CONNECTION_ABORT, &xprt->state);
1440 transport = container_of(xprt, struct sock_xprt, xprt);
1441 queue_delayed_work(rpciod_workqueue, &transport->connect_worker,
1442 timeout);
1443}
1444
1445static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
1446{
1447 struct sock_xprt *transport;
1448
1449 transport = container_of(xprt, struct sock_xprt, xprt);
1450
1451 if (!test_bit(XPRT_CONNECTION_ABORT, &xprt->state) ||
1452 !cancel_delayed_work(&transport->connect_worker))
1453 return;
1454 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
1455 xprt_clear_connecting(xprt);
1456}
1457
1458static void xs_sock_mark_closed(struct rpc_xprt *xprt)
1459{
1460 smp_mb__before_clear_bit();
1461 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1462 clear_bit(XPRT_CLOSING, &xprt->state);
1463 smp_mb__after_clear_bit();
1464 /* Mark transport as closed and wake up all pending tasks */
1465 xprt_disconnect_done(xprt);
1466}
1467
1468/**
1469 * xs_tcp_state_change - callback to handle TCP socket state changes
1470 * @sk: socket whose state has changed
1471 *
1472 */
1473static void xs_tcp_state_change(struct sock *sk)
1474{
1475 struct rpc_xprt *xprt;
1476
1477 read_lock_bh(&sk->sk_callback_lock);
1478 if (!(xprt = xprt_from_sock(sk)))
1479 goto out;
1480 dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
1481 dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
1482 sk->sk_state, xprt_connected(xprt),
1483 sock_flag(sk, SOCK_DEAD),
1484 sock_flag(sk, SOCK_ZAPPED),
1485 sk->sk_shutdown);
1486
1487 switch (sk->sk_state) {
1488 case TCP_ESTABLISHED:
1489 spin_lock(&xprt->transport_lock);
1490 if (!xprt_test_and_set_connected(xprt)) {
1491 struct sock_xprt *transport = container_of(xprt,
1492 struct sock_xprt, xprt);
1493
1494 /* Reset TCP record info */
1495 transport->tcp_offset = 0;
1496 transport->tcp_reclen = 0;
1497 transport->tcp_copied = 0;
1498 transport->tcp_flags =
1499 TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
1500
1501 xprt_wake_pending_tasks(xprt, -EAGAIN);
1502 }
1503 spin_unlock(&xprt->transport_lock);
1504 break;
1505 case TCP_FIN_WAIT1:
1506 /* The client initiated a shutdown of the socket */
1507 xprt->connect_cookie++;
1508 xprt->reestablish_timeout = 0;
1509 set_bit(XPRT_CLOSING, &xprt->state);
1510 smp_mb__before_clear_bit();
1511 clear_bit(XPRT_CONNECTED, &xprt->state);
1512 clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1513 smp_mb__after_clear_bit();
1514 xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
1515 break;
1516 case TCP_CLOSE_WAIT:
1517 /* The server initiated a shutdown of the socket */
1518 xprt_force_disconnect(xprt);
1519 xprt->connect_cookie++;
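		/* fall through */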
1520 case TCP_CLOSING:
1521 /*
1522 * If the server closed down the connection, make sure that
1523 * we back off before reconnecting
1524 */
1525 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
1526 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
1527 break;
1528 case TCP_LAST_ACK:
1529 set_bit(XPRT_CLOSING, &xprt->state);
1530 xs_tcp_schedule_linger_timeout(xprt, xs_tcp_fin_timeout);
1531 smp_mb__before_clear_bit();
1532 clear_bit(XPRT_CONNECTED, &xprt->state);
1533 smp_mb__after_clear_bit();
1534 break;
1535 case TCP_CLOSE:
1536 xs_tcp_cancel_linger_timeout(xprt);
1537 xs_sock_mark_closed(xprt);
1538 }
1539 out:
1540 read_unlock_bh(&sk->sk_callback_lock);
1541}
1542
1543/**
1544 * xs_error_report - callback mainly for catching socket errors
1545 * @sk: socket
1546 */
1547static void xs_error_report(struct sock *sk)
1548{
1549 struct rpc_xprt *xprt;
1550
1551 read_lock_bh(&sk->sk_callback_lock);
1552 if (!(xprt = xprt_from_sock(sk)))
1553 goto out;
1554 dprintk("RPC: %s client %p...\n"
1555 "RPC: error %d\n",
1556 __func__, xprt, sk->sk_err);
1557 xprt_wake_pending_tasks(xprt, -EAGAIN);
1558out:
1559 read_unlock_bh(&sk->sk_callback_lock);
1560}
1561
1562static void xs_write_space(struct sock *sk)
1563{
1564 struct socket *sock;
1565 struct rpc_xprt *xprt;
1566
1567 if (unlikely(!(sock = sk->sk_socket)))
1568 return;
1569 clear_bit(SOCK_NOSPACE, &sock->flags);
1570
1571 if (unlikely(!(xprt = xprt_from_sock(sk))))
1572 return;
1573 if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
1574 return;
1575
1576 xprt_write_space(xprt);
1577}
1578
1579/**
1580 * xs_udp_write_space - callback invoked when socket buffer space
1581 * becomes available
1582 * @sk: socket whose state has changed
1583 *
1584 * Called when more output buffer space is available for this socket.
1585 * We try not to wake our writers until they can make "significant"
1586 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1587 * with a bunch of small requests.
1588 */
1589static void xs_udp_write_space(struct sock *sk)
1590{
1591 read_lock_bh(&sk->sk_callback_lock);
1592
1593 /* from net/core/sock.c:sock_def_write_space */
1594 if (sock_writeable(sk))
1595 xs_write_space(sk);
1596
1597 read_unlock_bh(&sk->sk_callback_lock);
1598}
1599
1600/**
1601 * xs_tcp_write_space - callback invoked when socket buffer space
1602 * becomes available
1603 * @sk: socket whose state has changed
1604 *
1605 * Called when more output buffer space is available for this socket.
1606 * We try not to wake our writers until they can make "significant"
1607 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1608 * with a bunch of small requests.
1609 */
1610static void xs_tcp_write_space(struct sock *sk)
1611{
1612 read_lock_bh(&sk->sk_callback_lock);
1613
1614 /* from net/core/stream.c:sk_stream_write_space */
1615 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
1616 xs_write_space(sk);
1617
1618 read_unlock_bh(&sk->sk_callback_lock);
1619}
1620
1621static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
1622{
1623 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1624 struct sock *sk = transport->inet;
1625
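	/*
	 * Size the socket buffer to hold two full rounds of traffic
	 * for every request slot, e.g. 16 slots * 8KB buffers -> 256KB.
	 */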
1626 if (transport->rcvsize) {
1627 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
1628 sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
1629 }
1630 if (transport->sndsize) {
1631 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1632 sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
1633 sk->sk_write_space(sk);
1634 }
1635}
1636
1637/**
1638 * xs_udp_set_buffer_size - set send and receive limits
1639 * @xprt: generic transport
1640 * @sndsize: requested size of send buffer, in bytes
1641 * @rcvsize: requested size of receive buffer, in bytes
1642 *
1643 * Set socket send and receive buffer size limits.
1644 */
1645static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
1646{
1647 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1648
1649 transport->sndsize = 0;
1650 if (sndsize)
1651 transport->sndsize = sndsize + 1024;
1652 transport->rcvsize = 0;
1653 if (rcvsize)
1654 transport->rcvsize = rcvsize + 1024;
1655
1656 xs_udp_do_set_buffer_size(xprt);
1657}
1658
1659/**
1660 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
1661 * @task: task that timed out
1662 *
1663 * Adjust the congestion window after a retransmit timeout has occurred.
1664 */
1665static void xs_udp_timer(struct rpc_task *task)
1666{
1667 xprt_adjust_cwnd(task, -ETIMEDOUT);
1668}
1669
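/*
 * Pick a random port in [xprt_min_resvport, xprt_max_resvport).
 * Note this assumes the sysctl limits keep min strictly below max;
 * a zero-length range would make the modulo below ill-defined.
 */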
1670static unsigned short xs_get_random_port(void)
1671{
1672 unsigned short range = xprt_max_resvport - xprt_min_resvport;
1673 unsigned short rand = (unsigned short) net_random() % range;
1674 return rand + xprt_min_resvport;
1675}
1676
1677/**
1678 * xs_set_port - reset the port number in the remote endpoint address
1679 * @xprt: generic transport
1680 * @port: new port number
1681 *
1682 */
1683static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
1684{
1685 dprintk("RPC: setting port for xprt %p to %u\n", xprt, port);
1686
1687 rpc_set_port(xs_addr(xprt), port);
1688 xs_update_peer_port(xprt);
1689}
1690
1691static unsigned short xs_get_srcport(struct sock_xprt *transport)
1692{
1693 unsigned short port = transport->srcport;
1694
1695 if (port == 0 && transport->xprt.resvport)
1696 port = xs_get_random_port();
1697 return port;
1698}
1699
1700static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
1701{
1702 if (transport->srcport != 0)
1703 transport->srcport = 0;
1704 if (!transport->xprt.resvport)
1705 return 0;
1706 if (port <= xprt_min_resvport || port > xprt_max_resvport)
1707 return xprt_max_resvport;
1708 return --port;
1709}
1710static int xs_bind(struct sock_xprt *transport, struct socket *sock)
1711{
1712 struct sockaddr_storage myaddr;
1713 int err, nloop = 0;
1714 unsigned short port = xs_get_srcport(transport);
1715 unsigned short last;
1716
1717 memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
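	/*
	 * Walk downward from the chosen port until bind() succeeds,
	 * wrapping back to xprt_max_resvport after passing the bottom
	 * of the range; nloop caps the search at two passes.
	 */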
1718 do {
1719 rpc_set_port((struct sockaddr *)&myaddr, port);
1720 err = kernel_bind(sock, (struct sockaddr *)&myaddr,
1721 transport->xprt.addrlen);
1722 if (port == 0)
1723 break;
1724 if (err == 0) {
1725 transport->srcport = port;
1726 break;
1727 }
1728 last = port;
1729 port = xs_next_srcport(transport, port);
1730 if (port > last)
1731 nloop++;
1732 } while (err == -EADDRINUSE && nloop != 2);
1733
1734 if (myaddr.ss_family == AF_INET)
1735 dprintk("RPC: %s %pI4:%u: %s (%d)\n", __func__,
1736 &((struct sockaddr_in *)&myaddr)->sin_addr,
1737 port, err ? "failed" : "ok", err);
1738 else
1739 dprintk("RPC: %s %pI6:%u: %s (%d)\n", __func__,
1740 &((struct sockaddr_in6 *)&myaddr)->sin6_addr,
1741 port, err ? "failed" : "ok", err);
1742 return err;
1743}
1744
1745/*
1746 * We don't support autobind on AF_LOCAL sockets
1747 */
1748static void xs_local_rpcbind(struct rpc_task *task)
1749{
1750 xprt_set_bound(task->tk_xprt);
1751}
1752
1753static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
1754{
1755}
1756
1757#ifdef CONFIG_DEBUG_LOCK_ALLOC
1758static struct lock_class_key xs_key[2];
1759static struct lock_class_key xs_slock_key[2];
1760
1761static inline void xs_reclassify_socketu(struct socket *sock)
1762{
1763 struct sock *sk = sock->sk;
1764
1765 BUG_ON(sock_owned_by_user(sk));
1766 sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
1767 &xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
1768}
1769
1770static inline void xs_reclassify_socket4(struct socket *sock)
1771{
1772 struct sock *sk = sock->sk;
1773
1774 BUG_ON(sock_owned_by_user(sk));
1775 sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
1776 &xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
1777}
1778
1779static inline void xs_reclassify_socket6(struct socket *sock)
1780{
1781 struct sock *sk = sock->sk;
1782
1783 BUG_ON(sock_owned_by_user(sk));
1784 sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
1785 &xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
1786}
1787
1788static inline void xs_reclassify_socket(int family, struct socket *sock)
1789{
1790 switch (family) {
1791 case AF_LOCAL:
1792 xs_reclassify_socketu(sock);
1793 break;
1794 case AF_INET:
1795 xs_reclassify_socket4(sock);
1796 break;
1797 case AF_INET6:
1798 xs_reclassify_socket6(sock);
1799 break;
1800 }
1801}
1802#else
1803static inline void xs_reclassify_socketu(struct socket *sock)
1804{
1805}
1806
1807static inline void xs_reclassify_socket4(struct socket *sock)
1808{
1809}
1810
1811static inline void xs_reclassify_socket6(struct socket *sock)
1812{
1813}
1814
1815static inline void xs_reclassify_socket(int family, struct socket *sock)
1816{
1817}
1818#endif
1819
1820static struct socket *xs_create_sock(struct rpc_xprt *xprt,
1821 struct sock_xprt *transport, int family, int type, int protocol)
1822{
1823 struct socket *sock;
1824 int err;
1825
1826 err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
1827 if (err < 0) {
1828 dprintk("RPC: can't create %d transport socket (%d).\n",
1829 protocol, -err);
1830 goto out;
1831 }
1832 xs_reclassify_socket(family, sock);
1833
1834 err = xs_bind(transport, sock);
1835 if (err) {
1836 sock_release(sock);
1837 goto out;
1838 }
1839
1840 return sock;
1841out:
1842 return ERR_PTR(err);
1843}
1844
1845static int xs_local_finish_connecting(struct rpc_xprt *xprt,
1846 struct socket *sock)
1847{
1848 struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1849 xprt);
1850
1851 if (!transport->inet) {
1852 struct sock *sk = sock->sk;
1853
1854 write_lock_bh(&sk->sk_callback_lock);
1855
1856 xs_save_old_callbacks(transport, sk);
1857
1858 sk->sk_user_data = xprt;
1859 sk->sk_data_ready = xs_local_data_ready;
1860 sk->sk_write_space = xs_udp_write_space;
1861 sk->sk_error_report = xs_error_report;
1862 sk->sk_allocation = GFP_ATOMIC;
1863
1864 xprt_clear_connected(xprt);
1865
1866 /* Reset to new socket */
1867 transport->sock = sock;
1868 transport->inet = sk;
1869
1870 write_unlock_bh(&sk->sk_callback_lock);
1871 }
1872
1873 /* Tell the socket layer to start connecting... */
1874 xprt->stat.connect_count++;
1875 xprt->stat.connect_start = jiffies;
1876 return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
1877}
1878
1879/**
1880 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
1881 * @xprt: RPC transport to connect
1882 * @transport: socket transport to connect
1883 * @create_sock: function to create a socket of the correct type
1884 *
1885 * Invoked by a work queue tasklet.
1886 */
1887static void xs_local_setup_socket(struct work_struct *work)
1888{
1889 struct sock_xprt *transport =
1890 container_of(work, struct sock_xprt, connect_worker.work);
1891 struct rpc_xprt *xprt = &transport->xprt;
1892 struct socket *sock;
1893 int status = -EIO;
1894
1895 if (xprt->shutdown)
1896 goto out;
1897
1898 current->flags |= PF_FSTRANS;
1899
1900 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
1901 status = __sock_create(xprt->xprt_net, AF_LOCAL,
1902 SOCK_STREAM, 0, &sock, 1);
1903 if (status < 0) {
1904 dprintk("RPC: can't create AF_LOCAL "
1905 "transport socket (%d).\n", -status);
1906 goto out;
1907 }
1908 xs_reclassify_socketu(sock);
1909
1910 dprintk("RPC: worker connecting xprt %p via AF_LOCAL to %s\n",
1911 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1912
1913 status = xs_local_finish_connecting(xprt, sock);
1914 switch (status) {
1915 case 0:
1916 dprintk("RPC: xprt %p connected to %s\n",
1917 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1918 xprt_set_connected(xprt);
1919 break;
1920 case -ENOENT:
1921 dprintk("RPC: xprt %p: socket %s does not exist\n",
1922 xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1923 break;
1924 default:
1925 printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
1926 __func__, -status,
1927 xprt->address_strings[RPC_DISPLAY_ADDR]);
1928 }
1929
1930out:
1931 xprt_clear_connecting(xprt);
1932 xprt_wake_pending_tasks(xprt, status);
1933 current->flags &= ~PF_FSTRANS;
1934}
1935
1936static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
1937{
1938 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1939
1940 if (!transport->inet) {
1941 struct sock *sk = sock->sk;
1942
1943 write_lock_bh(&sk->sk_callback_lock);
1944
1945 xs_save_old_callbacks(transport, sk);
1946
1947 sk->sk_user_data = xprt;
1948 sk->sk_data_ready = xs_udp_data_ready;
1949 sk->sk_write_space = xs_udp_write_space;
1950 sk->sk_error_report = xs_error_report;
1951 sk->sk_no_check = UDP_CSUM_NORCV;
1952 sk->sk_allocation = GFP_ATOMIC;
1953
1954 xprt_set_connected(xprt);
1955
1956 /* Reset to new socket */
1957 transport->sock = sock;
1958 transport->inet = sk;
1959
1960 write_unlock_bh(&sk->sk_callback_lock);
1961 }
1962 xs_udp_do_set_buffer_size(xprt);
1963}
1964
1965static void xs_udp_setup_socket(struct work_struct *work)
1966{
1967 struct sock_xprt *transport =
1968 container_of(work, struct sock_xprt, connect_worker.work);
1969 struct rpc_xprt *xprt = &transport->xprt;
1970 struct socket *sock = transport->sock;
1971 int status = -EIO;
1972
1973 if (xprt->shutdown)
1974 goto out;
1975
1976 current->flags |= PF_FSTRANS;
1977
1978 /* Start by resetting any existing state */
1979 xs_reset_transport(transport);
1980 sock = xs_create_sock(xprt, transport,
1981 xs_addr(xprt)->sa_family, SOCK_DGRAM, IPPROTO_UDP);
1982 if (IS_ERR(sock))
1983 goto out;
1984
1985 dprintk("RPC: worker connecting xprt %p via %s to "
1986 "%s (port %s)\n", xprt,
1987 xprt->address_strings[RPC_DISPLAY_PROTO],
1988 xprt->address_strings[RPC_DISPLAY_ADDR],
1989 xprt->address_strings[RPC_DISPLAY_PORT]);
1990
1991 xs_udp_finish_connecting(xprt, sock);
1992 status = 0;
1993out:
1994 xprt_clear_connecting(xprt);
1995 xprt_wake_pending_tasks(xprt, status);
1996 current->flags &= ~PF_FSTRANS;
1997}
1998
1999/*
2000 * We need to preserve the port number so the reply cache on the server can
2001 * find our cached RPC replies when we get around to reconnecting.
2002 */
2003static void xs_abort_connection(struct sock_xprt *transport)
2004{
2005 int result;
2006 struct sockaddr any;
2007
2008 dprintk("RPC: disconnecting xprt %p to reuse port\n", transport);
2009
2010 /*
2011 * Disconnect the transport socket by doing a connect operation
2012 * with AF_UNSPEC. This should return immediately...
2013 */
2014 memset(&any, 0, sizeof(any));
2015 any.sa_family = AF_UNSPEC;
2016 result = kernel_connect(transport->sock, &any, sizeof(any), 0);
2017 if (!result)
2018 xs_sock_mark_closed(&transport->xprt);
2019 else
2020 dprintk("RPC: AF_UNSPEC connect return code %d\n",
2021 result);
2022}
2023
2024static void xs_tcp_reuse_connection(struct sock_xprt *transport)
2025{
2026 unsigned int state = transport->inet->sk_state;
2027
2028 if (state == TCP_CLOSE && transport->sock->state == SS_UNCONNECTED) {
2029 /* we don't need to abort the connection if the socket
2030 * hasn't undergone a shutdown
2031 */
2032 if (transport->inet->sk_shutdown == 0)
2033 return;
2034 dprintk("RPC: %s: TCP_CLOSEd and sk_shutdown set to %d\n",
2035 __func__, transport->inet->sk_shutdown);
2036 }
2037 if ((1 << state) & (TCPF_ESTABLISHED|TCPF_SYN_SENT)) {
2038 /* we don't need to abort the connection if the socket
2039 * hasn't undergone a shutdown
2040 */
2041 if (transport->inet->sk_shutdown == 0)
2042 return;
2043 dprintk("RPC: %s: ESTABLISHED/SYN_SENT "
2044 "sk_shutdown set to %d\n",
2045 __func__, transport->inet->sk_shutdown);
2046 }
2047 xs_abort_connection(transport);
2048}
2049
2050static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2051{
2052 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2053 int ret = -ENOTCONN;
2054
2055 if (!transport->inet) {
2056 struct sock *sk = sock->sk;
2057
2058 write_lock_bh(&sk->sk_callback_lock);
2059
2060 xs_save_old_callbacks(transport, sk);
2061
2062 sk->sk_user_data = xprt;
2063 sk->sk_data_ready = xs_tcp_data_ready;
2064 sk->sk_state_change = xs_tcp_state_change;
2065 sk->sk_write_space = xs_tcp_write_space;
2066 sk->sk_error_report = xs_error_report;
2067 sk->sk_allocation = GFP_ATOMIC;
2068
2069 /* socket options */
2070 sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
2071 sock_reset_flag(sk, SOCK_LINGER);
2072 tcp_sk(sk)->linger2 = 0;
2073 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
2074
2075 xprt_clear_connected(xprt);
2076
2077 /* Reset to new socket */
2078 transport->sock = sock;
2079 transport->inet = sk;
2080
2081 write_unlock_bh(&sk->sk_callback_lock);
2082 }
2083
2084 if (!xprt_bound(xprt))
2085 goto out;
2086
2087 /* Tell the socket layer to start connecting... */
2088 xprt->stat.connect_count++;
2089 xprt->stat.connect_start = jiffies;
2090 ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
2091 switch (ret) {
2092 case 0:
2093 case -EINPROGRESS:
2094 /* SYN_SENT! */
2095 xprt->connect_cookie++;
2096 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2097 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2098 }
2099out:
2100 return ret;
2101}
2102
2103/**
2104 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
2105 * @xprt: RPC transport to connect
2106 * @transport: socket transport to connect
2107 * @create_sock: function to create a socket of the correct type
2108 *
2109 * Invoked by a work queue tasklet.
2110 */
2111static void xs_tcp_setup_socket(struct work_struct *work)
2112{
2113 struct sock_xprt *transport =
2114 container_of(work, struct sock_xprt, connect_worker.work);
2115 struct socket *sock = transport->sock;
2116 struct rpc_xprt *xprt = &transport->xprt;
2117 int status = -EIO;
2118
2119 if (xprt->shutdown)
2120 goto out;
2121
2122 current->flags |= PF_FSTRANS;
2123
2124 if (!sock) {
2125 clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
2126 sock = xs_create_sock(xprt, transport,
2127 xs_addr(xprt)->sa_family, SOCK_STREAM, IPPROTO_TCP);
2128 if (IS_ERR(sock)) {
2129 status = PTR_ERR(sock);
2130 goto out;
2131 }
2132 } else {
2133 int abort_and_exit;
2134
2135 abort_and_exit = test_and_clear_bit(XPRT_CONNECTION_ABORT,
2136 &xprt->state);
2137 /* "close" the socket, preserving the local port */
2138 xs_tcp_reuse_connection(transport);
2139
2140 if (abort_and_exit)
2141 goto out_eagain;
2142 }
2143
2144 dprintk("RPC: worker connecting xprt %p via %s to "
2145 "%s (port %s)\n", xprt,
2146 xprt->address_strings[RPC_DISPLAY_PROTO],
2147 xprt->address_strings[RPC_DISPLAY_ADDR],
2148 xprt->address_strings[RPC_DISPLAY_PORT]);
2149
2150 status = xs_tcp_finish_connecting(xprt, sock);
2151 dprintk("RPC: %p connect status %d connected %d sock state %d\n",
2152 xprt, -status, xprt_connected(xprt),
2153 sock->sk->sk_state);
2154 switch (status) {
2155 default:
2156 printk("%s: connect returned unhandled error %d\n",
2157 __func__, status);
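		/* fall through */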
2158 case -EADDRNOTAVAIL:
2159 /* We're probably in TIME_WAIT. Get rid of existing socket,
2160 * and retry
2161 */
2162 set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
2163 xprt_force_disconnect(xprt);
2164 break;
2165 case -ECONNREFUSED:
2166 case -ECONNRESET:
2167 case -ENETUNREACH:
2168 /* retry with existing socket, after a delay */
2169 case 0:
2170 case -EINPROGRESS:
2171 case -EALREADY:
2172 xprt_clear_connecting(xprt);
2173 current->flags &= ~PF_FSTRANS;
2174 return;
2175 case -EINVAL:
2176 /* Happens, for instance, if the user specified a link
2177 * local IPv6 address without a scope-id.
2178 */
2179 goto out;
2180 }
2181out_eagain:
2182 status = -EAGAIN;
2183out:
2184 xprt_clear_connecting(xprt);
2185 xprt_wake_pending_tasks(xprt, status);
2186 current->flags &= ~PF_FSTRANS;
2187}
2188
2189/**
2190 * xs_connect - connect a socket to a remote endpoint
2191 * @task: address of RPC task that manages state of connect request
2192 *
2193 * TCP: If the remote end dropped the connection, delay reconnecting.
2194 *
2195 * UDP socket connects are synchronous, but we use a work queue anyway
2196 * to guarantee that even unprivileged user processes can set up a
2197 * socket on a privileged port.
2198 *
2199 * If a UDP socket connect fails, the delay behavior here prevents
2200 * retry floods (hard mounts).
2201 */
2202static void xs_connect(struct rpc_task *task)
2203{
2204 struct rpc_xprt *xprt = task->tk_xprt;
2205 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2206
2207 if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
2208 dprintk("RPC: xs_connect delayed xprt %p for %lu "
2209 "seconds\n",
2210 xprt, xprt->reestablish_timeout / HZ);
2211 queue_delayed_work(rpciod_workqueue,
2212 &transport->connect_worker,
2213 xprt->reestablish_timeout);
2214 xprt->reestablish_timeout <<= 1;
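		/* exponential backoff, clamped to [INIT, MAX] below */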
2215 if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2216 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2217 if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
2218 xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
2219 } else {
2220 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
2221 queue_delayed_work(rpciod_workqueue,
2222 &transport->connect_worker, 0);
2223 }
2224}
2225
2226/**
 * xs_local_print_stats - display AF_LOCAL socket-specific stats
2228 * @xprt: rpc_xprt struct containing statistics
2229 * @seq: output file
2230 *
2231 */
2232static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2233{
2234 long idle_time = 0;
2235
2236 if (xprt_connected(xprt))
2237 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2238
2239 seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
2240 "%llu %llu %lu %llu %llu\n",
2241 xprt->stat.bind_count,
2242 xprt->stat.connect_count,
2243 xprt->stat.connect_time,
2244 idle_time,
2245 xprt->stat.sends,
2246 xprt->stat.recvs,
2247 xprt->stat.bad_xids,
2248 xprt->stat.req_u,
2249 xprt->stat.bklog_u,
2250 xprt->stat.max_slots,
2251 xprt->stat.sending_u,
2252 xprt->stat.pending_u);
2253}
2254
2255/**
 * xs_udp_print_stats - display UDP socket-specific stats
2257 * @xprt: rpc_xprt struct containing statistics
2258 * @seq: output file
2259 *
2260 */
2261static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2262{
2263 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2264
2265 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu "
2266 "%lu %llu %llu\n",
2267 transport->srcport,
2268 xprt->stat.bind_count,
2269 xprt->stat.sends,
2270 xprt->stat.recvs,
2271 xprt->stat.bad_xids,
2272 xprt->stat.req_u,
2273 xprt->stat.bklog_u,
2274 xprt->stat.max_slots,
2275 xprt->stat.sending_u,
2276 xprt->stat.pending_u);
2277}
2278
2279/**
 * xs_tcp_print_stats - display TCP socket-specific stats
2281 * @xprt: rpc_xprt struct containing statistics
2282 * @seq: output file
2283 *
2284 */
2285static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2286{
2287 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2288 long idle_time = 0;
2289
2290 if (xprt_connected(xprt))
2291 idle_time = (long)(jiffies - xprt->last_used) / HZ;
2292
2293 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu "
2294 "%llu %llu %lu %llu %llu\n",
2295 transport->srcport,
2296 xprt->stat.bind_count,
2297 xprt->stat.connect_count,
2298 xprt->stat.connect_time,
2299 idle_time,
2300 xprt->stat.sends,
2301 xprt->stat.recvs,
2302 xprt->stat.bad_xids,
2303 xprt->stat.req_u,
2304 xprt->stat.bklog_u,
2305 xprt->stat.max_slots,
2306 xprt->stat.sending_u,
2307 xprt->stat.pending_u);
2308}
2309
2310/*
2311 * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
2312 * we allocate pages instead doing a kmalloc like rpc_malloc is because we want
2313 * to use the server side send routines.
2314 */
2315static void *bc_malloc(struct rpc_task *task, size_t size)
2316{
2317 struct page *page;
2318 struct rpc_buffer *buf;
2319
2320 BUG_ON(size > PAGE_SIZE - sizeof(struct rpc_buffer));
2321 page = alloc_page(GFP_KERNEL);
2322
2323 if (!page)
2324 return NULL;
2325
2326 buf = page_address(page);
2327 buf->len = PAGE_SIZE;
2328
2329 return buf->data;
2330}
2331
2332/*
2333 * Free the space allocated in the bc_alloc routine
2334 */
2335static void bc_free(void *buffer)
2336{
2337 struct rpc_buffer *buf;
2338
2339 if (!buffer)
2340 return;
2341
2342 buf = container_of(buffer, struct rpc_buffer, data);
2343 free_page((unsigned long)buf);
2344}
2345
2346/*
2347 * Use the svc_sock to send the callback. Must be called with svsk->sk_mutex
2348 * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
2349 */
2350static int bc_sendto(struct rpc_rqst *req)
2351{
2352 int len;
2353 struct xdr_buf *xbufp = &req->rq_snd_buf;
2354 struct rpc_xprt *xprt = req->rq_xprt;
2355 struct sock_xprt *transport =
2356 container_of(xprt, struct sock_xprt, xprt);
2357 struct socket *sock = transport->sock;
2358 unsigned long headoff;
2359 unsigned long tailoff;
2360
2361 xs_encode_stream_record_marker(xbufp);
2362
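	/*
	 * svc_send_common() takes page offsets: mask off the
	 * page-aligned part of the head and tail kernel addresses.
	 */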
2363 tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
2364 headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
2365 len = svc_send_common(sock, xbufp,
2366 virt_to_page(xbufp->head[0].iov_base), headoff,
2367 xbufp->tail[0].iov_base, tailoff);
2368
2369 if (len != xbufp->len) {
2370 printk(KERN_NOTICE "Error sending entire callback!\n");
2371 len = -EAGAIN;
2372 }
2373
2374 return len;
2375}
2376
2377/*
2378 * The send routine. Borrows from svc_send
2379 */
2380static int bc_send_request(struct rpc_task *task)
2381{
2382 struct rpc_rqst *req = task->tk_rqstp;
2383 struct svc_xprt *xprt;
2384 struct svc_sock *svsk;
2385 u32 len;
2386
2387 dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
2388 /*
2389 * Get the server socket associated with this callback xprt
2390 */
2391 xprt = req->rq_xprt->bc_xprt;
2392 svsk = container_of(xprt, struct svc_sock, sk_xprt);
2393
	/*
	 * Grab the mutex to serialize data as the connection is shared
	 * with the fore channel. If the first trylock fails, sleep on
	 * xpt_bc_pending and try once more; if the retry succeeds, the
	 * task must dequeue itself again before proceeding.
	 */
2398 if (!mutex_trylock(&xprt->xpt_mutex)) {
2399 rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
2400 if (!mutex_trylock(&xprt->xpt_mutex))
2401 return -EAGAIN;
2402 rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task);
2403 }
2404 if (test_bit(XPT_DEAD, &xprt->xpt_flags))
2405 len = -ENOTCONN;
2406 else
2407 len = bc_sendto(req);
2408 mutex_unlock(&xprt->xpt_mutex);
2409
2410 if (len > 0)
2411 len = 0;
2412
2413 return len;
2414}
2415
2416/*
 * The close routine. Since the connection is client-initiated, we do nothing.
2418 */
2419
2420static void bc_close(struct rpc_xprt *xprt)
2421{
2422}
2423
2424/*
 * The xprt destroy routine. Again, because this connection is
 * client-initiated, we do nothing.
2427 */
2428
2429static void bc_destroy(struct rpc_xprt *xprt)
2430{
2431}
2432
2433static struct rpc_xprt_ops xs_local_ops = {
2434 .reserve_xprt = xprt_reserve_xprt,
2435 .release_xprt = xs_tcp_release_xprt,
2436 .rpcbind = xs_local_rpcbind,
2437 .set_port = xs_local_set_port,
2438 .connect = xs_connect,
2439 .buf_alloc = rpc_malloc,
2440 .buf_free = rpc_free,
2441 .send_request = xs_local_send_request,
2442 .set_retrans_timeout = xprt_set_retrans_timeout_def,
2443 .close = xs_close,
2444 .destroy = xs_destroy,
2445 .print_stats = xs_local_print_stats,
2446};
2447
2448static struct rpc_xprt_ops xs_udp_ops = {
2449 .set_buffer_size = xs_udp_set_buffer_size,
2450 .reserve_xprt = xprt_reserve_xprt_cong,
2451 .release_xprt = xprt_release_xprt_cong,
2452 .rpcbind = rpcb_getport_async,
2453 .set_port = xs_set_port,
2454 .connect = xs_connect,
2455 .buf_alloc = rpc_malloc,
2456 .buf_free = rpc_free,
2457 .send_request = xs_udp_send_request,
2458 .set_retrans_timeout = xprt_set_retrans_timeout_rtt,
2459 .timer = xs_udp_timer,
2460 .release_request = xprt_release_rqst_cong,
2461 .close = xs_close,
2462 .destroy = xs_destroy,
2463 .print_stats = xs_udp_print_stats,
2464};
2465
static const struct rpc_xprt_ops xs_tcp_ops = {
	.reserve_xprt		= xprt_reserve_xprt,
	.release_xprt		= xs_tcp_release_xprt,
	.rpcbind		= rpcb_getport_async,
	.set_port		= xs_set_port,
	.connect		= xs_connect,
	.buf_alloc		= rpc_malloc,
	.buf_free		= rpc_free,
	.send_request		= xs_tcp_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
	.close			= xs_tcp_close,
	.destroy		= xs_destroy,
	.print_stats		= xs_tcp_print_stats,
};

/*
 * The rpc_xprt_ops for the server backchannel
 */
static const struct rpc_xprt_ops bc_tcp_ops = {
	.reserve_xprt		= xprt_reserve_xprt,
	.release_xprt		= xprt_release_xprt,
	.rpcbind		= xs_local_rpcbind,
	.buf_alloc		= bc_malloc,
	.buf_free		= bc_free,
	.send_request		= bc_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
	.close			= bc_close,
	.destroy		= bc_destroy,
	.print_stats		= xs_tcp_print_stats,
};

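/*
 * Initialize @sap to the wildcard ("any") address for @family, for
 * callers that did not specify a source address. AF_LOCAL transports
 * need no source address, so it is left untouched.
 */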
static int xs_init_anyaddr(const int family, struct sockaddr *sap)
{
	static const struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
	};
	static const struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
	};

	switch (family) {
	case AF_LOCAL:
		break;
	case AF_INET:
		memcpy(sap, &sin, sizeof(sin));
		break;
	case AF_INET6:
		memcpy(sap, &sin6, sizeof(sin6));
		break;
	default:
		dprintk("RPC:       %s: Bad address family\n", __func__);
		return -EAFNOSUPPORT;
	}
	return 0;
}

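/*
 * Allocate the common sock_xprt and record the destination address;
 * if no source address was supplied, fall back to the wildcard
 * address for the destination's family.
 */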
static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
				      unsigned int slot_table_size,
				      unsigned int max_slot_table_size)
{
	struct rpc_xprt *xprt;
	struct sock_xprt *new;

	if (args->addrlen > sizeof(xprt->addr)) {
		dprintk("RPC:       xs_setup_xprt: address too large\n");
		return ERR_PTR(-EBADF);
	}

	xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size,
			  max_slot_table_size);
	if (xprt == NULL) {
		dprintk("RPC:       xs_setup_xprt: couldn't allocate "
				"rpc_xprt\n");
		return ERR_PTR(-ENOMEM);
	}

	new = container_of(xprt, struct sock_xprt, xprt);
	memcpy(&xprt->addr, args->dstaddr, args->addrlen);
	xprt->addrlen = args->addrlen;
	if (args->srcaddr)
		memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
	else {
		int err = xs_init_anyaddr(args->dstaddr->sa_family,
					  (struct sockaddr *)&new->srcaddr);
		if (err != 0) {
			xprt_free(xprt);
			return ERR_PTR(err);
		}
	}

	return xprt;
}

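/*
 * Default RPC retransmit timeouts used below. Roughly: to_initval is
 * the first major timeout, each retransmission backs off (linearly by
 * to_increment when set, as for UDP) up to to_maxval, and to_retries
 * bounds the number of retransmissions.
 */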
static const struct rpc_timeout xs_local_default_timeout = {
	.to_initval = 10 * HZ,
	.to_maxval = 10 * HZ,
	.to_retries = 2,
};

/**
 * xs_setup_local - Set up transport to use an AF_LOCAL socket
 * @args: rpc transport creation arguments
 *
 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
 */
static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
{
	struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
	struct sock_xprt *transport;
	struct rpc_xprt *xprt;
	struct rpc_xprt *ret;

	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
			     xprt_max_tcp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = 0;
	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;

	xprt->bind_timeout = XS_BIND_TO;
	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

	xprt->ops = &xs_local_ops;
	xprt->timeout = &xs_local_default_timeout;

	switch (sun->sun_family) {
	case AF_LOCAL:
		if (sun->sun_path[0] != '/') {
			dprintk("RPC:       bad AF_LOCAL address: %s\n",
					sun->sun_path);
			ret = ERR_PTR(-EINVAL);
			goto out_err;
		}
		xprt_set_bound(xprt);
		INIT_DELAYED_WORK(&transport->connect_worker,
				  xs_local_setup_socket);
		xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
		break;
	default:
		ret = ERR_PTR(-EAFNOSUPPORT);
		goto out_err;
	}

	dprintk("RPC:       set up xprt to %s via AF_LOCAL\n",
			xprt->address_strings[RPC_DISPLAY_ADDR]);

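	/*
	 * Pin the module for the lifetime of this transport; if the
	 * module is already being unloaded, fail the setup instead.
	 */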
	if (try_module_get(THIS_MODULE))
		return xprt;
	ret = ERR_PTR(-EINVAL);
out_err:
	xprt_free(xprt);
	return ret;
}

static const struct rpc_timeout xs_udp_default_timeout = {
	.to_initval = 5 * HZ,
	.to_maxval = 30 * HZ,
	.to_increment = 5 * HZ,
	.to_retries = 5,
};

/**
 * xs_setup_udp - Set up transport to use a UDP socket
 * @args: rpc transport creation arguments
 *
 */
static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
{
	struct sockaddr *addr = args->dstaddr;
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;
	struct rpc_xprt *ret;

	xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries,
			     xprt_udp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = IPPROTO_UDP;
	xprt->tsh_size = 0;
	/* XXX: header size can vary due to auth type, IPv6, etc. */
	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);

	xprt->bind_timeout = XS_BIND_TO;
	xprt->reestablish_timeout = XS_UDP_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

	xprt->ops = &xs_udp_ops;

	xprt->timeout = &xs_udp_default_timeout;

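	/*
	 * A nonzero destination port means no rpcbind query is needed,
	 * so the new transport can be marked bound immediately.
	 */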
	switch (addr->sa_family) {
	case AF_INET:
		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
			xprt_set_bound(xprt);

		INIT_DELAYED_WORK(&transport->connect_worker,
				  xs_udp_setup_socket);
		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
		break;
	case AF_INET6:
		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
			xprt_set_bound(xprt);

		INIT_DELAYED_WORK(&transport->connect_worker,
				  xs_udp_setup_socket);
		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
		break;
	default:
		ret = ERR_PTR(-EAFNOSUPPORT);
		goto out_err;
	}

	if (xprt_bound(xprt))
		dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PORT],
				xprt->address_strings[RPC_DISPLAY_PROTO]);
	else
		dprintk("RPC:       set up xprt to %s (autobind) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PROTO]);

	if (try_module_get(THIS_MODULE))
		return xprt;
	ret = ERR_PTR(-EINVAL);
out_err:
	xprt_free(xprt);
	return ret;
}

static const struct rpc_timeout xs_tcp_default_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
	.to_retries = 2,
};

/**
 * xs_setup_tcp - Set up transport to use a TCP socket
 * @args: rpc transport creation arguments
 *
 */
static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
{
	struct sockaddr *addr = args->dstaddr;
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;
	struct rpc_xprt *ret;

	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
			     xprt_max_tcp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = IPPROTO_TCP;
	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;

	xprt->bind_timeout = XS_BIND_TO;
	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
	xprt->idle_timeout = XS_IDLE_DISC_TO;

	xprt->ops = &xs_tcp_ops;
	xprt->timeout = &xs_tcp_default_timeout;

	switch (addr->sa_family) {
	case AF_INET:
		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
			xprt_set_bound(xprt);

		INIT_DELAYED_WORK(&transport->connect_worker,
				  xs_tcp_setup_socket);
		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
		break;
	case AF_INET6:
		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
			xprt_set_bound(xprt);

		INIT_DELAYED_WORK(&transport->connect_worker,
				  xs_tcp_setup_socket);
		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
		break;
	default:
		ret = ERR_PTR(-EAFNOSUPPORT);
		goto out_err;
	}

	if (xprt_bound(xprt))
		dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PORT],
				xprt->address_strings[RPC_DISPLAY_PROTO]);
	else
		dprintk("RPC:       set up xprt to %s (autobind) via %s\n",
				xprt->address_strings[RPC_DISPLAY_ADDR],
				xprt->address_strings[RPC_DISPLAY_PROTO]);

	if (try_module_get(THIS_MODULE))
		return xprt;
	ret = ERR_PTR(-EINVAL);
out_err:
	xprt_free(xprt);
	return ret;
}

/**
 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
 * @args: rpc transport creation arguments
 *
 */
static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
{
	struct sockaddr *addr = args->dstaddr;
	struct rpc_xprt *xprt;
	struct sock_xprt *transport;
	struct svc_sock *bc_sock;
	struct rpc_xprt *ret;

	if (args->bc_xprt->xpt_bc_xprt) {
		/*
		 * This server connection already has a backchannel
		 * export; we can't create a new one, as we wouldn't be
		 * able to match replies based on xid any more. So,
		 * reuse the already-existing one:
		 */
		return args->bc_xprt->xpt_bc_xprt;
	}
	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
			     xprt_tcp_slot_table_entries);
	if (IS_ERR(xprt))
		return xprt;
	transport = container_of(xprt, struct sock_xprt, xprt);

	xprt->prot = IPPROTO_TCP;
	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
	xprt->timeout = &xs_tcp_default_timeout;

	/* backchannel */
	xprt_set_bound(xprt);
	xprt->bind_timeout = 0;
	xprt->reestablish_timeout = 0;
	xprt->idle_timeout = 0;

	xprt->ops = &bc_tcp_ops;

	switch (addr->sa_family) {
	case AF_INET:
		xs_format_peer_addresses(xprt, "tcp",
					 RPCBIND_NETID_TCP);
		break;
	case AF_INET6:
		xs_format_peer_addresses(xprt, "tcp",
					 RPCBIND_NETID_TCP6);
		break;
	default:
		ret = ERR_PTR(-EAFNOSUPPORT);
		goto out_err;
	}

	dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
			xprt->address_strings[RPC_DISPLAY_ADDR],
			xprt->address_strings[RPC_DISPLAY_PORT],
			xprt->address_strings[RPC_DISPLAY_PROTO]);

	/*
	 * Once we've associated a backchannel xprt with a connection,
	 * we want to keep it around as long as the connection lasts,
	 * in case we need to start using it for a backchannel again;
	 * this reference won't be dropped until bc_xprt is destroyed.
	 */
	xprt_get(xprt);
	args->bc_xprt->xpt_bc_xprt = xprt;
	xprt->bc_xprt = args->bc_xprt;
	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
	transport->sock = bc_sock->sk_sock;
	transport->inet = bc_sock->sk_sk;

	/*
	 * Since we don't want connections for the backchannel, we set
	 * the xprt status to connected
	 */
	xprt_set_connected(xprt);

	if (try_module_get(THIS_MODULE))
		return xprt;
	xprt_put(xprt);
	ret = ERR_PTR(-EINVAL);
out_err:
	xprt_free(xprt);
	return ret;
}

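/*
 * Transport classes registered with the RPC client; xprt_create_transport()
 * matches a caller's XPRT_TRANSPORT_* ident against these to find the
 * setup routine for the requested transport.
 */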
static struct xprt_class xs_local_transport = {
	.list		= LIST_HEAD_INIT(xs_local_transport.list),
	.name		= "named UNIX socket",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_LOCAL,
	.setup		= xs_setup_local,
};

static struct xprt_class xs_udp_transport = {
	.list		= LIST_HEAD_INIT(xs_udp_transport.list),
	.name		= "udp",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_UDP,
	.setup		= xs_setup_udp,
};

static struct xprt_class xs_tcp_transport = {
	.list		= LIST_HEAD_INIT(xs_tcp_transport.list),
	.name		= "tcp",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_TCP,
	.setup		= xs_setup_tcp,
};

static struct xprt_class xs_bc_tcp_transport = {
	.list		= LIST_HEAD_INIT(xs_bc_tcp_transport.list),
	.name		= "tcp NFSv4.1 backchannel",
	.owner		= THIS_MODULE,
	.ident		= XPRT_TRANSPORT_BC_TCP,
	.setup		= xs_setup_bc_tcp,
};

/**
 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
 *
 */
int init_socket_xprt(void)
{
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (!sunrpc_table_header)
		sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif

	xprt_register_transport(&xs_local_transport);
	xprt_register_transport(&xs_udp_transport);
	xprt_register_transport(&xs_tcp_transport);
	xprt_register_transport(&xs_bc_tcp_transport);

	return 0;
}

/**
 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
 *
 */
void cleanup_socket_xprt(void)
{
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (sunrpc_table_header) {
		unregister_sysctl_table(sunrpc_table_header);
		sunrpc_table_header = NULL;
	}
#endif

	xprt_unregister_transport(&xs_local_transport);
	xprt_unregister_transport(&xs_udp_transport);
	xprt_unregister_transport(&xs_tcp_transport);
	xprt_unregister_transport(&xs_bc_tcp_transport);
}

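/*
 * Parse a module parameter as an unsigned integer and reject values
 * outside [min, max]; the port and slot-table parameters below are
 * all range-checked through this helper.
 */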
static int param_set_uint_minmax(const char *val,
		const struct kernel_param *kp,
		unsigned int min, unsigned int max)
{
	unsigned long num;
	int ret;

	if (!val)
		return -EINVAL;
	ret = kstrtoul(val, 0, &num);
	if (ret || num < min || num > max)
		return -EINVAL;
	*((unsigned int *)kp->arg) = num;
	return 0;
}

static int param_set_portnr(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp,
			RPC_MIN_RESVPORT,
			RPC_MAX_RESVPORT);
}

static const struct kernel_param_ops param_ops_portnr = {
	.set = param_set_portnr,
	.get = param_get_uint,
};

#define param_check_portnr(name, p) \
	__param_check(name, p, unsigned int);

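/*
 * Defining param_ops_portnr and param_check_portnr above lets
 * module_param_named() accept "portnr" as a parameter type: the
 * module_param machinery pastes the type name into param_ops_##type.
 */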
module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);

static int param_set_slot_table_size(const char *val,
		const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp,
			RPC_MIN_SLOT_TABLE,
			RPC_MAX_SLOT_TABLE);
}

static const struct kernel_param_ops param_ops_slot_table_size = {
	.set = param_set_slot_table_size,
	.get = param_get_uint,
};

#define param_check_slot_table_size(name, p) \
	__param_check(name, p, unsigned int);

static int param_set_max_slot_table_size(const char *val,
		const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp,
			RPC_MIN_SLOT_TABLE,
			RPC_MAX_SLOT_TABLE_LIMIT);
}

static const struct kernel_param_ops param_ops_max_slot_table_size = {
	.set = param_set_max_slot_table_size,
	.get = param_get_uint,
};

#define param_check_max_slot_table_size(name, p) \
	__param_check(name, p, unsigned int);

module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
		   slot_table_size, 0644);
module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries,
		   max_slot_table_size, 0644);
module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
		   slot_table_size, 0644);

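/*
 * All of these parameters appear under /sys/module/sunrpc/parameters/
 * and may also be given on the kernel command line, e.g.
 * "sunrpc.tcp_slot_table_entries=128".
 */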