   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * linux/net/sunrpc/xprtsock.c
   4 *
   5 * Client-side transport implementation for sockets.
   6 *
   7 * TCP callback races fixes (C) 1998 Red Hat
   8 * TCP send fixes (C) 1998 Red Hat
   9 * TCP NFS related read + write fixes
  10 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
  11 *
   12 * Rewrite of large parts of the code in order to stabilize TCP stuff.
  13 * Fix behaviour when socket buffer is full.
  14 *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
  15 *
  16 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
  17 *
  18 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
  19 *   <gilles.quillard@bull.net>
  20 */
  21
  22#include <linux/types.h>
  23#include <linux/string.h>
  24#include <linux/slab.h>
  25#include <linux/module.h>
  26#include <linux/capability.h>
  27#include <linux/pagemap.h>
  28#include <linux/errno.h>
  29#include <linux/socket.h>
  30#include <linux/in.h>
  31#include <linux/net.h>
  32#include <linux/mm.h>
  33#include <linux/un.h>
  34#include <linux/udp.h>
  35#include <linux/tcp.h>
  36#include <linux/sunrpc/clnt.h>
  37#include <linux/sunrpc/addr.h>
  38#include <linux/sunrpc/sched.h>
  39#include <linux/sunrpc/svcsock.h>
  40#include <linux/sunrpc/xprtsock.h>
  41#include <linux/file.h>
  42#ifdef CONFIG_SUNRPC_BACKCHANNEL
  43#include <linux/sunrpc/bc_xprt.h>
  44#endif
  45
  46#include <net/sock.h>
  47#include <net/checksum.h>
  48#include <net/udp.h>
  49#include <net/tcp.h>
  50#include <linux/bvec.h>
  51#include <linux/highmem.h>
  52#include <linux/uio.h>
  53#include <linux/sched/mm.h>
  54
  55#include <trace/events/sunrpc.h>
  56
  57#include "sunrpc.h"
  58
  59static void xs_close(struct rpc_xprt *xprt);
  60static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
  61		struct socket *sock);
  62
  63/*
  64 * xprtsock tunables
  65 */
  66static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
  67static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
  68static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;
  69
  70static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
  71static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
  72
  73#define XS_TCP_LINGER_TO	(15U * HZ)
  74static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
  75
  76/*
  77 * We can register our own files under /proc/sys/sunrpc by
  78 * calling register_sysctl_table() again.  The files in that
  79 * directory become the union of all files registered there.
  80 *
  81 * We simply need to make sure that we don't collide with
  82 * someone else's file names!
  83 */
  84
  85static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
  86static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
  87static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
  88static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
  89static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
  90
  91static struct ctl_table_header *sunrpc_table_header;
  92
  93/*
  94 * FIXME: changing the UDP slot table size should also resize the UDP
  95 *        socket buffers for existing UDP transports
  96 */
  97static struct ctl_table xs_tunables_table[] = {
  98	{
  99		.procname	= "udp_slot_table_entries",
 100		.data		= &xprt_udp_slot_table_entries,
 101		.maxlen		= sizeof(unsigned int),
 102		.mode		= 0644,
 103		.proc_handler	= proc_dointvec_minmax,
 104		.extra1		= &min_slot_table_size,
 105		.extra2		= &max_slot_table_size
 106	},
 107	{
 108		.procname	= "tcp_slot_table_entries",
 109		.data		= &xprt_tcp_slot_table_entries,
 110		.maxlen		= sizeof(unsigned int),
 111		.mode		= 0644,
 112		.proc_handler	= proc_dointvec_minmax,
 113		.extra1		= &min_slot_table_size,
 114		.extra2		= &max_slot_table_size
 115	},
 116	{
 117		.procname	= "tcp_max_slot_table_entries",
 118		.data		= &xprt_max_tcp_slot_table_entries,
 119		.maxlen		= sizeof(unsigned int),
 120		.mode		= 0644,
 121		.proc_handler	= proc_dointvec_minmax,
 122		.extra1		= &min_slot_table_size,
 123		.extra2		= &max_tcp_slot_table_limit
 124	},
 125	{
 126		.procname	= "min_resvport",
 127		.data		= &xprt_min_resvport,
 128		.maxlen		= sizeof(unsigned int),
 129		.mode		= 0644,
 130		.proc_handler	= proc_dointvec_minmax,
 131		.extra1		= &xprt_min_resvport_limit,
 132		.extra2		= &xprt_max_resvport_limit
 133	},
 134	{
 135		.procname	= "max_resvport",
 136		.data		= &xprt_max_resvport,
 137		.maxlen		= sizeof(unsigned int),
 138		.mode		= 0644,
 139		.proc_handler	= proc_dointvec_minmax,
 140		.extra1		= &xprt_min_resvport_limit,
 141		.extra2		= &xprt_max_resvport_limit
 142	},
 143	{
 144		.procname	= "tcp_fin_timeout",
 145		.data		= &xs_tcp_fin_timeout,
 146		.maxlen		= sizeof(xs_tcp_fin_timeout),
 147		.mode		= 0644,
 148		.proc_handler	= proc_dointvec_jiffies,
 149	},
 150	{ },
 151};
 152
 153static struct ctl_table sunrpc_table[] = {
 154	{
 155		.procname	= "sunrpc",
 156		.mode		= 0555,
 157		.child		= xs_tunables_table
 158	},
 159	{ },
 160};
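/*
 * For illustration, a minimal sketch of how the tables above are
 * registered (this mirrors the module init/exit code elsewhere in
 * this file); the tunables then show up as ordinary sysctl files,
 * e.g. "echo 16 > /proc/sys/sunrpc/tcp_slot_table_entries":
 *
 *	sunrpc_table_header = register_sysctl_table(sunrpc_table);
 *	...
 *	unregister_sysctl_table(sunrpc_table_header);
 */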
 161
 162/*
 163 * Wait duration for a reply from the RPC portmapper.
 164 */
 165#define XS_BIND_TO		(60U * HZ)
 166
 167/*
 168 * Delay if a UDP socket connect error occurs.  This is most likely some
 169 * kind of resource problem on the local host.
 170 */
 171#define XS_UDP_REEST_TO		(2U * HZ)
 172
 173/*
 174 * The reestablish timeout allows clients to delay for a bit before attempting
 175 * to reconnect to a server that just dropped our connection.
 176 *
 177 * We implement an exponential backoff when trying to reestablish a TCP
 178 * transport connection with the server.  Some servers like to drop a TCP
 179 * connection when they are overworked, so we start with a short timeout and
 180 * increase over time if the server is down or not responding.
 181 */
 182#define XS_TCP_INIT_REEST_TO	(3U * HZ)
 183
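/*
 * For illustration, the backoff described above amounts to doubling
 * the reestablish timeout after each failed attempt, clamped to the
 * transport's configured ceiling (a sketch, not the exact code):
 *
 *	xprt->reestablish_timeout <<= 1;
 *	if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
 *		xprt->reestablish_timeout = xprt->max_reconnect_timeout;
 */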
 184/*
 185 * TCP idle timeout; client drops the transport socket if it is idle
 186 * for this long.  Note that we also timeout UDP sockets to prevent
 187 * holding port numbers when there is no RPC traffic.
 188 */
 189#define XS_IDLE_DISC_TO		(5U * 60 * HZ)
 190
 191#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 192# undef  RPC_DEBUG_DATA
 193# define RPCDBG_FACILITY	RPCDBG_TRANS
 194#endif
 195
 196#ifdef RPC_DEBUG_DATA
 197static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
 198{
 199	u8 *buf = (u8 *) packet;
 200	int j;
 201
 202	dprintk("RPC:       %s\n", msg);
 203	for (j = 0; j < count && j < 128; j += 4) {
 204		if (!(j & 31)) {
 205			if (j)
 206				dprintk("\n");
 207			dprintk("0x%04x ", j);
 208		}
 209		dprintk("%02x%02x%02x%02x ",
 210			buf[j], buf[j+1], buf[j+2], buf[j+3]);
 211	}
 212	dprintk("\n");
 213}
 214#else
 215static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
 216{
 217	/* NOP */
 218}
 219#endif
 220
 221static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
 222{
 223	return (struct rpc_xprt *) sk->sk_user_data;
 224}
 225
 226static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
 227{
 228	return (struct sockaddr *) &xprt->addr;
 229}
 230
 231static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
 232{
 233	return (struct sockaddr_un *) &xprt->addr;
 234}
 235
 236static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
 237{
 238	return (struct sockaddr_in *) &xprt->addr;
 239}
 240
 241static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
 242{
 243	return (struct sockaddr_in6 *) &xprt->addr;
 244}
 245
 246static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
 247{
 248	struct sockaddr *sap = xs_addr(xprt);
 249	struct sockaddr_in6 *sin6;
 250	struct sockaddr_in *sin;
 251	struct sockaddr_un *sun;
 252	char buf[128];
 253
 254	switch (sap->sa_family) {
 255	case AF_LOCAL:
 256		sun = xs_addr_un(xprt);
 257		strlcpy(buf, sun->sun_path, sizeof(buf));
 258		xprt->address_strings[RPC_DISPLAY_ADDR] =
 259						kstrdup(buf, GFP_KERNEL);
 260		break;
 261	case AF_INET:
 262		(void)rpc_ntop(sap, buf, sizeof(buf));
 263		xprt->address_strings[RPC_DISPLAY_ADDR] =
 264						kstrdup(buf, GFP_KERNEL);
 265		sin = xs_addr_in(xprt);
 266		snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
 267		break;
 268	case AF_INET6:
 269		(void)rpc_ntop(sap, buf, sizeof(buf));
 270		xprt->address_strings[RPC_DISPLAY_ADDR] =
 271						kstrdup(buf, GFP_KERNEL);
 272		sin6 = xs_addr_in6(xprt);
 273		snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
 274		break;
 275	default:
 276		BUG();
 277	}
 278
 279	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
 280}
 281
 282static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
 283{
 284	struct sockaddr *sap = xs_addr(xprt);
 285	char buf[128];
 286
 287	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
 288	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
 289
 290	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
 291	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
 292}
 293
 294static void xs_format_peer_addresses(struct rpc_xprt *xprt,
 295				     const char *protocol,
 296				     const char *netid)
 297{
 298	xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
 299	xprt->address_strings[RPC_DISPLAY_NETID] = netid;
 300	xs_format_common_peer_addresses(xprt);
 301	xs_format_common_peer_ports(xprt);
 302}
 303
 304static void xs_update_peer_port(struct rpc_xprt *xprt)
 305{
 306	kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
 307	kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
 308
 309	xs_format_common_peer_ports(xprt);
 310}
 311
 312static void xs_free_peer_addresses(struct rpc_xprt *xprt)
 313{
 314	unsigned int i;
 315
 316	for (i = 0; i < RPC_DISPLAY_MAX; i++)
 317		switch (i) {
 318		case RPC_DISPLAY_PROTO:
 319		case RPC_DISPLAY_NETID:
 320			continue;
 321		default:
 322			kfree(xprt->address_strings[i]);
 323		}
 324}
 325
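/*
 * With XDRBUF_SPARSE_PAGES the receive pages are allocated lazily:
 * make sure the first @want bytes of @buf are backed by real pages,
 * and on allocation failure report how many bytes already are, so the
 * caller can still make partial progress.
 */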
 326static size_t
 327xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
 328{
  329	size_t i, n;
 330
 331	if (!want || !(buf->flags & XDRBUF_SPARSE_PAGES))
 332		return want;
 333	n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT;
 334	for (i = 0; i < n; i++) {
 335		if (buf->pages[i])
 336			continue;
 337		buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp);
 338		if (!buf->pages[i]) {
 339			i *= PAGE_SIZE;
 340			return i > buf->page_base ? i - buf->page_base : 0;
 341		}
 342	}
 343	return want;
 344}
 345
 346static ssize_t
 347xs_sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags, size_t seek)
 348{
 349	ssize_t ret;
 350	if (seek != 0)
 351		iov_iter_advance(&msg->msg_iter, seek);
 352	ret = sock_recvmsg(sock, msg, flags);
 353	return ret > 0 ? ret + seek : ret;
 354}
 355
 356static ssize_t
 357xs_read_kvec(struct socket *sock, struct msghdr *msg, int flags,
 358		struct kvec *kvec, size_t count, size_t seek)
 359{
 360	iov_iter_kvec(&msg->msg_iter, READ, kvec, 1, count);
 361	return xs_sock_recvmsg(sock, msg, flags, seek);
 362}
 363
 364static ssize_t
 365xs_read_bvec(struct socket *sock, struct msghdr *msg, int flags,
 366		struct bio_vec *bvec, unsigned long nr, size_t count,
 367		size_t seek)
 368{
 369	iov_iter_bvec(&msg->msg_iter, READ, bvec, nr, count);
 370	return xs_sock_recvmsg(sock, msg, flags, seek);
 371}
 372
 373static ssize_t
 374xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
 375		size_t count)
 376{
 377	iov_iter_discard(&msg->msg_iter, READ, count);
 378	return sock_recvmsg(sock, msg, flags);
 379}
 380
 381#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 382static void
 383xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
 384{
 385	struct bvec_iter bi = {
 386		.bi_size = count,
 387	};
 388	struct bio_vec bv;
 389
 390	bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
 391	for_each_bvec(bv, bvec, bi, bi)
 392		flush_dcache_page(bv.bv_page);
 393}
 394#else
 395static inline void
 396xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
 397{
 398}
 399#endif
 400
 401static ssize_t
 402xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
 403		struct xdr_buf *buf, size_t count, size_t seek, size_t *read)
 404{
 405	size_t want, seek_init = seek, offset = 0;
 406	ssize_t ret;
 407
 408	want = min_t(size_t, count, buf->head[0].iov_len);
 409	if (seek < want) {
 410		ret = xs_read_kvec(sock, msg, flags, &buf->head[0], want, seek);
 411		if (ret <= 0)
 412			goto sock_err;
 413		offset += ret;
 414		if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
 415			goto out;
 416		if (ret != want)
 417			goto out;
 418		seek = 0;
 419	} else {
 420		seek -= want;
 421		offset += want;
 422	}
 423
 424	want = xs_alloc_sparse_pages(buf,
 425			min_t(size_t, count - offset, buf->page_len),
 426			GFP_KERNEL);
 427	if (seek < want) {
 428		ret = xs_read_bvec(sock, msg, flags, buf->bvec,
 429				xdr_buf_pagecount(buf),
 430				want + buf->page_base,
 431				seek + buf->page_base);
 432		if (ret <= 0)
 433			goto sock_err;
 434		xs_flush_bvec(buf->bvec, ret, seek + buf->page_base);
 435		offset += ret - buf->page_base;
 436		if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
 437			goto out;
 438		if (ret != want)
 439			goto out;
 440		seek = 0;
 441	} else {
 442		seek -= want;
 443		offset += want;
 444	}
 445
 446	want = min_t(size_t, count - offset, buf->tail[0].iov_len);
 447	if (seek < want) {
 448		ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek);
 449		if (ret <= 0)
 450			goto sock_err;
 451		offset += ret;
 452		if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
 453			goto out;
 454		if (ret != want)
 455			goto out;
 456	} else if (offset < seek_init)
 457		offset = seek_init;
 458	ret = -EMSGSIZE;
 459out:
 460	*read = offset - seek_init;
 461	return ret;
 462sock_err:
 463	offset += seek;
 464	goto out;
 465}
 466
 467static void
 468xs_read_header(struct sock_xprt *transport, struct xdr_buf *buf)
 469{
 470	if (!transport->recv.copied) {
 471		if (buf->head[0].iov_len >= transport->recv.offset)
 472			memcpy(buf->head[0].iov_base,
 473					&transport->recv.xid,
 474					transport->recv.offset);
 475		transport->recv.copied = transport->recv.offset;
 476	}
 477}
 478
 479static bool
 480xs_read_stream_request_done(struct sock_xprt *transport)
 481{
 482	return transport->recv.fraghdr & cpu_to_be32(RPC_LAST_STREAM_FRAGMENT);
 483}
 484
 485static void
 486xs_read_stream_check_eor(struct sock_xprt *transport,
 487		struct msghdr *msg)
 488{
 489	if (xs_read_stream_request_done(transport))
 490		msg->msg_flags |= MSG_EOR;
 491}
 492
 493static ssize_t
 494xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
 495		int flags, struct rpc_rqst *req)
 496{
 497	struct xdr_buf *buf = &req->rq_private_buf;
 498	size_t want, uninitialized_var(read);
 499	ssize_t uninitialized_var(ret);
 500
 501	xs_read_header(transport, buf);
 502
 503	want = transport->recv.len - transport->recv.offset;
 504	if (want != 0) {
 505		ret = xs_read_xdr_buf(transport->sock, msg, flags, buf,
 506				transport->recv.copied + want,
 507				transport->recv.copied,
 508				&read);
 509		transport->recv.offset += read;
 510		transport->recv.copied += read;
 511	}
 512
 513	if (transport->recv.offset == transport->recv.len)
 514		xs_read_stream_check_eor(transport, msg);
 515
 516	if (want == 0)
 517		return 0;
 518
 519	switch (ret) {
 520	default:
 521		break;
 522	case -EFAULT:
 523	case -EMSGSIZE:
 524		msg->msg_flags |= MSG_TRUNC;
 525		return read;
 526	case 0:
 527		return -ESHUTDOWN;
 528	}
 529	return ret < 0 ? ret : read;
 530}
 531
 532static size_t
 533xs_read_stream_headersize(bool isfrag)
 534{
 535	if (isfrag)
 536		return sizeof(__be32);
 537	return 3 * sizeof(__be32);
 538}
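/*
 * At the start of a fresh record the caller reads three contiguous
 * 32-bit words (the record marker, the XID and the call direction),
 * hence 3 * sizeof(__be32) above.  Continuation fragments of a record
 * already being reassembled carry only a new record marker.
 */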
 539
 540static ssize_t
 541xs_read_stream_header(struct sock_xprt *transport, struct msghdr *msg,
 542		int flags, size_t want, size_t seek)
 543{
 544	struct kvec kvec = {
 545		.iov_base = &transport->recv.fraghdr,
 546		.iov_len = want,
 547	};
 548	return xs_read_kvec(transport->sock, msg, flags, &kvec, want, seek);
 549}
 550
 551#if defined(CONFIG_SUNRPC_BACKCHANNEL)
 552static ssize_t
 553xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
 554{
 555	struct rpc_xprt *xprt = &transport->xprt;
 556	struct rpc_rqst *req;
 557	ssize_t ret;
 558
 559	/* Look up and lock the request corresponding to the given XID */
 560	req = xprt_lookup_bc_request(xprt, transport->recv.xid);
 561	if (!req) {
 562		printk(KERN_WARNING "Callback slot table overflowed\n");
 563		return -ESHUTDOWN;
 564	}
 565	if (transport->recv.copied && !req->rq_private_buf.len)
 566		return -ESHUTDOWN;
 567
 568	ret = xs_read_stream_request(transport, msg, flags, req);
 569	if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
 570		xprt_complete_bc_request(req, transport->recv.copied);
 571	else
 572		req->rq_private_buf.len = transport->recv.copied;
 573
 574	return ret;
 575}
 576#else /* CONFIG_SUNRPC_BACKCHANNEL */
 577static ssize_t
 578xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
 579{
 580	return -ESHUTDOWN;
 581}
 582#endif /* CONFIG_SUNRPC_BACKCHANNEL */
 583
 584static ssize_t
 585xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags)
 586{
 587	struct rpc_xprt *xprt = &transport->xprt;
 588	struct rpc_rqst *req;
 589	ssize_t ret = 0;
 590
 591	/* Look up and lock the request corresponding to the given XID */
 592	spin_lock(&xprt->queue_lock);
 593	req = xprt_lookup_rqst(xprt, transport->recv.xid);
 594	if (!req || (transport->recv.copied && !req->rq_private_buf.len)) {
 595		msg->msg_flags |= MSG_TRUNC;
 596		goto out;
 597	}
 598	xprt_pin_rqst(req);
 599	spin_unlock(&xprt->queue_lock);
 600
 601	ret = xs_read_stream_request(transport, msg, flags, req);
 602
 603	spin_lock(&xprt->queue_lock);
 604	if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
 605		xprt_complete_rqst(req->rq_task, transport->recv.copied);
 606	else
 607		req->rq_private_buf.len = transport->recv.copied;
 608	xprt_unpin_rqst(req);
 609out:
 610	spin_unlock(&xprt->queue_lock);
 611	return ret;
 612}
 613
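/*
 * Consume one record fragment from the stream: read the header words
 * when positioned at a fragment boundary, dispatch the payload to the
 * backchannel-call or reply path based on the direction word, and
 * discard the remainder of any truncated record so that the byte
 * stream stays in sync.
 */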
 614static ssize_t
 615xs_read_stream(struct sock_xprt *transport, int flags)
 616{
 617	struct msghdr msg = { 0 };
 618	size_t want, read = 0;
 619	ssize_t ret = 0;
 620
 621	if (transport->recv.len == 0) {
 622		want = xs_read_stream_headersize(transport->recv.copied != 0);
 623		ret = xs_read_stream_header(transport, &msg, flags, want,
 624				transport->recv.offset);
 625		if (ret <= 0)
 626			goto out_err;
 627		transport->recv.offset = ret;
 628		if (transport->recv.offset != want)
 629			return transport->recv.offset;
 630		transport->recv.len = be32_to_cpu(transport->recv.fraghdr) &
 631			RPC_FRAGMENT_SIZE_MASK;
 632		transport->recv.offset -= sizeof(transport->recv.fraghdr);
 633		read = ret;
 634	}
 635
 636	switch (be32_to_cpu(transport->recv.calldir)) {
 637	default:
 638		msg.msg_flags |= MSG_TRUNC;
 639		break;
 640	case RPC_CALL:
 641		ret = xs_read_stream_call(transport, &msg, flags);
 642		break;
 643	case RPC_REPLY:
 644		ret = xs_read_stream_reply(transport, &msg, flags);
 645	}
 646	if (msg.msg_flags & MSG_TRUNC) {
 647		transport->recv.calldir = cpu_to_be32(-1);
 648		transport->recv.copied = -1;
 649	}
 650	if (ret < 0)
 651		goto out_err;
 652	read += ret;
 653	if (transport->recv.offset < transport->recv.len) {
 654		if (!(msg.msg_flags & MSG_TRUNC))
 655			return read;
 656		msg.msg_flags = 0;
 657		ret = xs_read_discard(transport->sock, &msg, flags,
 658				transport->recv.len - transport->recv.offset);
 659		if (ret <= 0)
 660			goto out_err;
 661		transport->recv.offset += ret;
 662		read += ret;
 663		if (transport->recv.offset != transport->recv.len)
 664			return read;
 665	}
 666	if (xs_read_stream_request_done(transport)) {
 667		trace_xs_stream_read_request(transport);
 668		transport->recv.copied = 0;
 669	}
 670	transport->recv.offset = 0;
 671	transport->recv.len = 0;
 672	return read;
 673out_err:
 674	return ret != 0 ? ret : -ESHUTDOWN;
 675}
 676
 677static __poll_t xs_poll_socket(struct sock_xprt *transport)
 678{
 679	return transport->sock->ops->poll(transport->file, transport->sock,
 680			NULL);
 681}
 682
 683static bool xs_poll_socket_readable(struct sock_xprt *transport)
 684{
 685	__poll_t events = xs_poll_socket(transport);
 686
 687	return (events & (EPOLLIN | EPOLLRDNORM)) && !(events & EPOLLRDHUP);
 688}
 689
 690static void xs_poll_check_readable(struct sock_xprt *transport)
 691{
 692
 693	clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
 694	if (!xs_poll_socket_readable(transport))
 695		return;
 696	if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
 697		queue_work(xprtiod_workqueue, &transport->recv_worker);
 698}
 699
 700static void xs_stream_data_receive(struct sock_xprt *transport)
 701{
 702	size_t read = 0;
 703	ssize_t ret = 0;
 704
 705	mutex_lock(&transport->recv_mutex);
 706	if (transport->sock == NULL)
 707		goto out;
 708	for (;;) {
 709		ret = xs_read_stream(transport, MSG_DONTWAIT);
 710		if (ret < 0)
 711			break;
 712		read += ret;
 713		cond_resched();
 714	}
 715	if (ret == -ESHUTDOWN)
 716		kernel_sock_shutdown(transport->sock, SHUT_RDWR);
 717	else
 718		xs_poll_check_readable(transport);
 719out:
 720	mutex_unlock(&transport->recv_mutex);
 721	trace_xs_stream_read_data(&transport->xprt, ret, read);
 722}
 723
 724static void xs_stream_data_receive_workfn(struct work_struct *work)
 725{
 726	struct sock_xprt *transport =
 727		container_of(work, struct sock_xprt, recv_worker);
 728	unsigned int pflags = memalloc_nofs_save();
 729
 730	xs_stream_data_receive(transport);
 731	memalloc_nofs_restore(pflags);
 732}
 733
 734static void
 735xs_stream_reset_connect(struct sock_xprt *transport)
 736{
 737	transport->recv.offset = 0;
 738	transport->recv.len = 0;
 739	transport->recv.copied = 0;
 740	transport->xmit.offset = 0;
 741}
 742
 743static void
 744xs_stream_start_connect(struct sock_xprt *transport)
 745{
 746	transport->xprt.stat.connect_count++;
 747	transport->xprt.stat.connect_start = jiffies;
 748}
 749
 750#define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)
 751
 752static int xs_sendmsg(struct socket *sock, struct msghdr *msg, size_t seek)
 753{
 754	if (seek)
 755		iov_iter_advance(&msg->msg_iter, seek);
 756	return sock_sendmsg(sock, msg);
 757}
 758
 759static int xs_send_kvec(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t seek)
 760{
 761	iov_iter_kvec(&msg->msg_iter, WRITE, vec, 1, vec->iov_len);
 762	return xs_sendmsg(sock, msg, seek);
 763}
 764
 765static int xs_send_pagedata(struct socket *sock, struct msghdr *msg, struct xdr_buf *xdr, size_t base)
 766{
 767	int err;
 768
 769	err = xdr_alloc_bvec(xdr, GFP_KERNEL);
 770	if (err < 0)
 771		return err;
 772
 773	iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec,
 774			xdr_buf_pagecount(xdr),
 775			xdr->page_len + xdr->page_base);
 776	return xs_sendmsg(sock, msg, base + xdr->page_base);
 777}
 778
 779#define xs_record_marker_len() sizeof(rpc_fraghdr)
 780
 781/* Common case:
 782 *  - stream transport
 783 *  - sending from byte 0 of the message
 784 *  - the message is wholly contained in @xdr's head iovec
 785 */
 786static int xs_send_rm_and_kvec(struct socket *sock, struct msghdr *msg,
 787		rpc_fraghdr marker, struct kvec *vec, size_t base)
 788{
 789	struct kvec iov[2] = {
 790		[0] = {
 791			.iov_base	= &marker,
 792			.iov_len	= sizeof(marker)
 793		},
 794		[1] = *vec,
 795	};
 796	size_t len = iov[0].iov_len + iov[1].iov_len;
 797
 798	iov_iter_kvec(&msg->msg_iter, WRITE, iov, 2, len);
 799	return xs_sendmsg(sock, msg, base);
 800}
 801
 802/**
 803 * xs_sendpages - write pages directly to a socket
 804 * @sock: socket to send on
 805 * @addr: UDP only -- address of destination
 806 * @addrlen: UDP only -- length of destination address
 807 * @xdr: buffer containing this request
 808 * @base: starting position in the buffer
 809 * @rm: stream record marker field
 810 * @sent_p: return the total number of bytes successfully queued for sending
 811 *
 812 */
 813static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, rpc_fraghdr rm, int *sent_p)
 814{
 815	struct msghdr msg = {
 816		.msg_name = addr,
 817		.msg_namelen = addrlen,
 818		.msg_flags = XS_SENDMSG_FLAGS | MSG_MORE,
 819	};
 820	unsigned int rmsize = rm ? sizeof(rm) : 0;
 821	unsigned int remainder = rmsize + xdr->len - base;
 822	unsigned int want;
 823	int err = 0;
 824
 825	if (unlikely(!sock))
 826		return -ENOTSOCK;
 827
 828	want = xdr->head[0].iov_len + rmsize;
 829	if (base < want) {
 830		unsigned int len = want - base;
 831		remainder -= len;
 832		if (remainder == 0)
 833			msg.msg_flags &= ~MSG_MORE;
 834		if (rmsize)
 835			err = xs_send_rm_and_kvec(sock, &msg, rm,
 836					&xdr->head[0], base);
 837		else
 838			err = xs_send_kvec(sock, &msg, &xdr->head[0], base);
 839		if (remainder == 0 || err != len)
 840			goto out;
 841		*sent_p += err;
 842		base = 0;
 843	} else
 844		base -= want;
 845
 846	if (base < xdr->page_len) {
 847		unsigned int len = xdr->page_len - base;
 848		remainder -= len;
 849		if (remainder == 0)
 850			msg.msg_flags &= ~MSG_MORE;
 851		err = xs_send_pagedata(sock, &msg, xdr, base);
 852		if (remainder == 0 || err != len)
 853			goto out;
 854		*sent_p += err;
 855		base = 0;
 856	} else
 857		base -= xdr->page_len;
 858
 859	if (base >= xdr->tail[0].iov_len)
 860		return 0;
 861	msg.msg_flags &= ~MSG_MORE;
 862	err = xs_send_kvec(sock, &msg, &xdr->tail[0], base);
 863out:
 864	if (err > 0) {
 865		*sent_p += err;
 866		err = 0;
 867	}
 868	return err;
 869}
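/*
 * The xdr_buf walked above has three parts (head kvec, page array,
 * tail kvec), and @base is consumed against each part in turn, so a
 * resumed partial send skips straight to the first unsent byte.
 */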
 870
 871/**
  872 * xs_nospace - handle an incomplete transmit
  873 * @req: pointer to RPC request
  874 *
  875 */
 876static int xs_nospace(struct rpc_rqst *req)
 877{
 878	struct rpc_xprt *xprt = req->rq_xprt;
 879	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 880	struct sock *sk = transport->inet;
 881	int ret = -EAGAIN;
 882
 883	dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
 884			req->rq_task->tk_pid,
 885			req->rq_slen - transport->xmit.offset,
 886			req->rq_slen);
 887
 888	/* Protect against races with write_space */
 889	spin_lock(&xprt->transport_lock);
 890
 891	/* Don't race with disconnect */
 892	if (xprt_connected(xprt)) {
 893		/* wait for more buffer space */
 894		sk->sk_write_pending++;
 895		xprt_wait_for_buffer_space(xprt);
 896	} else
 897		ret = -ENOTCONN;
 898
 899	spin_unlock(&xprt->transport_lock);
 900
 901	/* Race breaker in case memory is freed before above code is called */
 902	if (ret == -EAGAIN) {
 903		struct socket_wq *wq;
 904
 905		rcu_read_lock();
 906		wq = rcu_dereference(sk->sk_wq);
 907		set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags);
 908		rcu_read_unlock();
 909
 910		sk->sk_write_space(sk);
 911	}
 912	return ret;
 913}
 914
 915static void
 916xs_stream_prepare_request(struct rpc_rqst *req)
 917{
 918	xdr_free_bvec(&req->rq_rcv_buf);
 919	req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_KERNEL);
 920}
 921
 922/*
 923 * Determine if the previous message in the stream was aborted before it
 924 * could complete transmission.
 925 */
 926static bool
 927xs_send_request_was_aborted(struct sock_xprt *transport, struct rpc_rqst *req)
 928{
 929	return transport->xmit.offset != 0 && req->rq_bytes_sent == 0;
 930}
 931
 932/*
 933 * Return the stream record marker field for a record of length < 2^31-1
 934 */
 935static rpc_fraghdr
 936xs_stream_record_marker(struct xdr_buf *xdr)
 937{
 938	if (!xdr->len)
 939		return 0;
 940	return cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | (u32)xdr->len);
 941}
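/*
 * Example: a 100-byte message yields the marker 0x80000064; the top
 * bit flags the last (here, the only) fragment and the low 31 bits
 * carry the fragment length, which does not include the marker itself.
 */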
 942
 943/**
 944 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
 945 * @req: pointer to RPC request
 946 *
 947 * Return values:
 948 *        0:	The request has been sent
 949 *   EAGAIN:	The socket was blocked, please call again later to
 950 *		complete the request
 951 * ENOTCONN:	Caller needs to invoke connect logic then call again
  952 *    other:	Some other error occurred, the request was not sent
 953 */
 954static int xs_local_send_request(struct rpc_rqst *req)
 955{
 956	struct rpc_xprt *xprt = req->rq_xprt;
 957	struct sock_xprt *transport =
 958				container_of(xprt, struct sock_xprt, xprt);
 959	struct xdr_buf *xdr = &req->rq_snd_buf;
 960	rpc_fraghdr rm = xs_stream_record_marker(xdr);
 961	unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
 962	int status;
 963	int sent = 0;
 964
 965	/* Close the stream if the previous transmission was incomplete */
 966	if (xs_send_request_was_aborted(transport, req)) {
 967		xs_close(xprt);
 968		return -ENOTCONN;
 969	}
 970
 971	xs_pktdump("packet data:",
 972			req->rq_svec->iov_base, req->rq_svec->iov_len);
 973
 974	req->rq_xtime = ktime_get();
 975	status = xs_sendpages(transport->sock, NULL, 0, xdr,
 976			      transport->xmit.offset, rm, &sent);
 977	dprintk("RPC:       %s(%u) = %d\n",
 978			__func__, xdr->len - transport->xmit.offset, status);
 979
 980	if (status == -EAGAIN && sock_writeable(transport->inet))
 981		status = -ENOBUFS;
 982
 983	if (likely(sent > 0) || status == 0) {
 984		transport->xmit.offset += sent;
 985		req->rq_bytes_sent = transport->xmit.offset;
 986		if (likely(req->rq_bytes_sent >= msglen)) {
 987			req->rq_xmit_bytes_sent += transport->xmit.offset;
 988			transport->xmit.offset = 0;
 989			return 0;
 990		}
 991		status = -EAGAIN;
 992	}
 993
 994	switch (status) {
 995	case -ENOBUFS:
 996		break;
 997	case -EAGAIN:
 998		status = xs_nospace(req);
 999		break;
1000	default:
1001		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
1002			-status);
1003		/* fall through */
1004	case -EPIPE:
1005		xs_close(xprt);
1006		status = -ENOTCONN;
1007	}
1008
1009	return status;
1010}
1011
1012/**
1013 * xs_udp_send_request - write an RPC request to a UDP socket
1014 * @req: pointer to RPC request
1015 *
1016 * Return values:
1017 *        0:	The request has been sent
1018 *   EAGAIN:	The socket was blocked, please call again later to
1019 *		complete the request
1020 * ENOTCONN:	Caller needs to invoke connect logic then call again
1021 *    other:	Some other error occurred, the request was not sent
1022 */
1023static int xs_udp_send_request(struct rpc_rqst *req)
1024{
1025	struct rpc_xprt *xprt = req->rq_xprt;
1026	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1027	struct xdr_buf *xdr = &req->rq_snd_buf;
1028	int sent = 0;
1029	int status;
1030
1031	xs_pktdump("packet data:",
1032				req->rq_svec->iov_base,
1033				req->rq_svec->iov_len);
1034
1035	if (!xprt_bound(xprt))
1036		return -ENOTCONN;
1037
1038	if (!xprt_request_get_cong(xprt, req))
1039		return -EBADSLT;
1040
1041	req->rq_xtime = ktime_get();
1042	status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen,
1043			      xdr, 0, 0, &sent);
1044
1045	dprintk("RPC:       xs_udp_send_request(%u) = %d\n",
1046			xdr->len, status);
1047
1048	/* firewall is blocking us, don't return -EAGAIN or we end up looping */
1049	if (status == -EPERM)
1050		goto process_status;
1051
1052	if (status == -EAGAIN && sock_writeable(transport->inet))
1053		status = -ENOBUFS;
1054
1055	if (sent > 0 || status == 0) {
1056		req->rq_xmit_bytes_sent += sent;
1057		if (sent >= req->rq_slen)
1058			return 0;
1059		/* Still some bytes left; set up for a retry later. */
1060		status = -EAGAIN;
1061	}
1062
1063process_status:
1064	switch (status) {
1065	case -ENOTSOCK:
1066		status = -ENOTCONN;
1067		/* Should we call xs_close() here? */
1068		break;
1069	case -EAGAIN:
1070		status = xs_nospace(req);
1071		break;
1072	case -ENETUNREACH:
1073	case -ENOBUFS:
1074	case -EPIPE:
1075	case -ECONNREFUSED:
1076	case -EPERM:
1077		/* When the server has died, an ICMP port unreachable message
1078		 * prompts ECONNREFUSED. */
1079		break;
1080	default:
1081		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
1082			-status);
1083	}
1084
1085	return status;
1086}
1087
1088/**
1089 * xs_tcp_send_request - write an RPC request to a TCP socket
1090 * @req: pointer to RPC request
1091 *
1092 * Return values:
1093 *        0:	The request has been sent
1094 *   EAGAIN:	The socket was blocked, please call again later to
1095 *		complete the request
1096 * ENOTCONN:	Caller needs to invoke connect logic then call again
1097 *    other:	Some other error occurred, the request was not sent
1098 *
1099 * XXX: In the case of soft timeouts, should we eventually give up
1100 *	if sendmsg is not able to make progress?
1101 */
1102static int xs_tcp_send_request(struct rpc_rqst *req)
1103{
1104	struct rpc_xprt *xprt = req->rq_xprt;
1105	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1106	struct xdr_buf *xdr = &req->rq_snd_buf;
1107	rpc_fraghdr rm = xs_stream_record_marker(xdr);
1108	unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
1109	bool vm_wait = false;
1110	int status;
1111	int sent;
1112
1113	/* Close the stream if the previous transmission was incomplete */
1114	if (xs_send_request_was_aborted(transport, req)) {
1115		if (transport->sock != NULL)
1116			kernel_sock_shutdown(transport->sock, SHUT_RDWR);
1117		return -ENOTCONN;
1118	}
1119
1120	xs_pktdump("packet data:",
1121				req->rq_svec->iov_base,
1122				req->rq_svec->iov_len);
1123
1124	if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state))
1125		xs_tcp_set_socket_timeouts(xprt, transport->sock);
1126
1127	/* Continue transmitting the packet/record. We must be careful
1128	 * to cope with writespace callbacks arriving _after_ we have
1129	 * called sendmsg(). */
1130	req->rq_xtime = ktime_get();
1131	while (1) {
1132		sent = 0;
1133		status = xs_sendpages(transport->sock, NULL, 0, xdr,
1134				      transport->xmit.offset, rm, &sent);
1135
1136		dprintk("RPC:       xs_tcp_send_request(%u) = %d\n",
1137				xdr->len - transport->xmit.offset, status);
1138
1139		/* If we've sent the entire packet, immediately
1140		 * reset the count of bytes sent. */
1141		transport->xmit.offset += sent;
1142		req->rq_bytes_sent = transport->xmit.offset;
1143		if (likely(req->rq_bytes_sent >= msglen)) {
1144			req->rq_xmit_bytes_sent += transport->xmit.offset;
1145			transport->xmit.offset = 0;
1146			return 0;
1147		}
1148
1149		WARN_ON_ONCE(sent == 0 && status == 0);
1150
 1151		if (status == -EAGAIN) {
1152			/*
1153			 * Return EAGAIN if we're sure we're hitting the
1154			 * socket send buffer limits.
1155			 */
1156			if (test_bit(SOCK_NOSPACE, &transport->sock->flags))
1157				break;
1158			/*
1159			 * Did we hit a memory allocation failure?
1160			 */
1161			if (sent == 0) {
1162				status = -ENOBUFS;
1163				if (vm_wait)
1164					break;
1165				/* Retry, knowing now that we're below the
1166				 * socket send buffer limit
1167				 */
1168				vm_wait = true;
1169			}
1170			continue;
1171		}
1172		if (status < 0)
1173			break;
1174		vm_wait = false;
1175	}
1176
1177	switch (status) {
1178	case -ENOTSOCK:
1179		status = -ENOTCONN;
1180		/* Should we call xs_close() here? */
1181		break;
1182	case -EAGAIN:
1183		status = xs_nospace(req);
1184		break;
1185	case -ECONNRESET:
1186	case -ECONNREFUSED:
1187	case -ENOTCONN:
1188	case -EADDRINUSE:
1189	case -ENOBUFS:
1190	case -EPIPE:
1191		break;
1192	default:
1193		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
1194			-status);
1195	}
1196
1197	return status;
1198}
1199
1200static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
1201{
1202	transport->old_data_ready = sk->sk_data_ready;
1203	transport->old_state_change = sk->sk_state_change;
1204	transport->old_write_space = sk->sk_write_space;
1205	transport->old_error_report = sk->sk_error_report;
1206}
1207
1208static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
1209{
1210	sk->sk_data_ready = transport->old_data_ready;
1211	sk->sk_state_change = transport->old_state_change;
1212	sk->sk_write_space = transport->old_write_space;
1213	sk->sk_error_report = transport->old_error_report;
1214}
1215
1216static void xs_sock_reset_state_flags(struct rpc_xprt *xprt)
1217{
1218	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1219
1220	clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
1221	clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state);
1222	clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state);
1223	clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state);
1224}
1225
1226static void xs_run_error_worker(struct sock_xprt *transport, unsigned int nr)
1227{
1228	set_bit(nr, &transport->sock_state);
1229	queue_work(xprtiod_workqueue, &transport->error_worker);
1230}
1231
1232static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
1233{
1234	smp_mb__before_atomic();
1235	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1236	clear_bit(XPRT_CLOSING, &xprt->state);
1237	xs_sock_reset_state_flags(xprt);
1238	smp_mb__after_atomic();
1239}
1240
1241/**
1242 * xs_error_report - callback to handle TCP socket state errors
1243 * @sk: socket
1244 *
 1245 * Note: we don't call sock_error() since there may be an rpc_task
1246 * using the socket, and so we don't want to clear sk->sk_err.
1247 */
1248static void xs_error_report(struct sock *sk)
1249{
1250	struct sock_xprt *transport;
1251	struct rpc_xprt *xprt;
1252
1253	read_lock_bh(&sk->sk_callback_lock);
1254	if (!(xprt = xprt_from_sock(sk)))
1255		goto out;
1256
1257	transport = container_of(xprt, struct sock_xprt, xprt);
1258	transport->xprt_err = -sk->sk_err;
1259	if (transport->xprt_err == 0)
1260		goto out;
1261	dprintk("RPC:       xs_error_report client %p, error=%d...\n",
1262			xprt, -transport->xprt_err);
1263	trace_rpc_socket_error(xprt, sk->sk_socket, transport->xprt_err);
1264
1265	/* barrier ensures xprt_err is set before XPRT_SOCK_WAKE_ERROR */
1266	smp_mb__before_atomic();
1267	xs_run_error_worker(transport, XPRT_SOCK_WAKE_ERROR);
1268 out:
1269	read_unlock_bh(&sk->sk_callback_lock);
1270}
1271
1272static void xs_reset_transport(struct sock_xprt *transport)
1273{
1274	struct socket *sock = transport->sock;
1275	struct sock *sk = transport->inet;
1276	struct rpc_xprt *xprt = &transport->xprt;
1277	struct file *filp = transport->file;
1278
1279	if (sk == NULL)
1280		return;
1281
1282	if (atomic_read(&transport->xprt.swapper))
1283		sk_clear_memalloc(sk);
1284
1285	kernel_sock_shutdown(sock, SHUT_RDWR);
1286
1287	mutex_lock(&transport->recv_mutex);
1288	write_lock_bh(&sk->sk_callback_lock);
1289	transport->inet = NULL;
1290	transport->sock = NULL;
1291	transport->file = NULL;
1292
1293	sk->sk_user_data = NULL;
1294
1295	xs_restore_old_callbacks(transport, sk);
1296	xprt_clear_connected(xprt);
1297	write_unlock_bh(&sk->sk_callback_lock);
1298	xs_sock_reset_connection_flags(xprt);
1299	/* Reset stream record info */
1300	xs_stream_reset_connect(transport);
1301	mutex_unlock(&transport->recv_mutex);
1302
1303	trace_rpc_socket_close(xprt, sock);
1304	fput(filp);
1305
1306	xprt_disconnect_done(xprt);
1307}
1308
1309/**
1310 * xs_close - close a socket
1311 * @xprt: transport
1312 *
 1313 * This is used when all requests are complete; i.e., no DRC state remains
1314 * on the server we want to save.
1315 *
1316 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
1317 * xs_reset_transport() zeroing the socket from underneath a writer.
1318 */
1319static void xs_close(struct rpc_xprt *xprt)
1320{
1321	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1322
1323	dprintk("RPC:       xs_close xprt %p\n", xprt);
1324
1325	xs_reset_transport(transport);
1326	xprt->reestablish_timeout = 0;
1327}
1328
1329static void xs_inject_disconnect(struct rpc_xprt *xprt)
1330{
1331	dprintk("RPC:       injecting transport disconnect on xprt=%p\n",
1332		xprt);
1333	xprt_disconnect_done(xprt);
1334}
1335
1336static void xs_xprt_free(struct rpc_xprt *xprt)
1337{
1338	xs_free_peer_addresses(xprt);
1339	xprt_free(xprt);
1340}
1341
1342/**
1343 * xs_destroy - prepare to shutdown a transport
1344 * @xprt: doomed transport
1345 *
1346 */
1347static void xs_destroy(struct rpc_xprt *xprt)
1348{
1349	struct sock_xprt *transport = container_of(xprt,
1350			struct sock_xprt, xprt);
1351	dprintk("RPC:       xs_destroy xprt %p\n", xprt);
1352
1353	cancel_delayed_work_sync(&transport->connect_worker);
1354	xs_close(xprt);
1355	cancel_work_sync(&transport->recv_worker);
1356	cancel_work_sync(&transport->error_worker);
1357	xs_xprt_free(xprt);
1358	module_put(THIS_MODULE);
1359}
1360
1361/**
1362 * xs_udp_data_read_skb - receive callback for UDP sockets
1363 * @xprt: transport
1364 * @sk: socket
1365 * @skb: skbuff
1366 *
1367 */
1368static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
1369		struct sock *sk,
1370		struct sk_buff *skb)
1371{
1372	struct rpc_task *task;
1373	struct rpc_rqst *rovr;
1374	int repsize, copied;
1375	u32 _xid;
1376	__be32 *xp;
1377
1378	repsize = skb->len;
1379	if (repsize < 4) {
1380		dprintk("RPC:       impossible RPC reply size %d!\n", repsize);
1381		return;
1382	}
1383
1384	/* Copy the XID from the skb... */
1385	xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid);
1386	if (xp == NULL)
1387		return;
1388
1389	/* Look up and lock the request corresponding to the given XID */
1390	spin_lock(&xprt->queue_lock);
1391	rovr = xprt_lookup_rqst(xprt, *xp);
1392	if (!rovr)
1393		goto out_unlock;
1394	xprt_pin_rqst(rovr);
1395	xprt_update_rtt(rovr->rq_task);
1396	spin_unlock(&xprt->queue_lock);
1397	task = rovr->rq_task;
1398
1399	if ((copied = rovr->rq_private_buf.buflen) > repsize)
1400		copied = repsize;
1401
1402	/* Suck it into the iovec, verify checksum if not done by hw. */
1403	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
1404		spin_lock(&xprt->queue_lock);
1405		__UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
1406		goto out_unpin;
1407	}
1408
1409
1410	spin_lock(&xprt->transport_lock);
1411	xprt_adjust_cwnd(xprt, task, copied);
1412	spin_unlock(&xprt->transport_lock);
1413	spin_lock(&xprt->queue_lock);
1414	xprt_complete_rqst(task, copied);
1415	__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
1416out_unpin:
1417	xprt_unpin_rqst(rovr);
1418 out_unlock:
1419	spin_unlock(&xprt->queue_lock);
1420}
1421
1422static void xs_udp_data_receive(struct sock_xprt *transport)
1423{
1424	struct sk_buff *skb;
1425	struct sock *sk;
1426	int err;
1427
1428	mutex_lock(&transport->recv_mutex);
1429	sk = transport->inet;
1430	if (sk == NULL)
1431		goto out;
1432	for (;;) {
1433		skb = skb_recv_udp(sk, 0, 1, &err);
1434		if (skb == NULL)
1435			break;
1436		xs_udp_data_read_skb(&transport->xprt, sk, skb);
1437		consume_skb(skb);
1438		cond_resched();
1439	}
1440	xs_poll_check_readable(transport);
1441out:
1442	mutex_unlock(&transport->recv_mutex);
1443}
1444
1445static void xs_udp_data_receive_workfn(struct work_struct *work)
1446{
1447	struct sock_xprt *transport =
1448		container_of(work, struct sock_xprt, recv_worker);
1449	unsigned int pflags = memalloc_nofs_save();
1450
1451	xs_udp_data_receive(transport);
1452	memalloc_nofs_restore(pflags);
1453}
1454
1455/**
 1456 * xs_data_ready - "data ready" callback for RPC transport sockets
1457 * @sk: socket with data to read
1458 *
1459 */
1460static void xs_data_ready(struct sock *sk)
1461{
1462	struct rpc_xprt *xprt;
1463
1464	read_lock_bh(&sk->sk_callback_lock);
1465	dprintk("RPC:       xs_data_ready...\n");
1466	xprt = xprt_from_sock(sk);
1467	if (xprt != NULL) {
1468		struct sock_xprt *transport = container_of(xprt,
1469				struct sock_xprt, xprt);
1470		transport->old_data_ready(sk);
 1471		/* Any data means we had a useful conversation, so
 1472		 * we don't need to delay the next reconnect.
1473		 */
1474		if (xprt->reestablish_timeout)
1475			xprt->reestablish_timeout = 0;
1476		if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
1477			queue_work(xprtiod_workqueue, &transport->recv_worker);
1478	}
1479	read_unlock_bh(&sk->sk_callback_lock);
1480}
1481
1482/*
1483 * Helper function to force a TCP close if the server is sending
1484 * junk and/or it has put us in CLOSE_WAIT
1485 */
1486static void xs_tcp_force_close(struct rpc_xprt *xprt)
1487{
1488	xprt_force_disconnect(xprt);
1489}
1490
1491#if defined(CONFIG_SUNRPC_BACKCHANNEL)
1492static size_t xs_tcp_bc_maxpayload(struct rpc_xprt *xprt)
1493{
1494	return PAGE_SIZE;
1495}
1496#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1497
1498/**
1499 * xs_tcp_state_change - callback to handle TCP socket state changes
1500 * @sk: socket whose state has changed
1501 *
1502 */
1503static void xs_tcp_state_change(struct sock *sk)
1504{
1505	struct rpc_xprt *xprt;
1506	struct sock_xprt *transport;
1507
1508	read_lock_bh(&sk->sk_callback_lock);
1509	if (!(xprt = xprt_from_sock(sk)))
1510		goto out;
1511	dprintk("RPC:       xs_tcp_state_change client %p...\n", xprt);
1512	dprintk("RPC:       state %x conn %d dead %d zapped %d sk_shutdown %d\n",
1513			sk->sk_state, xprt_connected(xprt),
1514			sock_flag(sk, SOCK_DEAD),
1515			sock_flag(sk, SOCK_ZAPPED),
1516			sk->sk_shutdown);
1517
1518	transport = container_of(xprt, struct sock_xprt, xprt);
1519	trace_rpc_socket_state_change(xprt, sk->sk_socket);
1520	switch (sk->sk_state) {
1521	case TCP_ESTABLISHED:
1522		if (!xprt_test_and_set_connected(xprt)) {
1523			xprt->connect_cookie++;
1524			clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
1525			xprt_clear_connecting(xprt);
1526
1527			xprt->stat.connect_count++;
1528			xprt->stat.connect_time += (long)jiffies -
1529						   xprt->stat.connect_start;
1530			xs_run_error_worker(transport, XPRT_SOCK_WAKE_PENDING);
1531		}
1532		break;
1533	case TCP_FIN_WAIT1:
1534		/* The client initiated a shutdown of the socket */
1535		xprt->connect_cookie++;
1536		xprt->reestablish_timeout = 0;
1537		set_bit(XPRT_CLOSING, &xprt->state);
1538		smp_mb__before_atomic();
1539		clear_bit(XPRT_CONNECTED, &xprt->state);
1540		clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1541		smp_mb__after_atomic();
1542		break;
1543	case TCP_CLOSE_WAIT:
1544		/* The server initiated a shutdown of the socket */
1545		xprt->connect_cookie++;
1546		clear_bit(XPRT_CONNECTED, &xprt->state);
1547		xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
1548		/* fall through */
1549	case TCP_CLOSING:
1550		/*
1551		 * If the server closed down the connection, make sure that
1552		 * we back off before reconnecting
1553		 */
1554		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
1555			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
1556		break;
1557	case TCP_LAST_ACK:
1558		set_bit(XPRT_CLOSING, &xprt->state);
1559		smp_mb__before_atomic();
1560		clear_bit(XPRT_CONNECTED, &xprt->state);
1561		smp_mb__after_atomic();
1562		break;
1563	case TCP_CLOSE:
1564		if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
1565					&transport->sock_state))
1566			xprt_clear_connecting(xprt);
1567		clear_bit(XPRT_CLOSING, &xprt->state);
1568		/* Trigger the socket release */
1569		xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
1570	}
1571 out:
1572	read_unlock_bh(&sk->sk_callback_lock);
1573}
1574
1575static void xs_write_space(struct sock *sk)
1576{
1577	struct socket_wq *wq;
1578	struct sock_xprt *transport;
1579	struct rpc_xprt *xprt;
1580
1581	if (!sk->sk_socket)
1582		return;
1583	clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1584
1585	if (unlikely(!(xprt = xprt_from_sock(sk))))
1586		return;
1587	transport = container_of(xprt, struct sock_xprt, xprt);
1588	rcu_read_lock();
1589	wq = rcu_dereference(sk->sk_wq);
1590	if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0)
1591		goto out;
1592
1593	xs_run_error_worker(transport, XPRT_SOCK_WAKE_WRITE);
1594	sk->sk_write_pending--;
1595out:
1596	rcu_read_unlock();
1597}
1598
1599/**
1600 * xs_udp_write_space - callback invoked when socket buffer space
1601 *                             becomes available
1602 * @sk: socket whose state has changed
1603 *
1604 * Called when more output buffer space is available for this socket.
1605 * We try not to wake our writers until they can make "significant"
1606 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1607 * with a bunch of small requests.
1608 */
1609static void xs_udp_write_space(struct sock *sk)
1610{
1611	read_lock_bh(&sk->sk_callback_lock);
1612
1613	/* from net/core/sock.c:sock_def_write_space */
1614	if (sock_writeable(sk))
1615		xs_write_space(sk);
1616
1617	read_unlock_bh(&sk->sk_callback_lock);
1618}
1619
1620/**
1621 * xs_tcp_write_space - callback invoked when socket buffer space
1622 *                             becomes available
1623 * @sk: socket whose state has changed
1624 *
1625 * Called when more output buffer space is available for this socket.
1626 * We try not to wake our writers until they can make "significant"
1627 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1628 * with a bunch of small requests.
1629 */
1630static void xs_tcp_write_space(struct sock *sk)
1631{
1632	read_lock_bh(&sk->sk_callback_lock);
1633
1634	/* from net/core/stream.c:sk_stream_write_space */
1635	if (sk_stream_is_writeable(sk))
1636		xs_write_space(sk);
1637
1638	read_unlock_bh(&sk->sk_callback_lock);
1639}
1640
1641static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
1642{
1643	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1644	struct sock *sk = transport->inet;
1645
1646	if (transport->rcvsize) {
1647		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
1648		sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
1649	}
1650	if (transport->sndsize) {
1651		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1652		sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
1653		sk->sk_write_space(sk);
1654	}
1655}
1656
1657/**
1658 * xs_udp_set_buffer_size - set send and receive limits
1659 * @xprt: generic transport
1660 * @sndsize: requested size of send buffer, in bytes
1661 * @rcvsize: requested size of receive buffer, in bytes
1662 *
1663 * Set socket send and receive buffer size limits.
1664 */
1665static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
1666{
1667	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1668
1669	transport->sndsize = 0;
1670	if (sndsize)
1671		transport->sndsize = sndsize + 1024;
1672	transport->rcvsize = 0;
1673	if (rcvsize)
1674		transport->rcvsize = rcvsize + 1024;
1675
1676	xs_udp_do_set_buffer_size(xprt);
1677}
1678
1679/**
1680 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
1681 * @xprt: controlling transport
1682 * @task: task that timed out
1683 *
1684 * Adjust the congestion window after a retransmit timeout has occurred.
1685 */
1686static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
1687{
1688	spin_lock(&xprt->transport_lock);
1689	xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
1690	spin_unlock(&xprt->transport_lock);
1691}
1692
1693static int xs_get_random_port(void)
1694{
1695	unsigned short min = xprt_min_resvport, max = xprt_max_resvport;
1696	unsigned short range;
1697	unsigned short rand;
1698
1699	if (max < min)
1700		return -EADDRINUSE;
1701	range = max - min + 1;
1702	rand = (unsigned short) prandom_u32() % range;
1703	return rand + min;
1704}
1705
1706/**
 1707 * xs_sock_set_reuseport - set the socket's port and address reuse options
1708 * @sock: socket
1709 *
1710 * Note that this function has to be called on all sockets that share the
1711 * same port, and it must be called before binding.
1712 */
1713static void xs_sock_set_reuseport(struct socket *sock)
1714{
1715	int opt = 1;
1716
1717	kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEPORT,
1718			(char *)&opt, sizeof(opt));
1719}
1720
1721static unsigned short xs_sock_getport(struct socket *sock)
1722{
1723	struct sockaddr_storage buf;
1724	unsigned short port = 0;
1725
1726	if (kernel_getsockname(sock, (struct sockaddr *)&buf) < 0)
1727		goto out;
1728	switch (buf.ss_family) {
1729	case AF_INET6:
1730		port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port);
1731		break;
1732	case AF_INET:
1733		port = ntohs(((struct sockaddr_in *)&buf)->sin_port);
1734	}
1735out:
1736	return port;
1737}
1738
1739/**
1740 * xs_set_port - reset the port number in the remote endpoint address
1741 * @xprt: generic transport
1742 * @port: new port number
1743 *
1744 */
1745static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
1746{
1747	dprintk("RPC:       setting port for xprt %p to %u\n", xprt, port);
1748
1749	rpc_set_port(xs_addr(xprt), port);
1750	xs_update_peer_port(xprt);
1751}
1752
1753static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
1754{
1755	if (transport->srcport == 0)
1756		transport->srcport = xs_sock_getport(sock);
1757}
1758
1759static int xs_get_srcport(struct sock_xprt *transport)
1760{
1761	int port = transport->srcport;
1762
1763	if (port == 0 && transport->xprt.resvport)
1764		port = xs_get_random_port();
1765	return port;
1766}
1767
1768static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
1769{
1770	if (transport->srcport != 0)
1771		transport->srcport = 0;
1772	if (!transport->xprt.resvport)
1773		return 0;
1774	if (port <= xprt_min_resvport || port > xprt_max_resvport)
1775		return xprt_max_resvport;
1776	return --port;
1777}
1778static int xs_bind(struct sock_xprt *transport, struct socket *sock)
1779{
1780	struct sockaddr_storage myaddr;
1781	int err, nloop = 0;
1782	int port = xs_get_srcport(transport);
1783	unsigned short last;
1784
1785	/*
1786	 * If we are asking for any ephemeral port (i.e. port == 0 &&
1787	 * transport->xprt.resvport == 0), don't bind.  Let the local
1788	 * port selection happen implicitly when the socket is used
1789	 * (for example at connect time).
1790	 *
1791	 * This ensures that we can continue to establish TCP
1792	 * connections even when all local ephemeral ports are already
1793	 * a part of some TCP connection.  This makes no difference
 1794	 * for UDP sockets, but also doesn't harm them.
1795	 *
1796	 * If we're asking for any reserved port (i.e. port == 0 &&
1797	 * transport->xprt.resvport == 1) xs_get_srcport above will
1798	 * ensure that port is non-zero and we will bind as needed.
1799	 */
1800	if (port <= 0)
1801		return port;
1802
1803	memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
1804	do {
1805		rpc_set_port((struct sockaddr *)&myaddr, port);
1806		err = kernel_bind(sock, (struct sockaddr *)&myaddr,
1807				transport->xprt.addrlen);
1808		if (err == 0) {
1809			transport->srcport = port;
1810			break;
1811		}
1812		last = port;
1813		port = xs_next_srcport(transport, port);
1814		if (port > last)
1815			nloop++;
1816	} while (err == -EADDRINUSE && nloop != 2);
1817
1818	if (myaddr.ss_family == AF_INET)
1819		dprintk("RPC:       %s %pI4:%u: %s (%d)\n", __func__,
1820				&((struct sockaddr_in *)&myaddr)->sin_addr,
1821				port, err ? "failed" : "ok", err);
1822	else
1823		dprintk("RPC:       %s %pI6:%u: %s (%d)\n", __func__,
1824				&((struct sockaddr_in6 *)&myaddr)->sin6_addr,
1825				port, err ? "failed" : "ok", err);
1826	return err;
1827}
1828
1829/*
1830 * We don't support autobind on AF_LOCAL sockets
1831 */
1832static void xs_local_rpcbind(struct rpc_task *task)
1833{
1834	xprt_set_bound(task->tk_xprt);
1835}
1836
1837static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
1838{
1839}
1840
1841#ifdef CONFIG_DEBUG_LOCK_ALLOC
1842static struct lock_class_key xs_key[2];
1843static struct lock_class_key xs_slock_key[2];
1844
1845static inline void xs_reclassify_socketu(struct socket *sock)
1846{
1847	struct sock *sk = sock->sk;
1848
1849	sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
1850		&xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
1851}
1852
1853static inline void xs_reclassify_socket4(struct socket *sock)
1854{
1855	struct sock *sk = sock->sk;
1856
1857	sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
1858		&xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
1859}
1860
1861static inline void xs_reclassify_socket6(struct socket *sock)
1862{
1863	struct sock *sk = sock->sk;
1864
1865	sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
1866		&xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
1867}
1868
1869static inline void xs_reclassify_socket(int family, struct socket *sock)
1870{
1871	if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk)))
1872		return;
1873
1874	switch (family) {
1875	case AF_LOCAL:
1876		xs_reclassify_socketu(sock);
1877		break;
1878	case AF_INET:
1879		xs_reclassify_socket4(sock);
1880		break;
1881	case AF_INET6:
1882		xs_reclassify_socket6(sock);
1883		break;
1884	}
1885}
1886#else
1887static inline void xs_reclassify_socket(int family, struct socket *sock)
1888{
1889}
1890#endif
1891
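/*
 * AF_LOCAL transports connect synchronously (see xs_local_connect), so
 * their connect_worker points at this no-op.
 */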
1892static void xs_dummy_setup_socket(struct work_struct *work)
1893{
1894}
1895
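/*
 * Create a socket in the transport's network namespace, reclassify its
 * lockdep keys, optionally enable SO_REUSEPORT, bind it to the source
 * address, and pin it with a struct file.
 */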
1896static struct socket *xs_create_sock(struct rpc_xprt *xprt,
1897		struct sock_xprt *transport, int family, int type,
1898		int protocol, bool reuseport)
1899{
1900	struct file *filp;
1901	struct socket *sock;
1902	int err;
1903
1904	err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
1905	if (err < 0) {
1906		dprintk("RPC:       can't create %d transport socket (%d).\n",
1907				protocol, -err);
1908		goto out;
1909	}
1910	xs_reclassify_socket(family, sock);
1911
1912	if (reuseport)
1913		xs_sock_set_reuseport(sock);
1914
1915	err = xs_bind(transport, sock);
1916	if (err) {
1917		sock_release(sock);
1918		goto out;
1919	}
1920
1921	filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
1922	if (IS_ERR(filp))
1923		return ERR_CAST(filp);
1924	transport->file = filp;
1925
1926	return sock;
1927out:
1928	return ERR_PTR(err);
1929}
1930
1931static int xs_local_finish_connecting(struct rpc_xprt *xprt,
1932				      struct socket *sock)
1933{
1934	struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1935									xprt);
1936
1937	if (!transport->inet) {
1938		struct sock *sk = sock->sk;
1939
1940		write_lock_bh(&sk->sk_callback_lock);
1941
1942		xs_save_old_callbacks(transport, sk);
1943
1944		sk->sk_user_data = xprt;
1945		sk->sk_data_ready = xs_data_ready;
1946		sk->sk_write_space = xs_udp_write_space;
1947		sock_set_flag(sk, SOCK_FASYNC);
1948		sk->sk_error_report = xs_error_report;
1949
1950		xprt_clear_connected(xprt);
1951
1952		/* Reset to new socket */
1953		transport->sock = sock;
1954		transport->inet = sk;
1955
1956		write_unlock_bh(&sk->sk_callback_lock);
1957	}
1958
1959	xs_stream_start_connect(transport);
1960
1961	return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
1962}
1963
1964/**
1965 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
1966 * @transport: socket transport to connect
1967 */
1968static int xs_local_setup_socket(struct sock_xprt *transport)
1969{
1970	struct rpc_xprt *xprt = &transport->xprt;
1971	struct file *filp;
1972	struct socket *sock;
1973	int status = -EIO;
1974
1975	status = __sock_create(xprt->xprt_net, AF_LOCAL,
1976					SOCK_STREAM, 0, &sock, 1);
1977	if (status < 0) {
1978		dprintk("RPC:       can't create AF_LOCAL "
1979			"transport socket (%d).\n", -status);
1980		goto out;
1981	}
1982	xs_reclassify_socket(AF_LOCAL, sock);
1983
1984	filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
1985	if (IS_ERR(filp)) {
1986		status = PTR_ERR(filp);
1987		goto out;
1988	}
1989	transport->file = filp;
1990
1991	dprintk("RPC:       worker connecting xprt %p via AF_LOCAL to %s\n",
1992			xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1993
1994	status = xs_local_finish_connecting(xprt, sock);
1995	trace_rpc_socket_connect(xprt, sock, status);
1996	switch (status) {
1997	case 0:
1998		dprintk("RPC:       xprt %p connected to %s\n",
1999				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
2000		xprt->stat.connect_count++;
2001		xprt->stat.connect_time += (long)jiffies -
2002					   xprt->stat.connect_start;
2003		xprt_set_connected(xprt);
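		/* fall through */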
2004	case -ENOBUFS:
2005		break;
2006	case -ENOENT:
2007		dprintk("RPC:       xprt %p: socket %s does not exist\n",
2008				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
2009		break;
2010	case -ECONNREFUSED:
2011		dprintk("RPC:       xprt %p: connection refused for %s\n",
2012				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
2013		break;
2014	default:
2015		printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
2016				__func__, -status,
2017				xprt->address_strings[RPC_DISPLAY_ADDR]);
2018	}
2019
2020out:
2021	xprt_clear_connecting(xprt);
2022	xprt_wake_pending_tasks(xprt, status);
2023	return status;
2024}
2025
2026static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2027{
2028	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2029	int ret;
2030
2031	if (RPC_IS_ASYNC(task)) {
2032		/*
2033		 * We want the AF_LOCAL connect to be resolved in the
2034		 * filesystem namespace of the process making the rpc
2035		 * call.  Thus we connect synchronously.
2036		 *
2037		 * If we want to support asynchronous AF_LOCAL calls,
2038		 * we'll need to figure out how to pass a namespace to
2039		 * connect.
2040		 */
2041		task->tk_rpc_status = -ENOTCONN;
2042		rpc_exit(task, -ENOTCONN);
2043		return;
2044	}
2045	ret = xs_local_setup_socket(transport);
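	/* Delay before retrying, unless this is a "soft" connect attempt */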
2046	if (ret && !RPC_IS_SOFTCONN(task))
2047		msleep_interruptible(15000);
2048}
2049
2050#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
2051/*
2052 * Note that this should be called with XPRT_LOCKED held (or when we otherwise
2053 * know that we have exclusive access to the socket), to guard against
2054 * races with xs_reset_transport.
2055 */
2056static void xs_set_memalloc(struct rpc_xprt *xprt)
2057{
2058	struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
2059			xprt);
2060
2061	/*
2062	 * If there's no sock, then we have nothing to set. The
2063	 * reconnecting process will get it for us.
2064	 */
2065	if (!transport->inet)
2066		return;
2067	if (atomic_read(&xprt->swapper))
2068		sk_set_memalloc(transport->inet);
2069}
2070
2071/**
2072 * xs_enable_swap - Tag this transport as being used for swap.
2073 * @xprt: transport to tag
2074 *
2075 * Take a reference to this transport on behalf of the rpc_clnt, and
2076 * optionally mark it for swapping if it wasn't already.
2077 */
2078static int
2079xs_enable_swap(struct rpc_xprt *xprt)
2080{
2081	struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
2082
2083	if (atomic_inc_return(&xprt->swapper) != 1)
2084		return 0;
2085	if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
2086		return -ERESTARTSYS;
2087	if (xs->inet)
2088		sk_set_memalloc(xs->inet);
2089	xprt_release_xprt(xprt, NULL);
2090	return 0;
2091}
2092
2093/**
2094 * xs_disable_swap - Untag this transport as being used for swap.
2095 * @xprt: transport to tag
2096 *
2097 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the
2098 * swapper refcount goes to 0, untag the socket as a memalloc socket.
2099 */
2100static void
2101xs_disable_swap(struct rpc_xprt *xprt)
2102{
2103	struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
2104
2105	if (!atomic_dec_and_test(&xprt->swapper))
2106		return;
2107	if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
2108		return;
2109	if (xs->inet)
2110		sk_clear_memalloc(xs->inet);
2111	xprt_release_xprt(xprt, NULL);
2112}
2113#else
2114static void xs_set_memalloc(struct rpc_xprt *xprt)
2115{
2116}
2117
2118static int
2119xs_enable_swap(struct rpc_xprt *xprt)
2120{
2121	return -EINVAL;
2122}
2123
2124static void
2125xs_disable_swap(struct rpc_xprt *xprt)
2126{
2127}
2128#endif
2129
2130static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2131{
2132	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2133
2134	if (!transport->inet) {
2135		struct sock *sk = sock->sk;
2136
2137		write_lock_bh(&sk->sk_callback_lock);
2138
2139		xs_save_old_callbacks(transport, sk);
2140
2141		sk->sk_user_data = xprt;
2142		sk->sk_data_ready = xs_data_ready;
2143		sk->sk_write_space = xs_udp_write_space;
2144		sock_set_flag(sk, SOCK_FASYNC);
2145
2146		xprt_set_connected(xprt);
2147
2148		/* Reset to new socket */
2149		transport->sock = sock;
2150		transport->inet = sk;
2151
2152		xs_set_memalloc(xprt);
2153
2154		write_unlock_bh(&sk->sk_callback_lock);
2155	}
2156	xs_udp_do_set_buffer_size(xprt);
2157
2158	xprt->stat.connect_start = jiffies;
2159}
2160
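/**
 * xs_udp_setup_socket - create and set up a UDP socket for a transport
 * @work: queued work item
 */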
2161static void xs_udp_setup_socket(struct work_struct *work)
2162{
2163	struct sock_xprt *transport =
2164		container_of(work, struct sock_xprt, connect_worker.work);
2165	struct rpc_xprt *xprt = &transport->xprt;
2166	struct socket *sock;
2167	int status = -EIO;
2168
2169	sock = xs_create_sock(xprt, transport,
2170			xs_addr(xprt)->sa_family, SOCK_DGRAM,
2171			IPPROTO_UDP, false);
2172	if (IS_ERR(sock))
2173		goto out;
2174
2175	dprintk("RPC:       worker connecting xprt %p via %s to "
2176				"%s (port %s)\n", xprt,
2177			xprt->address_strings[RPC_DISPLAY_PROTO],
2178			xprt->address_strings[RPC_DISPLAY_ADDR],
2179			xprt->address_strings[RPC_DISPLAY_PORT]);
2180
2181	xs_udp_finish_connecting(xprt, sock);
2182	trace_rpc_socket_connect(xprt, sock, 0);
2183	status = 0;
2184out:
2185	xprt_clear_connecting(xprt);
2186	xprt_unlock_connect(xprt, transport);
2187	xprt_wake_pending_tasks(xprt, status);
2188}
2189
2190/**
2191 * xs_tcp_shutdown - gracefully shut down a TCP socket
2192 * @xprt: transport
2193 *
2194 * Initiates a graceful shutdown of the TCP socket by calling the
2195 * equivalent of shutdown(SHUT_RDWR);
2196 */
2197static void xs_tcp_shutdown(struct rpc_xprt *xprt)
2198{
2199	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2200	struct socket *sock = transport->sock;
2201	int skst = transport->inet ? transport->inet->sk_state : TCP_CLOSE;
2202
2203	if (sock == NULL)
2204		return;
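	/*
	 * On a live connection, ask the peer for an orderly shutdown;
	 * once the socket has reached CLOSE or TIME_WAIT, just tear
	 * down the transport.
	 */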
2205	switch (skst) {
2206	default:
2207		kernel_sock_shutdown(sock, SHUT_RDWR);
2208		trace_rpc_socket_shutdown(xprt, sock);
2209		break;
2210	case TCP_CLOSE:
2211	case TCP_TIME_WAIT:
2212		xs_reset_transport(transport);
2213	}
2214}
2215
2216static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
2217		struct socket *sock)
2218{
2219	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2220	unsigned int keepidle;
2221	unsigned int keepcnt;
2222	unsigned int opt_on = 1;
2223	unsigned int timeo;
2224
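	/*
	 * Derive keepalive settings from the RPC retransmit timeout:
	 * start probing once to_initval has elapsed, probe at that same
	 * interval, and give up after to_retries + 1 failed probes.  The
	 * RFC 5482 user timeout covers the same overall window.
	 */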
2225	spin_lock(&xprt->transport_lock);
2226	keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ);
2227	keepcnt = xprt->timeout->to_retries + 1;
2228	timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
2229		(xprt->timeout->to_retries + 1);
2230	clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
2231	spin_unlock(&xprt->transport_lock);
2232
2233	/* TCP Keepalive options */
2234	kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
2235			(char *)&opt_on, sizeof(opt_on));
2236	kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
2237			(char *)&keepidle, sizeof(keepidle));
2238	kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
2239			(char *)&keepidle, sizeof(keepidle));
2240	kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
2241			(char *)&keepcnt, sizeof(keepcnt));
2242
2243	/* TCP user timeout (see RFC5482) */
2244	kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT,
2245			(char *)&timeo, sizeof(timeo));
2246}
2247
2248static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt,
2249		unsigned long connect_timeout,
2250		unsigned long reconnect_timeout)
2251{
2252	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2253	struct rpc_timeout to;
2254	unsigned long initval;
2255
2256	spin_lock(&xprt->transport_lock);
2257	if (reconnect_timeout < xprt->max_reconnect_timeout)
2258		xprt->max_reconnect_timeout = reconnect_timeout;
2259	if (connect_timeout < xprt->connect_timeout) {
2260		memcpy(&to, xprt->timeout, sizeof(to));
2261		initval = DIV_ROUND_UP(connect_timeout, to.to_retries + 1);
2262		/* Arbitrary lower limit */
2263		if (initval <  XS_TCP_INIT_REEST_TO << 1)
2264			initval = XS_TCP_INIT_REEST_TO << 1;
2265		to.to_initval = initval;
2266		to.to_maxval = initval;
2267		memcpy(&transport->tcp_timeout, &to,
2268				sizeof(transport->tcp_timeout));
2269		xprt->timeout = &transport->tcp_timeout;
2270		xprt->connect_timeout = connect_timeout;
2271	}
2272	set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
2273	spin_unlock(&xprt->transport_lock);
2274}
2275
2276static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2277{
2278	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2279	int ret = -ENOTCONN;
2280
2281	if (!transport->inet) {
2282		struct sock *sk = sock->sk;
2283		unsigned int addr_pref = IPV6_PREFER_SRC_PUBLIC;
2284
2285		/* Avoid temporary addresses; they are bad for long-lived
2286		 * connections such as NFS mounts.
2287		 * RFC4941, section 3.6 suggests that:
2288		 *    Individual applications, which have specific
2289		 *    knowledge about the normal duration of connections,
2290		 *    MAY override this as appropriate.
2291		 */
2292		kernel_setsockopt(sock, SOL_IPV6, IPV6_ADDR_PREFERENCES,
2293				(char *)&addr_pref, sizeof(addr_pref));
2294
2295		xs_tcp_set_socket_timeouts(xprt, sock);
2296
2297		write_lock_bh(&sk->sk_callback_lock);
2298
2299		xs_save_old_callbacks(transport, sk);
2300
2301		sk->sk_user_data = xprt;
2302		sk->sk_data_ready = xs_data_ready;
2303		sk->sk_state_change = xs_tcp_state_change;
2304		sk->sk_write_space = xs_tcp_write_space;
2305		sock_set_flag(sk, SOCK_FASYNC);
2306		sk->sk_error_report = xs_error_report;
2307
2308		/* socket options */
2309		sock_reset_flag(sk, SOCK_LINGER);
2310		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
2311
2312		xprt_clear_connected(xprt);
2313
2314		/* Reset to new socket */
2315		transport->sock = sock;
2316		transport->inet = sk;
2317
2318		write_unlock_bh(&sk->sk_callback_lock);
2319	}
2320
2321	if (!xprt_bound(xprt))
2322		goto out;
2323
2324	xs_set_memalloc(xprt);
2325
2326	xs_stream_start_connect(transport);
2327
2328	/* Tell the socket layer to start connecting... */
2329	set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
2330	ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
2331	switch (ret) {
2332	case 0:
2333		xs_set_srcport(transport, sock);
2334		/* fall through */
2335	case -EINPROGRESS:
2336		/* SYN_SENT! */
2337		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2338			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2339		break;
2340	case -EADDRNOTAVAIL:
2341		/* Source port number is unavailable. Try a new one! */
2342		transport->srcport = 0;
2343	}
2344out:
2345	return ret;
2346}
2347
2348/**
2349 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
2350 * @work: queued work item
2351 *
2352 * Invoked from a work queue.
2353 */
2354static void xs_tcp_setup_socket(struct work_struct *work)
2355{
2356	struct sock_xprt *transport =
2357		container_of(work, struct sock_xprt, connect_worker.work);
2358	struct socket *sock = transport->sock;
2359	struct rpc_xprt *xprt = &transport->xprt;
2360	int status = -EIO;
2361
2362	if (!sock) {
2363		sock = xs_create_sock(xprt, transport,
2364				xs_addr(xprt)->sa_family, SOCK_STREAM,
2365				IPPROTO_TCP, true);
2366		if (IS_ERR(sock)) {
2367			status = PTR_ERR(sock);
2368			goto out;
2369		}
2370	}
2371
2372	dprintk("RPC:       worker connecting xprt %p via %s to "
2373				"%s (port %s)\n", xprt,
2374			xprt->address_strings[RPC_DISPLAY_PROTO],
2375			xprt->address_strings[RPC_DISPLAY_ADDR],
2376			xprt->address_strings[RPC_DISPLAY_PORT]);
2377
2378	status = xs_tcp_finish_connecting(xprt, sock);
2379	trace_rpc_socket_connect(xprt, sock, status);
2380	dprintk("RPC:       %p connect status %d connected %d sock state %d\n",
2381			xprt, -status, xprt_connected(xprt),
2382			sock->sk->sk_state);
2383	switch (status) {
2384	default:
2385		printk("%s: connect returned unhandled error %d\n",
2386			__func__, status);
2387		/* fall through */
2388	case -EADDRNOTAVAIL:
2389		/* We're probably in TIME_WAIT. Get rid of existing socket,
2390		 * and retry
2391		 */
2392		xs_tcp_force_close(xprt);
2393		break;
2394	case 0:
2395	case -EINPROGRESS:
2396	case -EALREADY:
2397		xprt_unlock_connect(xprt, transport);
2398		return;
2399	case -EINVAL:
2400		/* Happens, for instance, if the user specified a link
2401		 * local IPv6 address without a scope-id.
2402		 */
2403	case -ECONNREFUSED:
2404	case -ECONNRESET:
2405	case -ENETDOWN:
2406	case -ENETUNREACH:
2407	case -EHOSTUNREACH:
2408	case -EADDRINUSE:
2409	case -ENOBUFS:
2410		/*
2411		 * xs_tcp_force_close() wakes tasks with -EIO.
2412		 * We need to wake them first to ensure the
2413		 * correct error code.
2414		 */
2415		xprt_wake_pending_tasks(xprt, status);
2416		xs_tcp_force_close(xprt);
2417		goto out;
2418	}
2419	status = -EAGAIN;
2420out:
2421	xprt_clear_connecting(xprt);
2422	xprt_unlock_connect(xprt, transport);
2423	xprt_wake_pending_tasks(xprt, status);
2424}
2425
2426/**
2427 * xs_connect - connect a socket to a remote endpoint
2428 * @xprt: pointer to transport structure
2429 * @task: address of RPC task that manages state of connect request
2430 *
2431 * TCP: If the remote end dropped the connection, delay reconnecting.
2432 *
2433 * UDP socket connects are synchronous, but we use a work queue anyway
2434 * to guarantee that even unprivileged user processes can set up a
2435 * socket on a privileged port.
2436 *
2437 * If a UDP socket connect fails, the delay behavior here prevents
2438 * retry floods (hard mounts).
2439 */
2440static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2441{
2442	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2443	unsigned long delay = 0;
2444
2445	WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
2446
2447	if (transport->sock != NULL) {
2448		dprintk("RPC:       xs_connect delayed xprt %p for %lu "
2449				"seconds\n",
2450				xprt, xprt->reestablish_timeout / HZ);
2451
2452		/* Start by resetting any existing state */
2453		xs_reset_transport(transport);
2454
2455		delay = xprt_reconnect_delay(xprt);
2456		xprt_reconnect_backoff(xprt, XS_TCP_INIT_REEST_TO);
2457
2458	} else
2459		dprintk("RPC:       xs_connect scheduled xprt %p\n", xprt);
2460
2461	queue_delayed_work(xprtiod_workqueue,
2462			&transport->connect_worker,
2463			delay);
2464}
2465
2466static void xs_wake_disconnect(struct sock_xprt *transport)
2467{
2468	if (test_and_clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state))
2469		xs_tcp_force_close(&transport->xprt);
2470}
2471
2472static void xs_wake_write(struct sock_xprt *transport)
2473{
2474	if (test_and_clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state))
2475		xprt_write_space(&transport->xprt);
2476}
2477
2478static void xs_wake_error(struct sock_xprt *transport)
2479{
2480	int sockerr;
2481
2482	if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
2483		return;
2484	mutex_lock(&transport->recv_mutex);
2485	if (transport->sock == NULL)
2486		goto out;
2487	if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
2488		goto out;
2489	sockerr = xchg(&transport->xprt_err, 0);
2490	if (sockerr < 0)
2491		xprt_wake_pending_tasks(&transport->xprt, sockerr);
2492out:
2493	mutex_unlock(&transport->recv_mutex);
2494}
2495
2496static void xs_wake_pending(struct sock_xprt *transport)
2497{
2498	if (test_and_clear_bit(XPRT_SOCK_WAKE_PENDING, &transport->sock_state))
2499		xprt_wake_pending_tasks(&transport->xprt, -EAGAIN);
2500}
2501
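/*
 * Act on deferred socket events.  The sk_* callbacks run in softirq
 * context, so they only set XPRT_SOCK_WAKE_* flags and schedule this
 * worker to handle them in process context.
 */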
2502static void xs_error_handle(struct work_struct *work)
2503{
2504	struct sock_xprt *transport = container_of(work,
2505			struct sock_xprt, error_worker);
2506
2507	xs_wake_disconnect(transport);
2508	xs_wake_write(transport);
2509	xs_wake_error(transport);
2510	xs_wake_pending(transport);
2511}
2512
2513/**
2514 * xs_local_print_stats - display AF_LOCAL socket-specific stats
2515 * @xprt: rpc_xprt struct containing statistics
2516 * @seq: output file
2517 *
2518 */
2519static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2520{
2521	long idle_time = 0;
2522
2523	if (xprt_connected(xprt))
2524		idle_time = (long)(jiffies - xprt->last_used) / HZ;
2525
2526	seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
2527			"%llu %llu %lu %llu %llu\n",
2528			xprt->stat.bind_count,
2529			xprt->stat.connect_count,
2530			xprt->stat.connect_time / HZ,
2531			idle_time,
2532			xprt->stat.sends,
2533			xprt->stat.recvs,
2534			xprt->stat.bad_xids,
2535			xprt->stat.req_u,
2536			xprt->stat.bklog_u,
2537			xprt->stat.max_slots,
2538			xprt->stat.sending_u,
2539			xprt->stat.pending_u);
2540}
2541
2542/**
2543 * xs_udp_print_stats - display UDP socket-specific stats
2544 * @xprt: rpc_xprt struct containing statistics
2545 * @seq: output file
2546 *
2547 */
2548static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2549{
2550	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2551
2552	seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu "
2553			"%lu %llu %llu\n",
2554			transport->srcport,
2555			xprt->stat.bind_count,
2556			xprt->stat.sends,
2557			xprt->stat.recvs,
2558			xprt->stat.bad_xids,
2559			xprt->stat.req_u,
2560			xprt->stat.bklog_u,
2561			xprt->stat.max_slots,
2562			xprt->stat.sending_u,
2563			xprt->stat.pending_u);
2564}
2565
2566/**
2567 * xs_tcp_print_stats - display TCP socket-specific stats
2568 * @xprt: rpc_xprt struct containing statistics
2569 * @seq: output file
2570 *
2571 */
2572static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2573{
2574	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2575	long idle_time = 0;
2576
2577	if (xprt_connected(xprt))
2578		idle_time = (long)(jiffies - xprt->last_used) / HZ;
2579
2580	seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu "
2581			"%llu %llu %lu %llu %llu\n",
2582			transport->srcport,
2583			xprt->stat.bind_count,
2584			xprt->stat.connect_count,
2585			xprt->stat.connect_time / HZ,
2586			idle_time,
2587			xprt->stat.sends,
2588			xprt->stat.recvs,
2589			xprt->stat.bad_xids,
2590			xprt->stat.req_u,
2591			xprt->stat.bklog_u,
2592			xprt->stat.max_slots,
2593			xprt->stat.sending_u,
2594			xprt->stat.pending_u);
2595}
2596
2597/*
2598 * Allocate a page for a scratch buffer for the rpc code.  The reason we
2599 * allocate a page instead of doing a kmalloc like rpc_malloc is that we want
2600 * to use the server side send routines.
2601 */
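/*
 * The page holds a struct rpc_buffer header followed by the call
 * buffer; the reply buffer starts rq_callsize bytes into the data
 * area.
 */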
2602static int bc_malloc(struct rpc_task *task)
2603{
2604	struct rpc_rqst *rqst = task->tk_rqstp;
2605	size_t size = rqst->rq_callsize;
2606	struct page *page;
2607	struct rpc_buffer *buf;
2608
2609	if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) {
2610		WARN_ONCE(1, "xprtsock: large bc buffer request (size %zu)\n",
2611			  size);
2612		return -EINVAL;
2613	}
2614
2615	page = alloc_page(GFP_KERNEL);
2616	if (!page)
2617		return -ENOMEM;
2618
2619	buf = page_address(page);
2620	buf->len = PAGE_SIZE;
2621
2622	rqst->rq_buffer = buf->data;
2623	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
2624	return 0;
2625}
2626
2627/*
2628 * Free the space allocated in the bc_alloc routine
2629 */
2630static void bc_free(struct rpc_task *task)
2631{
2632	void *buffer = task->tk_rqstp->rq_buffer;
2633	struct rpc_buffer *buf;
2634
2635	buf = container_of(buffer, struct rpc_buffer, data);
2636	free_page((unsigned long)buf);
2637}
2638
2639/*
2640 * Use the svc_sock to send the callback. Must be called with svsk->sk_mutex
2641 * held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
2642 */
2643static int bc_sendto(struct rpc_rqst *req)
2644{
2645	int len;
2646	struct xdr_buf *xbufp = &req->rq_snd_buf;
2647	struct sock_xprt *transport =
2648			container_of(req->rq_xprt, struct sock_xprt, xprt);
2649	unsigned long headoff;
2650	unsigned long tailoff;
2651	struct page *tailpage;
2652	struct msghdr msg = {
2653		.msg_flags	= MSG_MORE
2654	};
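	/*
	 * RPC over TCP frames each message with a 4-byte record marker:
	 * the top bit flags the final fragment and the remaining 31 bits
	 * carry the fragment length (see RFC 5531).
	 */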
2655	rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT |
2656					 (u32)xbufp->len);
2657	struct kvec iov = {
2658		.iov_base	= &marker,
2659		.iov_len	= sizeof(marker),
2660	};
2661
2662	len = kernel_sendmsg(transport->sock, &msg, &iov, 1, iov.iov_len);
2663	if (len != iov.iov_len)
2664		return -EAGAIN;
2665
2666	tailpage = NULL;
2667	if (xbufp->tail[0].iov_len)
2668		tailpage = virt_to_page(xbufp->tail[0].iov_base);
2669	tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
2670	headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
2671	len = svc_send_common(transport->sock, xbufp,
2672			      virt_to_page(xbufp->head[0].iov_base), headoff,
2673			      tailpage, tailoff);
2674	if (len != xbufp->len)
2675		return -EAGAIN;
2676	return len;
2677}
2678
2679/*
2680 * The send routine. Borrows from svc_send
2681 */
2682static int bc_send_request(struct rpc_rqst *req)
2683{
2684	struct svc_xprt	*xprt;
2685	int len;
2686
2687	dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
2688	/*
2689	 * Get the server socket associated with this callback xprt
2690	 */
2691	xprt = req->rq_xprt->bc_xprt;
2692
2693	/*
2694	 * Grab the mutex to serialize data as the connection is shared
2695	 * with the fore channel
2696	 */
2697	mutex_lock(&xprt->xpt_mutex);
2698	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
2699		len = -ENOTCONN;
2700	else
2701		len = bc_sendto(req);
2702	mutex_unlock(&xprt->xpt_mutex);
2703
2704	if (len > 0)
2705		len = 0;
2706
2707	return len;
2708}
2709
2710/*
2711 * The close routine. Since this is client initiated, we do nothing
2712 */
2713
2714static void bc_close(struct rpc_xprt *xprt)
2715{
2716}
2717
2718/*
2719 * The xprt destroy routine. Because this connection is client
2720 * initiated, there is no socket to tear down; we just free the xprt.
2721 */
2722
2723static void bc_destroy(struct rpc_xprt *xprt)
2724{
2725	dprintk("RPC:       bc_destroy xprt %p\n", xprt);
2726
2727	xs_xprt_free(xprt);
2728	module_put(THIS_MODULE);
2729}
2730
2731static const struct rpc_xprt_ops xs_local_ops = {
2732	.reserve_xprt		= xprt_reserve_xprt,
2733	.release_xprt		= xprt_release_xprt,
2734	.alloc_slot		= xprt_alloc_slot,
2735	.free_slot		= xprt_free_slot,
2736	.rpcbind		= xs_local_rpcbind,
2737	.set_port		= xs_local_set_port,
2738	.connect		= xs_local_connect,
2739	.buf_alloc		= rpc_malloc,
2740	.buf_free		= rpc_free,
2741	.prepare_request	= xs_stream_prepare_request,
2742	.send_request		= xs_local_send_request,
2743	.wait_for_reply_request	= xprt_wait_for_reply_request_def,
2744	.close			= xs_close,
2745	.destroy		= xs_destroy,
2746	.print_stats		= xs_local_print_stats,
2747	.enable_swap		= xs_enable_swap,
2748	.disable_swap		= xs_disable_swap,
2749};
2750
2751static const struct rpc_xprt_ops xs_udp_ops = {
2752	.set_buffer_size	= xs_udp_set_buffer_size,
2753	.reserve_xprt		= xprt_reserve_xprt_cong,
2754	.release_xprt		= xprt_release_xprt_cong,
2755	.alloc_slot		= xprt_alloc_slot,
2756	.free_slot		= xprt_free_slot,
2757	.rpcbind		= rpcb_getport_async,
2758	.set_port		= xs_set_port,
2759	.connect		= xs_connect,
2760	.buf_alloc		= rpc_malloc,
2761	.buf_free		= rpc_free,
2762	.send_request		= xs_udp_send_request,
2763	.wait_for_reply_request	= xprt_wait_for_reply_request_rtt,
2764	.timer			= xs_udp_timer,
2765	.release_request	= xprt_release_rqst_cong,
2766	.close			= xs_close,
2767	.destroy		= xs_destroy,
2768	.print_stats		= xs_udp_print_stats,
2769	.enable_swap		= xs_enable_swap,
2770	.disable_swap		= xs_disable_swap,
2771	.inject_disconnect	= xs_inject_disconnect,
2772};
2773
2774static const struct rpc_xprt_ops xs_tcp_ops = {
2775	.reserve_xprt		= xprt_reserve_xprt,
2776	.release_xprt		= xprt_release_xprt,
2777	.alloc_slot		= xprt_alloc_slot,
2778	.free_slot		= xprt_free_slot,
2779	.rpcbind		= rpcb_getport_async,
2780	.set_port		= xs_set_port,
2781	.connect		= xs_connect,
2782	.buf_alloc		= rpc_malloc,
2783	.buf_free		= rpc_free,
2784	.prepare_request	= xs_stream_prepare_request,
2785	.send_request		= xs_tcp_send_request,
2786	.wait_for_reply_request	= xprt_wait_for_reply_request_def,
2787	.close			= xs_tcp_shutdown,
2788	.destroy		= xs_destroy,
2789	.set_connect_timeout	= xs_tcp_set_connect_timeout,
2790	.print_stats		= xs_tcp_print_stats,
2791	.enable_swap		= xs_enable_swap,
2792	.disable_swap		= xs_disable_swap,
2793	.inject_disconnect	= xs_inject_disconnect,
2794#ifdef CONFIG_SUNRPC_BACKCHANNEL
2795	.bc_setup		= xprt_setup_bc,
2796	.bc_maxpayload		= xs_tcp_bc_maxpayload,
2797	.bc_num_slots		= xprt_bc_max_slots,
2798	.bc_free_rqst		= xprt_free_bc_rqst,
2799	.bc_destroy		= xprt_destroy_bc,
2800#endif
2801};
2802
2803/*
2804 * The rpc_xprt_ops for the server backchannel
2805 */
2806
2807static const struct rpc_xprt_ops bc_tcp_ops = {
2808	.reserve_xprt		= xprt_reserve_xprt,
2809	.release_xprt		= xprt_release_xprt,
2810	.alloc_slot		= xprt_alloc_slot,
2811	.free_slot		= xprt_free_slot,
2812	.buf_alloc		= bc_malloc,
2813	.buf_free		= bc_free,
2814	.send_request		= bc_send_request,
2815	.wait_for_reply_request	= xprt_wait_for_reply_request_def,
2816	.close			= bc_close,
2817	.destroy		= bc_destroy,
2818	.print_stats		= xs_tcp_print_stats,
2819	.enable_swap		= xs_enable_swap,
2820	.disable_swap		= xs_disable_swap,
2821	.inject_disconnect	= xs_inject_disconnect,
2822};
2823
2824static int xs_init_anyaddr(const int family, struct sockaddr *sap)
2825{
2826	static const struct sockaddr_in sin = {
2827		.sin_family		= AF_INET,
2828		.sin_addr.s_addr	= htonl(INADDR_ANY),
2829	};
2830	static const struct sockaddr_in6 sin6 = {
2831		.sin6_family		= AF_INET6,
2832		.sin6_addr		= IN6ADDR_ANY_INIT,
2833	};
2834
2835	switch (family) {
2836	case AF_LOCAL:
2837		break;
2838	case AF_INET:
2839		memcpy(sap, &sin, sizeof(sin));
2840		break;
2841	case AF_INET6:
2842		memcpy(sap, &sin6, sizeof(sin6));
2843		break;
2844	default:
2845		dprintk("RPC:       %s: Bad address family\n", __func__);
2846		return -EAFNOSUPPORT;
2847	}
2848	return 0;
2849}
2850
2851static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2852				      unsigned int slot_table_size,
2853				      unsigned int max_slot_table_size)
2854{
2855	struct rpc_xprt *xprt;
2856	struct sock_xprt *new;
2857
2858	if (args->addrlen > sizeof(xprt->addr)) {
2859		dprintk("RPC:       xs_setup_xprt: address too large\n");
2860		return ERR_PTR(-EBADF);
2861	}
2862
2863	xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size,
2864			max_slot_table_size);
2865	if (xprt == NULL) {
2866		dprintk("RPC:       xs_setup_xprt: couldn't allocate "
2867				"rpc_xprt\n");
2868		return ERR_PTR(-ENOMEM);
2869	}
2870
2871	new = container_of(xprt, struct sock_xprt, xprt);
2872	mutex_init(&new->recv_mutex);
2873	memcpy(&xprt->addr, args->dstaddr, args->addrlen);
2874	xprt->addrlen = args->addrlen;
2875	if (args->srcaddr)
2876		memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
2877	else {
2878		int err;
2879		err = xs_init_anyaddr(args->dstaddr->sa_family,
2880					(struct sockaddr *)&new->srcaddr);
2881		if (err != 0) {
2882			xprt_free(xprt);
2883			return ERR_PTR(err);
2884		}
2885	}
2886
2887	return xprt;
2888}
2889
2890static const struct rpc_timeout xs_local_default_timeout = {
2891	.to_initval = 10 * HZ,
2892	.to_maxval = 10 * HZ,
2893	.to_retries = 2,
2894};
2895
2896/**
2897 * xs_setup_local - Set up transport to use an AF_LOCAL socket
2898 * @args: rpc transport creation arguments
2899 *
2900 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
2901 */
2902static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
2903{
2904	struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
2905	struct sock_xprt *transport;
2906	struct rpc_xprt *xprt;
2907	struct rpc_xprt *ret;
2908
2909	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2910			xprt_max_tcp_slot_table_entries);
2911	if (IS_ERR(xprt))
2912		return xprt;
2913	transport = container_of(xprt, struct sock_xprt, xprt);
2914
2915	xprt->prot = 0;
2916	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2917
2918	xprt->bind_timeout = XS_BIND_TO;
2919	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2920	xprt->idle_timeout = XS_IDLE_DISC_TO;
2921
2922	xprt->ops = &xs_local_ops;
2923	xprt->timeout = &xs_local_default_timeout;
2924
2925	INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
2926	INIT_WORK(&transport->error_worker, xs_error_handle);
2927	INIT_DELAYED_WORK(&transport->connect_worker, xs_dummy_setup_socket);
2928
2929	switch (sun->sun_family) {
2930	case AF_LOCAL:
2931		if (sun->sun_path[0] != '/') {
2932			dprintk("RPC:       bad AF_LOCAL address: %s\n",
2933					sun->sun_path);
2934			ret = ERR_PTR(-EINVAL);
2935			goto out_err;
2936		}
2937		xprt_set_bound(xprt);
2938		xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
2939		ret = ERR_PTR(xs_local_setup_socket(transport));
2940		if (ret)
2941			goto out_err;
2942		break;
2943	default:
2944		ret = ERR_PTR(-EAFNOSUPPORT);
2945		goto out_err;
2946	}
2947
2948	dprintk("RPC:       set up xprt to %s via AF_LOCAL\n",
2949			xprt->address_strings[RPC_DISPLAY_ADDR]);
2950
2951	if (try_module_get(THIS_MODULE))
2952		return xprt;
2953	ret = ERR_PTR(-EINVAL);
2954out_err:
2955	xs_xprt_free(xprt);
2956	return ret;
2957}
2958
2959static const struct rpc_timeout xs_udp_default_timeout = {
2960	.to_initval = 5 * HZ,
2961	.to_maxval = 30 * HZ,
2962	.to_increment = 5 * HZ,
2963	.to_retries = 5,
2964};
2965
2966/**
2967 * xs_setup_udp - Set up transport to use a UDP socket
2968 * @args: rpc transport creation arguments
2969 *
2970 */
2971static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2972{
2973	struct sockaddr *addr = args->dstaddr;
2974	struct rpc_xprt *xprt;
2975	struct sock_xprt *transport;
2976	struct rpc_xprt *ret;
2977
2978	xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries,
2979			xprt_udp_slot_table_entries);
2980	if (IS_ERR(xprt))
2981		return xprt;
2982	transport = container_of(xprt, struct sock_xprt, xprt);
2983
2984	xprt->prot = IPPROTO_UDP;
2985	/* XXX: header size can vary due to auth type, IPv6, etc. */
2986	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
2987
2988	xprt->bind_timeout = XS_BIND_TO;
2989	xprt->reestablish_timeout = XS_UDP_REEST_TO;
2990	xprt->idle_timeout = XS_IDLE_DISC_TO;
2991
2992	xprt->ops = &xs_udp_ops;
2993
2994	xprt->timeout = &xs_udp_default_timeout;
2995
2996	INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn);
2997	INIT_WORK(&transport->error_worker, xs_error_handle);
2998	INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket);
2999
3000	switch (addr->sa_family) {
3001	case AF_INET:
3002		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
3003			xprt_set_bound(xprt);
3004
3005		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
3006		break;
3007	case AF_INET6:
3008		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
3009			xprt_set_bound(xprt);
3010
3011		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
3012		break;
3013	default:
3014		ret = ERR_PTR(-EAFNOSUPPORT);
3015		goto out_err;
3016	}
3017
3018	if (xprt_bound(xprt))
3019		dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
3020				xprt->address_strings[RPC_DISPLAY_ADDR],
3021				xprt->address_strings[RPC_DISPLAY_PORT],
3022				xprt->address_strings[RPC_DISPLAY_PROTO]);
3023	else
3024		dprintk("RPC:       set up xprt to %s (autobind) via %s\n",
3025				xprt->address_strings[RPC_DISPLAY_ADDR],
3026				xprt->address_strings[RPC_DISPLAY_PROTO]);
3027
3028	if (try_module_get(THIS_MODULE))
3029		return xprt;
3030	ret = ERR_PTR(-EINVAL);
3031out_err:
3032	xs_xprt_free(xprt);
3033	return ret;
3034}
3035
3036static const struct rpc_timeout xs_tcp_default_timeout = {
3037	.to_initval = 60 * HZ,
3038	.to_maxval = 60 * HZ,
3039	.to_retries = 2,
3040};
3041
3042/**
3043 * xs_setup_tcp - Set up transport to use a TCP socket
3044 * @args: rpc transport creation arguments
3045 *
3046 */
3047static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
3048{
3049	struct sockaddr *addr = args->dstaddr;
3050	struct rpc_xprt *xprt;
3051	struct sock_xprt *transport;
3052	struct rpc_xprt *ret;
3053	unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;
3054
3055	if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
3056		max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;
3057
3058	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
3059			max_slot_table_size);
3060	if (IS_ERR(xprt))
3061		return xprt;
3062	transport = container_of(xprt, struct sock_xprt, xprt);
3063
3064	xprt->prot = IPPROTO_TCP;
3065	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
3066
3067	xprt->bind_timeout = XS_BIND_TO;
3068	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
3069	xprt->idle_timeout = XS_IDLE_DISC_TO;
3070
3071	xprt->ops = &xs_tcp_ops;
3072	xprt->timeout = &xs_tcp_default_timeout;
3073
3074	xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
3075	xprt->connect_timeout = xprt->timeout->to_initval *
3076		(xprt->timeout->to_retries + 1);
3077
3078	INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
3079	INIT_WORK(&transport->error_worker, xs_error_handle);
3080	INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);
3081
3082	switch (addr->sa_family) {
3083	case AF_INET:
3084		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
3085			xprt_set_bound(xprt);
3086
3087		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
3088		break;
3089	case AF_INET6:
3090		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
3091			xprt_set_bound(xprt);
3092
3093		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
3094		break;
3095	default:
3096		ret = ERR_PTR(-EAFNOSUPPORT);
3097		goto out_err;
3098	}
3099
3100	if (xprt_bound(xprt))
3101		dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
3102				xprt->address_strings[RPC_DISPLAY_ADDR],
3103				xprt->address_strings[RPC_DISPLAY_PORT],
3104				xprt->address_strings[RPC_DISPLAY_PROTO]);
3105	else
3106		dprintk("RPC:       set up xprt to %s (autobind) via %s\n",
3107				xprt->address_strings[RPC_DISPLAY_ADDR],
3108				xprt->address_strings[RPC_DISPLAY_PROTO]);
3109
3110	if (try_module_get(THIS_MODULE))
3111		return xprt;
3112	ret = ERR_PTR(-EINVAL);
3113out_err:
3114	xs_xprt_free(xprt);
3115	return ret;
3116}
3117
3118/**
3119 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
3120 * @args: rpc transport creation arguments
3121 *
3122 */
3123static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
3124{
3125	struct sockaddr *addr = args->dstaddr;
3126	struct rpc_xprt *xprt;
3127	struct sock_xprt *transport;
3128	struct svc_sock *bc_sock;
3129	struct rpc_xprt *ret;
3130
3131	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
3132			xprt_tcp_slot_table_entries);
3133	if (IS_ERR(xprt))
3134		return xprt;
3135	transport = container_of(xprt, struct sock_xprt, xprt);
3136
3137	xprt->prot = IPPROTO_TCP;
3138	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
3139	xprt->timeout = &xs_tcp_default_timeout;
3140
3141	/* backchannel */
3142	xprt_set_bound(xprt);
3143	xprt->bind_timeout = 0;
3144	xprt->reestablish_timeout = 0;
3145	xprt->idle_timeout = 0;
3146
3147	xprt->ops = &bc_tcp_ops;
3148
3149	switch (addr->sa_family) {
3150	case AF_INET:
3151		xs_format_peer_addresses(xprt, "tcp",
3152					 RPCBIND_NETID_TCP);
3153		break;
3154	case AF_INET6:
3155		xs_format_peer_addresses(xprt, "tcp",
3156				   RPCBIND_NETID_TCP6);
3157		break;
3158	default:
3159		ret = ERR_PTR(-EAFNOSUPPORT);
3160		goto out_err;
3161	}
3162
3163	dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
3164			xprt->address_strings[RPC_DISPLAY_ADDR],
3165			xprt->address_strings[RPC_DISPLAY_PORT],
3166			xprt->address_strings[RPC_DISPLAY_PROTO]);
3167
3168	/*
3169	 * Once we've associated a backchannel xprt with a connection,
3170	 * we want to keep it around as long as the connection lasts,
3171	 * in case we need to start using it for a backchannel again;
3172	 * this reference won't be dropped until bc_xprt is destroyed.
3173	 */
3174	xprt_get(xprt);
3175	args->bc_xprt->xpt_bc_xprt = xprt;
3176	xprt->bc_xprt = args->bc_xprt;
3177	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
3178	transport->sock = bc_sock->sk_sock;
3179	transport->inet = bc_sock->sk_sk;
3180
3181	/*
3182	 * Since we don't want connections for the backchannel, we set
3183	 * the xprt status to connected
3184	 */
3185	xprt_set_connected(xprt);
3186
3187	if (try_module_get(THIS_MODULE))
3188		return xprt;
3189
3190	args->bc_xprt->xpt_bc_xprt = NULL;
3191	args->bc_xprt->xpt_bc_xps = NULL;
3192	xprt_put(xprt);
3193	ret = ERR_PTR(-EINVAL);
3194out_err:
3195	xs_xprt_free(xprt);
3196	return ret;
3197}
3198
3199static struct xprt_class	xs_local_transport = {
3200	.list		= LIST_HEAD_INIT(xs_local_transport.list),
3201	.name		= "named UNIX socket",
3202	.owner		= THIS_MODULE,
3203	.ident		= XPRT_TRANSPORT_LOCAL,
3204	.setup		= xs_setup_local,
3205};
3206
3207static struct xprt_class	xs_udp_transport = {
3208	.list		= LIST_HEAD_INIT(xs_udp_transport.list),
3209	.name		= "udp",
3210	.owner		= THIS_MODULE,
3211	.ident		= XPRT_TRANSPORT_UDP,
3212	.setup		= xs_setup_udp,
3213};
3214
3215static struct xprt_class	xs_tcp_transport = {
3216	.list		= LIST_HEAD_INIT(xs_tcp_transport.list),
3217	.name		= "tcp",
3218	.owner		= THIS_MODULE,
3219	.ident		= XPRT_TRANSPORT_TCP,
3220	.setup		= xs_setup_tcp,
3221};
3222
3223static struct xprt_class	xs_bc_tcp_transport = {
3224	.list		= LIST_HEAD_INIT(xs_bc_tcp_transport.list),
3225	.name		= "tcp NFSv4.1 backchannel",
3226	.owner		= THIS_MODULE,
3227	.ident		= XPRT_TRANSPORT_BC_TCP,
3228	.setup		= xs_setup_bc_tcp,
3229};
3230
3231/**
3232 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
3233 *
3234 */
3235int init_socket_xprt(void)
3236{
3237	if (!sunrpc_table_header)
3238		sunrpc_table_header = register_sysctl_table(sunrpc_table);
3239
3240	xprt_register_transport(&xs_local_transport);
3241	xprt_register_transport(&xs_udp_transport);
3242	xprt_register_transport(&xs_tcp_transport);
3243	xprt_register_transport(&xs_bc_tcp_transport);
3244
3245	return 0;
3246}
3247
3248/**
3249 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
3250 *
3251 */
3252void cleanup_socket_xprt(void)
3253{
3254	if (sunrpc_table_header) {
3255		unregister_sysctl_table(sunrpc_table_header);
3256		sunrpc_table_header = NULL;
3257	}
3258
3259	xprt_unregister_transport(&xs_local_transport);
3260	xprt_unregister_transport(&xs_udp_transport);
3261	xprt_unregister_transport(&xs_tcp_transport);
3262	xprt_unregister_transport(&xs_bc_tcp_transport);
3263}
3264
3265static int param_set_uint_minmax(const char *val,
3266		const struct kernel_param *kp,
3267		unsigned int min, unsigned int max)
3268{
3269	unsigned int num;
3270	int ret;
3271
3272	if (!val)
3273		return -EINVAL;
3274	ret = kstrtouint(val, 0, &num);
3275	if (ret)
3276		return ret;
3277	if (num < min || num > max)
3278		return -EINVAL;
3279	*((unsigned int *)kp->arg) = num;
3280	return 0;
3281}
3282
3283static int param_set_portnr(const char *val, const struct kernel_param *kp)
3284{
3285	return param_set_uint_minmax(val, kp,
3286			RPC_MIN_RESVPORT,
3287			RPC_MAX_RESVPORT);
3288}
3289
3290static const struct kernel_param_ops param_ops_portnr = {
3291	.set = param_set_portnr,
3292	.get = param_get_uint,
3293};
3294
3295#define param_check_portnr(name, p) \
3296	__param_check(name, p, unsigned int);
3297
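/*
 * These tunables are also writable at run time via
 * /sys/module/sunrpc/parameters/.
 */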
3298module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
3299module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
3300
3301static int param_set_slot_table_size(const char *val,
3302				     const struct kernel_param *kp)
3303{
3304	return param_set_uint_minmax(val, kp,
3305			RPC_MIN_SLOT_TABLE,
3306			RPC_MAX_SLOT_TABLE);
3307}
3308
3309static const struct kernel_param_ops param_ops_slot_table_size = {
3310	.set = param_set_slot_table_size,
3311	.get = param_get_uint,
3312};
3313
3314#define param_check_slot_table_size(name, p) \
3315	__param_check(name, p, unsigned int);
3316
3317static int param_set_max_slot_table_size(const char *val,
3318				     const struct kernel_param *kp)
3319{
3320	return param_set_uint_minmax(val, kp,
3321			RPC_MIN_SLOT_TABLE,
3322			RPC_MAX_SLOT_TABLE_LIMIT);
3323}
3324
3325static const struct kernel_param_ops param_ops_max_slot_table_size = {
3326	.set = param_set_max_slot_table_size,
3327	.get = param_get_uint,
3328};
3329
3330#define param_check_max_slot_table_size(name, p) \
3331	__param_check(name, p, unsigned int);
3332
3333module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
3334		   slot_table_size, 0644);
3335module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries,
3336		   max_slot_table_size, 0644);
3337module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
3338		   slot_table_size, 0644);
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * linux/net/sunrpc/xprtsock.c
   4 *
   5 * Client-side transport implementation for sockets.
   6 *
   7 * TCP callback races fixes (C) 1998 Red Hat
   8 * TCP send fixes (C) 1998 Red Hat
   9 * TCP NFS related read + write fixes
  10 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
  11 *
  12 * Rewrite of larges part of the code in order to stabilize TCP stuff.
  13 * Fix behaviour when socket buffer is full.
  14 *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
  15 *
  16 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
  17 *
  18 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
  19 *   <gilles.quillard@bull.net>
  20 */
  21
  22#include <linux/types.h>
  23#include <linux/string.h>
  24#include <linux/slab.h>
  25#include <linux/module.h>
  26#include <linux/capability.h>
  27#include <linux/pagemap.h>
  28#include <linux/errno.h>
  29#include <linux/socket.h>
  30#include <linux/in.h>
  31#include <linux/net.h>
  32#include <linux/mm.h>
  33#include <linux/un.h>
  34#include <linux/udp.h>
  35#include <linux/tcp.h>
  36#include <linux/sunrpc/clnt.h>
  37#include <linux/sunrpc/addr.h>
  38#include <linux/sunrpc/sched.h>
  39#include <linux/sunrpc/svcsock.h>
  40#include <linux/sunrpc/xprtsock.h>
  41#include <linux/file.h>
  42#ifdef CONFIG_SUNRPC_BACKCHANNEL
  43#include <linux/sunrpc/bc_xprt.h>
  44#endif
  45
  46#include <net/sock.h>
  47#include <net/checksum.h>
  48#include <net/udp.h>
  49#include <net/tcp.h>
  50#include <linux/bvec.h>
  51#include <linux/highmem.h>
  52#include <linux/uio.h>
  53#include <linux/sched/mm.h>
  54
  55#include <trace/events/sunrpc.h>
  56
  57#include "socklib.h"
  58#include "sunrpc.h"
  59
  60static void xs_close(struct rpc_xprt *xprt);
  61static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock);
  62static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
  63		struct socket *sock);
  64
  65/*
  66 * xprtsock tunables
  67 */
  68static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
  69static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
  70static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;
  71
  72static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
  73static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
  74
  75#define XS_TCP_LINGER_TO	(15U * HZ)
  76static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
  77
  78/*
  79 * We can register our own files under /proc/sys/sunrpc by
  80 * calling register_sysctl_table() again.  The files in that
  81 * directory become the union of all files registered there.
  82 *
  83 * We simply need to make sure that we don't collide with
  84 * someone else's file names!
  85 */
  86
  87static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
  88static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
  89static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
  90static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
  91static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
  92
  93static struct ctl_table_header *sunrpc_table_header;
  94
  95static struct xprt_class xs_local_transport;
  96static struct xprt_class xs_udp_transport;
  97static struct xprt_class xs_tcp_transport;
  98static struct xprt_class xs_bc_tcp_transport;
  99
 100/*
 101 * FIXME: changing the UDP slot table size should also resize the UDP
 102 *        socket buffers for existing UDP transports
 103 */
 104static struct ctl_table xs_tunables_table[] = {
 105	{
 106		.procname	= "udp_slot_table_entries",
 107		.data		= &xprt_udp_slot_table_entries,
 108		.maxlen		= sizeof(unsigned int),
 109		.mode		= 0644,
 110		.proc_handler	= proc_dointvec_minmax,
 111		.extra1		= &min_slot_table_size,
 112		.extra2		= &max_slot_table_size
 113	},
 114	{
 115		.procname	= "tcp_slot_table_entries",
 116		.data		= &xprt_tcp_slot_table_entries,
 117		.maxlen		= sizeof(unsigned int),
 118		.mode		= 0644,
 119		.proc_handler	= proc_dointvec_minmax,
 120		.extra1		= &min_slot_table_size,
 121		.extra2		= &max_slot_table_size
 122	},
 123	{
 124		.procname	= "tcp_max_slot_table_entries",
 125		.data		= &xprt_max_tcp_slot_table_entries,
 126		.maxlen		= sizeof(unsigned int),
 127		.mode		= 0644,
 128		.proc_handler	= proc_dointvec_minmax,
 129		.extra1		= &min_slot_table_size,
 130		.extra2		= &max_tcp_slot_table_limit
 131	},
 132	{
 133		.procname	= "min_resvport",
 134		.data		= &xprt_min_resvport,
 135		.maxlen		= sizeof(unsigned int),
 136		.mode		= 0644,
 137		.proc_handler	= proc_dointvec_minmax,
 138		.extra1		= &xprt_min_resvport_limit,
 139		.extra2		= &xprt_max_resvport_limit
 140	},
 141	{
 142		.procname	= "max_resvport",
 143		.data		= &xprt_max_resvport,
 144		.maxlen		= sizeof(unsigned int),
 145		.mode		= 0644,
 146		.proc_handler	= proc_dointvec_minmax,
 147		.extra1		= &xprt_min_resvport_limit,
 148		.extra2		= &xprt_max_resvport_limit
 149	},
 150	{
 151		.procname	= "tcp_fin_timeout",
 152		.data		= &xs_tcp_fin_timeout,
 153		.maxlen		= sizeof(xs_tcp_fin_timeout),
 154		.mode		= 0644,
 155		.proc_handler	= proc_dointvec_jiffies,
 156	},
 157	{ },
 158};
 159
 160static struct ctl_table sunrpc_table[] = {
 161	{
 162		.procname	= "sunrpc",
 163		.mode		= 0555,
 164		.child		= xs_tunables_table
 165	},
 166	{ },
 167};
 168
 169/*
 170 * Wait duration for a reply from the RPC portmapper.
 171 */
 172#define XS_BIND_TO		(60U * HZ)
 173
 174/*
 175 * Delay if a UDP socket connect error occurs.  This is most likely some
 176 * kind of resource problem on the local host.
 177 */
 178#define XS_UDP_REEST_TO		(2U * HZ)
 179
 180/*
 181 * The reestablish timeout allows clients to delay for a bit before attempting
 182 * to reconnect to a server that just dropped our connection.
 183 *
 184 * We implement an exponential backoff when trying to reestablish a TCP
 185 * transport connection with the server.  Some servers like to drop a TCP
 186 * connection when they are overworked, so we start with a short timeout and
 187 * increase over time if the server is down or not responding.
 188 */
 189#define XS_TCP_INIT_REEST_TO	(3U * HZ)
 190
 191/*
 192 * TCP idle timeout; client drops the transport socket if it is idle
 193 * for this long.  Note that we also timeout UDP sockets to prevent
 194 * holding port numbers when there is no RPC traffic.
 195 */
 196#define XS_IDLE_DISC_TO		(5U * 60 * HZ)
 197
 198#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 199# undef  RPC_DEBUG_DATA
 200# define RPCDBG_FACILITY	RPCDBG_TRANS
 201#endif
 202
 203#ifdef RPC_DEBUG_DATA
 204static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
 205{
 206	u8 *buf = (u8 *) packet;
 207	int j;
 208
 209	dprintk("RPC:       %s\n", msg);
 210	for (j = 0; j < count && j < 128; j += 4) {
 211		if (!(j & 31)) {
 212			if (j)
 213				dprintk("\n");
 214			dprintk("0x%04x ", j);
 215		}
 216		dprintk("%02x%02x%02x%02x ",
 217			buf[j], buf[j+1], buf[j+2], buf[j+3]);
 218	}
 219	dprintk("\n");
 220}
 221#else
 222static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
 223{
 224	/* NOP */
 225}
 226#endif
 227
 228static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
 229{
 230	return (struct rpc_xprt *) sk->sk_user_data;
 231}
 232
 233static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
 234{
 235	return (struct sockaddr *) &xprt->addr;
 236}
 237
 238static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
 239{
 240	return (struct sockaddr_un *) &xprt->addr;
 241}
 242
 243static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
 244{
 245	return (struct sockaddr_in *) &xprt->addr;
 246}
 247
 248static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
 249{
 250	return (struct sockaddr_in6 *) &xprt->addr;
 251}
 252
 253static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
 254{
 255	struct sockaddr *sap = xs_addr(xprt);
 256	struct sockaddr_in6 *sin6;
 257	struct sockaddr_in *sin;
 258	struct sockaddr_un *sun;
 259	char buf[128];
 260
 261	switch (sap->sa_family) {
 262	case AF_LOCAL:
 263		sun = xs_addr_un(xprt);
 264		strscpy(buf, sun->sun_path, sizeof(buf));
 265		xprt->address_strings[RPC_DISPLAY_ADDR] =
 266						kstrdup(buf, GFP_KERNEL);
 267		break;
 268	case AF_INET:
 269		(void)rpc_ntop(sap, buf, sizeof(buf));
 270		xprt->address_strings[RPC_DISPLAY_ADDR] =
 271						kstrdup(buf, GFP_KERNEL);
 272		sin = xs_addr_in(xprt);
 273		snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
 274		break;
 275	case AF_INET6:
 276		(void)rpc_ntop(sap, buf, sizeof(buf));
 277		xprt->address_strings[RPC_DISPLAY_ADDR] =
 278						kstrdup(buf, GFP_KERNEL);
 279		sin6 = xs_addr_in6(xprt);
 280		snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
 281		break;
 282	default:
 283		BUG();
 284	}
 285
 286	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
 287}
 288
 289static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
 290{
 291	struct sockaddr *sap = xs_addr(xprt);
 292	char buf[128];
 293
 294	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
 295	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
 296
 297	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
 298	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
 299}
 300
 301static void xs_format_peer_addresses(struct rpc_xprt *xprt,
 302				     const char *protocol,
 303				     const char *netid)
 304{
 305	xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
 306	xprt->address_strings[RPC_DISPLAY_NETID] = netid;
 307	xs_format_common_peer_addresses(xprt);
 308	xs_format_common_peer_ports(xprt);
 309}
 310
 311static void xs_update_peer_port(struct rpc_xprt *xprt)
 312{
 313	kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
 314	kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
 315
 316	xs_format_common_peer_ports(xprt);
 317}
 318
 319static void xs_free_peer_addresses(struct rpc_xprt *xprt)
 320{
 321	unsigned int i;
 322
 323	for (i = 0; i < RPC_DISPLAY_MAX; i++)
 324		switch (i) {
 325		case RPC_DISPLAY_PROTO:
 326		case RPC_DISPLAY_NETID:
 327			continue;
 328		default:
 329			kfree(xprt->address_strings[i]);
 330		}
 331}
 332
 333static size_t
 334xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
 335{
 336	size_t i,n;
 337
 338	if (!want || !(buf->flags & XDRBUF_SPARSE_PAGES))
 339		return want;
 340	n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT;
 341	for (i = 0; i < n; i++) {
 342		if (buf->pages[i])
 343			continue;
 344		buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp);
 345		if (!buf->pages[i]) {
 346			i *= PAGE_SIZE;
 347			return i > buf->page_base ? i - buf->page_base : 0;
 348		}
 349	}
 350	return want;
 351}
 352
 353static ssize_t
 354xs_sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags, size_t seek)
 355{
 356	ssize_t ret;
 357	if (seek != 0)
 358		iov_iter_advance(&msg->msg_iter, seek);
 359	ret = sock_recvmsg(sock, msg, flags);
 360	return ret > 0 ? ret + seek : ret;
 361}
 362
 363static ssize_t
 364xs_read_kvec(struct socket *sock, struct msghdr *msg, int flags,
 365		struct kvec *kvec, size_t count, size_t seek)
 366{
 367	iov_iter_kvec(&msg->msg_iter, ITER_DEST, kvec, 1, count);
 368	return xs_sock_recvmsg(sock, msg, flags, seek);
 369}
 370
 371static ssize_t
 372xs_read_bvec(struct socket *sock, struct msghdr *msg, int flags,
 373		struct bio_vec *bvec, unsigned long nr, size_t count,
 374		size_t seek)
 375{
 376	iov_iter_bvec(&msg->msg_iter, ITER_DEST, bvec, nr, count);
 377	return xs_sock_recvmsg(sock, msg, flags, seek);
 378}
 379
 380static ssize_t
 381xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
 382		size_t count)
 383{
 384	iov_iter_discard(&msg->msg_iter, ITER_DEST, count);
 385	return sock_recvmsg(sock, msg, flags);
 386}
 387
 388#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 389static void
 390xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
 391{
 392	struct bvec_iter bi = {
 393		.bi_size = count,
 394	};
 395	struct bio_vec bv;
 396
 397	bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
 398	for_each_bvec(bv, bvec, bi, bi)
 399		flush_dcache_page(bv.bv_page);
 400}
 401#else
 402static inline void
 403xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
 404{
 405}
 406#endif
 407
 408static ssize_t
 409xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
 410		struct xdr_buf *buf, size_t count, size_t seek, size_t *read)
 411{
 412	size_t want, seek_init = seek, offset = 0;
 413	ssize_t ret;
 414
 415	want = min_t(size_t, count, buf->head[0].iov_len);
 416	if (seek < want) {
 417		ret = xs_read_kvec(sock, msg, flags, &buf->head[0], want, seek);
 418		if (ret <= 0)
 419			goto sock_err;
 420		offset += ret;
 421		if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
 422			goto out;
 423		if (ret != want)
 424			goto out;
 425		seek = 0;
 426	} else {
 427		seek -= want;
 428		offset += want;
 429	}
 430
 431	want = xs_alloc_sparse_pages(
 432		buf, min_t(size_t, count - offset, buf->page_len),
 433		GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
 434	if (seek < want) {
 435		ret = xs_read_bvec(sock, msg, flags, buf->bvec,
 436				xdr_buf_pagecount(buf),
 437				want + buf->page_base,
 438				seek + buf->page_base);
 439		if (ret <= 0)
 440			goto sock_err;
 441		xs_flush_bvec(buf->bvec, ret, seek + buf->page_base);
 442		ret -= buf->page_base;
 443		offset += ret;
 444		if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
 445			goto out;
 446		if (ret != want)
 447			goto out;
 448		seek = 0;
 449	} else {
 450		seek -= want;
 451		offset += want;
 452	}
 453
 454	want = min_t(size_t, count - offset, buf->tail[0].iov_len);
 455	if (seek < want) {
 456		ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek);
 457		if (ret <= 0)
 458			goto sock_err;
 459		offset += ret;
 460		if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
 461			goto out;
 462		if (ret != want)
 463			goto out;
 464	} else if (offset < seek_init)
 465		offset = seek_init;
 466	ret = -EMSGSIZE;
 467out:
 468	*read = offset - seek_init;
 469	return ret;
 470sock_err:
 471	offset += seek;
 472	goto out;
 473}
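
/*
 * An xdr_buf is received in three sections -- head kvec, page array, tail
 * kvec -- with "seek" carrying the bytes already received across section
 * boundaries.  For instance (hypothetical sizes), with head.iov_len == 128
 * and seek == 200, the head branch takes the else path (seek becomes 72,
 * offset 128) and reception resumes 72 bytes into the page data; *read
 * reports only the bytes that were new to this call (offset - seek_init).
 */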
 474
 475static void
 476xs_read_header(struct sock_xprt *transport, struct xdr_buf *buf)
 477{
 478	if (!transport->recv.copied) {
 479		if (buf->head[0].iov_len >= transport->recv.offset)
 480			memcpy(buf->head[0].iov_base,
 481					&transport->recv.xid,
 482					transport->recv.offset);
 483		transport->recv.copied = transport->recv.offset;
 484	}
 485}
 486
 487static bool
 488xs_read_stream_request_done(struct sock_xprt *transport)
 489{
 490	return transport->recv.fraghdr & cpu_to_be32(RPC_LAST_STREAM_FRAGMENT);
 491}
 492
 493static void
 494xs_read_stream_check_eor(struct sock_xprt *transport,
 495		struct msghdr *msg)
 496{
 497	if (xs_read_stream_request_done(transport))
 498		msg->msg_flags |= MSG_EOR;
 499}
 500
 501static ssize_t
 502xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
 503		int flags, struct rpc_rqst *req)
 504{
 505	struct xdr_buf *buf = &req->rq_private_buf;
 506	size_t want, read;
 507	ssize_t ret;
 508
 509	xs_read_header(transport, buf);
 510
 511	want = transport->recv.len - transport->recv.offset;
 512	if (want != 0) {
 513		ret = xs_read_xdr_buf(transport->sock, msg, flags, buf,
 514				transport->recv.copied + want,
 515				transport->recv.copied,
 516				&read);
 517		transport->recv.offset += read;
 518		transport->recv.copied += read;
 519	}
 520
 521	if (transport->recv.offset == transport->recv.len)
 522		xs_read_stream_check_eor(transport, msg);
 523
 524	if (want == 0)
 525		return 0;
 526
 527	switch (ret) {
 528	default:
 529		break;
 530	case -EFAULT:
 531	case -EMSGSIZE:
 532		msg->msg_flags |= MSG_TRUNC;
 533		return read;
 534	case 0:
 535		return -ESHUTDOWN;
 536	}
 537	return ret < 0 ? ret : read;
 538}
 539
 540static size_t
 541xs_read_stream_headersize(bool isfrag)
 542{
 543	if (isfrag)
 544		return sizeof(__be32);
 545	return 3 * sizeof(__be32);
 546}
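
/*
 * The first fragment of a record is read with a 12-byte header: the 4-byte
 * record marker followed by the XID and call-direction words, so that
 * recv.xid and recv.calldir are captured up front.  Continuation fragments
 * carry only the marker.  Reading all three through one kvec aimed at
 * &recv.fraghdr (see below) works because the fields are declared back to
 * back, roughly:
 *
 *	struct {
 *		__be32	fraghdr,	// RPC_LAST_STREAM_FRAGMENT | length
 *			xid,		// transaction ID of the message
 *			calldir;	// RPC_CALL or RPC_REPLY
 *	} recv;
 */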
 547
 548static ssize_t
 549xs_read_stream_header(struct sock_xprt *transport, struct msghdr *msg,
 550		int flags, size_t want, size_t seek)
 551{
 552	struct kvec kvec = {
 553		.iov_base = &transport->recv.fraghdr,
 554		.iov_len = want,
 555	};
 556	return xs_read_kvec(transport->sock, msg, flags, &kvec, want, seek);
 557}
 558
 559#if defined(CONFIG_SUNRPC_BACKCHANNEL)
 560static ssize_t
 561xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
 562{
 563	struct rpc_xprt *xprt = &transport->xprt;
 564	struct rpc_rqst *req;
 565	ssize_t ret;
 566
 567	/* Is this transport associated with the backchannel? */
 568	if (!xprt->bc_serv)
 569		return -ESHUTDOWN;
 570
 571	/* Look up and lock the request corresponding to the given XID */
 572	req = xprt_lookup_bc_request(xprt, transport->recv.xid);
 573	if (!req) {
 574		printk(KERN_WARNING "Callback slot table overflowed\n");
 575		return -ESHUTDOWN;
 576	}
 577	if (transport->recv.copied && !req->rq_private_buf.len)
 578		return -ESHUTDOWN;
 579
 580	ret = xs_read_stream_request(transport, msg, flags, req);
 581	if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
 582		xprt_complete_bc_request(req, transport->recv.copied);
 583	else
 584		req->rq_private_buf.len = transport->recv.copied;
 585
 586	return ret;
 587}
 588#else /* CONFIG_SUNRPC_BACKCHANNEL */
 589static ssize_t
 590xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
 591{
 592	return -ESHUTDOWN;
 593}
 594#endif /* CONFIG_SUNRPC_BACKCHANNEL */
 595
 596static ssize_t
 597xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags)
 598{
 599	struct rpc_xprt *xprt = &transport->xprt;
 600	struct rpc_rqst *req;
 601	ssize_t ret = 0;
 602
 603	/* Look up and lock the request corresponding to the given XID */
 604	spin_lock(&xprt->queue_lock);
 605	req = xprt_lookup_rqst(xprt, transport->recv.xid);
 606	if (!req || (transport->recv.copied && !req->rq_private_buf.len)) {
 607		msg->msg_flags |= MSG_TRUNC;
 608		goto out;
 609	}
 610	xprt_pin_rqst(req);
 611	spin_unlock(&xprt->queue_lock);
 612
 613	ret = xs_read_stream_request(transport, msg, flags, req);
 614
 615	spin_lock(&xprt->queue_lock);
 616	if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
 617		xprt_complete_rqst(req->rq_task, transport->recv.copied);
 618	else
 619		req->rq_private_buf.len = transport->recv.copied;
 620	xprt_unpin_rqst(req);
 621out:
 622	spin_unlock(&xprt->queue_lock);
 623	return ret;
 624}
 625
 626static ssize_t
 627xs_read_stream(struct sock_xprt *transport, int flags)
 628{
 629	struct msghdr msg = { 0 };
 630	size_t want, read = 0;
 631	ssize_t ret = 0;
 632
 633	if (transport->recv.len == 0) {
 634		want = xs_read_stream_headersize(transport->recv.copied != 0);
 635		ret = xs_read_stream_header(transport, &msg, flags, want,
 636				transport->recv.offset);
 637		if (ret <= 0)
 638			goto out_err;
 639		transport->recv.offset = ret;
 640		if (transport->recv.offset != want)
 641			return transport->recv.offset;
 642		transport->recv.len = be32_to_cpu(transport->recv.fraghdr) &
 643			RPC_FRAGMENT_SIZE_MASK;
 644		transport->recv.offset -= sizeof(transport->recv.fraghdr);
 645		read = ret;
 646	}
 647
 648	switch (be32_to_cpu(transport->recv.calldir)) {
 649	default:
 650		msg.msg_flags |= MSG_TRUNC;
 651		break;
 652	case RPC_CALL:
 653		ret = xs_read_stream_call(transport, &msg, flags);
 654		break;
 655	case RPC_REPLY:
 656		ret = xs_read_stream_reply(transport, &msg, flags);
 657	}
 658	if (msg.msg_flags & MSG_TRUNC) {
 659		transport->recv.calldir = cpu_to_be32(-1);
 660		transport->recv.copied = -1;
 661	}
 662	if (ret < 0)
 663		goto out_err;
 664	read += ret;
 665	if (transport->recv.offset < transport->recv.len) {
 666		if (!(msg.msg_flags & MSG_TRUNC))
 667			return read;
 668		msg.msg_flags = 0;
 669		ret = xs_read_discard(transport->sock, &msg, flags,
 670				transport->recv.len - transport->recv.offset);
 671		if (ret <= 0)
 672			goto out_err;
 673		transport->recv.offset += ret;
 674		read += ret;
 675		if (transport->recv.offset != transport->recv.len)
 676			return read;
 677	}
 678	if (xs_read_stream_request_done(transport)) {
 679		trace_xs_stream_read_request(transport);
 680		transport->recv.copied = 0;
 681	}
 682	transport->recv.offset = 0;
 683	transport->recv.len = 0;
 684	return read;
 685out_err:
 686	return ret != 0 ? ret : -ESHUTDOWN;
 687}
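
/*
 * Framing example for the loop above: a 200-byte reply sent as a single
 * fragment arrives behind the marker htonl(0x800000c8), i.e.
 * RPC_LAST_STREAM_FRAGMENT | 200.  After the 12-byte header read,
 * recv.len == 200 and recv.offset is rebased to 8 -- the XID and calldir
 * words count toward the fragment, the marker itself does not -- leaving
 * 192 payload bytes to pull into the matching request.
 */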
 688
 689static __poll_t xs_poll_socket(struct sock_xprt *transport)
 690{
 691	return transport->sock->ops->poll(transport->file, transport->sock,
 692			NULL);
 693}
 694
 695static bool xs_poll_socket_readable(struct sock_xprt *transport)
 696{
 697	__poll_t events = xs_poll_socket(transport);
 698
 699	return (events & (EPOLLIN | EPOLLRDNORM)) && !(events & EPOLLRDHUP);
 700}
 701
 702static void xs_poll_check_readable(struct sock_xprt *transport)
 703{
 704
 705	clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
 706	if (!xs_poll_socket_readable(transport))
 707		return;
 708	if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
 709		queue_work(xprtiod_workqueue, &transport->recv_worker);
 710}
 711
 712static void xs_stream_data_receive(struct sock_xprt *transport)
 713{
 714	size_t read = 0;
 715	ssize_t ret = 0;
 716
 717	mutex_lock(&transport->recv_mutex);
 718	if (transport->sock == NULL)
 719		goto out;
 720	for (;;) {
 721		ret = xs_read_stream(transport, MSG_DONTWAIT);
 722		if (ret < 0)
 723			break;
 724		read += ret;
 725		cond_resched();
 726	}
 727	if (ret == -ESHUTDOWN)
 728		kernel_sock_shutdown(transport->sock, SHUT_RDWR);
 729	else
 730		xs_poll_check_readable(transport);
 731out:
 732	mutex_unlock(&transport->recv_mutex);
 733	trace_xs_stream_read_data(&transport->xprt, ret, read);
 734}
 735
 736static void xs_stream_data_receive_workfn(struct work_struct *work)
 737{
 738	struct sock_xprt *transport =
 739		container_of(work, struct sock_xprt, recv_worker);
 740	unsigned int pflags = memalloc_nofs_save();
 741
 742	xs_stream_data_receive(transport);
 743	memalloc_nofs_restore(pflags);
 744}
 745
 746static void
 747xs_stream_reset_connect(struct sock_xprt *transport)
 748{
 749	transport->recv.offset = 0;
 750	transport->recv.len = 0;
 751	transport->recv.copied = 0;
 752	transport->xmit.offset = 0;
 753}
 754
 755static void
 756xs_stream_start_connect(struct sock_xprt *transport)
 757{
 758	transport->xprt.stat.connect_count++;
 759	transport->xprt.stat.connect_start = jiffies;
 760}
 761
 762#define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)
 763
 764/**
 765 * xs_nospace - handle an incomplete transmission
 766 * @req: pointer to RPC request
 767 * @transport: pointer to struct sock_xprt
 768 *
 769 */
 770static int xs_nospace(struct rpc_rqst *req, struct sock_xprt *transport)
 771{
 772	struct rpc_xprt *xprt = &transport->xprt;
 773	struct sock *sk = transport->inet;
 774	int ret = -EAGAIN;
 775
 776	trace_rpc_socket_nospace(req, transport);
 777
 778	/* Protect against races with write_space */
 779	spin_lock(&xprt->transport_lock);
 780
 781	/* Don't race with disconnect */
 782	if (xprt_connected(xprt)) {
 783		/* wait for more buffer space */
 784		set_bit(XPRT_SOCK_NOSPACE, &transport->sock_state);
 785		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 786		sk->sk_write_pending++;
 787		xprt_wait_for_buffer_space(xprt);
 788	} else
 789		ret = -ENOTCONN;
 790
 791	spin_unlock(&xprt->transport_lock);
 792	return ret;
 793}
 794
 795static int xs_sock_nospace(struct rpc_rqst *req)
 796{
 797	struct sock_xprt *transport =
 798		container_of(req->rq_xprt, struct sock_xprt, xprt);
 799	struct sock *sk = transport->inet;
 800	int ret = -EAGAIN;
 801
 802	lock_sock(sk);
 803	if (!sock_writeable(sk))
 804		ret = xs_nospace(req, transport);
 805	release_sock(sk);
 806	return ret;
 807}
 808
 809static int xs_stream_nospace(struct rpc_rqst *req, bool vm_wait)
 810{
 811	struct sock_xprt *transport =
 812		container_of(req->rq_xprt, struct sock_xprt, xprt);
 813	struct sock *sk = transport->inet;
 814	int ret = -EAGAIN;
 815
 816	if (vm_wait)
 817		return -ENOBUFS;
 818	lock_sock(sk);
 819	if (!sk_stream_memory_free(sk))
 820		ret = xs_nospace(req, transport);
 821	release_sock(sk);
 822	return ret;
 823}
 824
 825static int xs_stream_prepare_request(struct rpc_rqst *req, struct xdr_buf *buf)
 826{
 827	return xdr_alloc_bvec(buf, rpc_task_gfp_mask());
 828}
 829
 830/*
 831 * Determine if the previous message in the stream was aborted before it
 832 * could complete transmission.
 833 */
 834static bool
 835xs_send_request_was_aborted(struct sock_xprt *transport, struct rpc_rqst *req)
 836{
 837	return transport->xmit.offset != 0 && req->rq_bytes_sent == 0;
 838}
 839
 840/*
 841 * Return the stream record marker field for a record of length < 2^31-1
 842 */
 843static rpc_fraghdr
 844xs_stream_record_marker(struct xdr_buf *xdr)
 845{
 846	if (!xdr->len)
 847		return 0;
 848	return cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | (u32)xdr->len);
 849}
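
/*
 * For example, a 200-byte send buffer yields
 * cpu_to_be32(0x80000000 | 200), i.e. the wire marker 0x800000c8: the
 * top bit flags the final (here, the only) fragment and the low 31 bits
 * carry its length.
 */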
 850
 851/**
 852 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
 853 * @req: pointer to RPC request
 854 *
 855 * Return values:
 856 *        0:	The request has been sent
 857 *   EAGAIN:	The socket was blocked, please call again later to
 858 *		complete the request
 859 * ENOTCONN:	Caller needs to invoke connect logic then call again
 860 *    other:	Some other error occurred, the request was not sent
 861 */
 862static int xs_local_send_request(struct rpc_rqst *req)
 863{
 864	struct rpc_xprt *xprt = req->rq_xprt;
 865	struct sock_xprt *transport =
 866				container_of(xprt, struct sock_xprt, xprt);
 867	struct xdr_buf *xdr = &req->rq_snd_buf;
 868	rpc_fraghdr rm = xs_stream_record_marker(xdr);
 869	unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
 870	struct msghdr msg = {
 871		.msg_flags	= XS_SENDMSG_FLAGS,
 872	};
 873	bool vm_wait;
 874	unsigned int sent;
 875	int status;
 876
 877	/* Close the stream if the previous transmission was incomplete */
 878	if (xs_send_request_was_aborted(transport, req)) {
 879		xprt_force_disconnect(xprt);
 880		return -ENOTCONN;
 881	}
 882
 883	xs_pktdump("packet data:",
 884			req->rq_svec->iov_base, req->rq_svec->iov_len);
 885
 886	vm_wait = sk_stream_is_writeable(transport->inet) ? true : false;
 887
 888	req->rq_xtime = ktime_get();
 889	status = xprt_sock_sendmsg(transport->sock, &msg, xdr,
 890				   transport->xmit.offset, rm, &sent);
 891	dprintk("RPC:       %s(%u) = %d\n",
 892			__func__, xdr->len - transport->xmit.offset, status);
 893
 894	if (likely(sent > 0) || status == 0) {
 895		transport->xmit.offset += sent;
 896		req->rq_bytes_sent = transport->xmit.offset;
 897		if (likely(req->rq_bytes_sent >= msglen)) {
 898			req->rq_xmit_bytes_sent += transport->xmit.offset;
 899			transport->xmit.offset = 0;
 900			return 0;
 901		}
 902		status = -EAGAIN;
 903		vm_wait = false;
 904	}
 905
 906	switch (status) {
 907	case -EAGAIN:
 908		status = xs_stream_nospace(req, vm_wait);
 909		break;
 910	default:
 911		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
 912			-status);
 913		fallthrough;
 914	case -EPIPE:
 915		xprt_force_disconnect(xprt);
 916		status = -ENOTCONN;
 917	}
 918
 919	return status;
 920}
 921
 922/**
 923 * xs_udp_send_request - write an RPC request to a UDP socket
 924 * @req: pointer to RPC request
 925 *
 926 * Return values:
 927 *        0:	The request has been sent
 928 *   EAGAIN:	The socket was blocked, please call again later to
 929 *		complete the request
 930 * ENOTCONN:	Caller needs to invoke connect logic then call again
 931 *    other:	Some other error occurred, the request was not sent
 932 */
 933static int xs_udp_send_request(struct rpc_rqst *req)
 934{
 935	struct rpc_xprt *xprt = req->rq_xprt;
 936	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 937	struct xdr_buf *xdr = &req->rq_snd_buf;
 938	struct msghdr msg = {
 939		.msg_name	= xs_addr(xprt),
 940		.msg_namelen	= xprt->addrlen,
 941		.msg_flags	= XS_SENDMSG_FLAGS,
 942	};
 943	unsigned int sent;
 944	int status;
 945
 946	xs_pktdump("packet data:",
 947				req->rq_svec->iov_base,
 948				req->rq_svec->iov_len);
 949
 950	if (!xprt_bound(xprt))
 951		return -ENOTCONN;
 952
 953	if (!xprt_request_get_cong(xprt, req))
 954		return -EBADSLT;
 955
 956	status = xdr_alloc_bvec(xdr, rpc_task_gfp_mask());
 957	if (status < 0)
 958		return status;
 959	req->rq_xtime = ktime_get();
 960	status = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, 0, &sent);
 961
 962	dprintk("RPC:       xs_udp_send_request(%u) = %d\n",
 963			xdr->len, status);
 964
 965	/* firewall is blocking us, don't return -EAGAIN or we end up looping */
 966	if (status == -EPERM)
 967		goto process_status;
 968
 969	if (status == -EAGAIN && sock_writeable(transport->inet))
 970		status = -ENOBUFS;
 971
 972	if (sent > 0 || status == 0) {
 973		req->rq_xmit_bytes_sent += sent;
 974		if (sent >= req->rq_slen)
 975			return 0;
 976		/* Still some bytes left; set up for a retry later. */
 977		status = -EAGAIN;
 978	}
 979
 980process_status:
 981	switch (status) {
 982	case -ENOTSOCK:
 983		status = -ENOTCONN;
 984		/* Should we call xs_close() here? */
 985		break;
 986	case -EAGAIN:
 987		status = xs_sock_nospace(req);
 988		break;
 989	case -ENETUNREACH:
 990	case -ENOBUFS:
 991	case -EPIPE:
 992	case -ECONNREFUSED:
 993	case -EPERM:
 994		/* When the server has died, an ICMP port unreachable message
 995		 * prompts ECONNREFUSED. */
 996		break;
 997	default:
 998		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
 999			-status);
1000	}
1001
1002	return status;
1003}
1004
1005/**
1006 * xs_tcp_send_request - write an RPC request to a TCP socket
1007 * @req: pointer to RPC request
1008 *
1009 * Return values:
1010 *        0:	The request has been sent
1011 *   EAGAIN:	The socket was blocked, please call again later to
1012 *		complete the request
1013 * ENOTCONN:	Caller needs to invoke connect logic then call again
1014 *    other:	Some other error occurred, the request was not sent
1015 *
1016 * XXX: In the case of soft timeouts, should we eventually give up
1017 *	if sendmsg is not able to make progress?
1018 */
1019static int xs_tcp_send_request(struct rpc_rqst *req)
1020{
1021	struct rpc_xprt *xprt = req->rq_xprt;
1022	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1023	struct xdr_buf *xdr = &req->rq_snd_buf;
1024	rpc_fraghdr rm = xs_stream_record_marker(xdr);
1025	unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
1026	struct msghdr msg = {
1027		.msg_flags	= XS_SENDMSG_FLAGS,
1028	};
1029	bool vm_wait;
1030	unsigned int sent;
1031	int status;
1032
1033	/* Close the stream if the previous transmission was incomplete */
1034	if (xs_send_request_was_aborted(transport, req)) {
1035		if (transport->sock != NULL)
1036			kernel_sock_shutdown(transport->sock, SHUT_RDWR);
1037		return -ENOTCONN;
1038	}
1039	if (!transport->inet)
1040		return -ENOTCONN;
1041
1042	xs_pktdump("packet data:",
1043				req->rq_svec->iov_base,
1044				req->rq_svec->iov_len);
1045
1046	if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state))
1047		xs_tcp_set_socket_timeouts(xprt, transport->sock);
1048
1049	xs_set_srcport(transport, transport->sock);
1050
1051	/* Continue transmitting the packet/record. We must be careful
1052	 * to cope with writespace callbacks arriving _after_ we have
1053	 * called sendmsg(). */
1054	req->rq_xtime = ktime_get();
1055	tcp_sock_set_cork(transport->inet, true);
1056
1057	vm_wait = sk_stream_is_writeable(transport->inet) ? true : false;
1058
1059	do {
1060		status = xprt_sock_sendmsg(transport->sock, &msg, xdr,
1061					   transport->xmit.offset, rm, &sent);
1062
1063		dprintk("RPC:       xs_tcp_send_request(%u) = %d\n",
1064				xdr->len - transport->xmit.offset, status);
1065
1066		/* If we've sent the entire packet, immediately
1067		 * reset the count of bytes sent. */
1068		transport->xmit.offset += sent;
1069		req->rq_bytes_sent = transport->xmit.offset;
1070		if (likely(req->rq_bytes_sent >= msglen)) {
1071			req->rq_xmit_bytes_sent += transport->xmit.offset;
1072			transport->xmit.offset = 0;
1073			if (atomic_long_read(&xprt->xmit_queuelen) == 1)
1074				tcp_sock_set_cork(transport->inet, false);
1075			return 0;
1076		}
1077
1078		WARN_ON_ONCE(sent == 0 && status == 0);
1079
1080		if (sent > 0)
1081			vm_wait = false;
1082
1083	} while (status == 0);
1084
1085	switch (status) {
1086	case -ENOTSOCK:
1087		status = -ENOTCONN;
1088		/* Should we call xs_close() here? */
1089		break;
1090	case -EAGAIN:
1091		status = xs_stream_nospace(req, vm_wait);
1092		break;
1093	case -ECONNRESET:
1094	case -ECONNREFUSED:
1095	case -ENOTCONN:
1096	case -EADDRINUSE:
1097	case -ENOBUFS:
1098	case -EPIPE:
1099		break;
1100	default:
1101		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
1102			-status);
1103	}
1104
1105	return status;
1106}
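
/*
 * The cork/uncork pattern above lets back-to-back small RPCs share full
 * TCP segments: the socket is corked before the sendmsg loop and uncorked
 * only once a record completes and no other request is queued behind it.
 * A minimal sketch of the idiom, assuming "sk" is a connected TCP socket:
 *
 *	tcp_sock_set_cork(sk, true);	// hold back partial frames
 *	// ...one or more sendmsg() calls building the record...
 *	tcp_sock_set_cork(sk, false);	// push out whatever is pending
 */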
1107
1108static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
1109{
1110	transport->old_data_ready = sk->sk_data_ready;
1111	transport->old_state_change = sk->sk_state_change;
1112	transport->old_write_space = sk->sk_write_space;
1113	transport->old_error_report = sk->sk_error_report;
1114}
1115
1116static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
1117{
1118	sk->sk_data_ready = transport->old_data_ready;
1119	sk->sk_state_change = transport->old_state_change;
1120	sk->sk_write_space = transport->old_write_space;
1121	sk->sk_error_report = transport->old_error_report;
1122}
1123
1124static void xs_sock_reset_state_flags(struct rpc_xprt *xprt)
1125{
1126	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1127
1128	clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
1129	clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state);
1130	clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state);
1131	clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state);
1132	clear_bit(XPRT_SOCK_NOSPACE, &transport->sock_state);
1133}
1134
1135static void xs_run_error_worker(struct sock_xprt *transport, unsigned int nr)
1136{
1137	set_bit(nr, &transport->sock_state);
1138	queue_work(xprtiod_workqueue, &transport->error_worker);
1139}
1140
1141static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
1142{
1143	xprt->connect_cookie++;
1144	smp_mb__before_atomic();
1145	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1146	clear_bit(XPRT_CLOSING, &xprt->state);
1147	xs_sock_reset_state_flags(xprt);
1148	smp_mb__after_atomic();
1149}
1150
1151/**
1152 * xs_error_report - callback to handle TCP socket state errors
1153 * @sk: socket
1154 *
1155 * Note: we don't call sock_error() since there may be a rpc_task
1156 * using the socket, and so we don't want to clear sk->sk_err.
1157 */
1158static void xs_error_report(struct sock *sk)
1159{
1160	struct sock_xprt *transport;
1161	struct rpc_xprt *xprt;
1162
1163	if (!(xprt = xprt_from_sock(sk)))
1164		return;
1165
1166	transport = container_of(xprt, struct sock_xprt, xprt);
1167	transport->xprt_err = -sk->sk_err;
1168	if (transport->xprt_err == 0)
1169		return;
1170	dprintk("RPC:       xs_error_report client %p, error=%d...\n",
1171			xprt, -transport->xprt_err);
1172	trace_rpc_socket_error(xprt, sk->sk_socket, transport->xprt_err);
1173
1174	/* barrier ensures xprt_err is set before XPRT_SOCK_WAKE_ERROR */
1175	smp_mb__before_atomic();
1176	xs_run_error_worker(transport, XPRT_SOCK_WAKE_ERROR);
1177}
1178
1179static void xs_reset_transport(struct sock_xprt *transport)
1180{
1181	struct socket *sock = transport->sock;
1182	struct sock *sk = transport->inet;
1183	struct rpc_xprt *xprt = &transport->xprt;
1184	struct file *filp = transport->file;
1185
1186	if (sk == NULL)
1187		return;
1188	/*
1189	 * Make sure we're calling this in a context from which it is safe
1190	 * to call __fput_sync(). In practice that means rpciod and the
1191	 * system workqueue.
1192	 */
1193	if (!(current->flags & PF_WQ_WORKER)) {
1194		WARN_ON_ONCE(1);
1195		set_bit(XPRT_CLOSE_WAIT, &xprt->state);
1196		return;
1197	}
1198
1199	if (atomic_read(&transport->xprt.swapper))
1200		sk_clear_memalloc(sk);
1201
1202	kernel_sock_shutdown(sock, SHUT_RDWR);
1203
1204	mutex_lock(&transport->recv_mutex);
1205	lock_sock(sk);
1206	transport->inet = NULL;
1207	transport->sock = NULL;
1208	transport->file = NULL;
1209
1210	sk->sk_user_data = NULL;
1211
1212	xs_restore_old_callbacks(transport, sk);
1213	xprt_clear_connected(xprt);
1214	xs_sock_reset_connection_flags(xprt);
1215	/* Reset stream record info */
1216	xs_stream_reset_connect(transport);
1217	release_sock(sk);
1218	mutex_unlock(&transport->recv_mutex);
1219
1220	trace_rpc_socket_close(xprt, sock);
1221	__fput_sync(filp);
1222
1223	xprt_disconnect_done(xprt);
1224}
1225
1226/**
1227 * xs_close - close a socket
1228 * @xprt: transport
1229 *
1230 * This is used when all requests are complete; i.e., no DRC state remains
1231 * on the server we want to save.
1232 *
1233 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
1234 * xs_reset_transport() zeroing the socket from underneath a writer.
1235 */
1236static void xs_close(struct rpc_xprt *xprt)
1237{
1238	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1239
1240	dprintk("RPC:       xs_close xprt %p\n", xprt);
1241
1242	xs_reset_transport(transport);
1243	xprt->reestablish_timeout = 0;
1244}
1245
1246static void xs_inject_disconnect(struct rpc_xprt *xprt)
1247{
1248	dprintk("RPC:       injecting transport disconnect on xprt=%p\n",
1249		xprt);
1250	xprt_disconnect_done(xprt);
1251}
1252
1253static void xs_xprt_free(struct rpc_xprt *xprt)
1254{
1255	xs_free_peer_addresses(xprt);
1256	xprt_free(xprt);
1257}
1258
1259/**
1260 * xs_destroy - prepare to shutdown a transport
1261 * @xprt: doomed transport
1262 *
1263 */
1264static void xs_destroy(struct rpc_xprt *xprt)
1265{
1266	struct sock_xprt *transport = container_of(xprt,
1267			struct sock_xprt, xprt);
1268	dprintk("RPC:       xs_destroy xprt %p\n", xprt);
1269
1270	cancel_delayed_work_sync(&transport->connect_worker);
1271	xs_close(xprt);
1272	cancel_work_sync(&transport->recv_worker);
1273	cancel_work_sync(&transport->error_worker);
1274	xs_xprt_free(xprt);
1275	module_put(THIS_MODULE);
1276}
1277
1278/**
1279 * xs_udp_data_read_skb - receive callback for UDP sockets
1280 * @xprt: transport
1281 * @sk: socket
1282 * @skb: skbuff
1283 *
1284 */
1285static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
1286		struct sock *sk,
1287		struct sk_buff *skb)
1288{
1289	struct rpc_task *task;
1290	struct rpc_rqst *rovr;
1291	int repsize, copied;
1292	u32 _xid;
1293	__be32 *xp;
1294
1295	repsize = skb->len;
1296	if (repsize < 4) {
1297		dprintk("RPC:       impossible RPC reply size %d!\n", repsize);
1298		return;
1299	}
1300
1301	/* Copy the XID from the skb... */
1302	xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid);
1303	if (xp == NULL)
1304		return;
1305
1306	/* Look up and lock the request corresponding to the given XID */
1307	spin_lock(&xprt->queue_lock);
1308	rovr = xprt_lookup_rqst(xprt, *xp);
1309	if (!rovr)
1310		goto out_unlock;
1311	xprt_pin_rqst(rovr);
1312	xprt_update_rtt(rovr->rq_task);
1313	spin_unlock(&xprt->queue_lock);
1314	task = rovr->rq_task;
1315
1316	if ((copied = rovr->rq_private_buf.buflen) > repsize)
1317		copied = repsize;
1318
1319	/* Suck it into the iovec, verify checksum if not done by hw. */
1320	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
1321		spin_lock(&xprt->queue_lock);
1322		__UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
1323		goto out_unpin;
1324	}
1325
1326
1327	spin_lock(&xprt->transport_lock);
1328	xprt_adjust_cwnd(xprt, task, copied);
1329	spin_unlock(&xprt->transport_lock);
1330	spin_lock(&xprt->queue_lock);
1331	xprt_complete_rqst(task, copied);
1332	__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
1333out_unpin:
1334	xprt_unpin_rqst(rovr);
1335out_unlock:
1336	spin_unlock(&xprt->queue_lock);
1337}
1338
1339static void xs_udp_data_receive(struct sock_xprt *transport)
1340{
1341	struct sk_buff *skb;
1342	struct sock *sk;
1343	int err;
1344
1345	mutex_lock(&transport->recv_mutex);
1346	sk = transport->inet;
1347	if (sk == NULL)
1348		goto out;
1349	for (;;) {
1350		skb = skb_recv_udp(sk, MSG_DONTWAIT, &err);
1351		if (skb == NULL)
1352			break;
1353		xs_udp_data_read_skb(&transport->xprt, sk, skb);
1354		consume_skb(skb);
1355		cond_resched();
1356	}
1357	xs_poll_check_readable(transport);
1358out:
1359	mutex_unlock(&transport->recv_mutex);
1360}
1361
1362static void xs_udp_data_receive_workfn(struct work_struct *work)
1363{
1364	struct sock_xprt *transport =
1365		container_of(work, struct sock_xprt, recv_worker);
1366	unsigned int pflags = memalloc_nofs_save();
1367
1368	xs_udp_data_receive(transport);
1369	memalloc_nofs_restore(pflags);
1370}
1371
1372/**
1373 * xs_data_ready - "data ready" callback for sockets
1374 * @sk: socket with data to read
1375 *
1376 */
1377static void xs_data_ready(struct sock *sk)
1378{
1379	struct rpc_xprt *xprt;
1380
1381	xprt = xprt_from_sock(sk);
1382	if (xprt != NULL) {
1383		struct sock_xprt *transport = container_of(xprt,
1384				struct sock_xprt, xprt);
1385
1386		trace_xs_data_ready(xprt);
1387
1388		transport->old_data_ready(sk);
1389		/* Any data means we had a useful conversation, so
1390		 * then we don't need to delay the next reconnect
1391		 */
1392		if (xprt->reestablish_timeout)
1393			xprt->reestablish_timeout = 0;
1394		if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
1395			queue_work(xprtiod_workqueue, &transport->recv_worker);
1396	}
1397}
1398
1399/*
1400 * Helper function to force a TCP close if the server is sending
1401 * junk and/or it has put us in CLOSE_WAIT.
1402 */
1403static void xs_tcp_force_close(struct rpc_xprt *xprt)
1404{
1405	xprt_force_disconnect(xprt);
1406}
1407
1408#if defined(CONFIG_SUNRPC_BACKCHANNEL)
1409static size_t xs_tcp_bc_maxpayload(struct rpc_xprt *xprt)
1410{
1411	return PAGE_SIZE;
1412}
1413#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1414
1415/**
1416 * xs_local_state_change - callback to handle AF_LOCAL socket state changes
1417 * @sk: socket whose state has changed
1418 *
1419 */
1420static void xs_local_state_change(struct sock *sk)
1421{
1422	struct rpc_xprt *xprt;
1423	struct sock_xprt *transport;
1424
1425	if (!(xprt = xprt_from_sock(sk)))
1426		return;
1427	transport = container_of(xprt, struct sock_xprt, xprt);
1428	if (sk->sk_shutdown & SHUTDOWN_MASK) {
1429		clear_bit(XPRT_CONNECTED, &xprt->state);
1430		/* Trigger the socket release */
1431		xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
1432	}
1433}
1434
1435/**
1436 * xs_tcp_state_change - callback to handle TCP socket state changes
1437 * @sk: socket whose state has changed
1438 *
1439 */
1440static void xs_tcp_state_change(struct sock *sk)
1441{
1442	struct rpc_xprt *xprt;
1443	struct sock_xprt *transport;
1444
1445	if (!(xprt = xprt_from_sock(sk)))
1446		return;
1447	dprintk("RPC:       xs_tcp_state_change client %p...\n", xprt);
1448	dprintk("RPC:       state %x conn %d dead %d zapped %d sk_shutdown %d\n",
1449			sk->sk_state, xprt_connected(xprt),
1450			sock_flag(sk, SOCK_DEAD),
1451			sock_flag(sk, SOCK_ZAPPED),
1452			sk->sk_shutdown);
1453
1454	transport = container_of(xprt, struct sock_xprt, xprt);
1455	trace_rpc_socket_state_change(xprt, sk->sk_socket);
1456	switch (sk->sk_state) {
1457	case TCP_ESTABLISHED:
1458		if (!xprt_test_and_set_connected(xprt)) {
1459			xprt->connect_cookie++;
1460			clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
1461			xprt_clear_connecting(xprt);
1462
1463			xprt->stat.connect_count++;
1464			xprt->stat.connect_time += (long)jiffies -
1465						   xprt->stat.connect_start;
1466			xs_run_error_worker(transport, XPRT_SOCK_WAKE_PENDING);
1467		}
1468		break;
1469	case TCP_FIN_WAIT1:
1470		/* The client initiated a shutdown of the socket */
1471		xprt->connect_cookie++;
1472		xprt->reestablish_timeout = 0;
1473		set_bit(XPRT_CLOSING, &xprt->state);
1474		smp_mb__before_atomic();
1475		clear_bit(XPRT_CONNECTED, &xprt->state);
1476		clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1477		smp_mb__after_atomic();
1478		break;
1479	case TCP_CLOSE_WAIT:
1480		/* The server initiated a shutdown of the socket */
1481		xprt->connect_cookie++;
1482		clear_bit(XPRT_CONNECTED, &xprt->state);
1483		xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
1484		fallthrough;
1485	case TCP_CLOSING:
1486		/*
1487		 * If the server closed down the connection, make sure that
1488		 * we back off before reconnecting
1489		 */
1490		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
1491			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
1492		break;
1493	case TCP_LAST_ACK:
1494		set_bit(XPRT_CLOSING, &xprt->state);
1495		smp_mb__before_atomic();
1496		clear_bit(XPRT_CONNECTED, &xprt->state);
1497		smp_mb__after_atomic();
1498		break;
1499	case TCP_CLOSE:
1500		if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
1501					&transport->sock_state))
1502			xprt_clear_connecting(xprt);
1503		clear_bit(XPRT_CLOSING, &xprt->state);
1504		/* Trigger the socket release */
1505		xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
1506	}
1507}
1508
1509static void xs_write_space(struct sock *sk)
1510{
1511	struct sock_xprt *transport;
1512	struct rpc_xprt *xprt;
1513
1514	if (!sk->sk_socket)
1515		return;
1516	clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1517
1518	if (unlikely(!(xprt = xprt_from_sock(sk))))
1519		return;
1520	transport = container_of(xprt, struct sock_xprt, xprt);
1521	if (!test_and_clear_bit(XPRT_SOCK_NOSPACE, &transport->sock_state))
1522		return;
1523	xs_run_error_worker(transport, XPRT_SOCK_WAKE_WRITE);
1524	sk->sk_write_pending--;
1525}
1526
1527/**
1528 * xs_udp_write_space - callback invoked when socket buffer space
1529 *                             becomes available
1530 * @sk: socket whose state has changed
1531 *
1532 * Called when more output buffer space is available for this socket.
1533 * We try not to wake our writers until they can make "significant"
1534 * progress; otherwise we'll waste resources thrashing kernel_sendmsg
1535 * with a bunch of small requests.
1536 */
1537static void xs_udp_write_space(struct sock *sk)
1538{
1539	/* from net/core/sock.c:sock_def_write_space */
1540	if (sock_writeable(sk))
1541		xs_write_space(sk);
1542}
1543
1544/**
1545 * xs_tcp_write_space - callback invoked when socket buffer space
1546 *                             becomes available
1547 * @sk: socket whose state has changed
1548 *
1549 * Called when more output buffer space is available for this socket.
1550 * We try not to wake our writers until they can make "significant"
1551 * progress; otherwise we'll waste resources thrashing kernel_sendmsg
1552 * with a bunch of small requests.
1553 */
1554static void xs_tcp_write_space(struct sock *sk)
1555{
1556	/* from net/core/stream.c:sk_stream_write_space */
1557	if (sk_stream_is_writeable(sk))
1558		xs_write_space(sk);
1559}
1560
1561static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
1562{
1563	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1564	struct sock *sk = transport->inet;
1565
1566	if (transport->rcvsize) {
1567		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
1568		sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
1569	}
1570	if (transport->sndsize) {
1571		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1572		sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
1573		sk->sk_write_space(sk);
1574	}
1575}
1576
1577/**
1578 * xs_udp_set_buffer_size - set send and receive limits
1579 * @xprt: generic transport
1580 * @sndsize: requested size of send buffer, in bytes
1581 * @rcvsize: requested size of receive buffer, in bytes
1582 *
1583 * Set socket send and receive buffer size limits.
1584 */
1585static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
1586{
1587	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1588
1589	transport->sndsize = 0;
1590	if (sndsize)
1591		transport->sndsize = sndsize + 1024;
1592	transport->rcvsize = 0;
1593	if (rcvsize)
1594		transport->rcvsize = rcvsize + 1024;
1595
1596	xs_udp_do_set_buffer_size(xprt);
1597}
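
/*
 * The sizing above adds fixed headroom per request and scales by the slot
 * count: for a requested sndsize of 32768 with max_reqs == 16 (both
 * hypothetical), sk_sndbuf becomes (32768 + 1024) * 16 * 2 == 1081344
 * bytes -- room for every outstanding slot to buffer a full request twice
 * over.
 */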
1598
1599/**
1600 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
1601 * @xprt: controlling transport
1602 * @task: task that timed out
1603 *
1604 * Adjust the congestion window after a retransmit timeout has occurred.
1605 */
1606static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
1607{
1608	spin_lock(&xprt->transport_lock);
1609	xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
1610	spin_unlock(&xprt->transport_lock);
1611}
1612
1613static int xs_get_random_port(void)
1614{
1615	unsigned short min = xprt_min_resvport, max = xprt_max_resvport;
1616	unsigned short range;
1617	unsigned short rand;
1618
1619	if (max < min)
1620		return -EADDRINUSE;
1621	range = max - min + 1;
1622	rand = get_random_u32_below(range);
1623	return rand + min;
1624}
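
/*
 * With the default reserved-port window [665, 1023], range == 359 and
 * get_random_u32_below(359) yields 0..358, so the result is uniform over
 * 665..1023.  The max < min check matters because both bounds are sysctl
 * tunables and may be configured to cross each other.
 */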
1625
1626static unsigned short xs_sock_getport(struct socket *sock)
1627{
1628	struct sockaddr_storage buf;
1629	unsigned short port = 0;
1630
1631	if (kernel_getsockname(sock, (struct sockaddr *)&buf) < 0)
1632		goto out;
1633	switch (buf.ss_family) {
1634	case AF_INET6:
1635		port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port);
1636		break;
1637	case AF_INET:
1638		port = ntohs(((struct sockaddr_in *)&buf)->sin_port);
1639	}
1640out:
1641	return port;
1642}
1643
1644/**
1645 * xs_set_port - reset the port number in the remote endpoint address
1646 * @xprt: generic transport
1647 * @port: new port number
1648 *
1649 */
1650static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
1651{
1652	dprintk("RPC:       setting port for xprt %p to %u\n", xprt, port);
1653
1654	rpc_set_port(xs_addr(xprt), port);
1655	xs_update_peer_port(xprt);
1656}
1657
1658static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
1659{
1660	if (transport->srcport == 0 && transport->xprt.reuseport)
1661		transport->srcport = xs_sock_getport(sock);
1662}
1663
1664static int xs_get_srcport(struct sock_xprt *transport)
1665{
1666	int port = transport->srcport;
1667
1668	if (port == 0 && transport->xprt.resvport)
1669		port = xs_get_random_port();
1670	return port;
1671}
1672
1673static unsigned short xs_sock_srcport(struct rpc_xprt *xprt)
1674{
1675	struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt);
1676	unsigned short ret = 0;
1677	mutex_lock(&sock->recv_mutex);
1678	if (sock->sock)
1679		ret = xs_sock_getport(sock->sock);
1680	mutex_unlock(&sock->recv_mutex);
1681	return ret;
1682}
1683
1684static int xs_sock_srcaddr(struct rpc_xprt *xprt, char *buf, size_t buflen)
1685{
1686	struct sock_xprt *sock = container_of(xprt, struct sock_xprt, xprt);
1687	union {
1688		struct sockaddr sa;
1689		struct sockaddr_storage st;
1690	} saddr;
1691	int ret = -ENOTCONN;
1692
1693	mutex_lock(&sock->recv_mutex);
1694	if (sock->sock) {
1695		ret = kernel_getsockname(sock->sock, &saddr.sa);
1696		if (ret >= 0)
1697			ret = snprintf(buf, buflen, "%pISc", &saddr.sa);
1698	}
1699	mutex_unlock(&sock->recv_mutex);
1700	return ret;
1701}
1702
1703static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
1704{
1705	if (transport->srcport != 0)
1706		transport->srcport = 0;
1707	if (!transport->xprt.resvport)
1708		return 0;
1709	if (port <= xprt_min_resvport || port > xprt_max_resvport)
1710		return xprt_max_resvport;
1711	return --port;
1712}
1713static int xs_bind(struct sock_xprt *transport, struct socket *sock)
1714{
1715	struct sockaddr_storage myaddr;
1716	int err, nloop = 0;
1717	int port = xs_get_srcport(transport);
1718	unsigned short last;
1719
1720	/*
1721	 * If we are asking for any ephemeral port (i.e. port == 0 &&
1722	 * transport->xprt.resvport == 0), don't bind.  Let the local
1723	 * port selection happen implicitly when the socket is used
1724	 * (for example at connect time).
1725	 *
1726	 * This ensures that we can continue to establish TCP
1727	 * connections even when all local ephemeral ports are already
1728	 * a part of some TCP connection.  This makes no difference
1729	 * for UDP sockets, but also doesn't harm them.
1730	 *
1731	 * If we're asking for any reserved port (i.e. port == 0 &&
1732	 * transport->xprt.resvport == 1) xs_get_srcport above will
1733	 * ensure that port is non-zero and we will bind as needed.
1734	 */
1735	if (port <= 0)
1736		return port;
1737
1738	memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
1739	do {
1740		rpc_set_port((struct sockaddr *)&myaddr, port);
1741		err = kernel_bind(sock, (struct sockaddr *)&myaddr,
1742				transport->xprt.addrlen);
1743		if (err == 0) {
1744			if (transport->xprt.reuseport)
1745				transport->srcport = port;
1746			break;
1747		}
1748		last = port;
1749		port = xs_next_srcport(transport, port);
1750		if (port > last)
1751			nloop++;
1752	} while (err == -EADDRINUSE && nloop != 2);
1753
1754	if (myaddr.ss_family == AF_INET)
1755		dprintk("RPC:       %s %pI4:%u: %s (%d)\n", __func__,
1756				&((struct sockaddr_in *)&myaddr)->sin_addr,
1757				port, err ? "failed" : "ok", err);
1758	else
1759		dprintk("RPC:       %s %pI6:%u: %s (%d)\n", __func__,
1760				&((struct sockaddr_in6 *)&myaddr)->sin6_addr,
1761				port, err ? "failed" : "ok", err);
1762	return err;
1763}
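
/*
 * The bind loop above walks the reserved range downward: xs_next_srcport()
 * steps to port - 1 and wraps from xprt_min_resvport back up to
 * xprt_max_resvport.  "port > last" is true exactly at a wrap, so nloop
 * counts completed passes and the loop gives up on -EADDRINUSE after the
 * second pass instead of spinning forever.
 */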
1764
1765/*
1766 * We don't support autobind on AF_LOCAL sockets
1767 */
1768static void xs_local_rpcbind(struct rpc_task *task)
1769{
1770	xprt_set_bound(task->tk_xprt);
1771}
1772
1773static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
1774{
1775}
1776
1777#ifdef CONFIG_DEBUG_LOCK_ALLOC
1778static struct lock_class_key xs_key[3];
1779static struct lock_class_key xs_slock_key[3];
1780
1781static inline void xs_reclassify_socketu(struct socket *sock)
1782{
1783	struct sock *sk = sock->sk;
1784
1785	sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
1786		&xs_slock_key[0], "sk_lock-AF_LOCAL-RPC", &xs_key[0]);
1787}
1788
1789static inline void xs_reclassify_socket4(struct socket *sock)
1790{
1791	struct sock *sk = sock->sk;
1792
1793	sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
1794		&xs_slock_key[1], "sk_lock-AF_INET-RPC", &xs_key[1]);
1795}
1796
1797static inline void xs_reclassify_socket6(struct socket *sock)
1798{
1799	struct sock *sk = sock->sk;
1800
1801	sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
1802		&xs_slock_key[2], "sk_lock-AF_INET6-RPC", &xs_key[2]);
1803}
1804
1805static inline void xs_reclassify_socket(int family, struct socket *sock)
1806{
1807	if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk)))
1808		return;
1809
1810	switch (family) {
1811	case AF_LOCAL:
1812		xs_reclassify_socketu(sock);
1813		break;
1814	case AF_INET:
1815		xs_reclassify_socket4(sock);
1816		break;
1817	case AF_INET6:
1818		xs_reclassify_socket6(sock);
1819		break;
1820	}
1821}
1822#else
1823static inline void xs_reclassify_socket(int family, struct socket *sock)
1824{
1825}
1826#endif
1827
1828static void xs_dummy_setup_socket(struct work_struct *work)
1829{
1830}
1831
1832static struct socket *xs_create_sock(struct rpc_xprt *xprt,
1833		struct sock_xprt *transport, int family, int type,
1834		int protocol, bool reuseport)
1835{
1836	struct file *filp;
1837	struct socket *sock;
1838	int err;
1839
1840	err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
1841	if (err < 0) {
1842		dprintk("RPC:       can't create %d transport socket (%d).\n",
1843				protocol, -err);
1844		goto out;
1845	}
1846	xs_reclassify_socket(family, sock);
1847
1848	if (reuseport)
1849		sock_set_reuseport(sock->sk);
1850
1851	err = xs_bind(transport, sock);
1852	if (err) {
1853		sock_release(sock);
1854		goto out;
1855	}
1856
1857	filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
1858	if (IS_ERR(filp))
1859		return ERR_CAST(filp);
1860	transport->file = filp;
1861
1862	return sock;
1863out:
1864	return ERR_PTR(err);
1865}
1866
1867static int xs_local_finish_connecting(struct rpc_xprt *xprt,
1868				      struct socket *sock)
1869{
1870	struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1871									xprt);
1872
1873	if (!transport->inet) {
1874		struct sock *sk = sock->sk;
1875
1876		lock_sock(sk);
1877
1878		xs_save_old_callbacks(transport, sk);
1879
1880		sk->sk_user_data = xprt;
1881		sk->sk_data_ready = xs_data_ready;
1882		sk->sk_write_space = xs_udp_write_space;
1883		sk->sk_state_change = xs_local_state_change;
1884		sk->sk_error_report = xs_error_report;
1885		sk->sk_use_task_frag = false;
1886
1887		xprt_clear_connected(xprt);
1888
1889		/* Reset to new socket */
1890		transport->sock = sock;
1891		transport->inet = sk;
1892
1893		release_sock(sk);
1894	}
1895
1896	xs_stream_start_connect(transport);
1897
1898	return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
1899}
1900
1901/**
1902 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
1903 * @transport: socket transport to connect
1904 */
1905static int xs_local_setup_socket(struct sock_xprt *transport)
1906{
1907	struct rpc_xprt *xprt = &transport->xprt;
1908	struct file *filp;
1909	struct socket *sock;
1910	int status;
1911
1912	status = __sock_create(xprt->xprt_net, AF_LOCAL,
1913					SOCK_STREAM, 0, &sock, 1);
1914	if (status < 0) {
1915		dprintk("RPC:       can't create AF_LOCAL "
1916			"transport socket (%d).\n", -status);
1917		goto out;
1918	}
1919	xs_reclassify_socket(AF_LOCAL, sock);
1920
1921	filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
1922	if (IS_ERR(filp)) {
1923		status = PTR_ERR(filp);
1924		goto out;
1925	}
1926	transport->file = filp;
1927
1928	dprintk("RPC:       worker connecting xprt %p via AF_LOCAL to %s\n",
1929			xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1930
1931	status = xs_local_finish_connecting(xprt, sock);
1932	trace_rpc_socket_connect(xprt, sock, status);
1933	switch (status) {
1934	case 0:
1935		dprintk("RPC:       xprt %p connected to %s\n",
1936				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1937		xprt->stat.connect_count++;
1938		xprt->stat.connect_time += (long)jiffies -
1939					   xprt->stat.connect_start;
1940		xprt_set_connected(xprt);
1941		break;
1942	case -ENOBUFS:
1943		break;
1944	case -ENOENT:
1945		dprintk("RPC:       xprt %p: socket %s does not exist\n",
1946				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1947		break;
1948	case -ECONNREFUSED:
1949		dprintk("RPC:       xprt %p: connection refused for %s\n",
1950				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1951		break;
1952	default:
1953		printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
1954				__func__, -status,
1955				xprt->address_strings[RPC_DISPLAY_ADDR]);
1956	}
1957
1958out:
1959	xprt_clear_connecting(xprt);
1960	xprt_wake_pending_tasks(xprt, status);
1961	return status;
1962}
1963
1964static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
1965{
1966	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1967	int ret;
1968
1969	if (transport->file)
1970		goto force_disconnect;
1971
1972	if (RPC_IS_ASYNC(task)) {
1973		/*
1974		 * We want the AF_LOCAL connect to be resolved in the
1975		 * filesystem namespace of the process making the rpc
1976		 * call.  Thus we connect synchronously.
1977		 *
1978		 * If we want to support asynchronous AF_LOCAL calls,
1979		 * we'll need to figure out how to pass a namespace to
1980		 * connect.
1981		 */
1982		rpc_task_set_rpc_status(task, -ENOTCONN);
1983		goto out_wake;
1984	}
1985	ret = xs_local_setup_socket(transport);
1986	if (ret && !RPC_IS_SOFTCONN(task))
1987		msleep_interruptible(15000);
1988	return;
1989force_disconnect:
1990	xprt_force_disconnect(xprt);
1991out_wake:
1992	xprt_clear_connecting(xprt);
1993	xprt_wake_pending_tasks(xprt, -ENOTCONN);
1994}
1995
1996#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
1997/*
1998 * Note that this should be called with XPRT_LOCKED held, or recv_mutex
1999 * held, or when we otherwise know that we have exclusive access to the
2000 * socket, to guard against races with xs_reset_transport.
2001 */
2002static void xs_set_memalloc(struct rpc_xprt *xprt)
2003{
2004	struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
2005			xprt);
2006
2007	/*
2008	 * If there's no sock, then we have nothing to set. The
2009	 * reconnecting process will get it for us.
2010	 */
2011	if (!transport->inet)
2012		return;
2013	if (atomic_read(&xprt->swapper))
2014		sk_set_memalloc(transport->inet);
2015}
2016
2017/**
2018 * xs_enable_swap - Tag this transport as being used for swap.
2019 * @xprt: transport to tag
2020 *
2021 * Take a reference to this transport on behalf of the rpc_clnt, and
2022 * optionally mark it for swapping if it wasn't already.
2023 */
2024static int
2025xs_enable_swap(struct rpc_xprt *xprt)
2026{
2027	struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
2028
2029	mutex_lock(&xs->recv_mutex);
2030	if (atomic_inc_return(&xprt->swapper) == 1 &&
2031	    xs->inet)
2032		sk_set_memalloc(xs->inet);
2033	mutex_unlock(&xs->recv_mutex);
2034	return 0;
2035}
2036
2037/**
2038 * xs_disable_swap - Untag this transport as being used for swap.
2039 * @xprt: transport to tag
2040 *
2041 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the
2042 * swapper refcount goes to 0, untag the socket as a memalloc socket.
2043 */
2044static void
2045xs_disable_swap(struct rpc_xprt *xprt)
2046{
2047	struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
2048
2049	mutex_lock(&xs->recv_mutex);
2050	if (atomic_dec_and_test(&xprt->swapper) &&
2051	    xs->inet)
2052		sk_clear_memalloc(xs->inet);
2053	mutex_unlock(&xs->recv_mutex);
2054}
2055#else
2056static void xs_set_memalloc(struct rpc_xprt *xprt)
2057{
2058}
2059
2060static int
2061xs_enable_swap(struct rpc_xprt *xprt)
2062{
2063	return -EINVAL;
2064}
2065
2066static void
2067xs_disable_swap(struct rpc_xprt *xprt)
2068{
2069}
2070#endif
2071
2072static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2073{
2074	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2075
2076	if (!transport->inet) {
2077		struct sock *sk = sock->sk;
2078
2079		lock_sock(sk);
2080
2081		xs_save_old_callbacks(transport, sk);
2082
2083		sk->sk_user_data = xprt;
2084		sk->sk_data_ready = xs_data_ready;
2085		sk->sk_write_space = xs_udp_write_space;
2086		sk->sk_use_task_frag = false;
2087
2088		xprt_set_connected(xprt);
2089
2090		/* Reset to new socket */
2091		transport->sock = sock;
2092		transport->inet = sk;
2093
2094		xs_set_memalloc(xprt);
2095
2096		release_sock(sk);
2097	}
2098	xs_udp_do_set_buffer_size(xprt);
2099
2100	xprt->stat.connect_start = jiffies;
2101}
2102
2103static void xs_udp_setup_socket(struct work_struct *work)
2104{
2105	struct sock_xprt *transport =
2106		container_of(work, struct sock_xprt, connect_worker.work);
2107	struct rpc_xprt *xprt = &transport->xprt;
2108	struct socket *sock;
2109	int status = -EIO;
2110	unsigned int pflags = current->flags;
2111
2112	if (atomic_read(&xprt->swapper))
2113		current->flags |= PF_MEMALLOC;
2114	sock = xs_create_sock(xprt, transport,
2115			xs_addr(xprt)->sa_family, SOCK_DGRAM,
2116			IPPROTO_UDP, false);
2117	if (IS_ERR(sock))
2118		goto out;
2119
2120	dprintk("RPC:       worker connecting xprt %p via %s to "
2121				"%s (port %s)\n", xprt,
2122			xprt->address_strings[RPC_DISPLAY_PROTO],
2123			xprt->address_strings[RPC_DISPLAY_ADDR],
2124			xprt->address_strings[RPC_DISPLAY_PORT]);
2125
2126	xs_udp_finish_connecting(xprt, sock);
2127	trace_rpc_socket_connect(xprt, sock, 0);
2128	status = 0;
2129out:
2130	xprt_clear_connecting(xprt);
2131	xprt_unlock_connect(xprt, transport);
2132	xprt_wake_pending_tasks(xprt, status);
2133	current_restore_flags(pflags, PF_MEMALLOC);
2134}
2135
2136/**
2137 * xs_tcp_shutdown - gracefully shut down a TCP socket
2138 * @xprt: transport
2139 *
2140 * Initiates a graceful shutdown of the TCP socket by calling the
2141 * equivalent of shutdown(SHUT_RDWR).
2142 */
2143static void xs_tcp_shutdown(struct rpc_xprt *xprt)
2144{
2145	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2146	struct socket *sock = transport->sock;
2147	int skst = transport->inet ? transport->inet->sk_state : TCP_CLOSE;
2148
2149	if (sock == NULL)
2150		return;
2151	if (!xprt->reuseport) {
2152		xs_close(xprt);
2153		return;
2154	}
2155	switch (skst) {
2156	case TCP_FIN_WAIT1:
2157	case TCP_FIN_WAIT2:
2158		break;
2159	case TCP_ESTABLISHED:
2160	case TCP_CLOSE_WAIT:
2161		kernel_sock_shutdown(sock, SHUT_RDWR);
2162		trace_rpc_socket_shutdown(xprt, sock);
2163		break;
2164	default:
2165		xs_reset_transport(transport);
2166	}
2167}
2168
2169static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
2170		struct socket *sock)
2171{
2172	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2173	unsigned int keepidle;
2174	unsigned int keepcnt;
2175	unsigned int timeo;
2176
2177	spin_lock(&xprt->transport_lock);
2178	keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ);
2179	keepcnt = xprt->timeout->to_retries + 1;
2180	timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
2181		(xprt->timeout->to_retries + 1);
2182	clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
2183	spin_unlock(&xprt->transport_lock);
2184
2185	/* TCP Keepalive options */
2186	sock_set_keepalive(sock->sk);
2187	tcp_sock_set_keepidle(sock->sk, keepidle);
2188	tcp_sock_set_keepintvl(sock->sk, keepidle);
2189	tcp_sock_set_keepcnt(sock->sk, keepcnt);
2190
2191	/* TCP user timeout (see RFC5482) */
2192	tcp_sock_set_user_timeout(sock->sk, timeo);
2193}
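
/*
 * With to_initval == 60 * HZ and to_retries == 2 (typical NFS/TCP
 * defaults), the code above programs keepidle == keepintvl == 60 seconds,
 * keepcnt == 3, and a TCP user timeout of 60000 * 3 == 180000 ms -- the
 * socket abandons an unacknowledged peer on the same schedule the RPC
 * engine applies to the request itself.
 */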
2194
2195static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt,
2196		unsigned long connect_timeout,
2197		unsigned long reconnect_timeout)
2198{
2199	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2200	struct rpc_timeout to;
2201	unsigned long initval;
2202
2203	spin_lock(&xprt->transport_lock);
2204	if (reconnect_timeout < xprt->max_reconnect_timeout)
2205		xprt->max_reconnect_timeout = reconnect_timeout;
2206	if (connect_timeout < xprt->connect_timeout) {
2207		memcpy(&to, xprt->timeout, sizeof(to));
2208		initval = DIV_ROUND_UP(connect_timeout, to.to_retries + 1);
2209		/* Arbitrary lower limit */
2210		if (initval < XS_TCP_INIT_REEST_TO << 1)
2211			initval = XS_TCP_INIT_REEST_TO << 1;
2212		to.to_initval = initval;
2213		to.to_maxval = initval;
2214		memcpy(&transport->tcp_timeout, &to,
2215				sizeof(transport->tcp_timeout));
2216		xprt->timeout = &transport->tcp_timeout;
2217		xprt->connect_timeout = connect_timeout;
2218	}
2219	set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
2220	spin_unlock(&xprt->transport_lock);
2221}
2222
2223static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2224{
2225	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2226
2227	if (!transport->inet) {
2228		struct sock *sk = sock->sk;
2229
2230		/* Avoid temporary address, they are bad for long-lived
2231		 * connections such as NFS mounts.
2232		 * RFC4941, section 3.6 suggests that:
2233		 *    Individual applications, which have specific
2234		 *    knowledge about the normal duration of connections,
2235		 *    MAY override this as appropriate.
2236		 */
2237		if (xs_addr(xprt)->sa_family == PF_INET6) {
2238			ip6_sock_set_addr_preferences(sk,
2239				IPV6_PREFER_SRC_PUBLIC);
2240		}
2241
2242		xs_tcp_set_socket_timeouts(xprt, sock);
2243		tcp_sock_set_nodelay(sk);
2244
2245		lock_sock(sk);
2246
2247		xs_save_old_callbacks(transport, sk);
2248
2249		sk->sk_user_data = xprt;
2250		sk->sk_data_ready = xs_data_ready;
2251		sk->sk_state_change = xs_tcp_state_change;
2252		sk->sk_write_space = xs_tcp_write_space;
2253		sk->sk_error_report = xs_error_report;
2254		sk->sk_use_task_frag = false;
2255
2256		/* socket options */
2257		sock_reset_flag(sk, SOCK_LINGER);
2258
2259		xprt_clear_connected(xprt);
2260
2261		/* Reset to new socket */
2262		transport->sock = sock;
2263		transport->inet = sk;
2264
2265		release_sock(sk);
2266	}
2267
2268	if (!xprt_bound(xprt))
2269		return -ENOTCONN;
2270
2271	xs_set_memalloc(xprt);
2272
2273	xs_stream_start_connect(transport);
2274
2275	/* Tell the socket layer to start connecting... */
2276	set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
2277	return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
2278}
2279
2280/**
2281 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
2282 * @work: queued work item
2283 *
2284 * Invoked from workqueue context.
2285 */
2286static void xs_tcp_setup_socket(struct work_struct *work)
2287{
2288	struct sock_xprt *transport =
2289		container_of(work, struct sock_xprt, connect_worker.work);
2290	struct socket *sock = transport->sock;
2291	struct rpc_xprt *xprt = &transport->xprt;
2292	int status;
2293	unsigned int pflags = current->flags;
2294
2295	if (atomic_read(&xprt->swapper))
2296		current->flags |= PF_MEMALLOC;
2297
2298	if (xprt_connected(xprt))
2299		goto out;
2300	if (test_and_clear_bit(XPRT_SOCK_CONNECT_SENT,
2301			       &transport->sock_state) ||
2302	    !sock) {
2303		xs_reset_transport(transport);
2304		sock = xs_create_sock(xprt, transport, xs_addr(xprt)->sa_family,
2305				      SOCK_STREAM, IPPROTO_TCP, true);
2306		if (IS_ERR(sock)) {
2307			xprt_wake_pending_tasks(xprt, PTR_ERR(sock));
2308			goto out;
2309		}
2310	}
2311
2312	dprintk("RPC:       worker connecting xprt %p via %s to "
2313				"%s (port %s)\n", xprt,
2314			xprt->address_strings[RPC_DISPLAY_PROTO],
2315			xprt->address_strings[RPC_DISPLAY_ADDR],
2316			xprt->address_strings[RPC_DISPLAY_PORT]);
2317
2318	status = xs_tcp_finish_connecting(xprt, sock);
2319	trace_rpc_socket_connect(xprt, sock, status);
2320	dprintk("RPC:       %p connect status %d connected %d sock state %d\n",
2321			xprt, -status, xprt_connected(xprt),
2322			sock->sk->sk_state);
2323	switch (status) {
2324	case 0:
2325	case -EINPROGRESS:
2326		/* SYN_SENT! */
2327		set_bit(XPRT_SOCK_CONNECT_SENT, &transport->sock_state);
2328		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2329			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2330		fallthrough;
2331	case -EALREADY:
2332		goto out_unlock;
2333	case -EADDRNOTAVAIL:
2334		/* Source port number is unavailable. Try a new one! */
2335		transport->srcport = 0;
2336		status = -EAGAIN;
2337		break;
2338	case -EINVAL:
2339		/* Happens, for instance, if the user specified a link
2340		 * local IPv6 address without a scope-id.
2341		 */
2342	case -ECONNREFUSED:
2343	case -ECONNRESET:
2344	case -ENETDOWN:
2345	case -ENETUNREACH:
2346	case -EHOSTUNREACH:
2347	case -EADDRINUSE:
2348	case -ENOBUFS:
2349		break;
2350	default:
2351		printk("%s: connect returned unhandled error %d\n",
2352			__func__, status);
2353		status = -EAGAIN;
2354	}
2355
2356	/* xs_tcp_force_close() wakes tasks with a fixed error code.
2357	 * We need to wake them first to ensure the correct error code.
2358	 */
2359	xprt_wake_pending_tasks(xprt, status);
2360	xs_tcp_force_close(xprt);
2361out:
2362	xprt_clear_connecting(xprt);
2363out_unlock:
2364	xprt_unlock_connect(xprt, transport);
2365	current_restore_flags(pflags, PF_MEMALLOC);
2366}
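/*
 * A note on the PF_MEMALLOC handling above: it exists for swap-over-NFS.
 * When pages are being swapped out through this transport
 * (xprt->swapper != 0), the reconnect itself may be needed to make
 * forward progress on memory reclaim, so the worker is allowed to dip
 * into the emergency memory reserves, and restores its previous flags
 * with current_restore_flags() on the way out.
 */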
2367
2368/**
2369 * xs_connect - connect a socket to a remote endpoint
2370 * @xprt: pointer to transport structure
2371 * @task: address of RPC task that manages state of connect request
2372 *
2373 * TCP: If the remote end dropped the connection, delay reconnecting.
2374 *
2375 * UDP socket connects are synchronous, but we use a work queue anyway
2376 * to guarantee that even unprivileged user processes can set up a
2377 * socket on a privileged port.
2378 *
2379 * If a UDP socket connect fails, the delay behavior here prevents
2380 * retry floods (hard mounts).
2381 */
2382static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2383{
2384	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2385	unsigned long delay = 0;
2386
2387	WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
2388
2389	if (transport->sock != NULL) {
2390		dprintk("RPC:       xs_connect delayed xprt %p for %lu "
2391			"seconds\n", xprt, xprt->reestablish_timeout / HZ);
2392
2393		delay = xprt_reconnect_delay(xprt);
2394		xprt_reconnect_backoff(xprt, XS_TCP_INIT_REEST_TO);
2395
2396	} else
2397		dprintk("RPC:       xs_connect scheduled xprt %p\n", xprt);
2398
2399	queue_delayed_work(xprtiod_workqueue,
2400			&transport->connect_worker,
2401			delay);
2402}
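/*
 * A sketch of the backoff behaviour above: xprt_reconnect_delay()
 * returns whatever remains of the current reestablish_timeout, and
 * xprt_reconnect_backoff() then doubles that timeout, clamped between
 * XS_TCP_INIT_REEST_TO and xprt->max_reconnect_timeout. Assuming a 3 s
 * initial value and the default 60 s cap, successive reconnect delays
 * grow roughly as 3, 6, 12, 24, 48, 60, 60, ... seconds.
 */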
2403
2404static void xs_wake_disconnect(struct sock_xprt *transport)
2405{
2406	if (test_and_clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state))
2407		xs_tcp_force_close(&transport->xprt);
2408}
2409
2410static void xs_wake_write(struct sock_xprt *transport)
2411{
2412	if (test_and_clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state))
2413		xprt_write_space(&transport->xprt);
2414}
2415
2416static void xs_wake_error(struct sock_xprt *transport)
2417{
2418	int sockerr;
2419
2420	if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
2421		return;
2422	mutex_lock(&transport->recv_mutex);
2423	if (transport->sock == NULL)
2424		goto out;
2425	if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
2426		goto out;
2427	sockerr = xchg(&transport->xprt_err, 0);
2428	if (sockerr < 0)
2429		xprt_wake_pending_tasks(&transport->xprt, sockerr);
2430out:
2431	mutex_unlock(&transport->recv_mutex);
2432}
2433
2434static void xs_wake_pending(struct sock_xprt *transport)
2435{
2436	if (test_and_clear_bit(XPRT_SOCK_WAKE_PENDING, &transport->sock_state))
2437		xprt_wake_pending_tasks(&transport->xprt, -EAGAIN);
2438}
2439
2440static void xs_error_handle(struct work_struct *work)
2441{
2442	struct sock_xprt *transport = container_of(work,
2443			struct sock_xprt, error_worker);
2444
2445	xs_wake_disconnect(transport);
2446	xs_wake_write(transport);
2447	xs_wake_error(transport);
2448	xs_wake_pending(transport);
2449}
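/*
 * A sketch of the producer side that pairs with the consumers above;
 * the helper name is hypothetical. Socket callbacks run in bottom-half
 * context under the socket lock, where taking transport->recv_mutex is
 * not allowed, so the producer only sets a flag bit and defers the real
 * wakeup to the error worker.
 */
#if 0	/* illustrative only, not part of the original file */
static void example_flag_and_defer(struct sock_xprt *transport,
				   unsigned int bit)
{
	/* e.g. bit == XPRT_SOCK_WAKE_ERROR */
	set_bit(bit, &transport->sock_state);
	queue_work(xprtiod_workqueue, &transport->error_worker);
}
#endif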
2450
2451/**
2452 * xs_local_print_stats - display AF_LOCAL socket-specific stats
2453 * @xprt: rpc_xprt struct containing statistics
2454 * @seq: output file
2455 *
2456 */
2457static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2458{
2459	long idle_time = 0;
2460
2461	if (xprt_connected(xprt))
2462		idle_time = (long)(jiffies - xprt->last_used) / HZ;
2463
2464	seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
2465			"%llu %llu %lu %llu %llu\n",
2466			xprt->stat.bind_count,
2467			xprt->stat.connect_count,
2468			xprt->stat.connect_time / HZ,
2469			idle_time,
2470			xprt->stat.sends,
2471			xprt->stat.recvs,
2472			xprt->stat.bad_xids,
2473			xprt->stat.req_u,
2474			xprt->stat.bklog_u,
2475			xprt->stat.max_slots,
2476			xprt->stat.sending_u,
2477			xprt->stat.pending_u);
2478}
2479
2480/**
2481 * xs_udp_print_stats - display UDP socket-specific stats
2482 * @xprt: rpc_xprt struct containing statistics
2483 * @seq: output file
2484 *
2485 */
2486static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2487{
2488	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2489
2490	seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu "
2491			"%lu %llu %llu\n",
2492			transport->srcport,
2493			xprt->stat.bind_count,
2494			xprt->stat.sends,
2495			xprt->stat.recvs,
2496			xprt->stat.bad_xids,
2497			xprt->stat.req_u,
2498			xprt->stat.bklog_u,
2499			xprt->stat.max_slots,
2500			xprt->stat.sending_u,
2501			xprt->stat.pending_u);
2502}
2503
2504/**
2505 * xs_tcp_print_stats - display TCP socket-specific stats
2506 * @xprt: rpc_xprt struct containing statistics
2507 * @seq: output file
2508 *
2509 */
2510static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2511{
2512	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2513	long idle_time = 0;
2514
2515	if (xprt_connected(xprt))
2516		idle_time = (long)(jiffies - xprt->last_used) / HZ;
2517
2518	seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu "
2519			"%llu %llu %lu %llu %llu\n",
2520			transport->srcport,
2521			xprt->stat.bind_count,
2522			xprt->stat.connect_count,
2523			xprt->stat.connect_time / HZ,
2524			idle_time,
2525			xprt->stat.sends,
2526			xprt->stat.recvs,
2527			xprt->stat.bad_xids,
2528			xprt->stat.req_u,
2529			xprt->stat.bklog_u,
2530			xprt->stat.max_slots,
2531			xprt->stat.sending_u,
2532			xprt->stat.pending_u);
2533}
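/*
 * These lines surface in /proc/self/mountstats for NFS mounts. A
 * hypothetical TCP rendering (values invented for illustration):
 *
 *	xprt:	tcp 739 1 1 0 0 27 27 0 27 0 2 0 0
 *
 * i.e. source port, bind and connect counts, connect and idle times in
 * seconds, sends, recvs, bad XIDs, then cumulative request and backlog
 * queue utilisation, max_slots, and the sending/pending queue
 * utilisation counters.
 */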
2534
2535/*
2536 * Allocate a page to serve as a scratch buffer for the RPC code. The reason
2537 * we allocate a whole page instead of doing a kmalloc like rpc_malloc does is
2538 * that we want to be able to use the server-side send routines.
2539 */
2540static int bc_malloc(struct rpc_task *task)
2541{
2542	struct rpc_rqst *rqst = task->tk_rqstp;
2543	size_t size = rqst->rq_callsize;
2544	struct page *page;
2545	struct rpc_buffer *buf;
2546
2547	if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) {
2548		WARN_ONCE(1, "xprtsock: large bc buffer request (size %zu)\n",
2549			  size);
2550		return -EINVAL;
2551	}
2552
2553	page = alloc_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
2554	if (!page)
2555		return -ENOMEM;
2556
2557	buf = page_address(page);
2558	buf->len = PAGE_SIZE;
2559
2560	rqst->rq_buffer = buf->data;
2561	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
2562	return 0;
2563}
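/*
 * Layout of the single page allocated above, with the call and reply
 * buffers carved out of buf->data:
 *
 *	+-------------------+---------------------+---------------------+
 *	| struct rpc_buffer | call buffer         | reply buffer        |
 *	| (buf->len =       | (rq_callsize bytes, | (starts rq_callsize |
 *	|  PAGE_SIZE)       |  at rq_buffer)      |  bytes in, at       |
 *	|                   |                     |  rq_rbuffer)        |
 *	+-------------------+---------------------+---------------------+
 *
 * The size check above guarantees the header and call buffer fit within
 * one page.
 */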
2564
2565/*
2566 * Free the space allocated by the bc_malloc routine
2567 */
2568static void bc_free(struct rpc_task *task)
2569{
2570	void *buffer = task->tk_rqstp->rq_buffer;
2571	struct rpc_buffer *buf;
2572
2573	buf = container_of(buffer, struct rpc_buffer, data);
2574	free_page((unsigned long)buf);
2575}
2576
2577static int bc_sendto(struct rpc_rqst *req)
2578{
2579	struct xdr_buf *xdr = &req->rq_snd_buf;
2580	struct sock_xprt *transport =
2581			container_of(req->rq_xprt, struct sock_xprt, xprt);
2582	struct msghdr msg = {
2583		.msg_flags	= 0,
2584	};
2585	rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT |
2586					 (u32)xdr->len);
2587	unsigned int sent = 0;
2588	int err;
2589
2590	req->rq_xtime = ktime_get();
2591	err = xdr_alloc_bvec(xdr, rpc_task_gfp_mask());
2592	if (err < 0)
2593		return err;
2594	err = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, marker, &sent);
2595	xdr_free_bvec(xdr);
2596	if (err < 0 || sent != (xdr->len + sizeof(marker)))
2597		return -EAGAIN;
2598	return sent;
2599}
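/*
 * A worked example of the RFC 5531 record marker built above. For a
 * 1024-byte backchannel call:
 *
 *	marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | 1024)
 *	       = cpu_to_be32(0x80000000 | 0x400)
 *	       = 0x80000400 on the wire
 *
 * The high bit marks the final fragment; backchannel calls always go out
 * as a single fragment, so it is set unconditionally, and the check
 * above insists the marker plus the entire body were transmitted, else
 * -EAGAIN is returned.
 */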
2600
2601/**
2602 * bc_send_request - Send a backchannel Call on a TCP socket
2603 * @req: rpc_rqst containing Call message to be sent
2604 *
2605 * xpt_mutex ensures @req's whole message is written to the socket
2606 * without interruption.
2607 *
2608 * Return values:
2609 *   %0 if the message was sent successfully
2610 *   %ENOTCONN if the message was not sent
2611 */
2612static int bc_send_request(struct rpc_rqst *req)
2613{
2614	struct svc_xprt	*xprt;
2615	int len;
2616
2617	/*
2618	 * Get the server socket associated with this callback xprt
2619	 */
2620	xprt = req->rq_xprt->bc_xprt;
2621
2622	/*
2623	 * Grab the mutex to serialize data as the connection is shared
2624	 * with the fore channel
2625	 */
2626	mutex_lock(&xprt->xpt_mutex);
2627	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
2628		len = -ENOTCONN;
2629	else
2630		len = bc_sendto(req);
2631	mutex_unlock(&xprt->xpt_mutex);
2632
2633	if (len > 0)
2634		len = 0;
2635
2636	return len;
2637}
2638
2639/*
2640 * The close routine. Since the connection is client-initiated, we merely report the disconnect.
2641 */
2642
2643static void bc_close(struct rpc_xprt *xprt)
2644{
2645	xprt_disconnect_done(xprt);
2646}
2647
2648/*
2649 * The xprt destroy routine. Again, because this connection is client-
2650 * initiated, we only free local resources and drop the module reference.
2651 */
2652
2653static void bc_destroy(struct rpc_xprt *xprt)
2654{
2655	dprintk("RPC:       bc_destroy xprt %p\n", xprt);
2656
2657	xs_xprt_free(xprt);
2658	module_put(THIS_MODULE);
2659}
2660
2661static const struct rpc_xprt_ops xs_local_ops = {
2662	.reserve_xprt		= xprt_reserve_xprt,
2663	.release_xprt		= xprt_release_xprt,
2664	.alloc_slot		= xprt_alloc_slot,
2665	.free_slot		= xprt_free_slot,
2666	.rpcbind		= xs_local_rpcbind,
2667	.set_port		= xs_local_set_port,
2668	.connect		= xs_local_connect,
2669	.buf_alloc		= rpc_malloc,
2670	.buf_free		= rpc_free,
2671	.prepare_request	= xs_stream_prepare_request,
2672	.send_request		= xs_local_send_request,
2673	.wait_for_reply_request	= xprt_wait_for_reply_request_def,
2674	.close			= xs_close,
2675	.destroy		= xs_destroy,
2676	.print_stats		= xs_local_print_stats,
2677	.enable_swap		= xs_enable_swap,
2678	.disable_swap		= xs_disable_swap,
2679};
2680
2681static const struct rpc_xprt_ops xs_udp_ops = {
2682	.set_buffer_size	= xs_udp_set_buffer_size,
2683	.reserve_xprt		= xprt_reserve_xprt_cong,
2684	.release_xprt		= xprt_release_xprt_cong,
2685	.alloc_slot		= xprt_alloc_slot,
2686	.free_slot		= xprt_free_slot,
2687	.rpcbind		= rpcb_getport_async,
2688	.set_port		= xs_set_port,
2689	.connect		= xs_connect,
2690	.get_srcaddr		= xs_sock_srcaddr,
2691	.get_srcport		= xs_sock_srcport,
2692	.buf_alloc		= rpc_malloc,
2693	.buf_free		= rpc_free,
2694	.send_request		= xs_udp_send_request,
2695	.wait_for_reply_request	= xprt_wait_for_reply_request_rtt,
2696	.timer			= xs_udp_timer,
2697	.release_request	= xprt_release_rqst_cong,
2698	.close			= xs_close,
2699	.destroy		= xs_destroy,
2700	.print_stats		= xs_udp_print_stats,
2701	.enable_swap		= xs_enable_swap,
2702	.disable_swap		= xs_disable_swap,
2703	.inject_disconnect	= xs_inject_disconnect,
2704};
2705
2706static const struct rpc_xprt_ops xs_tcp_ops = {
2707	.reserve_xprt		= xprt_reserve_xprt,
2708	.release_xprt		= xprt_release_xprt,
2709	.alloc_slot		= xprt_alloc_slot,
2710	.free_slot		= xprt_free_slot,
2711	.rpcbind		= rpcb_getport_async,
2712	.set_port		= xs_set_port,
2713	.connect		= xs_connect,
2714	.get_srcaddr		= xs_sock_srcaddr,
2715	.get_srcport		= xs_sock_srcport,
2716	.buf_alloc		= rpc_malloc,
2717	.buf_free		= rpc_free,
2718	.prepare_request	= xs_stream_prepare_request,
2719	.send_request		= xs_tcp_send_request,
2720	.wait_for_reply_request	= xprt_wait_for_reply_request_def,
2721	.close			= xs_tcp_shutdown,
2722	.destroy		= xs_destroy,
2723	.set_connect_timeout	= xs_tcp_set_connect_timeout,
2724	.print_stats		= xs_tcp_print_stats,
2725	.enable_swap		= xs_enable_swap,
2726	.disable_swap		= xs_disable_swap,
2727	.inject_disconnect	= xs_inject_disconnect,
2728#ifdef CONFIG_SUNRPC_BACKCHANNEL
2729	.bc_setup		= xprt_setup_bc,
2730	.bc_maxpayload		= xs_tcp_bc_maxpayload,
2731	.bc_num_slots		= xprt_bc_max_slots,
2732	.bc_free_rqst		= xprt_free_bc_rqst,
2733	.bc_destroy		= xprt_destroy_bc,
2734#endif
2735};
2736
2737/*
2738 * The rpc_xprt_ops for the server backchannel
2739 */
2740
2741static const struct rpc_xprt_ops bc_tcp_ops = {
2742	.reserve_xprt		= xprt_reserve_xprt,
2743	.release_xprt		= xprt_release_xprt,
2744	.alloc_slot		= xprt_alloc_slot,
2745	.free_slot		= xprt_free_slot,
2746	.buf_alloc		= bc_malloc,
2747	.buf_free		= bc_free,
2748	.send_request		= bc_send_request,
2749	.wait_for_reply_request	= xprt_wait_for_reply_request_def,
2750	.close			= bc_close,
2751	.destroy		= bc_destroy,
2752	.print_stats		= xs_tcp_print_stats,
2753	.enable_swap		= xs_enable_swap,
2754	.disable_swap		= xs_disable_swap,
2755	.inject_disconnect	= xs_inject_disconnect,
2756};
2757
2758static int xs_init_anyaddr(const int family, struct sockaddr *sap)
2759{
2760	static const struct sockaddr_in sin = {
2761		.sin_family		= AF_INET,
2762		.sin_addr.s_addr	= htonl(INADDR_ANY),
2763	};
2764	static const struct sockaddr_in6 sin6 = {
2765		.sin6_family		= AF_INET6,
2766		.sin6_addr		= IN6ADDR_ANY_INIT,
2767	};
2768
2769	switch (family) {
2770	case AF_LOCAL:
2771		break;
2772	case AF_INET:
2773		memcpy(sap, &sin, sizeof(sin));
2774		break;
2775	case AF_INET6:
2776		memcpy(sap, &sin6, sizeof(sin6));
2777		break;
2778	default:
2779		dprintk("RPC:       %s: Bad address family\n", __func__);
2780		return -EAFNOSUPPORT;
2781	}
2782	return 0;
2783}
2784
2785static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2786				      unsigned int slot_table_size,
2787				      unsigned int max_slot_table_size)
2788{
2789	struct rpc_xprt *xprt;
2790	struct sock_xprt *new;
2791
2792	if (args->addrlen > sizeof(xprt->addr)) {
2793		dprintk("RPC:       xs_setup_xprt: address too large\n");
2794		return ERR_PTR(-EBADF);
2795	}
2796
2797	xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size,
2798			max_slot_table_size);
2799	if (xprt == NULL) {
2800		dprintk("RPC:       xs_setup_xprt: couldn't allocate "
2801				"rpc_xprt\n");
2802		return ERR_PTR(-ENOMEM);
2803	}
2804
2805	new = container_of(xprt, struct sock_xprt, xprt);
2806	mutex_init(&new->recv_mutex);
2807	memcpy(&xprt->addr, args->dstaddr, args->addrlen);
2808	xprt->addrlen = args->addrlen;
2809	if (args->srcaddr)
2810		memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
2811	else {
2812		int err;
2813		err = xs_init_anyaddr(args->dstaddr->sa_family,
2814					(struct sockaddr *)&new->srcaddr);
2815		if (err != 0) {
2816			xprt_free(xprt);
2817			return ERR_PTR(err);
2818		}
2819	}
2820
2821	return xprt;
2822}
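/*
 * A note on the allocation above: xprt_alloc() is asked for
 * sizeof(struct sock_xprt) even though it hands back a struct
 * rpc_xprt *. The generic xprt is embedded in the transport-private
 * structure, which is why every method in this file recovers its
 * private state with:
 *
 *	struct sock_xprt *transport =
 *		container_of(xprt, struct sock_xprt, xprt);
 */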
2823
2824static const struct rpc_timeout xs_local_default_timeout = {
2825	.to_initval = 10 * HZ,
2826	.to_maxval = 10 * HZ,
2827	.to_retries = 2,
2828};
2829
2830/**
2831 * xs_setup_local - Set up transport to use an AF_LOCAL socket
2832 * @args: rpc transport creation arguments
2833 *
2834 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
2835 */
2836static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
2837{
2838	struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
2839	struct sock_xprt *transport;
2840	struct rpc_xprt *xprt;
2841	struct rpc_xprt *ret;
2842
2843	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2844			xprt_max_tcp_slot_table_entries);
2845	if (IS_ERR(xprt))
2846		return xprt;
2847	transport = container_of(xprt, struct sock_xprt, xprt);
2848
2849	xprt->prot = 0;
2850	xprt->xprt_class = &xs_local_transport;
2851	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2852
2853	xprt->bind_timeout = XS_BIND_TO;
2854	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2855	xprt->idle_timeout = XS_IDLE_DISC_TO;
2856
2857	xprt->ops = &xs_local_ops;
2858	xprt->timeout = &xs_local_default_timeout;
2859
2860	INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
2861	INIT_WORK(&transport->error_worker, xs_error_handle);
2862	INIT_DELAYED_WORK(&transport->connect_worker, xs_dummy_setup_socket);
2863
2864	switch (sun->sun_family) {
2865	case AF_LOCAL:
2866		if (sun->sun_path[0] != '/') {
2867			dprintk("RPC:       bad AF_LOCAL address: %s\n",
2868					sun->sun_path);
2869			ret = ERR_PTR(-EINVAL);
2870			goto out_err;
2871		}
2872		xprt_set_bound(xprt);
2873		xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
2874		break;
2875	default:
2876		ret = ERR_PTR(-EAFNOSUPPORT);
2877		goto out_err;
2878	}
2879
2880	dprintk("RPC:       set up xprt to %s via AF_LOCAL\n",
2881			xprt->address_strings[RPC_DISPLAY_ADDR]);
2882
2883	if (try_module_get(THIS_MODULE))
2884		return xprt;
2885	ret = ERR_PTR(-EINVAL);
2886out_err:
2887	xs_xprt_free(xprt);
2888	return ret;
2889}
2890
2891static const struct rpc_timeout xs_udp_default_timeout = {
2892	.to_initval = 5 * HZ,
2893	.to_maxval = 30 * HZ,
2894	.to_increment = 5 * HZ,
2895	.to_retries = 5,
2896};
2897
2898/**
2899 * xs_setup_udp - Set up transport to use a UDP socket
2900 * @args: rpc transport creation arguments
2901 *
2902 */
2903static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2904{
2905	struct sockaddr *addr = args->dstaddr;
2906	struct rpc_xprt *xprt;
2907	struct sock_xprt *transport;
2908	struct rpc_xprt *ret;
2909
2910	xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries,
2911			xprt_udp_slot_table_entries);
2912	if (IS_ERR(xprt))
2913		return xprt;
2914	transport = container_of(xprt, struct sock_xprt, xprt);
2915
2916	xprt->prot = IPPROTO_UDP;
2917	xprt->xprt_class = &xs_udp_transport;
2918	/* XXX: header size can vary due to auth type, IPv6, etc. */
2919	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
2920
2921	xprt->bind_timeout = XS_BIND_TO;
2922	xprt->reestablish_timeout = XS_UDP_REEST_TO;
2923	xprt->idle_timeout = XS_IDLE_DISC_TO;
2924
2925	xprt->ops = &xs_udp_ops;
2926
2927	xprt->timeout = &xs_udp_default_timeout;
2928
2929	INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn);
2930	INIT_WORK(&transport->error_worker, xs_error_handle);
2931	INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket);
2932
2933	switch (addr->sa_family) {
2934	case AF_INET:
2935		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
2936			xprt_set_bound(xprt);
2937
2938		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
2939		break;
2940	case AF_INET6:
2941		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
2942			xprt_set_bound(xprt);
2943
2944		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
2945		break;
2946	default:
2947		ret = ERR_PTR(-EAFNOSUPPORT);
2948		goto out_err;
2949	}
2950
2951	if (xprt_bound(xprt))
2952		dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
2953				xprt->address_strings[RPC_DISPLAY_ADDR],
2954				xprt->address_strings[RPC_DISPLAY_PORT],
2955				xprt->address_strings[RPC_DISPLAY_PROTO]);
2956	else
2957		dprintk("RPC:       set up xprt to %s (autobind) via %s\n",
2958				xprt->address_strings[RPC_DISPLAY_ADDR],
2959				xprt->address_strings[RPC_DISPLAY_PROTO]);
2960
2961	if (try_module_get(THIS_MODULE))
2962		return xprt;
2963	ret = ERR_PTR(-EINVAL);
2964out_err:
2965	xs_xprt_free(xprt);
2966	return ret;
2967}
2968
2969static const struct rpc_timeout xs_tcp_default_timeout = {
2970	.to_initval = 60 * HZ,
2971	.to_maxval = 60 * HZ,
2972	.to_retries = 2,
2973};
2974
2975/**
2976 * xs_setup_tcp - Set up transport to use a TCP socket
2977 * @args: rpc transport creation arguments
2978 *
2979 */
2980static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
2981{
2982	struct sockaddr *addr = args->dstaddr;
2983	struct rpc_xprt *xprt;
2984	struct sock_xprt *transport;
2985	struct rpc_xprt *ret;
2986	unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;
2987
2988	if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
2989		max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;
2990
2991	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2992			max_slot_table_size);
2993	if (IS_ERR(xprt))
2994		return xprt;
2995	transport = container_of(xprt, struct sock_xprt, xprt);
2996
2997	xprt->prot = IPPROTO_TCP;
2998	xprt->xprt_class = &xs_tcp_transport;
2999	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
3000
3001	xprt->bind_timeout = XS_BIND_TO;
3002	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
3003	xprt->idle_timeout = XS_IDLE_DISC_TO;
3004
3005	xprt->ops = &xs_tcp_ops;
3006	xprt->timeout = &xs_tcp_default_timeout;
3007
3008	xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
3009	xprt->connect_timeout = xprt->timeout->to_initval *
3010		(xprt->timeout->to_retries + 1);
3011
3012	INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
3013	INIT_WORK(&transport->error_worker, xs_error_handle);
3014	INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);
3015
3016	switch (addr->sa_family) {
3017	case AF_INET:
3018		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
3019			xprt_set_bound(xprt);
3020
3021		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
3022		break;
3023	case AF_INET6:
3024		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
3025			xprt_set_bound(xprt);
3026
3027		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
3028		break;
3029	default:
3030		ret = ERR_PTR(-EAFNOSUPPORT);
3031		goto out_err;
3032	}
3033
3034	if (xprt_bound(xprt))
3035		dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
3036				xprt->address_strings[RPC_DISPLAY_ADDR],
3037				xprt->address_strings[RPC_DISPLAY_PORT],
3038				xprt->address_strings[RPC_DISPLAY_PROTO]);
3039	else
3040		dprintk("RPC:       set up xprt to %s (autobind) via %s\n",
3041				xprt->address_strings[RPC_DISPLAY_ADDR],
3042				xprt->address_strings[RPC_DISPLAY_PROTO]);
3043
3044	if (try_module_get(THIS_MODULE))
3045		return xprt;
3046	ret = ERR_PTR(-EINVAL);
3047out_err:
3048	xs_xprt_free(xprt);
3049	return ret;
3050}
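/*
 * A worked example of the setup above: with the default TCP timeout
 * (to_initval = 60 * HZ, to_retries = 2), the derived values are
 *
 *	connect_timeout       = 60 s * (2 + 1) = 180 s
 *	max_reconnect_timeout = to_maxval      =  60 s
 *
 * both of which xs_tcp_set_connect_timeout() may later tighten, but
 * never relax.
 */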
3051
3052/**
3053 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
3054 * @args: rpc transport creation arguments
3055 *
3056 */
3057static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
3058{
3059	struct sockaddr *addr = args->dstaddr;
3060	struct rpc_xprt *xprt;
3061	struct sock_xprt *transport;
3062	struct svc_sock *bc_sock;
3063	struct rpc_xprt *ret;
3064
3065	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
3066			xprt_tcp_slot_table_entries);
3067	if (IS_ERR(xprt))
3068		return xprt;
3069	transport = container_of(xprt, struct sock_xprt, xprt);
3070
3071	xprt->prot = IPPROTO_TCP;
3072	xprt->xprt_class = &xs_bc_tcp_transport;
3073	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
3074	xprt->timeout = &xs_tcp_default_timeout;
3075
3076	/* backchannel */
3077	xprt_set_bound(xprt);
3078	xprt->bind_timeout = 0;
3079	xprt->reestablish_timeout = 0;
3080	xprt->idle_timeout = 0;
3081
3082	xprt->ops = &bc_tcp_ops;
3083
3084	switch (addr->sa_family) {
3085	case AF_INET:
3086		xs_format_peer_addresses(xprt, "tcp",
3087					 RPCBIND_NETID_TCP);
3088		break;
3089	case AF_INET6:
3090		xs_format_peer_addresses(xprt, "tcp",
3091				   RPCBIND_NETID_TCP6);
3092		break;
3093	default:
3094		ret = ERR_PTR(-EAFNOSUPPORT);
3095		goto out_err;
3096	}
3097
3098	dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
3099			xprt->address_strings[RPC_DISPLAY_ADDR],
3100			xprt->address_strings[RPC_DISPLAY_PORT],
3101			xprt->address_strings[RPC_DISPLAY_PROTO]);
3102
3103	/*
3104	 * Once we've associated a backchannel xprt with a connection,
3105	 * we want to keep it around as long as the connection lasts,
3106	 * in case we need to start using it for a backchannel again;
3107	 * this reference won't be dropped until bc_xprt is destroyed.
3108	 */
3109	xprt_get(xprt);
3110	args->bc_xprt->xpt_bc_xprt = xprt;
3111	xprt->bc_xprt = args->bc_xprt;
3112	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
3113	transport->sock = bc_sock->sk_sock;
3114	transport->inet = bc_sock->sk_sk;
3115
3116	/*
3117	 * Since we don't want connections for the backchannel, we set
3118	 * the xprt status to connected
3119	 */
3120	xprt_set_connected(xprt);
3121
3122	if (try_module_get(THIS_MODULE))
3123		return xprt;
3124
3125	args->bc_xprt->xpt_bc_xprt = NULL;
3126	args->bc_xprt->xpt_bc_xps = NULL;
3127	xprt_put(xprt);
3128	ret = ERR_PTR(-EINVAL);
3129out_err:
3130	xs_xprt_free(xprt);
3131	return ret;
3132}
3133
3134static struct xprt_class	xs_local_transport = {
3135	.list		= LIST_HEAD_INIT(xs_local_transport.list),
3136	.name		= "named UNIX socket",
3137	.owner		= THIS_MODULE,
3138	.ident		= XPRT_TRANSPORT_LOCAL,
3139	.setup		= xs_setup_local,
3140	.netid		= { "" },
3141};
3142
3143static struct xprt_class	xs_udp_transport = {
3144	.list		= LIST_HEAD_INIT(xs_udp_transport.list),
3145	.name		= "udp",
3146	.owner		= THIS_MODULE,
3147	.ident		= XPRT_TRANSPORT_UDP,
3148	.setup		= xs_setup_udp,
3149	.netid		= { "udp", "udp6", "" },
3150};
3151
3152static struct xprt_class	xs_tcp_transport = {
3153	.list		= LIST_HEAD_INIT(xs_tcp_transport.list),
3154	.name		= "tcp",
3155	.owner		= THIS_MODULE,
3156	.ident		= XPRT_TRANSPORT_TCP,
3157	.setup		= xs_setup_tcp,
3158	.netid		= { "tcp", "tcp6", "" },
3159};
3160
3161static struct xprt_class	xs_bc_tcp_transport = {
3162	.list		= LIST_HEAD_INIT(xs_bc_tcp_transport.list),
3163	.name		= "tcp NFSv4.1 backchannel",
3164	.owner		= THIS_MODULE,
3165	.ident		= XPRT_TRANSPORT_BC_TCP,
3166	.setup		= xs_setup_bc_tcp,
3167	.netid		= { "" },
3168};
3169
3170/**
3171 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
3172 *
3173 */
3174int init_socket_xprt(void)
3175{
3176	if (!sunrpc_table_header)
3177		sunrpc_table_header = register_sysctl_table(sunrpc_table);
3178
3179	xprt_register_transport(&xs_local_transport);
3180	xprt_register_transport(&xs_udp_transport);
3181	xprt_register_transport(&xs_tcp_transport);
3182	xprt_register_transport(&xs_bc_tcp_transport);
3183
3184	return 0;
3185}
3186
3187/**
3188 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
3189 *
3190 */
3191void cleanup_socket_xprt(void)
3192{
3193	if (sunrpc_table_header) {
3194		unregister_sysctl_table(sunrpc_table_header);
3195		sunrpc_table_header = NULL;
3196	}
3197
3198	xprt_unregister_transport(&xs_local_transport);
3199	xprt_unregister_transport(&xs_udp_transport);
3200	xprt_unregister_transport(&xs_tcp_transport);
3201	xprt_unregister_transport(&xs_bc_tcp_transport);
3202}
3203
3204static int param_set_portnr(const char *val, const struct kernel_param *kp)
3205{
3206	return param_set_uint_minmax(val, kp,
3207			RPC_MIN_RESVPORT,
3208			RPC_MAX_RESVPORT);
3209}
3210
3211static const struct kernel_param_ops param_ops_portnr = {
3212	.set = param_set_portnr,
3213	.get = param_get_uint,
3214};
3215
3216#define param_check_portnr(name, p) \
3217	__param_check(name, p, unsigned int);
3218
3219module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
3220module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
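/*
 * Since this file is built into the sunrpc module, the parameters above
 * appear as
 *
 *	/sys/module/sunrpc/parameters/min_resvport
 *	/sys/module/sunrpc/parameters/max_resvport
 *
 * writable at runtime (mode 0644) and clamped by param_set_portnr() to
 * the [RPC_MIN_RESVPORT, RPC_MAX_RESVPORT] range.
 */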
3221
3222static int param_set_slot_table_size(const char *val,
3223				     const struct kernel_param *kp)
3224{
3225	return param_set_uint_minmax(val, kp,
3226			RPC_MIN_SLOT_TABLE,
3227			RPC_MAX_SLOT_TABLE);
3228}
3229
3230static const struct kernel_param_ops param_ops_slot_table_size = {
3231	.set = param_set_slot_table_size,
3232	.get = param_get_uint,
3233};
3234
3235#define param_check_slot_table_size(name, p) \
3236	__param_check(name, p, unsigned int);
3237
3238static int param_set_max_slot_table_size(const char *val,
3239				     const struct kernel_param *kp)
3240{
3241	return param_set_uint_minmax(val, kp,
3242			RPC_MIN_SLOT_TABLE,
3243			RPC_MAX_SLOT_TABLE_LIMIT);
3244}
3245
3246static const struct kernel_param_ops param_ops_max_slot_table_size = {
3247	.set = param_set_max_slot_table_size,
3248	.get = param_get_uint,
3249};
3250
3251#define param_check_max_slot_table_size(name, p) \
3252	__param_check(name, p, unsigned int);
3253
3254module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
3255		   slot_table_size, 0644);
3256module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries,
3257		   max_slot_table_size, 0644);
3258module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
3259		   slot_table_size, 0644);