   1/*
   2 * linux/net/sunrpc/xprtsock.c
   3 *
   4 * Client-side transport implementation for sockets.
   5 *
   6 * TCP callback races fixes (C) 1998 Red Hat
   7 * TCP send fixes (C) 1998 Red Hat
   8 * TCP NFS related read + write fixes
   9 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
  10 *
  11 * Rewrite of large parts of the code in order to stabilize the TCP code.
  12 * Fix behaviour when socket buffer is full.
  13 *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
  14 *
  15 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
  16 *
  17 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
  18 *   <gilles.quillard@bull.net>
  19 */
  20
  21#include <linux/types.h>
  22#include <linux/string.h>
  23#include <linux/slab.h>
  24#include <linux/module.h>
  25#include <linux/capability.h>
  26#include <linux/pagemap.h>
  27#include <linux/errno.h>
  28#include <linux/socket.h>
  29#include <linux/in.h>
  30#include <linux/net.h>
  31#include <linux/mm.h>
  32#include <linux/un.h>
  33#include <linux/udp.h>
  34#include <linux/tcp.h>
  35#include <linux/sunrpc/clnt.h>
  36#include <linux/sunrpc/addr.h>
  37#include <linux/sunrpc/sched.h>
  38#include <linux/sunrpc/svcsock.h>
  39#include <linux/sunrpc/xprtsock.h>
  40#include <linux/file.h>
  41#ifdef CONFIG_SUNRPC_BACKCHANNEL
  42#include <linux/sunrpc/bc_xprt.h>
  43#endif
  44
  45#include <net/sock.h>
  46#include <net/checksum.h>
  47#include <net/udp.h>
  48#include <net/tcp.h>
  49
  50#include <trace/events/sunrpc.h>
  51
  52#include "sunrpc.h"
  53
  54static void xs_close(struct rpc_xprt *xprt);
  55
  56/*
  57 * xprtsock tunables
  58 */
  59static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
  60static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
  61static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;
  62
  63static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
  64static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
  65
  66#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
  67
  68#define XS_TCP_LINGER_TO	(15U * HZ)
  69static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
  70
  71/*
  72 * We can register our own files under /proc/sys/sunrpc by
  73 * calling register_sysctl_table() again.  The files in that
  74 * directory become the union of all files registered there.
  75 *
  76 * We simply need to make sure that we don't collide with
  77 * someone else's file names!
  78 */
  79
  80static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
  81static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
  82static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
  83static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
  84static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
  85
  86static struct ctl_table_header *sunrpc_table_header;
  87
  88/*
  89 * FIXME: changing the UDP slot table size should also resize the UDP
  90 *        socket buffers for existing UDP transports
  91 */
  92static struct ctl_table xs_tunables_table[] = {
  93	{
  94		.procname	= "udp_slot_table_entries",
  95		.data		= &xprt_udp_slot_table_entries,
  96		.maxlen		= sizeof(unsigned int),
  97		.mode		= 0644,
  98		.proc_handler	= proc_dointvec_minmax,
  99		.extra1		= &min_slot_table_size,
 100		.extra2		= &max_slot_table_size
 101	},
 102	{
 103		.procname	= "tcp_slot_table_entries",
 104		.data		= &xprt_tcp_slot_table_entries,
 105		.maxlen		= sizeof(unsigned int),
 106		.mode		= 0644,
 107		.proc_handler	= proc_dointvec_minmax,
 108		.extra1		= &min_slot_table_size,
 109		.extra2		= &max_slot_table_size
 110	},
 111	{
 112		.procname	= "tcp_max_slot_table_entries",
 113		.data		= &xprt_max_tcp_slot_table_entries,
 114		.maxlen		= sizeof(unsigned int),
 115		.mode		= 0644,
 116		.proc_handler	= proc_dointvec_minmax,
 117		.extra1		= &min_slot_table_size,
 118		.extra2		= &max_tcp_slot_table_limit
 119	},
 120	{
 121		.procname	= "min_resvport",
 122		.data		= &xprt_min_resvport,
 123		.maxlen		= sizeof(unsigned int),
 124		.mode		= 0644,
 125		.proc_handler	= proc_dointvec_minmax,
 126		.extra1		= &xprt_min_resvport_limit,
 127		.extra2		= &xprt_max_resvport_limit
 128	},
 129	{
 130		.procname	= "max_resvport",
 131		.data		= &xprt_max_resvport,
 132		.maxlen		= sizeof(unsigned int),
 133		.mode		= 0644,
 134		.proc_handler	= proc_dointvec_minmax,
 135		.extra1		= &xprt_min_resvport_limit,
 136		.extra2		= &xprt_max_resvport_limit
 137	},
 138	{
 139		.procname	= "tcp_fin_timeout",
 140		.data		= &xs_tcp_fin_timeout,
 141		.maxlen		= sizeof(xs_tcp_fin_timeout),
 142		.mode		= 0644,
 143		.proc_handler	= proc_dointvec_jiffies,
 144	},
 145	{ },
 146};
 147
 148static struct ctl_table sunrpc_table[] = {
 149	{
 150		.procname	= "sunrpc",
 151		.mode		= 0555,
 152		.child		= xs_tunables_table
 153	},
 154	{ },
 155};
 156
 157#endif
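
/*
 * Illustration (user-space sketch, not kernel code): once the table
 * above is registered, each tunable appears as a plain file under
 * /proc/sys/sunrpc named by its .procname.  A minimal reader, with
 * error handling trimmed for brevity:
 */
#include <stdio.h>

static unsigned int read_sunrpc_tunable(const char *name)
{
	char path[128];
	unsigned int val = 0;
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/sunrpc/%s", name);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%u", &val) != 1)
			val = 0;
		fclose(f);
	}
	return val;
}
/* e.g. read_sunrpc_tunable("tcp_slot_table_entries") */
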
 158
 159/*
 160 * Wait duration for a reply from the RPC portmapper.
 161 */
 162#define XS_BIND_TO		(60U * HZ)
 163
 164/*
 165 * Delay if a UDP socket connect error occurs.  This is most likely some
 166 * kind of resource problem on the local host.
 167 */
 168#define XS_UDP_REEST_TO		(2U * HZ)
 169
 170/*
 171 * The reestablish timeout allows clients to delay for a bit before attempting
 172 * to reconnect to a server that just dropped our connection.
 173 *
 174 * We implement an exponential backoff when trying to reestablish a TCP
 175 * transport connection with the server.  Some servers like to drop a TCP
 176 * connection when they are overworked, so we start with a short timeout and
 177 * increase over time if the server is down or not responding.
 178 */
 179#define XS_TCP_INIT_REEST_TO	(3U * HZ)
 180#define XS_TCP_MAX_REEST_TO	(5U * 60 * HZ)
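
/*
 * Illustration (stand-alone sketch): the comment above describes
 * exponential backoff between these two bounds.  The doubling itself
 * happens in the connect path later in this file; assuming plain
 * doubling and HZ == 1000, the resulting delay sequence is:
 */
#include <stdio.h>

int main(void)
{
	unsigned int hz = 1000;			/* assumed HZ */
	unsigned int to = 3 * hz;		/* XS_TCP_INIT_REEST_TO */
	unsigned int max = 5 * 60 * hz;		/* XS_TCP_MAX_REEST_TO */

	while (to < max) {
		printf("retry after %u ms\n", to);	/* 3s, 6s, 12s, ... */
		to <<= 1;
	}
	printf("capped at %u ms\n", max);	/* five minutes */
	return 0;
}
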
 181
 182/*
 183 * TCP idle timeout; client drops the transport socket if it is idle
 184 * for this long.  Note that we also timeout UDP sockets to prevent
 185 * holding port numbers when there is no RPC traffic.
 186 */
 187#define XS_IDLE_DISC_TO		(5U * 60 * HZ)
 188
 189#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 190# undef  RPC_DEBUG_DATA
 191# define RPCDBG_FACILITY	RPCDBG_TRANS
 192#endif
 193
 194#ifdef RPC_DEBUG_DATA
 195static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
 196{
 197	u8 *buf = (u8 *) packet;
 198	int j;
 199
 200	dprintk("RPC:       %s\n", msg);
 201	for (j = 0; j < count && j < 128; j += 4) {
 202		if (!(j & 31)) {
 203			if (j)
 204				dprintk("\n");
 205			dprintk("0x%04x ", j);
 206		}
 207		dprintk("%02x%02x%02x%02x ",
 208			buf[j], buf[j+1], buf[j+2], buf[j+3]);
 209	}
 210	dprintk("\n");
 211}
 212#else
 213static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
 214{
 215	/* NOP */
 216}
 217#endif
 218
 219static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
 220{
 221	return (struct rpc_xprt *) sk->sk_user_data;
 222}
 223
 224static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
 225{
 226	return (struct sockaddr *) &xprt->addr;
 227}
 228
 229static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
 230{
 231	return (struct sockaddr_un *) &xprt->addr;
 232}
 233
 234static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
 235{
 236	return (struct sockaddr_in *) &xprt->addr;
 237}
 238
 239static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
 240{
 241	return (struct sockaddr_in6 *) &xprt->addr;
 242}
 243
 244static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
 245{
 246	struct sockaddr *sap = xs_addr(xprt);
 247	struct sockaddr_in6 *sin6;
 248	struct sockaddr_in *sin;
 249	struct sockaddr_un *sun;
 250	char buf[128];
 251
 252	switch (sap->sa_family) {
 253	case AF_LOCAL:
 254		sun = xs_addr_un(xprt);
 255		strlcpy(buf, sun->sun_path, sizeof(buf));
 256		xprt->address_strings[RPC_DISPLAY_ADDR] =
 257						kstrdup(buf, GFP_KERNEL);
 258		break;
 259	case AF_INET:
 260		(void)rpc_ntop(sap, buf, sizeof(buf));
 261		xprt->address_strings[RPC_DISPLAY_ADDR] =
 262						kstrdup(buf, GFP_KERNEL);
 263		sin = xs_addr_in(xprt);
 264		snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
 265		break;
 266	case AF_INET6:
 267		(void)rpc_ntop(sap, buf, sizeof(buf));
 268		xprt->address_strings[RPC_DISPLAY_ADDR] =
 269						kstrdup(buf, GFP_KERNEL);
 270		sin6 = xs_addr_in6(xprt);
 271		snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
 272		break;
 273	default:
 274		BUG();
 275	}
 276
 277	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
 278}
 279
 280static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
 281{
 282	struct sockaddr *sap = xs_addr(xprt);
 283	char buf[128];
 284
 285	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
 286	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
 287
 288	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
 289	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
 290}
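
/*
 * Illustration (stand-alone sketch): what the two format strings
 * above produce for a typical port.  rpc_get_port() returns the port
 * in host byte order, so a plain unsigned short stands in for it here;
 * note that "%4hx" pads with spaces, not zeros:
 */
#include <stdio.h>

int main(void)
{
	unsigned short port = 2049;	/* example: the well-known NFS port */
	char buf[8];

	snprintf(buf, sizeof(buf), "%u", port);
	printf("RPC_DISPLAY_PORT     = \"%s\"\n", buf);	/* "2049" */
	snprintf(buf, sizeof(buf), "%4hx", port);
	printf("RPC_DISPLAY_HEX_PORT = \"%s\"\n", buf);	/* " 801" */
	return 0;
}
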
 291
 292static void xs_format_peer_addresses(struct rpc_xprt *xprt,
 293				     const char *protocol,
 294				     const char *netid)
 295{
 296	xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
 297	xprt->address_strings[RPC_DISPLAY_NETID] = netid;
 298	xs_format_common_peer_addresses(xprt);
 299	xs_format_common_peer_ports(xprt);
 300}
 301
 302static void xs_update_peer_port(struct rpc_xprt *xprt)
 303{
 304	kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
 305	kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
 306
 307	xs_format_common_peer_ports(xprt);
 308}
 309
 310static void xs_free_peer_addresses(struct rpc_xprt *xprt)
 311{
 312	unsigned int i;
 313
 314	for (i = 0; i < RPC_DISPLAY_MAX; i++)
 315		switch (i) {
 316		case RPC_DISPLAY_PROTO:
 317		case RPC_DISPLAY_NETID:
 318			continue;
 319		default:
 320			kfree(xprt->address_strings[i]);
 321		}
 322}
 323
 324#define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)
 325
 326static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
 327{
 328	struct msghdr msg = {
 329		.msg_name	= addr,
 330		.msg_namelen	= addrlen,
 331		.msg_flags	= XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
 332	};
 333	struct kvec iov = {
 334		.iov_base	= vec->iov_base + base,
 335		.iov_len	= vec->iov_len - base,
 336	};
 337
 338	if (iov.iov_len != 0)
 339		return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
 340	return kernel_sendmsg(sock, &msg, NULL, 0, 0);
 341}
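
/*
 * Illustration (user-space sketch): xs_send_kvec() is essentially
 * kernel_sendmsg() over a single iovec.  The user-space analogue
 * below shows the same shape; MSG_MORE hints that more data follows,
 * so the stack may hold back a partial segment (these flags are
 * Linux-specific):
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t send_one_iov(int fd, void *base, size_t len, int more)
{
	struct iovec iov = { .iov_base = base, .iov_len = len };
	struct msghdr msg;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	return sendmsg(fd, &msg, MSG_DONTWAIT | MSG_NOSIGNAL |
			(more ? MSG_MORE : 0));
}
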
 342
 343static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more, bool zerocopy, int *sent_p)
 344{
 345	ssize_t (*do_sendpage)(struct socket *sock, struct page *page,
 346			int offset, size_t size, int flags);
 347	struct page **ppage;
 348	unsigned int remainder;
 349	int err;
 350
 351	remainder = xdr->page_len - base;
 352	base += xdr->page_base;
 353	ppage = xdr->pages + (base >> PAGE_SHIFT);
 354	base &= ~PAGE_MASK;
 355	do_sendpage = sock->ops->sendpage;
 356	if (!zerocopy)
 357		do_sendpage = sock_no_sendpage;
 358	for(;;) {
 359		unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
 360		int flags = XS_SENDMSG_FLAGS;
 361
 362		remainder -= len;
 363		if (more)
 364			flags |= MSG_MORE;
 365		if (remainder != 0)
 366			flags |= MSG_SENDPAGE_NOTLAST | MSG_MORE;
 367		err = do_sendpage(sock, *ppage, base, len, flags);
 368		if (remainder == 0 || err != len)
 369			break;
 370		*sent_p += err;
 371		ppage++;
 372		base = 0;
 373	}
 374	if (err > 0) {
 375		*sent_p += err;
 376		err = 0;
 377	}
 378	return err;
 379}
 380
 381/**
 382 * xs_sendpages - write pages directly to a socket
 383 * @sock: socket to send on
 384 * @addr: UDP only -- address of destination
 385 * @addrlen: UDP only -- length of destination address
 386 * @xdr: buffer containing this request
 387 * @base: starting position in the buffer
 388 * @zerocopy: true if it is safe to use sendpage()
 389 * @sent_p: return the total number of bytes successfully queued for sending
 390 *
 391 */
 392static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, bool zerocopy, int *sent_p)
 393{
 394	unsigned int remainder = xdr->len - base;
 395	int err = 0;
 396	int sent = 0;
 397
 398	if (unlikely(!sock))
 399		return -ENOTSOCK;
 400
 401	if (base != 0) {
 402		addr = NULL;
 403		addrlen = 0;
 404	}
 405
 406	if (base < xdr->head[0].iov_len || addr != NULL) {
 407		unsigned int len = xdr->head[0].iov_len - base;
 408		remainder -= len;
 409		err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
 410		if (remainder == 0 || err != len)
 411			goto out;
 412		*sent_p += err;
 413		base = 0;
 414	} else
 415		base -= xdr->head[0].iov_len;
 416
 417	if (base < xdr->page_len) {
 418		unsigned int len = xdr->page_len - base;
 419		remainder -= len;
 420		err = xs_send_pagedata(sock, xdr, base, remainder != 0, zerocopy, &sent);
 421		*sent_p += sent;
 422		if (remainder == 0 || sent != len)
 423			goto out;
 424		base = 0;
 425	} else
 426		base -= xdr->page_len;
 427
 428	if (base >= xdr->tail[0].iov_len)
 429		return 0;
 430	err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
 431out:
 432	if (err > 0) {
 433		*sent_p += err;
 434		err = 0;
 435	}
 436	return err;
 437}
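
/*
 * Illustration (simplified stand-alone model): an xdr_buf is three
 * consecutive regions -- head kvec, page array, tail kvec -- and the
 * "base" argument above is a byte offset into their concatenation.
 * The offset arithmetic xs_sendpages() performs, with the regions
 * reduced to plain lengths:
 */
enum xdr_region { XDR_HEAD, XDR_PAGES, XDR_TAIL };

static enum xdr_region xdr_locate(unsigned int head_len,
				  unsigned int page_len,
				  unsigned int *base)
{
	if (*base < head_len)
		return XDR_HEAD;	/* start mid-head; pages, tail follow */
	*base -= head_len;		/* head fully sent on an earlier pass */
	if (*base < page_len)
		return XDR_PAGES;
	*base -= page_len;		/* pages fully sent as well */
	return XDR_TAIL;
}
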
 438
 439static void xs_nospace_callback(struct rpc_task *task)
 440{
 441	struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
 442
 443	transport->inet->sk_write_pending--;
 444}
 445
 446/**
 447 * xs_nospace - place task on wait queue if transmit was incomplete
 448 * @task: task to put to sleep
 449 *
 450 */
 451static int xs_nospace(struct rpc_task *task)
 452{
 453	struct rpc_rqst *req = task->tk_rqstp;
 454	struct rpc_xprt *xprt = req->rq_xprt;
 455	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 456	struct sock *sk = transport->inet;
 457	int ret = -EAGAIN;
 458
 459	dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
 460			task->tk_pid, req->rq_slen - req->rq_bytes_sent,
 461			req->rq_slen);
 462
 463	/* Protect against races with write_space */
 464	spin_lock_bh(&xprt->transport_lock);
 465
 466	/* Don't race with disconnect */
 467	if (xprt_connected(xprt)) {
 468		/* wait for more buffer space */
 469		sk->sk_write_pending++;
 470		xprt_wait_for_buffer_space(task, xs_nospace_callback);
 471	} else
 472		ret = -ENOTCONN;
 473
 474	spin_unlock_bh(&xprt->transport_lock);
 475
 476	/* Race breaker in case memory is freed before above code is called */
 477	sk->sk_write_space(sk);
 478	return ret;
 479}
 480
 481/*
 482 * Construct a stream transport record marker in @buf.
 483 */
 484static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
 485{
 486	u32 reclen = buf->len - sizeof(rpc_fraghdr);
 487	rpc_fraghdr *base = buf->head[0].iov_base;
 488	*base = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | reclen);
 489}
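
/*
 * Illustration (stand-alone sketch): the 4-byte record marker written
 * above follows RFC 5531 record marking -- the top bit flags the last
 * fragment, the low 31 bits carry the fragment length.  The constants
 * are redefined locally (mirroring RPC_LAST_STREAM_FRAGMENT and
 * RPC_FRAGMENT_SIZE_MASK) so the snippet compiles on its own:
 */
#include <stdint.h>
#include <arpa/inet.h>

#define LAST_FRAGMENT	0x80000000u
#define SIZE_MASK	0x7fffffffu

static uint32_t marker_encode(uint32_t payload_len, int last)
{
	return htonl((last ? LAST_FRAGMENT : 0) | (payload_len & SIZE_MASK));
}

static uint32_t marker_decode(uint32_t wire, int *last)
{
	uint32_t host = ntohl(wire);

	*last = !!(host & LAST_FRAGMENT);
	return host & SIZE_MASK;
}
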
 490
 491/**
 492 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
 493 * @task: RPC task that manages the state of an RPC request
 494 *
 495 * Return values:
 496 *        0:	The request has been sent
 497 *   EAGAIN:	The socket was blocked, please call again later to
 498 *		complete the request
 499 * ENOTCONN:	Caller needs to invoke connect logic then call again
 500 *    other:	Some other error occurred, the request was not sent
 501 */
 502static int xs_local_send_request(struct rpc_task *task)
 503{
 504	struct rpc_rqst *req = task->tk_rqstp;
 505	struct rpc_xprt *xprt = req->rq_xprt;
 506	struct sock_xprt *transport =
 507				container_of(xprt, struct sock_xprt, xprt);
 508	struct xdr_buf *xdr = &req->rq_snd_buf;
 509	int status;
 510	int sent = 0;
 511
 512	xs_encode_stream_record_marker(&req->rq_snd_buf);
 513
 514	xs_pktdump("packet data:",
 515			req->rq_svec->iov_base, req->rq_svec->iov_len);
 516
 517	status = xs_sendpages(transport->sock, NULL, 0, xdr, req->rq_bytes_sent,
 518			      true, &sent);
 519	dprintk("RPC:       %s(%u) = %d\n",
 520			__func__, xdr->len - req->rq_bytes_sent, status);
 521
 522	if (status == -EAGAIN && sock_writeable(transport->inet))
 523		status = -ENOBUFS;
 524
 525	if (likely(sent > 0) || status == 0) {
 526		req->rq_bytes_sent += sent;
 527		req->rq_xmit_bytes_sent += sent;
 528		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
 529			req->rq_bytes_sent = 0;
 530			return 0;
 531		}
 532		status = -EAGAIN;
 533	}
 534
 535	switch (status) {
 536	case -ENOBUFS:
 537		break;
 538	case -EAGAIN:
 539		status = xs_nospace(task);
 540		break;
 541	default:
 542		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
 543			-status);
 544	case -EPIPE:
 545		xs_close(xprt);
 546		status = -ENOTCONN;
 547	}
 548
 549	return status;
 550}
 551
 552/**
 553 * xs_udp_send_request - write an RPC request to a UDP socket
 554 * @task: address of RPC task that manages the state of an RPC request
 555 *
 556 * Return values:
 557 *        0:	The request has been sent
 558 *   EAGAIN:	The socket was blocked, please call again later to
 559 *		complete the request
 560 * ENOTCONN:	Caller needs to invoke connect logic then call again
 561 *    other:	Some other error occurred, the request was not sent
 562 */
 563static int xs_udp_send_request(struct rpc_task *task)
 564{
 565	struct rpc_rqst *req = task->tk_rqstp;
 566	struct rpc_xprt *xprt = req->rq_xprt;
 567	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 568	struct xdr_buf *xdr = &req->rq_snd_buf;
 569	int sent = 0;
 570	int status;
 571
 572	xs_pktdump("packet data:",
 573				req->rq_svec->iov_base,
 574				req->rq_svec->iov_len);
 575
 576	if (!xprt_bound(xprt))
 577		return -ENOTCONN;
 578	status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen,
 579			      xdr, req->rq_bytes_sent, true, &sent);
 580
 581	dprintk("RPC:       xs_udp_send_request(%u) = %d\n",
 582			xdr->len - req->rq_bytes_sent, status);
 583
 584	/* firewall is blocking us, don't return -EAGAIN or we end up looping */
 585	if (status == -EPERM)
 586		goto process_status;
 587
 588	if (status == -EAGAIN && sock_writeable(transport->inet))
 589		status = -ENOBUFS;
 590
 591	if (sent > 0 || status == 0) {
 592		req->rq_xmit_bytes_sent += sent;
 593		if (sent >= req->rq_slen)
 594			return 0;
 595		/* Still some bytes left; set up for a retry later. */
 596		status = -EAGAIN;
 597	}
 598
 599process_status:
 600	switch (status) {
 601	case -ENOTSOCK:
 602		status = -ENOTCONN;
 603		/* Should we call xs_close() here? */
 604		break;
 605	case -EAGAIN:
 606		status = xs_nospace(task);
 607		break;
 608	case -ENETUNREACH:
 609	case -ENOBUFS:
 610	case -EPIPE:
 611	case -ECONNREFUSED:
 612	case -EPERM:
 613		/* When the server has died, an ICMP port unreachable message
 614		 * prompts ECONNREFUSED. */
 615		break;
 616	default:
 617		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
 618			-status);
 619	}
 620
 621	return status;
 622}
 623
 624/**
 625 * xs_tcp_send_request - write an RPC request to a TCP socket
 626 * @task: address of RPC task that manages the state of an RPC request
 627 *
 628 * Return values:
 629 *        0:	The request has been sent
 630 *   EAGAIN:	The socket was blocked, please call again later to
 631 *		complete the request
 632 * ENOTCONN:	Caller needs to invoke connect logic then call again
 633 *    other:	Some other error occurred, the request was not sent
 634 *
 635 * XXX: In the case of soft timeouts, should we eventually give up
 636 *	if sendmsg is not able to make progress?
 637 */
 638static int xs_tcp_send_request(struct rpc_task *task)
 639{
 640	struct rpc_rqst *req = task->tk_rqstp;
 641	struct rpc_xprt *xprt = req->rq_xprt;
 642	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 643	struct xdr_buf *xdr = &req->rq_snd_buf;
 644	bool zerocopy = true;
 645	int status;
 646	int sent;
 647
 648	xs_encode_stream_record_marker(&req->rq_snd_buf);
 649
 650	xs_pktdump("packet data:",
 651				req->rq_svec->iov_base,
 652				req->rq_svec->iov_len);
 653	/* Don't use zero copy if this is a resend. If the RPC call
 654	 * completes while the socket holds a reference to the pages,
 655	 * then we may end up resending corrupted data.
 656	 */
 657	if (task->tk_flags & RPC_TASK_SENT)
 658		zerocopy = false;
 659
 660	/* Continue transmitting the packet/record. We must be careful
 661	 * to cope with writespace callbacks arriving _after_ we have
 662	 * called sendmsg(). */
 663	while (1) {
 664		sent = 0;
 665		status = xs_sendpages(transport->sock, NULL, 0, xdr,
 666				      req->rq_bytes_sent, zerocopy, &sent);
 667
 668		dprintk("RPC:       xs_tcp_send_request(%u) = %d\n",
 669				xdr->len - req->rq_bytes_sent, status);
 670
 671		/* If we've sent the entire packet, immediately
 672		 * reset the count of bytes sent. */
 673		req->rq_bytes_sent += sent;
 674		req->rq_xmit_bytes_sent += sent;
 675		if (likely(req->rq_bytes_sent >= req->rq_slen)) {
 676			req->rq_bytes_sent = 0;
 677			return 0;
 678		}
 679
 680		if (status < 0)
 681			break;
 682		if (sent == 0) {
 683			status = -EAGAIN;
 684			break;
 685		}
 686	}
 687	if (status == -EAGAIN && sk_stream_is_writeable(transport->inet))
 688		status = -ENOBUFS;
 689
 690	switch (status) {
 691	case -ENOTSOCK:
 692		status = -ENOTCONN;
 693		/* Should we call xs_close() here? */
 694		break;
 695	case -EAGAIN:
 696		status = xs_nospace(task);
 697		break;
 698	case -ECONNRESET:
 699	case -ECONNREFUSED:
 700	case -ENOTCONN:
 701	case -EADDRINUSE:
 702	case -ENOBUFS:
 703	case -EPIPE:
 704		break;
 705	default:
 706		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
 707			-status);
 708	}
 709
 710	return status;
 711}
 712
 713/**
 714 * xs_tcp_release_xprt - clean up after a tcp transmission
 715 * @xprt: transport
 716 * @task: rpc task
 717 *
 718 * This cleans up if an error causes us to abort the transmission of a request.
 719 * In this case, the socket may need to be reset in order to avoid confusing
 720 * the server.
 721 */
 722static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
 723{
 724	struct rpc_rqst *req;
 725
 726	if (task != xprt->snd_task)
 727		return;
 728	if (task == NULL)
 729		goto out_release;
 730	req = task->tk_rqstp;
 731	if (req == NULL)
 732		goto out_release;
 733	if (req->rq_bytes_sent == 0)
 734		goto out_release;
 735	if (req->rq_bytes_sent == req->rq_snd_buf.len)
 736		goto out_release;
 737	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
 738out_release:
 739	xprt_release_xprt(xprt, task);
 740}
 741
 742static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
 743{
 744	transport->old_data_ready = sk->sk_data_ready;
 745	transport->old_state_change = sk->sk_state_change;
 746	transport->old_write_space = sk->sk_write_space;
 747	transport->old_error_report = sk->sk_error_report;
 748}
 749
 750static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
 751{
 752	sk->sk_data_ready = transport->old_data_ready;
 753	sk->sk_state_change = transport->old_state_change;
 754	sk->sk_write_space = transport->old_write_space;
 755	sk->sk_error_report = transport->old_error_report;
 756}
 757
 758static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
 759{
 760	smp_mb__before_atomic();
 761	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
 762	clear_bit(XPRT_CLOSING, &xprt->state);
 763	smp_mb__after_atomic();
 764}
 765
 766static void xs_sock_mark_closed(struct rpc_xprt *xprt)
 767{
 768	xs_sock_reset_connection_flags(xprt);
 769	/* Mark transport as closed and wake up all pending tasks */
 770	xprt_disconnect_done(xprt);
 771}
 772
 773/**
 774 * xs_error_report - callback to handle TCP socket state errors
 775 * @sk: socket
 776 *
 777 * Note: we don't call sock_error() since there may be a rpc_task
 778 * using the socket, and so we don't want to clear sk->sk_err.
 779 */
 780static void xs_error_report(struct sock *sk)
 781{
 782	struct rpc_xprt *xprt;
 783	int err;
 784
 785	read_lock_bh(&sk->sk_callback_lock);
 786	if (!(xprt = xprt_from_sock(sk)))
 787		goto out;
 788
 789	err = -sk->sk_err;
 790	if (err == 0)
 791		goto out;
 792	/* Is this a reset event? */
 793	if (sk->sk_state == TCP_CLOSE)
 794		xs_sock_mark_closed(xprt);
 795	dprintk("RPC:       xs_error_report client %p, error=%d...\n",
 796			xprt, -err);
 797	trace_rpc_socket_error(xprt, sk->sk_socket, err);
 798	xprt_wake_pending_tasks(xprt, err);
 799 out:
 800	read_unlock_bh(&sk->sk_callback_lock);
 801}
 802
 803static void xs_reset_transport(struct sock_xprt *transport)
 804{
 805	struct socket *sock = transport->sock;
 806	struct sock *sk = transport->inet;
 807	struct rpc_xprt *xprt = &transport->xprt;
 808
 809	if (sk == NULL)
 810		return;
 811
 812	if (atomic_read(&transport->xprt.swapper))
 813		sk_clear_memalloc(sk);
 814
 815	kernel_sock_shutdown(sock, SHUT_RDWR);
 816
 817	mutex_lock(&transport->recv_mutex);
 818	write_lock_bh(&sk->sk_callback_lock);
 819	transport->inet = NULL;
 820	transport->sock = NULL;
 821
 822	sk->sk_user_data = NULL;
 823
 824	xs_restore_old_callbacks(transport, sk);
 825	xprt_clear_connected(xprt);
 826	write_unlock_bh(&sk->sk_callback_lock);
 827	xs_sock_reset_connection_flags(xprt);
 828	mutex_unlock(&transport->recv_mutex);
 829
 830	trace_rpc_socket_close(xprt, sock);
 831	sock_release(sock);
 832}
 833
 834/**
 835 * xs_close - close a socket
 836 * @xprt: transport
 837 *
 838 * This is used when all requests are complete; ie, no DRC state remains
 839 * on the server we want to save.
 840 *
 841 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
 842 * xs_reset_transport() zeroing the socket from underneath a writer.
 843 */
 844static void xs_close(struct rpc_xprt *xprt)
 845{
 846	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 847
 848	dprintk("RPC:       xs_close xprt %p\n", xprt);
 849
 850	xs_reset_transport(transport);
 851	xprt->reestablish_timeout = 0;
 852
 853	xprt_disconnect_done(xprt);
 854}
 855
 856static void xs_inject_disconnect(struct rpc_xprt *xprt)
 857{
 858	dprintk("RPC:       injecting transport disconnect on xprt=%p\n",
 859		xprt);
 860	xprt_disconnect_done(xprt);
 861}
 862
 863static void xs_xprt_free(struct rpc_xprt *xprt)
 864{
 865	xs_free_peer_addresses(xprt);
 866	xprt_free(xprt);
 867}
 868
 869/**
 870 * xs_destroy - prepare to shutdown a transport
 871 * @xprt: doomed transport
 872 *
 873 */
 874static void xs_destroy(struct rpc_xprt *xprt)
 875{
 876	struct sock_xprt *transport = container_of(xprt,
 877			struct sock_xprt, xprt);
 878	dprintk("RPC:       xs_destroy xprt %p\n", xprt);
 879
 880	cancel_delayed_work_sync(&transport->connect_worker);
 881	xs_close(xprt);
 882	cancel_work_sync(&transport->recv_worker);
 883	xs_xprt_free(xprt);
 884	module_put(THIS_MODULE);
 885}
 886
 887static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
 888{
 889	struct xdr_skb_reader desc = {
 890		.skb		= skb,
 891		.offset		= sizeof(rpc_fraghdr),
 892		.count		= skb->len - sizeof(rpc_fraghdr),
 893	};
 894
 895	if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
 896		return -1;
 897	if (desc.count)
 898		return -1;
 899	return 0;
 900}
 901
 902/**
 903 * xs_local_data_read_skb - read an RPC reply off an AF_LOCAL socket
 904 * @xprt: transport
 905 * @sk: socket
 906 * @skb: skbuff
 907 *
 908 * Currently this assumes we can read the whole reply in a single gulp.
 909 */
 910static void xs_local_data_read_skb(struct rpc_xprt *xprt,
 911		struct sock *sk,
 912		struct sk_buff *skb)
 913{
 914	struct rpc_task *task;
 915	struct rpc_rqst *rovr;
 916	int repsize, copied;
 917	u32 _xid;
 918	__be32 *xp;
 919
 920	repsize = skb->len - sizeof(rpc_fraghdr);
 921	if (repsize < 4) {
 922		dprintk("RPC:       impossible RPC reply size %d\n", repsize);
 923		return;
 924	}
 925
 926	/* Copy the XID from the skb... */
 927	xp = skb_header_pointer(skb, sizeof(rpc_fraghdr), sizeof(_xid), &_xid);
 928	if (xp == NULL)
 929		return;
 930
 931	/* Look up and lock the request corresponding to the given XID */
 932	spin_lock_bh(&xprt->transport_lock);
 933	rovr = xprt_lookup_rqst(xprt, *xp);
 934	if (!rovr)
 935		goto out_unlock;
 936	task = rovr->rq_task;
 937
 938	copied = rovr->rq_private_buf.buflen;
 939	if (copied > repsize)
 940		copied = repsize;
 941
 942	if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) {
 943		dprintk("RPC:       sk_buff copy failed\n");
 944		goto out_unlock;
 945	}
 946
 947	xprt_complete_rqst(task, copied);
 948
 949 out_unlock:
 950	spin_unlock_bh(&xprt->transport_lock);
 951}
 952
 953static void xs_local_data_receive(struct sock_xprt *transport)
 954{
 955	struct sk_buff *skb;
 956	struct sock *sk;
 957	int err;
 958
 959	mutex_lock(&transport->recv_mutex);
 960	sk = transport->inet;
 961	if (sk == NULL)
 962		goto out;
 963	for (;;) {
 964		skb = skb_recv_datagram(sk, 0, 1, &err);
 965		if (skb == NULL)
 966			break;
 967		xs_local_data_read_skb(&transport->xprt, sk, skb);
 968		skb_free_datagram(sk, skb);
 969	}
 970out:
 971	mutex_unlock(&transport->recv_mutex);
 972}
 973
 974static void xs_local_data_receive_workfn(struct work_struct *work)
 975{
 976	struct sock_xprt *transport =
 977		container_of(work, struct sock_xprt, recv_worker);
 978	xs_local_data_receive(transport);
 979}
 980
 981/**
 982 * xs_udp_data_read_skb - receive callback for UDP sockets
 983 * @xprt: transport
 984 * @sk: socket
 985 * @skb: skbuff
 986 *
 987 */
 988static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
 989		struct sock *sk,
 990		struct sk_buff *skb)
 991{
 992	struct rpc_task *task;
 993	struct rpc_rqst *rovr;
 994	int repsize, copied;
 995	u32 _xid;
 996	__be32 *xp;
 997
 998	repsize = skb->len - sizeof(struct udphdr);
 999	if (repsize < 4) {
1000		dprintk("RPC:       impossible RPC reply size %d!\n", repsize);
1001		return;
1002	}
1003
1004	/* Copy the XID from the skb... */
1005	xp = skb_header_pointer(skb, sizeof(struct udphdr),
1006				sizeof(_xid), &_xid);
1007	if (xp == NULL)
1008		return;
1009
1010	/* Look up and lock the request corresponding to the given XID */
1011	spin_lock_bh(&xprt->transport_lock);
1012	rovr = xprt_lookup_rqst(xprt, *xp);
1013	if (!rovr)
1014		goto out_unlock;
1015	task = rovr->rq_task;
1016
1017	if ((copied = rovr->rq_private_buf.buflen) > repsize)
1018		copied = repsize;
1019
1020	/* Suck it into the iovec, verify checksum if not done by hw. */
1021	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
1022		UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS);
1023		goto out_unlock;
1024	}
1025
1026	UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
1027
1028	xprt_adjust_cwnd(xprt, task, copied);
1029	xprt_complete_rqst(task, copied);
1030
1031 out_unlock:
1032	spin_unlock_bh(&xprt->transport_lock);
1033}
1034
1035static void xs_udp_data_receive(struct sock_xprt *transport)
1036{
1037	struct sk_buff *skb;
1038	struct sock *sk;
1039	int err;
1040
1041	mutex_lock(&transport->recv_mutex);
1042	sk = transport->inet;
1043	if (sk == NULL)
1044		goto out;
1045	for (;;) {
1046		skb = skb_recv_datagram(sk, 0, 1, &err);
1047		if (skb == NULL)
1048			break;
1049		xs_udp_data_read_skb(&transport->xprt, sk, skb);
1050		skb_free_datagram(sk, skb);
1051	}
1052out:
1053	mutex_unlock(&transport->recv_mutex);
1054}
1055
1056static void xs_udp_data_receive_workfn(struct work_struct *work)
1057{
1058	struct sock_xprt *transport =
1059		container_of(work, struct sock_xprt, recv_worker);
1060	xs_udp_data_receive(transport);
1061}
1062
1063/**
1064 * xs_data_ready - "data ready" callback for UDP sockets
1065 * @sk: socket with data to read
1066 *
1067 */
1068static void xs_data_ready(struct sock *sk)
1069{
1070	struct rpc_xprt *xprt;
1071
1072	read_lock_bh(&sk->sk_callback_lock);
1073	dprintk("RPC:       xs_data_ready...\n");
1074	xprt = xprt_from_sock(sk);
1075	if (xprt != NULL) {
1076		struct sock_xprt *transport = container_of(xprt,
1077				struct sock_xprt, xprt);
1078		queue_work(rpciod_workqueue, &transport->recv_worker);
1079	}
1080	read_unlock_bh(&sk->sk_callback_lock);
1081}
1082
1083/*
1084 * Helper function to force a TCP close if the server is sending
1085 * junk and/or it has put us in CLOSE_WAIT
1086 */
1087static void xs_tcp_force_close(struct rpc_xprt *xprt)
1088{
1089	xprt_force_disconnect(xprt);
1090}
1091
1092static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
1093{
1094	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1095	size_t len, used;
1096	char *p;
1097
1098	p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
1099	len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
1100	used = xdr_skb_read_bits(desc, p, len);
1101	transport->tcp_offset += used;
1102	if (used != len)
1103		return;
1104
1105	transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
1106	if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
1107		transport->tcp_flags |= TCP_RCV_LAST_FRAG;
1108	else
1109		transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
1110	transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
1111
1112	transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
1113	transport->tcp_offset = 0;
1114
1115	/* Sanity check of the record length */
1116	if (unlikely(transport->tcp_reclen < 8)) {
1117		dprintk("RPC:       invalid TCP record fragment length\n");
1118		xs_tcp_force_close(xprt);
1119		return;
1120	}
1121	dprintk("RPC:       reading TCP record fragment of length %d\n",
1122			transport->tcp_reclen);
1123}
1124
1125static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
1126{
1127	if (transport->tcp_offset == transport->tcp_reclen) {
1128		transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
1129		transport->tcp_offset = 0;
1130		if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
1131			transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1132			transport->tcp_flags |= TCP_RCV_COPY_XID;
1133			transport->tcp_copied = 0;
1134		}
1135	}
1136}
1137
1138static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
1139{
1140	size_t len, used;
1141	char *p;
1142
1143	len = sizeof(transport->tcp_xid) - transport->tcp_offset;
1144	dprintk("RPC:       reading XID (%Zu bytes)\n", len);
1145	p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
1146	used = xdr_skb_read_bits(desc, p, len);
1147	transport->tcp_offset += used;
1148	if (used != len)
1149		return;
1150	transport->tcp_flags &= ~TCP_RCV_COPY_XID;
1151	transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
1152	transport->tcp_copied = 4;
1153	dprintk("RPC:       reading %s XID %08x\n",
1154			(transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
1155							      : "request with",
1156			ntohl(transport->tcp_xid));
1157	xs_tcp_check_fraghdr(transport);
1158}
1159
1160static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
1161				       struct xdr_skb_reader *desc)
1162{
1163	size_t len, used;
1164	u32 offset;
1165	char *p;
1166
1167	/*
1168	 * We want transport->tcp_offset to be 8 at the end of this routine
1169	 * (4 bytes for the xid and 4 bytes for the call/reply flag).
1170	 * When this function is called for the first time,
1171	 * transport->tcp_offset is 4 (after having already read the xid).
1172	 */
1173	offset = transport->tcp_offset - sizeof(transport->tcp_xid);
1174	len = sizeof(transport->tcp_calldir) - offset;
1175	dprintk("RPC:       reading CALL/REPLY flag (%Zu bytes)\n", len);
1176	p = ((char *) &transport->tcp_calldir) + offset;
1177	used = xdr_skb_read_bits(desc, p, len);
1178	transport->tcp_offset += used;
1179	if (used != len)
1180		return;
1181	transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
1182	/*
1183	 * We don't yet have the XDR buffer, so we will write the calldir
1184	 * out after we get the buffer from the 'struct rpc_rqst'
1185	 */
1186	switch (ntohl(transport->tcp_calldir)) {
1187	case RPC_REPLY:
1188		transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
1189		transport->tcp_flags |= TCP_RCV_COPY_DATA;
1190		transport->tcp_flags |= TCP_RPC_REPLY;
1191		break;
1192	case RPC_CALL:
1193		transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
1194		transport->tcp_flags |= TCP_RCV_COPY_DATA;
1195		transport->tcp_flags &= ~TCP_RPC_REPLY;
1196		break;
1197	default:
1198		dprintk("RPC:       invalid request message type\n");
1199		xs_tcp_force_close(&transport->xprt);
1200	}
1201	xs_tcp_check_fraghdr(transport);
1202}
1203
1204static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
1205				     struct xdr_skb_reader *desc,
1206				     struct rpc_rqst *req)
1207{
1208	struct sock_xprt *transport =
1209				container_of(xprt, struct sock_xprt, xprt);
1210	struct xdr_buf *rcvbuf;
1211	size_t len;
1212	ssize_t r;
1213
1214	rcvbuf = &req->rq_private_buf;
1215
1216	if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) {
1217		/*
1218		 * Save the RPC direction in the XDR buffer
1219		 */
1220		memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
1221			&transport->tcp_calldir,
1222			sizeof(transport->tcp_calldir));
1223		transport->tcp_copied += sizeof(transport->tcp_calldir);
1224		transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
1225	}
1226
1227	len = desc->count;
1228	if (len > transport->tcp_reclen - transport->tcp_offset) {
1229		struct xdr_skb_reader my_desc;
1230
1231		len = transport->tcp_reclen - transport->tcp_offset;
1232		memcpy(&my_desc, desc, sizeof(my_desc));
1233		my_desc.count = len;
1234		r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
1235					  &my_desc, xdr_skb_read_bits);
1236		desc->count -= r;
1237		desc->offset += r;
1238	} else
1239		r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
1240					  desc, xdr_skb_read_bits);
1241
1242	if (r > 0) {
1243		transport->tcp_copied += r;
1244		transport->tcp_offset += r;
1245	}
1246	if (r != len) {
1247		/* Error when copying to the receive buffer,
1248		 * usually because we weren't able to allocate
1249		 * additional buffer pages. All we can do now
1250		 * is turn off TCP_RCV_COPY_DATA, so the request
1251		 * will not receive any additional updates,
1252		 * and time out.
1253		 * Any remaining data from this record will
1254		 * be discarded.
1255		 */
1256		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1257		dprintk("RPC:       XID %08x truncated request\n",
1258				ntohl(transport->tcp_xid));
1259		dprintk("RPC:       xprt = %p, tcp_copied = %lu, "
1260				"tcp_offset = %u, tcp_reclen = %u\n",
1261				xprt, transport->tcp_copied,
1262				transport->tcp_offset, transport->tcp_reclen);
1263		return;
1264	}
1265
1266	dprintk("RPC:       XID %08x read %Zd bytes\n",
1267			ntohl(transport->tcp_xid), r);
1268	dprintk("RPC:       xprt = %p, tcp_copied = %lu, tcp_offset = %u, "
1269			"tcp_reclen = %u\n", xprt, transport->tcp_copied,
1270			transport->tcp_offset, transport->tcp_reclen);
1271
1272	if (transport->tcp_copied == req->rq_private_buf.buflen)
1273		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1274	else if (transport->tcp_offset == transport->tcp_reclen) {
1275		if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
1276			transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1277	}
1278}
1279
1280/*
1281 * Finds the request corresponding to the RPC xid and invokes the common
1282 * tcp read code to read the data.
1283 */
1284static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
1285				    struct xdr_skb_reader *desc)
1286{
1287	struct sock_xprt *transport =
1288				container_of(xprt, struct sock_xprt, xprt);
1289	struct rpc_rqst *req;
1290
1291	dprintk("RPC:       read reply XID %08x\n", ntohl(transport->tcp_xid));
1292
1293	/* Find and lock the request corresponding to this xid */
1294	spin_lock_bh(&xprt->transport_lock);
1295	req = xprt_lookup_rqst(xprt, transport->tcp_xid);
1296	if (!req) {
1297		dprintk("RPC:       XID %08x request not found!\n",
1298				ntohl(transport->tcp_xid));
1299		spin_unlock_bh(&xprt->transport_lock);
1300		return -1;
1301	}
1302
1303	xs_tcp_read_common(xprt, desc, req);
1304
1305	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
1306		xprt_complete_rqst(req->rq_task, transport->tcp_copied);
1307
1308	spin_unlock_bh(&xprt->transport_lock);
1309	return 0;
1310}
1311
1312#if defined(CONFIG_SUNRPC_BACKCHANNEL)
1313/*
1314 * Obtains an rpc_rqst previously allocated and invokes the common
1315 * tcp read code to read the data.  The result is placed in the callback
1316 * queue.
1317 * If we're unable to obtain the rpc_rqst we schedule the closing of the
1318 * connection and return -1.
1319 */
1320static int xs_tcp_read_callback(struct rpc_xprt *xprt,
1321				       struct xdr_skb_reader *desc)
1322{
1323	struct sock_xprt *transport =
1324				container_of(xprt, struct sock_xprt, xprt);
1325	struct rpc_rqst *req;
1326
1327	/* Look up and lock the request corresponding to the given XID */
1328	spin_lock_bh(&xprt->transport_lock);
1329	req = xprt_lookup_bc_request(xprt, transport->tcp_xid);
1330	if (req == NULL) {
1331		spin_unlock_bh(&xprt->transport_lock);
1332		printk(KERN_WARNING "Callback slot table overflowed\n");
1333		xprt_force_disconnect(xprt);
1334		return -1;
1335	}
1336
1337	dprintk("RPC:       read callback  XID %08x\n", ntohl(req->rq_xid));
1338	xs_tcp_read_common(xprt, desc, req);
1339
1340	if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
1341		xprt_complete_bc_request(req, transport->tcp_copied);
1342	spin_unlock_bh(&xprt->transport_lock);
1343
1344	return 0;
1345}
1346
1347static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
1348					struct xdr_skb_reader *desc)
1349{
1350	struct sock_xprt *transport =
1351				container_of(xprt, struct sock_xprt, xprt);
1352
1353	return (transport->tcp_flags & TCP_RPC_REPLY) ?
1354		xs_tcp_read_reply(xprt, desc) :
1355		xs_tcp_read_callback(xprt, desc);
1356}
1357
1358static int xs_tcp_bc_up(struct svc_serv *serv, struct net *net)
1359{
1360	int ret;
1361
1362	ret = svc_create_xprt(serv, "tcp-bc", net, PF_INET, 0,
1363			      SVC_SOCK_ANONYMOUS);
1364	if (ret < 0)
1365		return ret;
1366	return 0;
1367}
1368#else
1369static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
1370					struct xdr_skb_reader *desc)
1371{
1372	return xs_tcp_read_reply(xprt, desc);
1373}
1374#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1375
1376/*
1377 * Read data off the transport.  This can be either an RPC_CALL or an
1378 * RPC_REPLY.  Relay the processing to helper functions.
1379 */
1380static void xs_tcp_read_data(struct rpc_xprt *xprt,
1381				    struct xdr_skb_reader *desc)
1382{
1383	struct sock_xprt *transport =
1384				container_of(xprt, struct sock_xprt, xprt);
1385
1386	if (_xs_tcp_read_data(xprt, desc) == 0)
1387		xs_tcp_check_fraghdr(transport);
1388	else {
1389		/*
1390		 * The transport_lock protects the request handling.
1391		 * There's no need to hold it to update the tcp_flags.
1392		 */
1393		transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
1394	}
1395}
1396
1397static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
1398{
1399	size_t len;
1400
1401	len = transport->tcp_reclen - transport->tcp_offset;
1402	if (len > desc->count)
1403		len = desc->count;
1404	desc->count -= len;
1405	desc->offset += len;
1406	transport->tcp_offset += len;
1407	dprintk("RPC:       discarded %Zu bytes\n", len);
1408	xs_tcp_check_fraghdr(transport);
1409}
1410
1411static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
1412{
1413	struct rpc_xprt *xprt = rd_desc->arg.data;
1414	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1415	struct xdr_skb_reader desc = {
1416		.skb	= skb,
1417		.offset	= offset,
1418		.count	= len,
1419	};
1420
1421	dprintk("RPC:       xs_tcp_data_recv started\n");
1422	do {
1423		trace_xs_tcp_data_recv(transport);
1424		/* Read in a new fragment marker if necessary */
1425		/* Can we ever really expect to get completely empty fragments? */
1426		if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) {
1427			xs_tcp_read_fraghdr(xprt, &desc);
1428			continue;
1429		}
1430		/* Read in the xid if necessary */
1431		if (transport->tcp_flags & TCP_RCV_COPY_XID) {
1432			xs_tcp_read_xid(transport, &desc);
1433			continue;
1434		}
1435		/* Read in the call/reply flag */
1436		if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
1437			xs_tcp_read_calldir(transport, &desc);
1438			continue;
1439		}
1440		/* Read in the request data */
1441		if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
1442			xs_tcp_read_data(xprt, &desc);
1443			continue;
1444		}
1445		/* Skip over any trailing bytes on short reads */
1446		xs_tcp_read_discard(transport, &desc);
1447	} while (desc.count);
1448	trace_xs_tcp_data_recv(transport);
1449	dprintk("RPC:       xs_tcp_data_recv done\n");
1450	return len - desc.count;
1451}
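
/*
 * Illustration (condensed stand-alone model): the loop above is a
 * state machine keyed on transport->tcp_flags.  For one well-formed
 * record the states progress as sketched below; multi-fragment
 * records loop back to the fragment-header state, and a reply that
 * overflows its receive buffer falls through to the discard step:
 */
enum rcv_state {
	RCV_FRAGHDR,	/* TCP_RCV_COPY_FRAGHDR: 4-byte record marker */
	RCV_XID,	/* TCP_RCV_COPY_XID:     4-byte XID */
	RCV_CALLDIR,	/* TCP_RCV_READ_CALLDIR: 4-byte call/reply flag */
	RCV_DATA,	/* TCP_RCV_COPY_DATA:    payload into rq_private_buf */
	RCV_DISCARD,	/* skip trailing bytes of a truncated request */
};

static enum rcv_state rcv_next(enum rcv_state s, int buffer_full)
{
	switch (s) {
	case RCV_FRAGHDR:	return RCV_XID;
	case RCV_XID:		return RCV_CALLDIR;
	case RCV_CALLDIR:	return RCV_DATA;
	case RCV_DATA:		return buffer_full ? RCV_DISCARD : RCV_DATA;
	default:		return RCV_FRAGHDR;	/* next record */
	}
}
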
1452
1453static void xs_tcp_data_receive(struct sock_xprt *transport)
1454{
1455	struct rpc_xprt *xprt = &transport->xprt;
1456	struct sock *sk;
1457	read_descriptor_t rd_desc = {
1458		.count = 2*1024*1024,
1459		.arg.data = xprt,
1460	};
1461	unsigned long total = 0;
1462	int read = 0;
1463
1464	mutex_lock(&transport->recv_mutex);
1465	sk = transport->inet;
1466	if (sk == NULL)
1467		goto out;
1468
1469	/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
1470	for (;;) {
1471		lock_sock(sk);
1472		read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
1473		release_sock(sk);
1474		if (read <= 0)
1475			break;
1476		total += read;
1477		rd_desc.count = 65536;
1478	}
1479out:
1480	mutex_unlock(&transport->recv_mutex);
1481	trace_xs_tcp_data_ready(xprt, read, total);
1482}
1483
1484static void xs_tcp_data_receive_workfn(struct work_struct *work)
1485{
1486	struct sock_xprt *transport =
1487		container_of(work, struct sock_xprt, recv_worker);
1488	xs_tcp_data_receive(transport);
1489}
1490
1491/**
1492 * xs_tcp_data_ready - "data ready" callback for TCP sockets
1493 * @sk: socket with data to read
1494 *
1495 */
1496static void xs_tcp_data_ready(struct sock *sk)
1497{
1498	struct sock_xprt *transport;
1499	struct rpc_xprt *xprt;
1500
1501	dprintk("RPC:       xs_tcp_data_ready...\n");
1502
1503	read_lock_bh(&sk->sk_callback_lock);
1504	if (!(xprt = xprt_from_sock(sk)))
1505		goto out;
1506	transport = container_of(xprt, struct sock_xprt, xprt);
1507
1508	/* Any data means we had a useful conversation, so
1509	 * we don't need to delay the next reconnect
1510	 */
1511	if (xprt->reestablish_timeout)
1512		xprt->reestablish_timeout = 0;
1513	queue_work(rpciod_workqueue, &transport->recv_worker);
1514
1515out:
1516	read_unlock_bh(&sk->sk_callback_lock);
1517}
1518
1519/**
1520 * xs_tcp_state_change - callback to handle TCP socket state changes
1521 * @sk: socket whose state has changed
1522 *
1523 */
1524static void xs_tcp_state_change(struct sock *sk)
1525{
1526	struct rpc_xprt *xprt;
1527	struct sock_xprt *transport;
1528
1529	read_lock_bh(&sk->sk_callback_lock);
1530	if (!(xprt = xprt_from_sock(sk)))
1531		goto out;
1532	dprintk("RPC:       xs_tcp_state_change client %p...\n", xprt);
1533	dprintk("RPC:       state %x conn %d dead %d zapped %d sk_shutdown %d\n",
1534			sk->sk_state, xprt_connected(xprt),
1535			sock_flag(sk, SOCK_DEAD),
1536			sock_flag(sk, SOCK_ZAPPED),
1537			sk->sk_shutdown);
1538
1539	transport = container_of(xprt, struct sock_xprt, xprt);
1540	trace_rpc_socket_state_change(xprt, sk->sk_socket);
1541	switch (sk->sk_state) {
1542	case TCP_ESTABLISHED:
1543		spin_lock(&xprt->transport_lock);
1544		if (!xprt_test_and_set_connected(xprt)) {
1545
1546			/* Reset TCP record info */
1547			transport->tcp_offset = 0;
1548			transport->tcp_reclen = 0;
1549			transport->tcp_copied = 0;
1550			transport->tcp_flags =
1551				TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
1552			xprt->connect_cookie++;
1553			clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
1554			xprt_clear_connecting(xprt);
1555
1556			xprt_wake_pending_tasks(xprt, -EAGAIN);
1557		}
1558		spin_unlock(&xprt->transport_lock);
1559		break;
1560	case TCP_FIN_WAIT1:
1561		/* The client initiated a shutdown of the socket */
1562		xprt->connect_cookie++;
1563		xprt->reestablish_timeout = 0;
1564		set_bit(XPRT_CLOSING, &xprt->state);
1565		smp_mb__before_atomic();
1566		clear_bit(XPRT_CONNECTED, &xprt->state);
1567		clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1568		smp_mb__after_atomic();
1569		break;
1570	case TCP_CLOSE_WAIT:
1571		/* The server initiated a shutdown of the socket */
1572		xprt->connect_cookie++;
1573		clear_bit(XPRT_CONNECTED, &xprt->state);
1574		xs_tcp_force_close(xprt);
1575	case TCP_CLOSING:
1576		/*
1577		 * If the server closed down the connection, make sure that
1578		 * we back off before reconnecting
1579		 */
1580		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
1581			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
1582		break;
1583	case TCP_LAST_ACK:
1584		set_bit(XPRT_CLOSING, &xprt->state);
1585		smp_mb__before_atomic();
1586		clear_bit(XPRT_CONNECTED, &xprt->state);
1587		smp_mb__after_atomic();
1588		break;
1589	case TCP_CLOSE:
1590		if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
1591					&transport->sock_state))
1592			xprt_clear_connecting(xprt);
1593		xs_sock_mark_closed(xprt);
1594	}
1595 out:
1596	read_unlock_bh(&sk->sk_callback_lock);
1597}
1598
1599static void xs_write_space(struct sock *sk)
1600{
1601	struct socket_wq *wq;
1602	struct rpc_xprt *xprt;
1603
1604	if (!sk->sk_socket)
1605		return;
1606	clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1607
1608	if (unlikely(!(xprt = xprt_from_sock(sk))))
1609		return;
1610	rcu_read_lock();
1611	wq = rcu_dereference(sk->sk_wq);
1612	if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0)
1613		goto out;
1614
1615	xprt_write_space(xprt);
1616out:
1617	rcu_read_unlock();
1618}
1619
1620/**
1621 * xs_udp_write_space - callback invoked when socket buffer space
1622 *                             becomes available
1623 * @sk: socket whose state has changed
1624 *
1625 * Called when more output buffer space is available for this socket.
1626 * We try not to wake our writers until they can make "significant"
1627 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1628 * with a bunch of small requests.
1629 */
1630static void xs_udp_write_space(struct sock *sk)
1631{
1632	read_lock_bh(&sk->sk_callback_lock);
1633
1634	/* from net/core/sock.c:sock_def_write_space */
1635	if (sock_writeable(sk))
1636		xs_write_space(sk);
1637
1638	read_unlock_bh(&sk->sk_callback_lock);
1639}
1640
1641/**
1642 * xs_tcp_write_space - callback invoked when socket buffer space
1643 *                             becomes available
1644 * @sk: socket whose state has changed
1645 *
1646 * Called when more output buffer space is available for this socket.
1647 * We try not to wake our writers until they can make "significant"
1648 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1649 * with a bunch of small requests.
1650 */
1651static void xs_tcp_write_space(struct sock *sk)
1652{
1653	read_lock_bh(&sk->sk_callback_lock);
1654
1655	/* from net/core/stream.c:sk_stream_write_space */
1656	if (sk_stream_is_writeable(sk))
1657		xs_write_space(sk);
1658
1659	read_unlock_bh(&sk->sk_callback_lock);
1660}
1661
1662static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
1663{
1664	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1665	struct sock *sk = transport->inet;
1666
1667	if (transport->rcvsize) {
1668		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
1669		sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
1670	}
1671	if (transport->sndsize) {
1672		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1673		sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
1674		sk->sk_write_space(sk);
1675	}
1676}
1677
1678/**
1679 * xs_udp_set_buffer_size - set send and receive limits
1680 * @xprt: generic transport
1681 * @sndsize: requested size of send buffer, in bytes
1682 * @rcvsize: requested size of receive buffer, in bytes
1683 *
1684 * Set socket send and receive buffer size limits.
1685 */
1686static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
1687{
1688	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1689
1690	transport->sndsize = 0;
1691	if (sndsize)
1692		transport->sndsize = sndsize + 1024;
1693	transport->rcvsize = 0;
1694	if (rcvsize)
1695		transport->rcvsize = rcvsize + 1024;
1696
1697	xs_udp_do_set_buffer_size(xprt);
1698}
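
/*
 * Illustration (stand-alone sketch): combining the two helpers above,
 * a requested send size S yields a socket buffer of
 * (S + 1024) * max_reqs * 2 bytes.  For example, S = 32768 with the
 * default slot table of 16 requests (RPC_DEF_SLOT_TABLE):
 */
#include <stdio.h>

int main(void)
{
	size_t sndsize = 32768;		/* caller-requested send size */
	unsigned int max_reqs = 16;	/* RPC_DEF_SLOT_TABLE */

	printf("sk_sndbuf = %zu\n", (sndsize + 1024) * max_reqs * 2);
	/* prints: sk_sndbuf = 1081344 */
	return 0;
}
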
1699
1700/**
1701 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
1702 * @task: task that timed out
1703 *
1704 * Adjust the congestion window after a retransmit timeout has occurred.
1705 */
1706static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
1707{
1708	xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
1709}
1710
1711static unsigned short xs_get_random_port(void)
1712{
1713	unsigned short range = xprt_max_resvport - xprt_min_resvport;
1714	unsigned short rand = (unsigned short) prandom_u32() % range;
1715	return rand + xprt_min_resvport;
1716}
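
/*
 * Illustration (stand-alone sketch): the modulo above selects from the
 * half-open range [min_resvport, max_resvport) -- rand % range never
 * yields range itself, so the maximum port is never actually picked.
 * With the default limits of 665 and 1023 (RPC_DEF_MIN_RESVPORT and
 * RPC_DEF_MAX_RESVPORT):
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	unsigned short min = 665, max = 1023;
	unsigned short range = max - min;
	unsigned short port = (unsigned short)rand() % range + min;

	printf("picked %u (always below %u)\n", port, max);
	return 0;
}
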
1717
1718/**
1719 * xs_sock_set_reuseport - set the socket's port and address reuse options
1720 * @sock: socket
1721 *
1722 * Note that this function has to be called on all sockets that share the
1723 * same port, and it must be called before binding.
1724 */
1725static void xs_sock_set_reuseport(struct socket *sock)
1726{
1727	int opt = 1;
1728
1729	kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEPORT,
1730			(char *)&opt, sizeof(opt));
1731}
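
/*
 * Illustration (user-space sketch): the equivalent of the
 * kernel_setsockopt() call above.  SO_REUSEPORT has to be set on
 * every socket that will share the port, before bind():
 */
#include <sys/socket.h>

static int set_reuseport(int fd)
{
	int opt = 1;

	return setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt));
}
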
1732
1733static unsigned short xs_sock_getport(struct socket *sock)
1734{
1735	struct sockaddr_storage buf;
1736	int buflen;
1737	unsigned short port = 0;
1738
1739	if (kernel_getsockname(sock, (struct sockaddr *)&buf, &buflen) < 0)
1740		goto out;
1741	switch (buf.ss_family) {
1742	case AF_INET6:
1743		port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port);
1744		break;
1745	case AF_INET:
1746		port = ntohs(((struct sockaddr_in *)&buf)->sin_port);
1747	}
1748out:
1749	return port;
1750}
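
/*
 * Illustration (user-space sketch): xs_sock_getport() asks the kernel
 * which source port a bound socket actually received; getsockname()
 * is the user-space spelling of the same query:
 */
#include <arpa/inet.h>
#include <sys/socket.h>

static unsigned short sock_getport(int fd)
{
	struct sockaddr_storage ss;
	socklen_t len = sizeof(ss);

	if (getsockname(fd, (struct sockaddr *)&ss, &len) < 0)
		return 0;
	switch (ss.ss_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)&ss)->sin_port);
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)&ss)->sin6_port);
	}
	return 0;
}
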
1751
1752/**
1753 * xs_set_port - reset the port number in the remote endpoint address
1754 * @xprt: generic transport
1755 * @port: new port number
1756 *
1757 */
1758static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
1759{
1760	dprintk("RPC:       setting port for xprt %p to %u\n", xprt, port);
1761
1762	rpc_set_port(xs_addr(xprt), port);
1763	xs_update_peer_port(xprt);
1764}
1765
1766static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
1767{
1768	if (transport->srcport == 0)
1769		transport->srcport = xs_sock_getport(sock);
1770}
1771
1772static unsigned short xs_get_srcport(struct sock_xprt *transport)
1773{
1774	unsigned short port = transport->srcport;
1775
1776	if (port == 0 && transport->xprt.resvport)
1777		port = xs_get_random_port();
1778	return port;
1779}
1780
1781static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
1782{
1783	if (transport->srcport != 0)
1784		transport->srcport = 0;
1785	if (!transport->xprt.resvport)
1786		return 0;
1787	if (port <= xprt_min_resvport || port > xprt_max_resvport)
1788		return xprt_max_resvport;
1789	return --port;
1790}
1791static int xs_bind(struct sock_xprt *transport, struct socket *sock)
1792{
1793	struct sockaddr_storage myaddr;
1794	int err, nloop = 0;
1795	unsigned short port = xs_get_srcport(transport);
1796	unsigned short last;
1797
1798	/*
1799	 * If we are asking for any ephemeral port (i.e. port == 0 &&
1800	 * transport->xprt.resvport == 0), don't bind.  Let the local
1801	 * port selection happen implicitly when the socket is used
1802	 * (for example at connect time).
1803	 *
1804	 * This ensures that we can continue to establish TCP
1805	 * connections even when all local ephemeral ports are already
1806	 * a part of some TCP connection.  This makes no difference
1807	 * for UDP sockets, but also doesn't harm them.
1808	 *
1809	 * If we're asking for any reserved port (i.e. port == 0 &&
1810	 * transport->xprt.resvport == 1) xs_get_srcport above will
1811	 * ensure that port is non-zero and we will bind as needed.
1812	 */
1813	if (port == 0)
1814		return 0;
1815
1816	memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
1817	do {
1818		rpc_set_port((struct sockaddr *)&myaddr, port);
1819		err = kernel_bind(sock, (struct sockaddr *)&myaddr,
1820				transport->xprt.addrlen);
1821		if (err == 0) {
1822			transport->srcport = port;
1823			break;
1824		}
1825		last = port;
1826		port = xs_next_srcport(transport, port);
1827		if (port > last)
1828			nloop++;
1829	} while (err == -EADDRINUSE && nloop != 2);
1830
1831	if (myaddr.ss_family == AF_INET)
1832		dprintk("RPC:       %s %pI4:%u: %s (%d)\n", __func__,
1833				&((struct sockaddr_in *)&myaddr)->sin_addr,
1834				port, err ? "failed" : "ok", err);
1835	else
1836		dprintk("RPC:       %s %pI6:%u: %s (%d)\n", __func__,
1837				&((struct sockaddr_in6 *)&myaddr)->sin6_addr,
1838				port, err ? "failed" : "ok", err);
1839	return err;
1840}
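/*
 * Illustrative sketch, not part of the transport code: xs_bind() above
 * walks the reserved-port range downward from a random starting point,
 * wrapping around at most twice.  With the default 665..1023 range and a
 * starting port of 667, candidates are tried as 667, 666, 665, 1023,
 * 1022, ..., 666, 665 before the loop gives up with -EADDRINUSE.  The
 * helper below (a hypothetical name, added only to make that traversal
 * order explicit) mirrors xs_next_srcport():
 */
static inline unsigned short xs_example_next_port(unsigned short port,
						  unsigned short min,
						  unsigned short max)
{
	/* falling off the bottom of the range wraps back to the top */
	if (port <= min || port > max)
		return max;
	return port - 1;
}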
1841
1842/*
1843 * We don't support autobind on AF_LOCAL sockets
1844 */
1845static void xs_local_rpcbind(struct rpc_task *task)
1846{
1847	xprt_set_bound(task->tk_xprt);
1848}
1849
1850static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
1851{
1852}
1853
1854#ifdef CONFIG_DEBUG_LOCK_ALLOC
1855static struct lock_class_key xs_key[2];
1856static struct lock_class_key xs_slock_key[2];
1857
1858static inline void xs_reclassify_socketu(struct socket *sock)
1859{
1860	struct sock *sk = sock->sk;
1861
1862	sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
1863		&xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
1864}
1865
1866static inline void xs_reclassify_socket4(struct socket *sock)
1867{
1868	struct sock *sk = sock->sk;
1869
1870	sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
1871		&xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
1872}
1873
1874static inline void xs_reclassify_socket6(struct socket *sock)
1875{
1876	struct sock *sk = sock->sk;
1877
1878	sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
1879		&xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
1880}
1881
1882static inline void xs_reclassify_socket(int family, struct socket *sock)
1883{
1884	WARN_ON_ONCE(sock_owned_by_user(sock->sk));
1885	if (sock_owned_by_user(sock->sk))
1886		return;
1887
1888	switch (family) {
1889	case AF_LOCAL:
1890		xs_reclassify_socketu(sock);
1891		break;
1892	case AF_INET:
1893		xs_reclassify_socket4(sock);
1894		break;
1895	case AF_INET6:
1896		xs_reclassify_socket6(sock);
1897		break;
1898	}
1899}
1900#else
1901static inline void xs_reclassify_socket(int family, struct socket *sock)
1902{
1903}
1904#endif
1905
1906static void xs_dummy_setup_socket(struct work_struct *work)
1907{
1908}
1909
1910static struct socket *xs_create_sock(struct rpc_xprt *xprt,
1911		struct sock_xprt *transport, int family, int type,
1912		int protocol, bool reuseport)
1913{
1914	struct socket *sock;
1915	int err;
1916
1917	err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
1918	if (err < 0) {
1919		dprintk("RPC:       can't create %d transport socket (%d).\n",
1920				protocol, -err);
1921		goto out;
1922	}
1923	xs_reclassify_socket(family, sock);
1924
1925	if (reuseport)
1926		xs_sock_set_reuseport(sock);
1927
1928	err = xs_bind(transport, sock);
1929	if (err) {
1930		sock_release(sock);
1931		goto out;
1932	}
1933
1934	return sock;
1935out:
1936	return ERR_PTR(err);
1937}
1938
1939static int xs_local_finish_connecting(struct rpc_xprt *xprt,
1940				      struct socket *sock)
1941{
1942	struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1943									xprt);
1944
1945	if (!transport->inet) {
1946		struct sock *sk = sock->sk;
1947
1948		write_lock_bh(&sk->sk_callback_lock);
1949
1950		xs_save_old_callbacks(transport, sk);
1951
1952		sk->sk_user_data = xprt;
1953		sk->sk_data_ready = xs_data_ready;
1954		sk->sk_write_space = xs_udp_write_space;
1955		sk->sk_error_report = xs_error_report;
1956		sk->sk_allocation = GFP_NOIO;
1957
1958		xprt_clear_connected(xprt);
1959
1960		/* Reset to new socket */
1961		transport->sock = sock;
1962		transport->inet = sk;
1963
1964		write_unlock_bh(&sk->sk_callback_lock);
1965	}
1966
1967	/* Tell the socket layer to start connecting... */
1968	xprt->stat.connect_count++;
1969	xprt->stat.connect_start = jiffies;
1970	return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
1971}
1972
1973/**
1974 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
1975 * @transport: socket transport to connect
1976 */
1977static int xs_local_setup_socket(struct sock_xprt *transport)
1978{
1979	struct rpc_xprt *xprt = &transport->xprt;
1980	struct socket *sock;
1981	int status = -EIO;
1982
1983	status = __sock_create(xprt->xprt_net, AF_LOCAL,
1984					SOCK_STREAM, 0, &sock, 1);
1985	if (status < 0) {
1986		dprintk("RPC:       can't create AF_LOCAL "
1987			"transport socket (%d).\n", -status);
1988		goto out;
1989	}
1990	xs_reclassify_socket(AF_LOCAL, sock);
1991
1992	dprintk("RPC:       worker connecting xprt %p via AF_LOCAL to %s\n",
1993			xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1994
1995	status = xs_local_finish_connecting(xprt, sock);
1996	trace_rpc_socket_connect(xprt, sock, status);
1997	switch (status) {
1998	case 0:
1999		dprintk("RPC:       xprt %p connected to %s\n",
2000				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
2001		xprt_set_connected(xprt);
		/* fall through */
2002	case -ENOBUFS:
2003		break;
2004	case -ENOENT:
2005		dprintk("RPC:       xprt %p: socket %s does not exist\n",
2006				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
2007		break;
2008	case -ECONNREFUSED:
2009		dprintk("RPC:       xprt %p: connection refused for %s\n",
2010				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
2011		break;
2012	default:
2013		printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
2014				__func__, -status,
2015				xprt->address_strings[RPC_DISPLAY_ADDR]);
2016	}
2017
2018out:
2019	xprt_clear_connecting(xprt);
2020	xprt_wake_pending_tasks(xprt, status);
2021	return status;
2022}
2023
2024static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2025{
2026	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2027	int ret;
2028
2029	if (RPC_IS_ASYNC(task)) {
2030		/*
2031		 * We want the AF_LOCAL connect to be resolved in the
2032		 * filesystem namespace of the process making the rpc
2033		 * call.  Thus we connect synchronously.
2034		 *
2035		 * If we want to support asynchronous AF_LOCAL calls,
2036		 * we'll need to figure out how to pass a namespace to
2037		 * connect.
2038		 */
2039		rpc_exit(task, -ENOTCONN);
2040		return;
2041	}
2042	ret = xs_local_setup_socket(transport);
2043	if (ret && !RPC_IS_SOFTCONN(task))
2044		msleep_interruptible(15000);
2045}
2046
2047#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
2048/*
2049 * Note that this should be called with XPRT_LOCKED held (or when we otherwise
2050 * know that we have exclusive access to the socket), to guard against
2051 * races with xs_reset_transport.
2052 */
2053static void xs_set_memalloc(struct rpc_xprt *xprt)
2054{
2055	struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
2056			xprt);
2057
2058	/*
2059	 * If there's no sock, then we have nothing to set. The
2060	 * reconnecting process will get it for us.
2061	 */
2062	if (!transport->inet)
2063		return;
2064	if (atomic_read(&xprt->swapper))
2065		sk_set_memalloc(transport->inet);
2066}
2067
2068/**
2069 * xs_enable_swap - Tag this transport as being used for swap.
2070 * @xprt: transport to tag
2071 *
2072 * Take a reference to this transport on behalf of the rpc_clnt, and
2073 * optionally mark it for swapping if it wasn't already.
2074 */
2075static int
2076xs_enable_swap(struct rpc_xprt *xprt)
2077{
2078	struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
2079
2080	if (atomic_inc_return(&xprt->swapper) != 1)
2081		return 0;
2082	if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
2083		return -ERESTARTSYS;
2084	if (xs->inet)
2085		sk_set_memalloc(xs->inet);
2086	xprt_release_xprt(xprt, NULL);
2087	return 0;
2088}
2089
2090/**
2091 * xs_disable_swap - Untag this transport as being used for swap.
2092 * @xprt: transport to untag
2093 *
2094 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the
2095 * swapper refcount goes to 0, untag the socket as a memalloc socket.
2096 */
2097static void
2098xs_disable_swap(struct rpc_xprt *xprt)
2099{
2100	struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
2101
2102	if (!atomic_dec_and_test(&xprt->swapper))
2103		return;
2104	if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
2105		return;
2106	if (xs->inet)
2107		sk_clear_memalloc(xs->inet);
2108	xprt_release_xprt(xprt, NULL);
2109}
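/*
 * Usage sketch (hypothetical caller, shown only to illustrate the
 * pairing): each user of a swap-over-NFS transport takes one swapper
 * reference and drops it again when done, e.g.
 *
 *	if (xs_enable_swap(xprt) == 0) {
 *		... transport may now carry memory-reclaim traffic ...
 *		xs_disable_swap(xprt);
 *	}
 */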
2110#else
2111static void xs_set_memalloc(struct rpc_xprt *xprt)
2112{
2113}
2114
2115static int
2116xs_enable_swap(struct rpc_xprt *xprt)
2117{
2118	return -EINVAL;
2119}
2120
2121static void
2122xs_disable_swap(struct rpc_xprt *xprt)
2123{
2124}
2125#endif
2126
2127static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2128{
2129	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2130
2131	if (!transport->inet) {
2132		struct sock *sk = sock->sk;
2133
2134		write_lock_bh(&sk->sk_callback_lock);
2135
2136		xs_save_old_callbacks(transport, sk);
2137
2138		sk->sk_user_data = xprt;
2139		sk->sk_data_ready = xs_data_ready;
2140		sk->sk_write_space = xs_udp_write_space;
2141		sk->sk_allocation = GFP_NOIO;
2142
2143		xprt_set_connected(xprt);
2144
2145		/* Reset to new socket */
2146		transport->sock = sock;
2147		transport->inet = sk;
2148
2149		xs_set_memalloc(xprt);
2150
2151		write_unlock_bh(&sk->sk_callback_lock);
2152	}
2153	xs_udp_do_set_buffer_size(xprt);
2154}
2155
2156static void xs_udp_setup_socket(struct work_struct *work)
2157{
2158	struct sock_xprt *transport =
2159		container_of(work, struct sock_xprt, connect_worker.work);
2160	struct rpc_xprt *xprt = &transport->xprt;
2161	struct socket *sock = transport->sock;
2162	int status = -EIO;
2163
2164	sock = xs_create_sock(xprt, transport,
2165			xs_addr(xprt)->sa_family, SOCK_DGRAM,
2166			IPPROTO_UDP, false);
2167	if (IS_ERR(sock))
2168		goto out;
2169
2170	dprintk("RPC:       worker connecting xprt %p via %s to "
2171				"%s (port %s)\n", xprt,
2172			xprt->address_strings[RPC_DISPLAY_PROTO],
2173			xprt->address_strings[RPC_DISPLAY_ADDR],
2174			xprt->address_strings[RPC_DISPLAY_PORT]);
2175
2176	xs_udp_finish_connecting(xprt, sock);
2177	trace_rpc_socket_connect(xprt, sock, 0);
2178	status = 0;
2179out:
2180	xprt_unlock_connect(xprt, transport);
2181	xprt_clear_connecting(xprt);
2182	xprt_wake_pending_tasks(xprt, status);
2183}
2184
2185/**
2186 * xs_tcp_shutdown - gracefully shut down a TCP socket
2187 * @xprt: transport
2188 *
2189 * Initiates a graceful shutdown of the TCP socket by calling the
2190 * equivalent of shutdown(SHUT_RDWR);
2191 */
2192static void xs_tcp_shutdown(struct rpc_xprt *xprt)
2193{
2194	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2195	struct socket *sock = transport->sock;
2196
2197	if (sock == NULL)
2198		return;
2199	if (xprt_connected(xprt)) {
2200		kernel_sock_shutdown(sock, SHUT_RDWR);
2201		trace_rpc_socket_shutdown(xprt, sock);
2202	} else
2203		xs_reset_transport(transport);
2204}
2205
2206static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2207{
2208	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2209	int ret = -ENOTCONN;
2210
2211	if (!transport->inet) {
2212		struct sock *sk = sock->sk;
2213		unsigned int keepidle = xprt->timeout->to_initval / HZ;
2214		unsigned int keepcnt = xprt->timeout->to_retries + 1;
2215		unsigned int opt_on = 1;
2216		unsigned int timeo;
2217
2218		/* TCP Keepalive options */
2219		kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
2220				(char *)&opt_on, sizeof(opt_on));
2221		kernel_setsockopt(sock, SOL_TCP, TCP_KEEPIDLE,
2222				(char *)&keepidle, sizeof(keepidle));
2223		kernel_setsockopt(sock, SOL_TCP, TCP_KEEPINTVL,
2224				(char *)&keepidle, sizeof(keepidle));
2225		kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT,
2226				(char *)&keepcnt, sizeof(keepcnt));
2227
2228		/* TCP user timeout (see RFC5482) */
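		/*
		 * Worked example: with xs_tcp_default_timeout further down
		 * in this file (to_initval = 60 * HZ, to_retries = 2) this
		 * works out to 60000 ms * 3 = 180000 ms, i.e. the kernel
		 * aborts the connection after three minutes without
		 * acknowledged data.
		 */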
2229		timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
2230			(xprt->timeout->to_retries + 1);
2231		kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT,
2232				(char *)&timeo, sizeof(timeo));
2233
2234		write_lock_bh(&sk->sk_callback_lock);
2235
2236		xs_save_old_callbacks(transport, sk);
2237
2238		sk->sk_user_data = xprt;
2239		sk->sk_data_ready = xs_tcp_data_ready;
2240		sk->sk_state_change = xs_tcp_state_change;
2241		sk->sk_write_space = xs_tcp_write_space;
2242		sk->sk_error_report = xs_error_report;
2243		sk->sk_allocation = GFP_NOIO;
2244
2245		/* socket options */
2246		sock_reset_flag(sk, SOCK_LINGER);
2247		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
2248
2249		xprt_clear_connected(xprt);
2250
2251		/* Reset to new socket */
2252		transport->sock = sock;
2253		transport->inet = sk;
2254
2255		write_unlock_bh(&sk->sk_callback_lock);
2256	}
2257
2258	if (!xprt_bound(xprt))
2259		goto out;
2260
2261	xs_set_memalloc(xprt);
2262
2263	/* Tell the socket layer to start connecting... */
2264	xprt->stat.connect_count++;
2265	xprt->stat.connect_start = jiffies;
2266	set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
2267	ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
2268	switch (ret) {
2269	case 0:
2270		xs_set_srcport(transport, sock);
		/* fall through */
2271	case -EINPROGRESS:
2272		/* SYN_SENT! */
2273		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2274			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2275	}
2276out:
2277	return ret;
2278}
2279
2280/**
2281 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
 * @work: work item embedded in the sock_xprt to be connected
2282 *
2283 * Invoked from the rpciod work queue.
2284 */
2285static void xs_tcp_setup_socket(struct work_struct *work)
2286{
2287	struct sock_xprt *transport =
2288		container_of(work, struct sock_xprt, connect_worker.work);
2289	struct socket *sock = transport->sock;
2290	struct rpc_xprt *xprt = &transport->xprt;
2291	int status = -EIO;
2292
2293	if (!sock) {
2294		sock = xs_create_sock(xprt, transport,
2295				xs_addr(xprt)->sa_family, SOCK_STREAM,
2296				IPPROTO_TCP, true);
2297		if (IS_ERR(sock)) {
2298			status = PTR_ERR(sock);
2299			goto out;
2300		}
2301	}
2302
2303	dprintk("RPC:       worker connecting xprt %p via %s to "
2304				"%s (port %s)\n", xprt,
2305			xprt->address_strings[RPC_DISPLAY_PROTO],
2306			xprt->address_strings[RPC_DISPLAY_ADDR],
2307			xprt->address_strings[RPC_DISPLAY_PORT]);
2308
2309	status = xs_tcp_finish_connecting(xprt, sock);
2310	trace_rpc_socket_connect(xprt, sock, status);
2311	dprintk("RPC:       %p connect status %d connected %d sock state %d\n",
2312			xprt, -status, xprt_connected(xprt),
2313			sock->sk->sk_state);
2314	switch (status) {
2315	default:
2316		printk("%s: connect returned unhandled error %d\n",
2317			__func__, status);
		/* fall through */
2318	case -EADDRNOTAVAIL:
2319		/* We're probably in TIME_WAIT. Get rid of existing socket,
2320		 * and retry
2321		 */
2322		xs_tcp_force_close(xprt);
2323		break;
2324	case 0:
2325	case -EINPROGRESS:
2326	case -EALREADY:
2327		xprt_unlock_connect(xprt, transport);
2328		return;
2329	case -EINVAL:
2330		/* Happens, for instance, if the user specified a link
2331		 * local IPv6 address without a scope-id.
2332		 */
2333	case -ECONNREFUSED:
2334	case -ECONNRESET:
2335	case -ENETUNREACH:
2336	case -EADDRINUSE:
2337	case -ENOBUFS:
2338		/* retry with existing socket, after a delay */
2339		xs_tcp_force_close(xprt);
2340		goto out;
2341	}
2342	status = -EAGAIN;
2343out:
2344	xprt_unlock_connect(xprt, transport);
2345	xprt_clear_connecting(xprt);
2346	xprt_wake_pending_tasks(xprt, status);
2347}
2348
2349/**
2350 * xs_connect - connect a socket to a remote endpoint
2351 * @xprt: pointer to transport structure
2352 * @task: address of RPC task that manages state of connect request
2353 *
2354 * TCP: If the remote end dropped the connection, delay reconnecting.
2355 *
2356 * UDP socket connects are synchronous, but we use a work queue anyway
2357 * to guarantee that even unprivileged user processes can set up a
2358 * socket on a privileged port.
2359 *
2360 * If a UDP socket connect fails, the delay behavior here prevents
2361 * retry floods (hard mounts).
2362 */
2363static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2364{
2365	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2366
2367	WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
2368
2369	if (transport->sock != NULL) {
2370		dprintk("RPC:       xs_connect delayed xprt %p for %lu "
2371				"seconds\n",
2372				xprt, xprt->reestablish_timeout / HZ);
2373
2374		/* Start by resetting any existing state */
2375		xs_reset_transport(transport);
2376
2377		queue_delayed_work(rpciod_workqueue,
2378				   &transport->connect_worker,
2379				   xprt->reestablish_timeout);
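		/*
		 * Exponential backoff: starting from XS_TCP_INIT_REEST_TO
		 * (3 seconds), the delay doubles on each pass (3s, 6s, 12s,
		 * ...) and is clamped below to XS_TCP_MAX_REEST_TO, so a
		 * long outage never pushes the reconnect delay past five
		 * minutes.
		 */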
2380		xprt->reestablish_timeout <<= 1;
2381		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2382			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2383		if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
2384			xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
2385	} else {
2386		dprintk("RPC:       xs_connect scheduled xprt %p\n", xprt);
2387		queue_delayed_work(rpciod_workqueue,
2388				   &transport->connect_worker, 0);
2389	}
2390}
2391
2392/**
2393 * xs_local_print_stats - display AF_LOCAL socket-specific stats
2394 * @xprt: rpc_xprt struct containing statistics
2395 * @seq: output file
2396 *
2397 */
2398static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2399{
2400	long idle_time = 0;
2401
2402	if (xprt_connected(xprt))
2403		idle_time = (long)(jiffies - xprt->last_used) / HZ;
2404
2405	seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
2406			"%llu %llu %lu %llu %llu\n",
2407			xprt->stat.bind_count,
2408			xprt->stat.connect_count,
2409			xprt->stat.connect_time,
2410			idle_time,
2411			xprt->stat.sends,
2412			xprt->stat.recvs,
2413			xprt->stat.bad_xids,
2414			xprt->stat.req_u,
2415			xprt->stat.bklog_u,
2416			xprt->stat.max_slots,
2417			xprt->stat.sending_u,
2418			xprt->stat.pending_u);
2419}
2420
2421/**
2422 * xs_udp_print_stats - display UDP socket-specific stats
2423 * @xprt: rpc_xprt struct containing statistics
2424 * @seq: output file
2425 *
2426 */
2427static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2428{
2429	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2430
2431	seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu "
2432			"%lu %llu %llu\n",
2433			transport->srcport,
2434			xprt->stat.bind_count,
2435			xprt->stat.sends,
2436			xprt->stat.recvs,
2437			xprt->stat.bad_xids,
2438			xprt->stat.req_u,
2439			xprt->stat.bklog_u,
2440			xprt->stat.max_slots,
2441			xprt->stat.sending_u,
2442			xprt->stat.pending_u);
2443}
2444
2445/**
2446 * xs_tcp_print_stats - display TCP socket-specific stats
2447 * @xprt: rpc_xprt struct containing statistics
2448 * @seq: output file
2449 *
2450 */
2451static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2452{
2453	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2454	long idle_time = 0;
2455
2456	if (xprt_connected(xprt))
2457		idle_time = (long)(jiffies - xprt->last_used) / HZ;
2458
2459	seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu "
2460			"%llu %llu %lu %llu %llu\n",
2461			transport->srcport,
2462			xprt->stat.bind_count,
2463			xprt->stat.connect_count,
2464			xprt->stat.connect_time,
2465			idle_time,
2466			xprt->stat.sends,
2467			xprt->stat.recvs,
2468			xprt->stat.bad_xids,
2469			xprt->stat.req_u,
2470			xprt->stat.bklog_u,
2471			xprt->stat.max_slots,
2472			xprt->stat.sending_u,
2473			xprt->stat.pending_u);
2474}
2475
2476/*
2477 * Allocate a bunch of pages for a scratch buffer for the rpc code. The reason
2478 * we allocate pages instead of doing a kmalloc like rpc_malloc does is that
2479 * we want to be able to use the server side send routines.
2480 */
2481static void *bc_malloc(struct rpc_task *task, size_t size)
2482{
2483	struct page *page;
2484	struct rpc_buffer *buf;
2485
2486	WARN_ON_ONCE(size > PAGE_SIZE - sizeof(struct rpc_buffer));
2487	if (size > PAGE_SIZE - sizeof(struct rpc_buffer))
2488		return NULL;
2489
2490	page = alloc_page(GFP_KERNEL);
2491	if (!page)
2492		return NULL;
2493
2494	buf = page_address(page);
2495	buf->len = PAGE_SIZE;
2496
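	/* Hand back the data area; bc_free() recovers the enclosing
	 * struct rpc_buffer with container_of() and frees the whole page. */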
2497	return buf->data;
2498}
2499
2500/*
2501 * Free the space allocated in the bc_alloc routine
2502 */
2503static void bc_free(void *buffer)
2504{
2505	struct rpc_buffer *buf;
2506
2507	if (!buffer)
2508		return;
2509
2510	buf = container_of(buffer, struct rpc_buffer, data);
2511	free_page((unsigned long)buf);
2512}
2513
2514/*
2515 * Use the svc_sock to send the callback. Must be called with the xprt's
2516 * xpt_mutex held. Borrows heavily from svc_tcp_sendto and xs_tcp_send_request.
2517 */
2518static int bc_sendto(struct rpc_rqst *req)
2519{
2520	int len;
2521	struct xdr_buf *xbufp = &req->rq_snd_buf;
2522	struct rpc_xprt *xprt = req->rq_xprt;
2523	struct sock_xprt *transport =
2524				container_of(xprt, struct sock_xprt, xprt);
2525	struct socket *sock = transport->sock;
2526	unsigned long headoff;
2527	unsigned long tailoff;
2528
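	/*
	 * Prepend the RPC record marker: one 4-byte word whose top bit
	 * flags the final fragment and whose low 31 bits carry the
	 * fragment length (RFC 5531 record marking).
	 */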
2529	xs_encode_stream_record_marker(xbufp);
2530
2531	tailoff = (unsigned long)xbufp->tail[0].iov_base & ~PAGE_MASK;
2532	headoff = (unsigned long)xbufp->head[0].iov_base & ~PAGE_MASK;
2533	len = svc_send_common(sock, xbufp,
2534			      virt_to_page(xbufp->head[0].iov_base), headoff,
2535			      xbufp->tail[0].iov_base, tailoff);
2536
2537	if (len != xbufp->len) {
2538		printk(KERN_NOTICE "Error sending entire callback!\n");
2539		len = -EAGAIN;
2540	}
2541
2542	return len;
2543}
2544
2545/*
2546 * The send routine. Borrows from svc_send
2547 */
2548static int bc_send_request(struct rpc_task *task)
2549{
2550	struct rpc_rqst *req = task->tk_rqstp;
2551	struct svc_xprt	*xprt;
2552	int len;
2553
2554	dprintk("sending request with xid: %08x\n", ntohl(req->rq_xid));
2555	/*
2556	 * Get the server socket associated with this callback xprt
2557	 */
2558	xprt = req->rq_xprt->bc_xprt;
2559
2560	/*
2561	 * Grab the mutex to serialize data as the connection is shared
2562	 * with the fore channel
2563	 */
2564	if (!mutex_trylock(&xprt->xpt_mutex)) {
2565		rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
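		/*
		 * Retry the trylock once after queueing ourselves: if the
		 * previous holder released xpt_mutex before we slept, no
		 * wakeup is coming, so take the mutex now and dequeue.
		 */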
2566		if (!mutex_trylock(&xprt->xpt_mutex))
2567			return -EAGAIN;
2568		rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task);
2569	}
2570	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
2571		len = -ENOTCONN;
2572	else
2573		len = bc_sendto(req);
2574	mutex_unlock(&xprt->xpt_mutex);
2575
2576	if (len > 0)
2577		len = 0;
2578
2579	return len;
2580}
2581
2582/*
2583 * The close routine. Since this is client initiated, we do nothing
2584 */
2585
2586static void bc_close(struct rpc_xprt *xprt)
2587{
2588}
2589
2590/*
2591 * The xprt destroy routine. Again, because this connection is client
2592 * initiated, we do nothing
2593 */
2594
2595static void bc_destroy(struct rpc_xprt *xprt)
2596{
2597	dprintk("RPC:       bc_destroy xprt %p\n", xprt);
2598
2599	xs_xprt_free(xprt);
2600	module_put(THIS_MODULE);
2601}
2602
2603static struct rpc_xprt_ops xs_local_ops = {
2604	.reserve_xprt		= xprt_reserve_xprt,
2605	.release_xprt		= xs_tcp_release_xprt,
2606	.alloc_slot		= xprt_alloc_slot,
2607	.rpcbind		= xs_local_rpcbind,
2608	.set_port		= xs_local_set_port,
2609	.connect		= xs_local_connect,
2610	.buf_alloc		= rpc_malloc,
2611	.buf_free		= rpc_free,
2612	.send_request		= xs_local_send_request,
2613	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
2614	.close			= xs_close,
2615	.destroy		= xs_destroy,
2616	.print_stats		= xs_local_print_stats,
2617	.enable_swap		= xs_enable_swap,
2618	.disable_swap		= xs_disable_swap,
2619};
2620
2621static struct rpc_xprt_ops xs_udp_ops = {
2622	.set_buffer_size	= xs_udp_set_buffer_size,
2623	.reserve_xprt		= xprt_reserve_xprt_cong,
2624	.release_xprt		= xprt_release_xprt_cong,
2625	.alloc_slot		= xprt_alloc_slot,
2626	.rpcbind		= rpcb_getport_async,
2627	.set_port		= xs_set_port,
2628	.connect		= xs_connect,
2629	.buf_alloc		= rpc_malloc,
2630	.buf_free		= rpc_free,
2631	.send_request		= xs_udp_send_request,
2632	.set_retrans_timeout	= xprt_set_retrans_timeout_rtt,
2633	.timer			= xs_udp_timer,
2634	.release_request	= xprt_release_rqst_cong,
2635	.close			= xs_close,
2636	.destroy		= xs_destroy,
2637	.print_stats		= xs_udp_print_stats,
2638	.enable_swap		= xs_enable_swap,
2639	.disable_swap		= xs_disable_swap,
2640	.inject_disconnect	= xs_inject_disconnect,
2641};
2642
2643static struct rpc_xprt_ops xs_tcp_ops = {
2644	.reserve_xprt		= xprt_reserve_xprt,
2645	.release_xprt		= xs_tcp_release_xprt,
2646	.alloc_slot		= xprt_lock_and_alloc_slot,
2647	.rpcbind		= rpcb_getport_async,
2648	.set_port		= xs_set_port,
2649	.connect		= xs_connect,
2650	.buf_alloc		= rpc_malloc,
2651	.buf_free		= rpc_free,
2652	.send_request		= xs_tcp_send_request,
2653	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
2654	.close			= xs_tcp_shutdown,
2655	.destroy		= xs_destroy,
2656	.print_stats		= xs_tcp_print_stats,
2657	.enable_swap		= xs_enable_swap,
2658	.disable_swap		= xs_disable_swap,
2659	.inject_disconnect	= xs_inject_disconnect,
2660#ifdef CONFIG_SUNRPC_BACKCHANNEL
2661	.bc_setup		= xprt_setup_bc,
2662	.bc_up			= xs_tcp_bc_up,
2663	.bc_free_rqst		= xprt_free_bc_rqst,
2664	.bc_destroy		= xprt_destroy_bc,
2665#endif
2666};
2667
2668/*
2669 * The rpc_xprt_ops for the server backchannel
2670 */
2671
2672static struct rpc_xprt_ops bc_tcp_ops = {
2673	.reserve_xprt		= xprt_reserve_xprt,
2674	.release_xprt		= xprt_release_xprt,
2675	.alloc_slot		= xprt_alloc_slot,
2676	.buf_alloc		= bc_malloc,
2677	.buf_free		= bc_free,
2678	.send_request		= bc_send_request,
2679	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
2680	.close			= bc_close,
2681	.destroy		= bc_destroy,
2682	.print_stats		= xs_tcp_print_stats,
2683	.enable_swap		= xs_enable_swap,
2684	.disable_swap		= xs_disable_swap,
2685	.inject_disconnect	= xs_inject_disconnect,
2686};
2687
2688static int xs_init_anyaddr(const int family, struct sockaddr *sap)
2689{
2690	static const struct sockaddr_in sin = {
2691		.sin_family		= AF_INET,
2692		.sin_addr.s_addr	= htonl(INADDR_ANY),
2693	};
2694	static const struct sockaddr_in6 sin6 = {
2695		.sin6_family		= AF_INET6,
2696		.sin6_addr		= IN6ADDR_ANY_INIT,
2697	};
2698
2699	switch (family) {
2700	case AF_LOCAL:
2701		break;
2702	case AF_INET:
2703		memcpy(sap, &sin, sizeof(sin));
2704		break;
2705	case AF_INET6:
2706		memcpy(sap, &sin6, sizeof(sin6));
2707		break;
2708	default:
2709		dprintk("RPC:       %s: Bad address family\n", __func__);
2710		return -EAFNOSUPPORT;
2711	}
2712	return 0;
2713}
2714
2715static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2716				      unsigned int slot_table_size,
2717				      unsigned int max_slot_table_size)
2718{
2719	struct rpc_xprt *xprt;
2720	struct sock_xprt *new;
2721
2722	if (args->addrlen > sizeof(xprt->addr)) {
2723		dprintk("RPC:       xs_setup_xprt: address too large\n");
2724		return ERR_PTR(-EBADF);
2725	}
2726
2727	xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size,
2728			max_slot_table_size);
2729	if (xprt == NULL) {
2730		dprintk("RPC:       xs_setup_xprt: couldn't allocate "
2731				"rpc_xprt\n");
2732		return ERR_PTR(-ENOMEM);
2733	}
2734
2735	new = container_of(xprt, struct sock_xprt, xprt);
2736	mutex_init(&new->recv_mutex);
2737	memcpy(&xprt->addr, args->dstaddr, args->addrlen);
2738	xprt->addrlen = args->addrlen;
2739	if (args->srcaddr)
2740		memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
2741	else {
2742		int err;
2743		err = xs_init_anyaddr(args->dstaddr->sa_family,
2744					(struct sockaddr *)&new->srcaddr);
2745		if (err != 0) {
2746			xprt_free(xprt);
2747			return ERR_PTR(err);
2748		}
2749	}
2750
2751	return xprt;
2752}
2753
2754static const struct rpc_timeout xs_local_default_timeout = {
2755	.to_initval = 10 * HZ,
2756	.to_maxval = 10 * HZ,
2757	.to_retries = 2,
2758};
2759
2760/**
2761 * xs_setup_local - Set up transport to use an AF_LOCAL socket
2762 * @args: rpc transport creation arguments
2763 *
2764 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
2765 */
2766static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
2767{
2768	struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
2769	struct sock_xprt *transport;
2770	struct rpc_xprt *xprt;
2771	struct rpc_xprt *ret;
2772
2773	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2774			xprt_max_tcp_slot_table_entries);
2775	if (IS_ERR(xprt))
2776		return xprt;
2777	transport = container_of(xprt, struct sock_xprt, xprt);
2778
2779	xprt->prot = 0;
2780	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
2781	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2782
2783	xprt->bind_timeout = XS_BIND_TO;
2784	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2785	xprt->idle_timeout = XS_IDLE_DISC_TO;
2786
2787	xprt->ops = &xs_local_ops;
2788	xprt->timeout = &xs_local_default_timeout;
2789
2790	INIT_WORK(&transport->recv_worker, xs_local_data_receive_workfn);
2791	INIT_DELAYED_WORK(&transport->connect_worker,
2792			xs_dummy_setup_socket);
2793
2794	switch (sun->sun_family) {
2795	case AF_LOCAL:
2796		if (sun->sun_path[0] != '/') {
2797			dprintk("RPC:       bad AF_LOCAL address: %s\n",
2798					sun->sun_path);
2799			ret = ERR_PTR(-EINVAL);
2800			goto out_err;
2801		}
2802		xprt_set_bound(xprt);
2803		xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
2804		ret = ERR_PTR(xs_local_setup_socket(transport));
2805		if (ret)
2806			goto out_err;
2807		break;
2808	default:
2809		ret = ERR_PTR(-EAFNOSUPPORT);
2810		goto out_err;
2811	}
2812
2813	dprintk("RPC:       set up xprt to %s via AF_LOCAL\n",
2814			xprt->address_strings[RPC_DISPLAY_ADDR]);
2815
2816	if (try_module_get(THIS_MODULE))
2817		return xprt;
2818	ret = ERR_PTR(-EINVAL);
2819out_err:
2820	xs_xprt_free(xprt);
2821	return ret;
2822}
2823
2824static const struct rpc_timeout xs_udp_default_timeout = {
2825	.to_initval = 5 * HZ,
2826	.to_maxval = 30 * HZ,
2827	.to_increment = 5 * HZ,
2828	.to_retries = 5,
2829};
2830
2831/**
2832 * xs_setup_udp - Set up transport to use a UDP socket
2833 * @args: rpc transport creation arguments
2834 *
2835 */
2836static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2837{
2838	struct sockaddr *addr = args->dstaddr;
2839	struct rpc_xprt *xprt;
2840	struct sock_xprt *transport;
2841	struct rpc_xprt *ret;
2842
2843	xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries,
2844			xprt_udp_slot_table_entries);
2845	if (IS_ERR(xprt))
2846		return xprt;
2847	transport = container_of(xprt, struct sock_xprt, xprt);
2848
2849	xprt->prot = IPPROTO_UDP;
2850	xprt->tsh_size = 0;
2851	/* XXX: header size can vary due to auth type, IPv6, etc. */
2852	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
2853
2854	xprt->bind_timeout = XS_BIND_TO;
2855	xprt->reestablish_timeout = XS_UDP_REEST_TO;
2856	xprt->idle_timeout = XS_IDLE_DISC_TO;
2857
2858	xprt->ops = &xs_udp_ops;
2859
2860	xprt->timeout = &xs_udp_default_timeout;
2861
2862	INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn);
2863	INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket);
2864
2865	switch (addr->sa_family) {
2866	case AF_INET:
2867		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
2868			xprt_set_bound(xprt);
2869
2870		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
2871		break;
2872	case AF_INET6:
2873		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
2874			xprt_set_bound(xprt);
2875
2876		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
2877		break;
2878	default:
2879		ret = ERR_PTR(-EAFNOSUPPORT);
2880		goto out_err;
2881	}
2882
2883	if (xprt_bound(xprt))
2884		dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
2885				xprt->address_strings[RPC_DISPLAY_ADDR],
2886				xprt->address_strings[RPC_DISPLAY_PORT],
2887				xprt->address_strings[RPC_DISPLAY_PROTO]);
2888	else
2889		dprintk("RPC:       set up xprt to %s (autobind) via %s\n",
2890				xprt->address_strings[RPC_DISPLAY_ADDR],
2891				xprt->address_strings[RPC_DISPLAY_PROTO]);
2892
2893	if (try_module_get(THIS_MODULE))
2894		return xprt;
2895	ret = ERR_PTR(-EINVAL);
2896out_err:
2897	xs_xprt_free(xprt);
2898	return ret;
2899}
2900
2901static const struct rpc_timeout xs_tcp_default_timeout = {
2902	.to_initval = 60 * HZ,
2903	.to_maxval = 60 * HZ,
2904	.to_retries = 2,
2905};
2906
2907/**
2908 * xs_setup_tcp - Set up transport to use a TCP socket
2909 * @args: rpc transport creation arguments
2910 *
2911 */
2912static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
2913{
2914	struct sockaddr *addr = args->dstaddr;
2915	struct rpc_xprt *xprt;
2916	struct sock_xprt *transport;
2917	struct rpc_xprt *ret;
2918	unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;
2919
2920	if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
2921		max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;
2922
2923	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2924			max_slot_table_size);
2925	if (IS_ERR(xprt))
2926		return xprt;
2927	transport = container_of(xprt, struct sock_xprt, xprt);
2928
2929	xprt->prot = IPPROTO_TCP;
2930	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
2931	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2932
2933	xprt->bind_timeout = XS_BIND_TO;
2934	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2935	xprt->idle_timeout = XS_IDLE_DISC_TO;
2936
2937	xprt->ops = &xs_tcp_ops;
2938	xprt->timeout = &xs_tcp_default_timeout;
2939
2940	INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn);
2941	INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);
2942
2943	switch (addr->sa_family) {
2944	case AF_INET:
2945		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
2946			xprt_set_bound(xprt);
2947
2948		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
2949		break;
2950	case AF_INET6:
2951		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
2952			xprt_set_bound(xprt);
2953
2954		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
2955		break;
2956	default:
2957		ret = ERR_PTR(-EAFNOSUPPORT);
2958		goto out_err;
2959	}
2960
2961	if (xprt_bound(xprt))
2962		dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
2963				xprt->address_strings[RPC_DISPLAY_ADDR],
2964				xprt->address_strings[RPC_DISPLAY_PORT],
2965				xprt->address_strings[RPC_DISPLAY_PROTO]);
2966	else
2967		dprintk("RPC:       set up xprt to %s (autobind) via %s\n",
2968				xprt->address_strings[RPC_DISPLAY_ADDR],
2969				xprt->address_strings[RPC_DISPLAY_PROTO]);
2970
2971	if (try_module_get(THIS_MODULE))
2972		return xprt;
2973	ret = ERR_PTR(-EINVAL);
2974out_err:
2975	xs_xprt_free(xprt);
2976	return ret;
2977}
2978
2979/**
2980 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
2981 * @args: rpc transport creation arguments
2982 *
2983 */
2984static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
2985{
2986	struct sockaddr *addr = args->dstaddr;
2987	struct rpc_xprt *xprt;
2988	struct sock_xprt *transport;
2989	struct svc_sock *bc_sock;
2990	struct rpc_xprt *ret;
2991
2992	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2993			xprt_tcp_slot_table_entries);
2994	if (IS_ERR(xprt))
2995		return xprt;
2996	transport = container_of(xprt, struct sock_xprt, xprt);
2997
2998	xprt->prot = IPPROTO_TCP;
2999	xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
3000	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
3001	xprt->timeout = &xs_tcp_default_timeout;
3002
3003	/* backchannel */
3004	xprt_set_bound(xprt);
3005	xprt->bind_timeout = 0;
3006	xprt->reestablish_timeout = 0;
3007	xprt->idle_timeout = 0;
3008
3009	xprt->ops = &bc_tcp_ops;
3010
3011	switch (addr->sa_family) {
3012	case AF_INET:
3013		xs_format_peer_addresses(xprt, "tcp",
3014					 RPCBIND_NETID_TCP);
3015		break;
3016	case AF_INET6:
3017		xs_format_peer_addresses(xprt, "tcp",
3018				   RPCBIND_NETID_TCP6);
3019		break;
3020	default:
3021		ret = ERR_PTR(-EAFNOSUPPORT);
3022		goto out_err;
3023	}
3024
3025	dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
3026			xprt->address_strings[RPC_DISPLAY_ADDR],
3027			xprt->address_strings[RPC_DISPLAY_PORT],
3028			xprt->address_strings[RPC_DISPLAY_PROTO]);
3029
3030	/*
3031	 * Once we've associated a backchannel xprt with a connection,
3032	 * we want to keep it around as long as the connection lasts,
3033	 * in case we need to start using it for a backchannel again;
3034	 * this reference won't be dropped until bc_xprt is destroyed.
3035	 */
3036	xprt_get(xprt);
3037	args->bc_xprt->xpt_bc_xprt = xprt;
3038	xprt->bc_xprt = args->bc_xprt;
3039	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
3040	transport->sock = bc_sock->sk_sock;
3041	transport->inet = bc_sock->sk_sk;
3042
3043	/*
3044	 * Since we don't want connections for the backchannel, we set
3045	 * the xprt status to connected
3046	 */
3047	xprt_set_connected(xprt);
3048
3049	if (try_module_get(THIS_MODULE))
3050		return xprt;
3051
3052	args->bc_xprt->xpt_bc_xprt = NULL;
3053	xprt_put(xprt);
3054	ret = ERR_PTR(-EINVAL);
3055out_err:
3056	xs_xprt_free(xprt);
3057	return ret;
3058}
3059
3060static struct xprt_class	xs_local_transport = {
3061	.list		= LIST_HEAD_INIT(xs_local_transport.list),
3062	.name		= "named UNIX socket",
3063	.owner		= THIS_MODULE,
3064	.ident		= XPRT_TRANSPORT_LOCAL,
3065	.setup		= xs_setup_local,
3066};
3067
3068static struct xprt_class	xs_udp_transport = {
3069	.list		= LIST_HEAD_INIT(xs_udp_transport.list),
3070	.name		= "udp",
3071	.owner		= THIS_MODULE,
3072	.ident		= XPRT_TRANSPORT_UDP,
3073	.setup		= xs_setup_udp,
3074};
3075
3076static struct xprt_class	xs_tcp_transport = {
3077	.list		= LIST_HEAD_INIT(xs_tcp_transport.list),
3078	.name		= "tcp",
3079	.owner		= THIS_MODULE,
3080	.ident		= XPRT_TRANSPORT_TCP,
3081	.setup		= xs_setup_tcp,
3082};
3083
3084static struct xprt_class	xs_bc_tcp_transport = {
3085	.list		= LIST_HEAD_INIT(xs_bc_tcp_transport.list),
3086	.name		= "tcp NFSv4.1 backchannel",
3087	.owner		= THIS_MODULE,
3088	.ident		= XPRT_TRANSPORT_BC_TCP,
3089	.setup		= xs_setup_bc_tcp,
3090};
3091
3092/**
3093 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
3094 *
3095 */
3096int init_socket_xprt(void)
3097{
3098#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
3099	if (!sunrpc_table_header)
3100		sunrpc_table_header = register_sysctl_table(sunrpc_table);
3101#endif
3102
3103	xprt_register_transport(&xs_local_transport);
3104	xprt_register_transport(&xs_udp_transport);
3105	xprt_register_transport(&xs_tcp_transport);
3106	xprt_register_transport(&xs_bc_tcp_transport);
3107
3108	return 0;
3109}
3110
3111/**
3112 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
3113 *
3114 */
3115void cleanup_socket_xprt(void)
3116{
3117#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
3118	if (sunrpc_table_header) {
3119		unregister_sysctl_table(sunrpc_table_header);
3120		sunrpc_table_header = NULL;
3121	}
3122#endif
3123
3124	xprt_unregister_transport(&xs_local_transport);
3125	xprt_unregister_transport(&xs_udp_transport);
3126	xprt_unregister_transport(&xs_tcp_transport);
3127	xprt_unregister_transport(&xs_bc_tcp_transport);
3128}
3129
3130static int param_set_uint_minmax(const char *val,
3131		const struct kernel_param *kp,
3132		unsigned int min, unsigned int max)
3133{
3134	unsigned int num;
3135	int ret;
3136
3137	if (!val)
3138		return -EINVAL;
3139	ret = kstrtouint(val, 0, &num);
	/* reject any parse failure, including -ERANGE, not just -EINVAL */
3140	if (ret < 0 || num < min || num > max)
3141		return -EINVAL;
3142	*((unsigned int *)kp->arg) = num;
3143	return 0;
3144}
3145
3146static int param_set_portnr(const char *val, const struct kernel_param *kp)
3147{
3148	return param_set_uint_minmax(val, kp,
3149			RPC_MIN_RESVPORT,
3150			RPC_MAX_RESVPORT);
3151}
3152
3153static const struct kernel_param_ops param_ops_portnr = {
3154	.set = param_set_portnr,
3155	.get = param_get_uint,
3156};
3157
3158#define param_check_portnr(name, p) \
3159	__param_check(name, p, unsigned int);
3160
3161module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
3162module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
3163
3164static int param_set_slot_table_size(const char *val,
3165				     const struct kernel_param *kp)
3166{
3167	return param_set_uint_minmax(val, kp,
3168			RPC_MIN_SLOT_TABLE,
3169			RPC_MAX_SLOT_TABLE);
3170}
3171
3172static const struct kernel_param_ops param_ops_slot_table_size = {
3173	.set = param_set_slot_table_size,
3174	.get = param_get_uint,
3175};
3176
3177#define param_check_slot_table_size(name, p) \
3178	__param_check(name, p, unsigned int);
3179
3180static int param_set_max_slot_table_size(const char *val,
3181				     const struct kernel_param *kp)
3182{
3183	return param_set_uint_minmax(val, kp,
3184			RPC_MIN_SLOT_TABLE,
3185			RPC_MAX_SLOT_TABLE_LIMIT);
3186}
3187
3188static const struct kernel_param_ops param_ops_max_slot_table_size = {
3189	.set = param_set_max_slot_table_size,
3190	.get = param_get_uint,
3191};
3192
3193#define param_check_max_slot_table_size(name, p) \
3194	__param_check(name, p, unsigned int);
3195
3196module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
3197		   slot_table_size, 0644);
3198module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries,
3199		   max_slot_table_size, 0644);
3200module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
3201		   slot_table_size, 0644);
3202
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * linux/net/sunrpc/xprtsock.c
   4 *
   5 * Client-side transport implementation for sockets.
   6 *
   7 * TCP callback races fixes (C) 1998 Red Hat
   8 * TCP send fixes (C) 1998 Red Hat
   9 * TCP NFS related read + write fixes
  10 *  (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
  11 *
   12 * Rewrite of large parts of the code in order to stabilize TCP stuff.
  13 * Fix behaviour when socket buffer is full.
  14 *  (C) 1999 Trond Myklebust <trond.myklebust@fys.uio.no>
  15 *
  16 * IP socket transport implementation, (C) 2005 Chuck Lever <cel@netapp.com>
  17 *
  18 * IPv6 support contributed by Gilles Quillard, Bull Open Source, 2005.
  19 *   <gilles.quillard@bull.net>
  20 */
  21
  22#include <linux/types.h>
  23#include <linux/string.h>
  24#include <linux/slab.h>
  25#include <linux/module.h>
  26#include <linux/capability.h>
  27#include <linux/pagemap.h>
  28#include <linux/errno.h>
  29#include <linux/socket.h>
  30#include <linux/in.h>
  31#include <linux/net.h>
  32#include <linux/mm.h>
  33#include <linux/un.h>
  34#include <linux/udp.h>
  35#include <linux/tcp.h>
  36#include <linux/sunrpc/clnt.h>
  37#include <linux/sunrpc/addr.h>
  38#include <linux/sunrpc/sched.h>
  39#include <linux/sunrpc/svcsock.h>
  40#include <linux/sunrpc/xprtsock.h>
  41#include <linux/file.h>
  42#ifdef CONFIG_SUNRPC_BACKCHANNEL
  43#include <linux/sunrpc/bc_xprt.h>
  44#endif
  45
  46#include <net/sock.h>
  47#include <net/checksum.h>
  48#include <net/udp.h>
  49#include <net/tcp.h>
  50#include <linux/bvec.h>
  51#include <linux/highmem.h>
  52#include <linux/uio.h>
  53#include <linux/sched/mm.h>
  54
  55#include <trace/events/sunrpc.h>
  56
  57#include "socklib.h"
  58#include "sunrpc.h"
  59
  60static void xs_close(struct rpc_xprt *xprt);
  61static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
  62		struct socket *sock);
  63
  64/*
  65 * xprtsock tunables
  66 */
  67static unsigned int xprt_udp_slot_table_entries = RPC_DEF_SLOT_TABLE;
  68static unsigned int xprt_tcp_slot_table_entries = RPC_MIN_SLOT_TABLE;
  69static unsigned int xprt_max_tcp_slot_table_entries = RPC_MAX_SLOT_TABLE;
  70
  71static unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
  72static unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
  73
 
 
  74#define XS_TCP_LINGER_TO	(15U * HZ)
  75static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
  76
  77/*
  78 * We can register our own files under /proc/sys/sunrpc by
  79 * calling register_sysctl_table() again.  The files in that
  80 * directory become the union of all files registered there.
  81 *
  82 * We simply need to make sure that we don't collide with
  83 * someone else's file names!
  84 */
  85
  86static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
  87static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
  88static unsigned int max_tcp_slot_table_limit = RPC_MAX_SLOT_TABLE_LIMIT;
  89static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
  90static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
  91
  92static struct ctl_table_header *sunrpc_table_header;
  93
  94/*
  95 * FIXME: changing the UDP slot table size should also resize the UDP
  96 *        socket buffers for existing UDP transports
  97 */
  98static struct ctl_table xs_tunables_table[] = {
  99	{
 100		.procname	= "udp_slot_table_entries",
 101		.data		= &xprt_udp_slot_table_entries,
 102		.maxlen		= sizeof(unsigned int),
 103		.mode		= 0644,
 104		.proc_handler	= proc_dointvec_minmax,
 105		.extra1		= &min_slot_table_size,
 106		.extra2		= &max_slot_table_size
 107	},
 108	{
 109		.procname	= "tcp_slot_table_entries",
 110		.data		= &xprt_tcp_slot_table_entries,
 111		.maxlen		= sizeof(unsigned int),
 112		.mode		= 0644,
 113		.proc_handler	= proc_dointvec_minmax,
 114		.extra1		= &min_slot_table_size,
 115		.extra2		= &max_slot_table_size
 116	},
 117	{
 118		.procname	= "tcp_max_slot_table_entries",
 119		.data		= &xprt_max_tcp_slot_table_entries,
 120		.maxlen		= sizeof(unsigned int),
 121		.mode		= 0644,
 122		.proc_handler	= proc_dointvec_minmax,
 123		.extra1		= &min_slot_table_size,
 124		.extra2		= &max_tcp_slot_table_limit
 125	},
 126	{
 127		.procname	= "min_resvport",
 128		.data		= &xprt_min_resvport,
 129		.maxlen		= sizeof(unsigned int),
 130		.mode		= 0644,
 131		.proc_handler	= proc_dointvec_minmax,
 132		.extra1		= &xprt_min_resvport_limit,
 133		.extra2		= &xprt_max_resvport_limit
 134	},
 135	{
 136		.procname	= "max_resvport",
 137		.data		= &xprt_max_resvport,
 138		.maxlen		= sizeof(unsigned int),
 139		.mode		= 0644,
 140		.proc_handler	= proc_dointvec_minmax,
 141		.extra1		= &xprt_min_resvport_limit,
 142		.extra2		= &xprt_max_resvport_limit
 143	},
 144	{
 145		.procname	= "tcp_fin_timeout",
 146		.data		= &xs_tcp_fin_timeout,
 147		.maxlen		= sizeof(xs_tcp_fin_timeout),
 148		.mode		= 0644,
 149		.proc_handler	= proc_dointvec_jiffies,
 150	},
 151	{ },
 152};
 153
 154static struct ctl_table sunrpc_table[] = {
 155	{
 156		.procname	= "sunrpc",
 157		.mode		= 0555,
 158		.child		= xs_tunables_table
 159	},
 160	{ },
 161};
  162
  163/*
 164 * Wait duration for a reply from the RPC portmapper.
 165 */
 166#define XS_BIND_TO		(60U * HZ)
 167
 168/*
 169 * Delay if a UDP socket connect error occurs.  This is most likely some
 170 * kind of resource problem on the local host.
 171 */
 172#define XS_UDP_REEST_TO		(2U * HZ)
 173
 174/*
 175 * The reestablish timeout allows clients to delay for a bit before attempting
 176 * to reconnect to a server that just dropped our connection.
 177 *
 178 * We implement an exponential backoff when trying to reestablish a TCP
 179 * transport connection with the server.  Some servers like to drop a TCP
 180 * connection when they are overworked, so we start with a short timeout and
 181 * increase over time if the server is down or not responding.
 182 */
  183#define XS_TCP_INIT_REEST_TO	(3U * HZ)
  184
 185/*
 186 * TCP idle timeout; client drops the transport socket if it is idle
 187 * for this long.  Note that we also timeout UDP sockets to prevent
 188 * holding port numbers when there is no RPC traffic.
 189 */
 190#define XS_IDLE_DISC_TO		(5U * 60 * HZ)
 191
 192#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 193# undef  RPC_DEBUG_DATA
 194# define RPCDBG_FACILITY	RPCDBG_TRANS
 195#endif
 196
 197#ifdef RPC_DEBUG_DATA
 198static void xs_pktdump(char *msg, u32 *packet, unsigned int count)
 199{
 200	u8 *buf = (u8 *) packet;
 201	int j;
 202
 203	dprintk("RPC:       %s\n", msg);
 204	for (j = 0; j < count && j < 128; j += 4) {
 205		if (!(j & 31)) {
 206			if (j)
 207				dprintk("\n");
 208			dprintk("0x%04x ", j);
 209		}
 210		dprintk("%02x%02x%02x%02x ",
 211			buf[j], buf[j+1], buf[j+2], buf[j+3]);
 212	}
 213	dprintk("\n");
 214}
 215#else
 216static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
 217{
 218	/* NOP */
 219}
 220#endif
 221
 222static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
 223{
 224	return (struct rpc_xprt *) sk->sk_user_data;
 225}
 226
 227static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
 228{
 229	return (struct sockaddr *) &xprt->addr;
 230}
 231
 232static inline struct sockaddr_un *xs_addr_un(struct rpc_xprt *xprt)
 233{
 234	return (struct sockaddr_un *) &xprt->addr;
 235}
 236
 237static inline struct sockaddr_in *xs_addr_in(struct rpc_xprt *xprt)
 238{
 239	return (struct sockaddr_in *) &xprt->addr;
 240}
 241
 242static inline struct sockaddr_in6 *xs_addr_in6(struct rpc_xprt *xprt)
 243{
 244	return (struct sockaddr_in6 *) &xprt->addr;
 245}
 246
 247static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
 248{
 249	struct sockaddr *sap = xs_addr(xprt);
 250	struct sockaddr_in6 *sin6;
 251	struct sockaddr_in *sin;
 252	struct sockaddr_un *sun;
 253	char buf[128];
 254
 255	switch (sap->sa_family) {
 256	case AF_LOCAL:
 257		sun = xs_addr_un(xprt);
 258		strlcpy(buf, sun->sun_path, sizeof(buf));
 259		xprt->address_strings[RPC_DISPLAY_ADDR] =
 260						kstrdup(buf, GFP_KERNEL);
 261		break;
 262	case AF_INET:
 263		(void)rpc_ntop(sap, buf, sizeof(buf));
 264		xprt->address_strings[RPC_DISPLAY_ADDR] =
 265						kstrdup(buf, GFP_KERNEL);
 266		sin = xs_addr_in(xprt);
 267		snprintf(buf, sizeof(buf), "%08x", ntohl(sin->sin_addr.s_addr));
 268		break;
 269	case AF_INET6:
 270		(void)rpc_ntop(sap, buf, sizeof(buf));
 271		xprt->address_strings[RPC_DISPLAY_ADDR] =
 272						kstrdup(buf, GFP_KERNEL);
 273		sin6 = xs_addr_in6(xprt);
 274		snprintf(buf, sizeof(buf), "%pi6", &sin6->sin6_addr);
 275		break;
 276	default:
 277		BUG();
 278	}
 279
 280	xprt->address_strings[RPC_DISPLAY_HEX_ADDR] = kstrdup(buf, GFP_KERNEL);
 281}
 282
 283static void xs_format_common_peer_ports(struct rpc_xprt *xprt)
 284{
 285	struct sockaddr *sap = xs_addr(xprt);
 286	char buf[128];
 287
 288	snprintf(buf, sizeof(buf), "%u", rpc_get_port(sap));
 289	xprt->address_strings[RPC_DISPLAY_PORT] = kstrdup(buf, GFP_KERNEL);
 290
 291	snprintf(buf, sizeof(buf), "%4hx", rpc_get_port(sap));
 292	xprt->address_strings[RPC_DISPLAY_HEX_PORT] = kstrdup(buf, GFP_KERNEL);
 293}
 294
 295static void xs_format_peer_addresses(struct rpc_xprt *xprt,
 296				     const char *protocol,
 297				     const char *netid)
 298{
 299	xprt->address_strings[RPC_DISPLAY_PROTO] = protocol;
 300	xprt->address_strings[RPC_DISPLAY_NETID] = netid;
 301	xs_format_common_peer_addresses(xprt);
 302	xs_format_common_peer_ports(xprt);
 303}
 304
 305static void xs_update_peer_port(struct rpc_xprt *xprt)
 306{
 307	kfree(xprt->address_strings[RPC_DISPLAY_HEX_PORT]);
 308	kfree(xprt->address_strings[RPC_DISPLAY_PORT]);
 309
 310	xs_format_common_peer_ports(xprt);
 311}
 312
 313static void xs_free_peer_addresses(struct rpc_xprt *xprt)
 314{
 315	unsigned int i;
 316
 317	for (i = 0; i < RPC_DISPLAY_MAX; i++)
 318		switch (i) {
 319		case RPC_DISPLAY_PROTO:
 320		case RPC_DISPLAY_NETID:
 321			continue;
 322		default:
 323			kfree(xprt->address_strings[i]);
 324		}
 325}
 326
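/*
 * XDRBUF_SPARSE_PAGES receive buffers have their pages allocated lazily:
 * back the first @want bytes with pages, and on allocation failure report
 * how many bytes of page data are actually usable to the caller.
 */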
 327static size_t
 328xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
 329{
  330	size_t i, n;
 331
 332	if (!want || !(buf->flags & XDRBUF_SPARSE_PAGES))
 333		return want;
 334	n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT;
 335	for (i = 0; i < n; i++) {
 336		if (buf->pages[i])
 337			continue;
 338		buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp);
 339		if (!buf->pages[i]) {
 340			i *= PAGE_SIZE;
 341			return i > buf->page_base ? i - buf->page_base : 0;
 342		}
 343	}
 344	return want;
 345}
 346
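/*
 * Common receive helper: @seek is how much of this buffer an earlier
 * partial read already consumed.  The skipped bytes are added back into
 * the return value so that callers always see an absolute offset.
 */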
 347static ssize_t
 348xs_sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags, size_t seek)
 349{
 350	ssize_t ret;
 351	if (seek != 0)
 352		iov_iter_advance(&msg->msg_iter, seek);
 353	ret = sock_recvmsg(sock, msg, flags);
 354	return ret > 0 ? ret + seek : ret;
  355}
  356
 357static ssize_t
 358xs_read_kvec(struct socket *sock, struct msghdr *msg, int flags,
 359		struct kvec *kvec, size_t count, size_t seek)
 360{
 361	iov_iter_kvec(&msg->msg_iter, READ, kvec, 1, count);
 362	return xs_sock_recvmsg(sock, msg, flags, seek);
 363}
 364
 365static ssize_t
 366xs_read_bvec(struct socket *sock, struct msghdr *msg, int flags,
 367		struct bio_vec *bvec, unsigned long nr, size_t count,
 368		size_t seek)
 369{
 370	iov_iter_bvec(&msg->msg_iter, READ, bvec, nr, count);
 371	return xs_sock_recvmsg(sock, msg, flags, seek);
  372}
  373
 374static ssize_t
 375xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
 376		size_t count)
 377{
 378	iov_iter_discard(&msg->msg_iter, READ, count);
  379	return sock_recvmsg(sock, msg, flags);
  380}
 381
 382#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 383static void
 384xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
 385{
 386	struct bvec_iter bi = {
 387		.bi_size = count,
 388	};
 389	struct bio_vec bv;
 390
 391	bvec_iter_advance(bvec, &bi, seek & PAGE_MASK);
 392	for_each_bvec(bv, bvec, bi, bi)
 393		flush_dcache_page(bv.bv_page);
 394}
 395#else
 396static inline void
 397xs_flush_bvec(const struct bio_vec *bvec, size_t count, size_t seek)
 398{
 399}
 400#endif
 401
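/*
 * Receive into the three regions of an xdr_buf in order: the head kvec,
 * the page array (via its bio_vec), then the tail kvec.  @seek is the
 * number of bytes already consumed by earlier partial reads; on return,
 * *read is the number of new bytes added by this call.
 */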
 402static ssize_t
 403xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
 404		struct xdr_buf *buf, size_t count, size_t seek, size_t *read)
 405{
 406	size_t want, seek_init = seek, offset = 0;
 407	ssize_t ret;
 408
 409	want = min_t(size_t, count, buf->head[0].iov_len);
 410	if (seek < want) {
 411		ret = xs_read_kvec(sock, msg, flags, &buf->head[0], want, seek);
 412		if (ret <= 0)
 413			goto sock_err;
 414		offset += ret;
 415		if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
 416			goto out;
 417		if (ret != want)
 418			goto out;
 419		seek = 0;
 420	} else {
 421		seek -= want;
 422		offset += want;
 423	}
 424
 425	want = xs_alloc_sparse_pages(buf,
 426			min_t(size_t, count - offset, buf->page_len),
 427			GFP_KERNEL);
 428	if (seek < want) {
 429		ret = xs_read_bvec(sock, msg, flags, buf->bvec,
 430				xdr_buf_pagecount(buf),
 431				want + buf->page_base,
 432				seek + buf->page_base);
 433		if (ret <= 0)
 434			goto sock_err;
 435		xs_flush_bvec(buf->bvec, ret, seek + buf->page_base);
 436		offset += ret - buf->page_base;
 437		if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
 438			goto out;
 439		if (ret != want)
 440			goto out;
 441		seek = 0;
 442	} else {
 443		seek -= want;
 444		offset += want;
 445	}
 446
 447	want = min_t(size_t, count - offset, buf->tail[0].iov_len);
 448	if (seek < want) {
 449		ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek);
 450		if (ret <= 0)
 451			goto sock_err;
 452		offset += ret;
 453		if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
 454			goto out;
 455		if (ret != want)
 456			goto out;
 457	} else if (offset < seek_init)
 458		offset = seek_init;
 459	ret = -EMSGSIZE;
 460out:
 461	*read = offset - seek_init;
 462	return ret;
 463sock_err:
 464	offset += seek;
 465	goto out;
 466}
 467
 468static void
 469xs_read_header(struct sock_xprt *transport, struct xdr_buf *buf)
 470{
 471	if (!transport->recv.copied) {
 472		if (buf->head[0].iov_len >= transport->recv.offset)
 473			memcpy(buf->head[0].iov_base,
 474					&transport->recv.xid,
 475					transport->recv.offset);
 476		transport->recv.copied = transport->recv.offset;
 477	}
 478}
 479
 480static bool
 481xs_read_stream_request_done(struct sock_xprt *transport)
 482{
 483	return transport->recv.fraghdr & cpu_to_be32(RPC_LAST_STREAM_FRAGMENT);
 484}
 485
 486static void
 487xs_read_stream_check_eor(struct sock_xprt *transport,
 488		struct msghdr *msg)
 489{
 490	if (xs_read_stream_request_done(transport))
 491		msg->msg_flags |= MSG_EOR;
 492}
 493
 494static ssize_t
 495xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
 496		int flags, struct rpc_rqst *req)
 497{
 498	struct xdr_buf *buf = &req->rq_private_buf;
 499	size_t want, read;
 500	ssize_t ret;
 501
 502	xs_read_header(transport, buf);
 503
 504	want = transport->recv.len - transport->recv.offset;
 505	if (want != 0) {
 506		ret = xs_read_xdr_buf(transport->sock, msg, flags, buf,
 507				transport->recv.copied + want,
 508				transport->recv.copied,
 509				&read);
 510		transport->recv.offset += read;
 511		transport->recv.copied += read;
 512	}
 513
 514	if (transport->recv.offset == transport->recv.len)
 515		xs_read_stream_check_eor(transport, msg);
 516
 517	if (want == 0)
 518		return 0;
 519
 520	switch (ret) {
 521	default:
 522		break;
 523	case -EFAULT:
 524	case -EMSGSIZE:
 525		msg->msg_flags |= MSG_TRUNC;
 526		return read;
 527	case 0:
 528		return -ESHUTDOWN;
 529	}
 530	return ret < 0 ? ret : read;
 531}
 532
 533static size_t
 534xs_read_stream_headersize(bool isfrag)
 535{
 536	if (isfrag)
 537		return sizeof(__be32);
 538	return 3 * sizeof(__be32);
 539}
 540
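/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * how the raw fragment header word splits into the "last fragment"
 * flag and the 31-bit fragment length, matching the masking done in
 * xs_read_stream() below.
 */
static inline void example_decode_fraghdr(rpc_fraghdr fraghdr)
{
	u32 raw = be32_to_cpu(fraghdr);
	u32 frag_len = raw & RPC_FRAGMENT_SIZE_MASK;	/* low 31 bits */
	bool is_last = raw & RPC_LAST_STREAM_FRAGMENT;	/* top bit */

	pr_debug("fragment: len=%u last=%d\n", frag_len, is_last);
}
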
 541static ssize_t
 542xs_read_stream_header(struct sock_xprt *transport, struct msghdr *msg,
 543		int flags, size_t want, size_t seek)
 544{
 545	struct kvec kvec = {
 546		.iov_base = &transport->recv.fraghdr,
 547		.iov_len = want,
 548	};
 549	return xs_read_kvec(transport->sock, msg, flags, &kvec, want, seek);
 550}
 551
 552#if defined(CONFIG_SUNRPC_BACKCHANNEL)
 553static ssize_t
 554xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
 555{
 556	struct rpc_xprt *xprt = &transport->xprt;
 557	struct rpc_rqst *req;
 558	ssize_t ret;
 559
 560	/* Look up and lock the request corresponding to the given XID */
 561	req = xprt_lookup_bc_request(xprt, transport->recv.xid);
 562	if (!req) {
 563		printk(KERN_WARNING "Callback slot table overflowed\n");
 564		return -ESHUTDOWN;
 565	}
 566	if (transport->recv.copied && !req->rq_private_buf.len)
 567		return -ESHUTDOWN;
 568
 569	ret = xs_read_stream_request(transport, msg, flags, req);
 570	if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
 571		xprt_complete_bc_request(req, transport->recv.copied);
 572	else
 573		req->rq_private_buf.len = transport->recv.copied;
 574
 575	return ret;
 576}
 577#else /* CONFIG_SUNRPC_BACKCHANNEL */
 578static ssize_t
 579xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
 580{
 581	return -ESHUTDOWN;
 582}
 583#endif /* CONFIG_SUNRPC_BACKCHANNEL */
 584
 585static ssize_t
 586xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags)
 587{
 588	struct rpc_xprt *xprt = &transport->xprt;
 589	struct rpc_rqst *req;
 590	ssize_t ret = 0;
 591
 592	/* Look up and lock the request corresponding to the given XID */
 593	spin_lock(&xprt->queue_lock);
 594	req = xprt_lookup_rqst(xprt, transport->recv.xid);
 595	if (!req || (transport->recv.copied && !req->rq_private_buf.len)) {
 596		msg->msg_flags |= MSG_TRUNC;
 597		goto out;
 598	}
 599	xprt_pin_rqst(req);
 600	spin_unlock(&xprt->queue_lock);
 601
 602	ret = xs_read_stream_request(transport, msg, flags, req);
 603
 604	spin_lock(&xprt->queue_lock);
 605	if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
 606		xprt_complete_rqst(req->rq_task, transport->recv.copied);
 607	else
 608		req->rq_private_buf.len = transport->recv.copied;
 609	xprt_unpin_rqst(req);
 610out:
 611	spin_unlock(&xprt->queue_lock);
 612	return ret;
 613}
 614
 615static ssize_t
 616xs_read_stream(struct sock_xprt *transport, int flags)
 617{
 618	struct msghdr msg = { 0 };
 619	size_t want, read = 0;
 620	ssize_t ret = 0;
 621
 622	if (transport->recv.len == 0) {
 623		want = xs_read_stream_headersize(transport->recv.copied != 0);
 624		ret = xs_read_stream_header(transport, &msg, flags, want,
 625				transport->recv.offset);
 626		if (ret <= 0)
 627			goto out_err;
 628		transport->recv.offset = ret;
 629		if (transport->recv.offset != want)
 630			return transport->recv.offset;
 631		transport->recv.len = be32_to_cpu(transport->recv.fraghdr) &
 632			RPC_FRAGMENT_SIZE_MASK;
 633		transport->recv.offset -= sizeof(transport->recv.fraghdr);
 634		read = ret;
 635	}
 636
 637	switch (be32_to_cpu(transport->recv.calldir)) {
 638	default:
 639		msg.msg_flags |= MSG_TRUNC;
 640		break;
 641	case RPC_CALL:
 642		ret = xs_read_stream_call(transport, &msg, flags);
 643		break;
 644	case RPC_REPLY:
 645		ret = xs_read_stream_reply(transport, &msg, flags);
 646	}
 647	if (msg.msg_flags & MSG_TRUNC) {
 648		transport->recv.calldir = cpu_to_be32(-1);
 649		transport->recv.copied = -1;
 650	}
 651	if (ret < 0)
 652		goto out_err;
 653	read += ret;
 654	if (transport->recv.offset < transport->recv.len) {
 655		if (!(msg.msg_flags & MSG_TRUNC))
 656			return read;
 657		msg.msg_flags = 0;
 658		ret = xs_read_discard(transport->sock, &msg, flags,
 659				transport->recv.len - transport->recv.offset);
 660		if (ret <= 0)
 661			goto out_err;
 662		transport->recv.offset += ret;
 663		read += ret;
 664		if (transport->recv.offset != transport->recv.len)
 665			return read;
 666	}
 667	if (xs_read_stream_request_done(transport)) {
 668		trace_xs_stream_read_request(transport);
 669		transport->recv.copied = 0;
 670	}
 671	transport->recv.offset = 0;
 672	transport->recv.len = 0;
 673	return read;
 674out_err:
 675	return ret != 0 ? ret : -ESHUTDOWN;
 676}
 677
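/*
 * Worked example (illustrative, not part of the transport): a reply
 * split across two fragments arrives as [0x00000070][112 bytes] and
 * then [0x80000030][48 bytes].  The first record starts with
 * recv.copied == 0, so 12 header bytes (fraghdr + XID + call
 * direction) are read; the continuation starts with recv.copied != 0,
 * so only the 4-byte fraghdr is re-read.  Only the second header has
 * the top bit set, which is what xs_read_stream_request_done() tests
 * before resetting recv.copied for the next RPC.
 */
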
 678static __poll_t xs_poll_socket(struct sock_xprt *transport)
 679{
 680	return transport->sock->ops->poll(transport->file, transport->sock,
 681			NULL);
 682}
 683
 684static bool xs_poll_socket_readable(struct sock_xprt *transport)
 685{
 686	__poll_t events = xs_poll_socket(transport);
 687
 688	return (events & (EPOLLIN | EPOLLRDNORM)) && !(events & EPOLLRDHUP);
 689}
 690
 691static void xs_poll_check_readable(struct sock_xprt *transport)
 692{
 694	clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
 695	if (!xs_poll_socket_readable(transport))
 696		return;
 697	if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
 698		queue_work(xprtiod_workqueue, &transport->recv_worker);
 699}
 700
 701static void xs_stream_data_receive(struct sock_xprt *transport)
 702{
 703	size_t read = 0;
 704	ssize_t ret = 0;
 705
 706	mutex_lock(&transport->recv_mutex);
 707	if (transport->sock == NULL)
 708		goto out;
 709	for (;;) {
 710		ret = xs_read_stream(transport, MSG_DONTWAIT);
 711		if (ret < 0)
 712			break;
 713		read += ret;
 714		cond_resched();
 715	}
 716	if (ret == -ESHUTDOWN)
 717		kernel_sock_shutdown(transport->sock, SHUT_RDWR);
 718	else
 719		xs_poll_check_readable(transport);
 720out:
 721	mutex_unlock(&transport->recv_mutex);
 722	trace_xs_stream_read_data(&transport->xprt, ret, read);
 723}
 724
 725static void xs_stream_data_receive_workfn(struct work_struct *work)
 726{
 727	struct sock_xprt *transport =
 728		container_of(work, struct sock_xprt, recv_worker);
 729	unsigned int pflags = memalloc_nofs_save();
 730
 731	xs_stream_data_receive(transport);
 732	memalloc_nofs_restore(pflags);
 733}
 734
 735static void
 736xs_stream_reset_connect(struct sock_xprt *transport)
 737{
 738	transport->recv.offset = 0;
 739	transport->recv.len = 0;
 740	transport->recv.copied = 0;
 741	transport->xmit.offset = 0;
 742}
 743
 744static void
 745xs_stream_start_connect(struct sock_xprt *transport)
 746{
 747	transport->xprt.stat.connect_count++;
 748	transport->xprt.stat.connect_start = jiffies;
 749}
 750
 751#define XS_SENDMSG_FLAGS	(MSG_DONTWAIT | MSG_NOSIGNAL)
 752
 753/**
 754 * xs_nospace - handle transmit was incomplete
 755 * @req: pointer to RPC request
 756 *
 757 */
 758static int xs_nospace(struct rpc_rqst *req)
 759{
 760	struct rpc_xprt *xprt = req->rq_xprt;
 761	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 762	struct sock *sk = transport->inet;
 763	int ret = -EAGAIN;
 764
 765	dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
 766			req->rq_task->tk_pid,
 767			req->rq_slen - transport->xmit.offset,
 768			req->rq_slen);
 769
 770	/* Protect against races with write_space */
 771	spin_lock(&xprt->transport_lock);
 772
 773	/* Don't race with disconnect */
 774	if (xprt_connected(xprt)) {
 775		/* wait for more buffer space */
 776		sk->sk_write_pending++;
 777		xprt_wait_for_buffer_space(xprt);
 778	} else
 779		ret = -ENOTCONN;
 780
 781	spin_unlock(&xprt->transport_lock);
 782
 783	/* Race breaker in case memory is freed before above code is called */
 784	if (ret == -EAGAIN) {
 785		struct socket_wq *wq;
 786
 787		rcu_read_lock();
 788		wq = rcu_dereference(sk->sk_wq);
 789		set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags);
 790		rcu_read_unlock();
 791
 792		sk->sk_write_space(sk);
 793	}
 794	return ret;
 795}
 796
 797static void
 798xs_stream_prepare_request(struct rpc_rqst *req)
 799{
 800	xdr_free_bvec(&req->rq_rcv_buf);
 801	req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_KERNEL);
 802}
 803
 804/*
 805 * Determine if the previous message in the stream was aborted before it
 806 * could complete transmission.
 807 */
 808static bool
 809xs_send_request_was_aborted(struct sock_xprt *transport, struct rpc_rqst *req)
 810{
 811	return transport->xmit.offset != 0 && req->rq_bytes_sent == 0;
 812}
 813
 814/*
 815 * Return the stream record marker field for a record of length < 2^31-1
 816 */
 817static rpc_fraghdr
 818xs_stream_record_marker(struct xdr_buf *xdr)
 819{
 820	if (!xdr->len)
 821		return 0;
 822	return cpu_to_be32(RPC_LAST_STREAM_FRAGMENT | (u32)xdr->len);
 823}
 824
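/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * for a 100-byte request the marker above is
 * RPC_LAST_STREAM_FRAGMENT | 100 == 0x80000064, sent in network byte
 * order, which is why the stream senders below size the wire message
 * as the payload plus sizeof(rm).
 */
static inline size_t example_stream_msglen(u32 payload_len)
{
	/* e.g. payload_len == 100 -> 4-byte marker + payload = 104 */
	if (!payload_len)
		return 0;
	return sizeof(rpc_fraghdr) + payload_len;
}
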
 825/**
 826 * xs_local_send_request - write an RPC request to an AF_LOCAL socket
 827 * @req: pointer to RPC request
 828 *
 829 * Return values:
 830 *        0:	The request has been sent
 831 *   EAGAIN:	The socket was blocked, please call again later to
 832 *		complete the request
 833 * ENOTCONN:	Caller needs to invoke connect logic then call again
  834 *    other:	Some other error occurred, the request was not sent
 835 */
 836static int xs_local_send_request(struct rpc_rqst *req)
 837{
 838	struct rpc_xprt *xprt = req->rq_xprt;
 839	struct sock_xprt *transport =
 840				container_of(xprt, struct sock_xprt, xprt);
 841	struct xdr_buf *xdr = &req->rq_snd_buf;
 842	rpc_fraghdr rm = xs_stream_record_marker(xdr);
 843	unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
 844	struct msghdr msg = {
 845		.msg_flags	= XS_SENDMSG_FLAGS,
 846	};
 847	unsigned int sent;
 848	int status;
 849
 850	/* Close the stream if the previous transmission was incomplete */
 851	if (xs_send_request_was_aborted(transport, req)) {
 852		xs_close(xprt);
 853		return -ENOTCONN;
 854	}
 855
 856	xs_pktdump("packet data:",
 857			req->rq_svec->iov_base, req->rq_svec->iov_len);
 858
 859	req->rq_xtime = ktime_get();
 860	status = xprt_sock_sendmsg(transport->sock, &msg, xdr,
 861				   transport->xmit.offset, rm, &sent);
 862	dprintk("RPC:       %s(%u) = %d\n",
 863			__func__, xdr->len - transport->xmit.offset, status);
 864
 865	if (status == -EAGAIN && sock_writeable(transport->inet))
 866		status = -ENOBUFS;
 867
 868	if (likely(sent > 0) || status == 0) {
 869		transport->xmit.offset += sent;
 870		req->rq_bytes_sent = transport->xmit.offset;
 871		if (likely(req->rq_bytes_sent >= msglen)) {
 872			req->rq_xmit_bytes_sent += transport->xmit.offset;
 873			transport->xmit.offset = 0;
 874			return 0;
 875		}
 876		status = -EAGAIN;
 877	}
 878
 879	switch (status) {
 880	case -ENOBUFS:
 881		break;
 882	case -EAGAIN:
 883		status = xs_nospace(req);
 884		break;
 885	default:
 886		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
 887			-status);
 888		fallthrough;
 889	case -EPIPE:
 890		xs_close(xprt);
 891		status = -ENOTCONN;
 892	}
 893
 894	return status;
 895}
 896
 897/**
 898 * xs_udp_send_request - write an RPC request to a UDP socket
 899 * @req: pointer to RPC request
 900 *
 901 * Return values:
 902 *        0:	The request has been sent
 903 *   EAGAIN:	The socket was blocked, please call again later to
 904 *		complete the request
 905 * ENOTCONN:	Caller needs to invoke connect logic then call again
 906 *    other:	Some other error occurred, the request was not sent
 907 */
 908static int xs_udp_send_request(struct rpc_rqst *req)
 909{
 910	struct rpc_xprt *xprt = req->rq_xprt;
 911	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 912	struct xdr_buf *xdr = &req->rq_snd_buf;
 913	struct msghdr msg = {
 914		.msg_name	= xs_addr(xprt),
 915		.msg_namelen	= xprt->addrlen,
 916		.msg_flags	= XS_SENDMSG_FLAGS,
 917	};
 918	unsigned int sent;
 919	int status;
 920
 921	xs_pktdump("packet data:",
 922				req->rq_svec->iov_base,
 923				req->rq_svec->iov_len);
 924
 925	if (!xprt_bound(xprt))
 926		return -ENOTCONN;
 927
 928	if (!xprt_request_get_cong(xprt, req))
 929		return -EBADSLT;
 930
 931	req->rq_xtime = ktime_get();
 932	status = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, 0, &sent);
 933
 934	dprintk("RPC:       xs_udp_send_request(%u) = %d\n",
 935			xdr->len, status);
 936
 937	/* firewall is blocking us, don't return -EAGAIN or we end up looping */
 938	if (status == -EPERM)
 939		goto process_status;
 940
 941	if (status == -EAGAIN && sock_writeable(transport->inet))
 942		status = -ENOBUFS;
 943
 944	if (sent > 0 || status == 0) {
 945		req->rq_xmit_bytes_sent += sent;
 946		if (sent >= req->rq_slen)
 947			return 0;
 948		/* Still some bytes left; set up for a retry later. */
 949		status = -EAGAIN;
 950	}
 951
 952process_status:
 953	switch (status) {
 954	case -ENOTSOCK:
 955		status = -ENOTCONN;
 956		/* Should we call xs_close() here? */
 957		break;
 958	case -EAGAIN:
 959		status = xs_nospace(req);
 960		break;
 961	case -ENETUNREACH:
 962	case -ENOBUFS:
 963	case -EPIPE:
 964	case -ECONNREFUSED:
 965	case -EPERM:
 966		/* When the server has died, an ICMP port unreachable message
 967		 * prompts ECONNREFUSED. */
 968		break;
 969	default:
 970		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
 971			-status);
 972	}
 973
 974	return status;
 975}
 976
 977/**
 978 * xs_tcp_send_request - write an RPC request to a TCP socket
 979 * @req: pointer to RPC request
 980 *
 981 * Return values:
 982 *        0:	The request has been sent
 983 *   EAGAIN:	The socket was blocked, please call again later to
 984 *		complete the request
 985 * ENOTCONN:	Caller needs to invoke connect logic then call again
 986 *    other:	Some other error occurred, the request was not sent
 987 *
 988 * XXX: In the case of soft timeouts, should we eventually give up
 989 *	if sendmsg is not able to make progress?
 990 */
 991static int xs_tcp_send_request(struct rpc_rqst *req)
 992{
 993	struct rpc_xprt *xprt = req->rq_xprt;
 994	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 995	struct xdr_buf *xdr = &req->rq_snd_buf;
 996	rpc_fraghdr rm = xs_stream_record_marker(xdr);
 997	unsigned int msglen = rm ? req->rq_slen + sizeof(rm) : req->rq_slen;
 998	struct msghdr msg = {
 999		.msg_flags	= XS_SENDMSG_FLAGS,
1000	};
1001	bool vm_wait = false;
1002	unsigned int sent;
1003	int status;
1004
1005	/* Close the stream if the previous transmission was incomplete */
1006	if (xs_send_request_was_aborted(transport, req)) {
1007		if (transport->sock != NULL)
1008			kernel_sock_shutdown(transport->sock, SHUT_RDWR);
1009		return -ENOTCONN;
1010	}
1011
1012	xs_pktdump("packet data:",
1013				req->rq_svec->iov_base,
1014				req->rq_svec->iov_len);
1015
1016	if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state))
1017		xs_tcp_set_socket_timeouts(xprt, transport->sock);
1018
1019	/* Continue transmitting the packet/record. We must be careful
1020	 * to cope with writespace callbacks arriving _after_ we have
1021	 * called sendmsg(). */
1022	req->rq_xtime = ktime_get();
1023	while (1) {
1024		status = xprt_sock_sendmsg(transport->sock, &msg, xdr,
1025					   transport->xmit.offset, rm, &sent);
1026
1027		dprintk("RPC:       xs_tcp_send_request(%u) = %d\n",
1028				xdr->len - transport->xmit.offset, status);
1029
1030		/* If we've sent the entire packet, immediately
1031		 * reset the count of bytes sent. */
1032		transport->xmit.offset += sent;
1033		req->rq_bytes_sent = transport->xmit.offset;
1034		if (likely(req->rq_bytes_sent >= msglen)) {
1035			req->rq_xmit_bytes_sent += transport->xmit.offset;
1036			transport->xmit.offset = 0;
1037			return 0;
1038		}
1039
1040		WARN_ON_ONCE(sent == 0 && status == 0);
1041
 1042		if (status == -EAGAIN) {
1043			/*
1044			 * Return EAGAIN if we're sure we're hitting the
1045			 * socket send buffer limits.
1046			 */
1047			if (test_bit(SOCK_NOSPACE, &transport->sock->flags))
1048				break;
1049			/*
1050			 * Did we hit a memory allocation failure?
1051			 */
1052			if (sent == 0) {
1053				status = -ENOBUFS;
1054				if (vm_wait)
1055					break;
1056				/* Retry, knowing now that we're below the
1057				 * socket send buffer limit
1058				 */
1059				vm_wait = true;
1060			}
1061			continue;
1062		}
1063		if (status < 0)
1064			break;
1065		vm_wait = false;
1066	}
1067
1068	switch (status) {
1069	case -ENOTSOCK:
1070		status = -ENOTCONN;
1071		/* Should we call xs_close() here? */
1072		break;
1073	case -EAGAIN:
1074		status = xs_nospace(req);
1075		break;
1076	case -ECONNRESET:
1077	case -ECONNREFUSED:
1078	case -ENOTCONN:
1079	case -EADDRINUSE:
1080	case -ENOBUFS:
1081	case -EPIPE:
1082		break;
1083	default:
1084		dprintk("RPC:       sendmsg returned unrecognized error %d\n",
1085			-status);
1086	}
1087
1088	return status;
1089}
1090
1091static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
1092{
1093	transport->old_data_ready = sk->sk_data_ready;
1094	transport->old_state_change = sk->sk_state_change;
1095	transport->old_write_space = sk->sk_write_space;
1096	transport->old_error_report = sk->sk_error_report;
1097}
1098
1099static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
1100{
1101	sk->sk_data_ready = transport->old_data_ready;
1102	sk->sk_state_change = transport->old_state_change;
1103	sk->sk_write_space = transport->old_write_space;
1104	sk->sk_error_report = transport->old_error_report;
1105}
1106
1107static void xs_sock_reset_state_flags(struct rpc_xprt *xprt)
1108{
1109	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1110
1111	clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
1112	clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state);
1113	clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state);
1114	clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state);
1115}
1116
1117static void xs_run_error_worker(struct sock_xprt *transport, unsigned int nr)
1118{
1119	set_bit(nr, &transport->sock_state);
1120	queue_work(xprtiod_workqueue, &transport->error_worker);
1121}
1122
1123static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
1124{
1125	smp_mb__before_atomic();
1126	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1127	clear_bit(XPRT_CLOSING, &xprt->state);
1128	xs_sock_reset_state_flags(xprt);
1129	smp_mb__after_atomic();
1130}
1131
1132/**
1133 * xs_error_report - callback to handle TCP socket state errors
1134 * @sk: socket
1135 *
 1136 * Note: we don't call sock_error() since there may be an rpc_task
1137 * using the socket, and so we don't want to clear sk->sk_err.
1138 */
1139static void xs_error_report(struct sock *sk)
1140{
1141	struct sock_xprt *transport;
1142	struct rpc_xprt *xprt;
1143
1144	read_lock_bh(&sk->sk_callback_lock);
1145	if (!(xprt = xprt_from_sock(sk)))
1146		goto out;
1147
1148	transport = container_of(xprt, struct sock_xprt, xprt);
1149	transport->xprt_err = -sk->sk_err;
1150	if (transport->xprt_err == 0)
1151		goto out;
1152	dprintk("RPC:       xs_error_report client %p, error=%d...\n",
1153			xprt, -transport->xprt_err);
1154	trace_rpc_socket_error(xprt, sk->sk_socket, transport->xprt_err);
1155
1156	/* barrier ensures xprt_err is set before XPRT_SOCK_WAKE_ERROR */
1157	smp_mb__before_atomic();
1158	xs_run_error_worker(transport, XPRT_SOCK_WAKE_ERROR);
1159 out:
1160	read_unlock_bh(&sk->sk_callback_lock);
1161}
1162
1163static void xs_reset_transport(struct sock_xprt *transport)
1164{
1165	struct socket *sock = transport->sock;
1166	struct sock *sk = transport->inet;
1167	struct rpc_xprt *xprt = &transport->xprt;
1168	struct file *filp = transport->file;
1169
1170	if (sk == NULL)
1171		return;
1172
1173	if (atomic_read(&transport->xprt.swapper))
1174		sk_clear_memalloc(sk);
1175
1176	kernel_sock_shutdown(sock, SHUT_RDWR);
1177
1178	mutex_lock(&transport->recv_mutex);
1179	write_lock_bh(&sk->sk_callback_lock);
1180	transport->inet = NULL;
1181	transport->sock = NULL;
1182	transport->file = NULL;
1183
1184	sk->sk_user_data = NULL;
1185
1186	xs_restore_old_callbacks(transport, sk);
1187	xprt_clear_connected(xprt);
1188	write_unlock_bh(&sk->sk_callback_lock);
1189	xs_sock_reset_connection_flags(xprt);
1190	/* Reset stream record info */
1191	xs_stream_reset_connect(transport);
1192	mutex_unlock(&transport->recv_mutex);
1193
1194	trace_rpc_socket_close(xprt, sock);
1195	fput(filp);
1196
1197	xprt_disconnect_done(xprt);
1198}
1199
1200/**
1201 * xs_close - close a socket
1202 * @xprt: transport
1203 *
 1204 * This is used when all requests are complete; i.e., no DRC state remains
1205 * on the server we want to save.
1206 *
1207 * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
1208 * xs_reset_transport() zeroing the socket from underneath a writer.
1209 */
1210static void xs_close(struct rpc_xprt *xprt)
1211{
1212	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1213
1214	dprintk("RPC:       xs_close xprt %p\n", xprt);
1215
1216	xs_reset_transport(transport);
1217	xprt->reestablish_timeout = 0;
1218}
1219
1220static void xs_inject_disconnect(struct rpc_xprt *xprt)
1221{
1222	dprintk("RPC:       injecting transport disconnect on xprt=%p\n",
1223		xprt);
1224	xprt_disconnect_done(xprt);
1225}
1226
1227static void xs_xprt_free(struct rpc_xprt *xprt)
1228{
1229	xs_free_peer_addresses(xprt);
1230	xprt_free(xprt);
1231}
1232
1233/**
1234 * xs_destroy - prepare to shutdown a transport
1235 * @xprt: doomed transport
1236 *
1237 */
1238static void xs_destroy(struct rpc_xprt *xprt)
1239{
1240	struct sock_xprt *transport = container_of(xprt,
1241			struct sock_xprt, xprt);
1242	dprintk("RPC:       xs_destroy xprt %p\n", xprt);
1243
1244	cancel_delayed_work_sync(&transport->connect_worker);
1245	xs_close(xprt);
1246	cancel_work_sync(&transport->recv_worker);
1247	cancel_work_sync(&transport->error_worker);
1248	xs_xprt_free(xprt);
1249	module_put(THIS_MODULE);
1250}
1251
1252/**
1253 * xs_udp_data_read_skb - receive callback for UDP sockets
1254 * @xprt: transport
1255 * @sk: socket
1256 * @skb: skbuff
1257 *
1258 */
1259static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
1260		struct sock *sk,
1261		struct sk_buff *skb)
1262{
1263	struct rpc_task *task;
1264	struct rpc_rqst *rovr;
1265	int repsize, copied;
1266	u32 _xid;
1267	__be32 *xp;
1268
1269	repsize = skb->len;
1270	if (repsize < 4) {
1271		dprintk("RPC:       impossible RPC reply size %d!\n", repsize);
1272		return;
1273	}
1274
1275	/* Copy the XID from the skb... */
1276	xp = skb_header_pointer(skb, 0, sizeof(_xid), &_xid);
1277	if (xp == NULL)
1278		return;
1279
1280	/* Look up and lock the request corresponding to the given XID */
1281	spin_lock(&xprt->queue_lock);
1282	rovr = xprt_lookup_rqst(xprt, *xp);
1283	if (!rovr)
1284		goto out_unlock;
1285	xprt_pin_rqst(rovr);
1286	xprt_update_rtt(rovr->rq_task);
1287	spin_unlock(&xprt->queue_lock);
1288	task = rovr->rq_task;
1289
1290	if ((copied = rovr->rq_private_buf.buflen) > repsize)
1291		copied = repsize;
1292
1293	/* Suck it into the iovec, verify checksum if not done by hw. */
1294	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
1295		spin_lock(&xprt->queue_lock);
1296		__UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
1297		goto out_unpin;
1298	}
1299
1300
1301	spin_lock(&xprt->transport_lock);
1302	xprt_adjust_cwnd(xprt, task, copied);
1303	spin_unlock(&xprt->transport_lock);
1304	spin_lock(&xprt->queue_lock);
1305	xprt_complete_rqst(task, copied);
1306	__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
1307out_unpin:
1308	xprt_unpin_rqst(rovr);
1309 out_unlock:
1310	spin_unlock(&xprt->queue_lock);
1311}
1312
1313static void xs_udp_data_receive(struct sock_xprt *transport)
1314{
1315	struct sk_buff *skb;
1316	struct sock *sk;
1317	int err;
1318
1319	mutex_lock(&transport->recv_mutex);
1320	sk = transport->inet;
1321	if (sk == NULL)
1322		goto out;
1323	for (;;) {
1324		skb = skb_recv_udp(sk, 0, 1, &err);
1325		if (skb == NULL)
1326			break;
1327		xs_udp_data_read_skb(&transport->xprt, sk, skb);
1328		consume_skb(skb);
1329		cond_resched();
1330	}
1331	xs_poll_check_readable(transport);
1332out:
1333	mutex_unlock(&transport->recv_mutex);
1334}
1335
1336static void xs_udp_data_receive_workfn(struct work_struct *work)
1337{
1338	struct sock_xprt *transport =
1339		container_of(work, struct sock_xprt, recv_worker);
1340	unsigned int pflags = memalloc_nofs_save();
1341
1342	xs_udp_data_receive(transport);
1343	memalloc_nofs_restore(pflags);
1344}
1345
1346/**
1347 * xs_data_ready - "data ready" callback for UDP sockets
1348 * @sk: socket with data to read
1349 *
1350 */
1351static void xs_data_ready(struct sock *sk)
1352{
1353	struct rpc_xprt *xprt;
1354
1355	read_lock_bh(&sk->sk_callback_lock);
1356	dprintk("RPC:       xs_data_ready...\n");
1357	xprt = xprt_from_sock(sk);
1358	if (xprt != NULL) {
1359		struct sock_xprt *transport = container_of(xprt,
1360				struct sock_xprt, xprt);
1361		transport->old_data_ready(sk);
1362		/* Any data means we had a useful conversation, so
1363		 * then we don't need to delay the next reconnect
1364		 */
1365		if (xprt->reestablish_timeout)
1366			xprt->reestablish_timeout = 0;
1367		if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
1368			queue_work(xprtiod_workqueue, &transport->recv_worker);
1369	}
1370	read_unlock_bh(&sk->sk_callback_lock);
1371}
1372
1373/*
1374 * Helper function to force a TCP close if the server is sending
1375 * junk and/or it has put us in CLOSE_WAIT
1376 */
1377static void xs_tcp_force_close(struct rpc_xprt *xprt)
1378{
1379	xprt_force_disconnect(xprt);
1380}
1381
1382#if defined(CONFIG_SUNRPC_BACKCHANNEL)
1383static size_t xs_tcp_bc_maxpayload(struct rpc_xprt *xprt)
1384{
1385	return PAGE_SIZE;
1386}
1387#endif /* CONFIG_SUNRPC_BACKCHANNEL */
1388
1389/**
1390 * xs_tcp_state_change - callback to handle TCP socket state changes
1391 * @sk: socket whose state has changed
1392 *
1393 */
1394static void xs_tcp_state_change(struct sock *sk)
1395{
1396	struct rpc_xprt *xprt;
1397	struct sock_xprt *transport;
1398
1399	read_lock_bh(&sk->sk_callback_lock);
1400	if (!(xprt = xprt_from_sock(sk)))
1401		goto out;
1402	dprintk("RPC:       xs_tcp_state_change client %p...\n", xprt);
1403	dprintk("RPC:       state %x conn %d dead %d zapped %d sk_shutdown %d\n",
1404			sk->sk_state, xprt_connected(xprt),
1405			sock_flag(sk, SOCK_DEAD),
1406			sock_flag(sk, SOCK_ZAPPED),
1407			sk->sk_shutdown);
1408
1409	transport = container_of(xprt, struct sock_xprt, xprt);
1410	trace_rpc_socket_state_change(xprt, sk->sk_socket);
1411	switch (sk->sk_state) {
1412	case TCP_ESTABLISHED:
1413		if (!xprt_test_and_set_connected(xprt)) {
1414			xprt->connect_cookie++;
1415			clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
1416			xprt_clear_connecting(xprt);
1417
1418			xprt->stat.connect_count++;
1419			xprt->stat.connect_time += (long)jiffies -
1420						   xprt->stat.connect_start;
1421			xs_run_error_worker(transport, XPRT_SOCK_WAKE_PENDING);
1422		}
1423		break;
1424	case TCP_FIN_WAIT1:
1425		/* The client initiated a shutdown of the socket */
1426		xprt->connect_cookie++;
1427		xprt->reestablish_timeout = 0;
1428		set_bit(XPRT_CLOSING, &xprt->state);
1429		smp_mb__before_atomic();
1430		clear_bit(XPRT_CONNECTED, &xprt->state);
1431		clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
1432		smp_mb__after_atomic();
1433		break;
1434	case TCP_CLOSE_WAIT:
1435		/* The server initiated a shutdown of the socket */
1436		xprt->connect_cookie++;
1437		clear_bit(XPRT_CONNECTED, &xprt->state);
1438		xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
1439		fallthrough;
1440	case TCP_CLOSING:
1441		/*
1442		 * If the server closed down the connection, make sure that
1443		 * we back off before reconnecting
1444		 */
1445		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
1446			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
1447		break;
1448	case TCP_LAST_ACK:
1449		set_bit(XPRT_CLOSING, &xprt->state);
1450		smp_mb__before_atomic();
1451		clear_bit(XPRT_CONNECTED, &xprt->state);
1452		smp_mb__after_atomic();
1453		break;
1454	case TCP_CLOSE:
1455		if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
1456					&transport->sock_state))
1457			xprt_clear_connecting(xprt);
1458		clear_bit(XPRT_CLOSING, &xprt->state);
1459		/* Trigger the socket release */
1460		xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
1461	}
1462 out:
1463	read_unlock_bh(&sk->sk_callback_lock);
1464}
1465
1466static void xs_write_space(struct sock *sk)
1467{
1468	struct socket_wq *wq;
1469	struct sock_xprt *transport;
1470	struct rpc_xprt *xprt;
1471
1472	if (!sk->sk_socket)
1473		return;
1474	clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1475
1476	if (unlikely(!(xprt = xprt_from_sock(sk))))
1477		return;
1478	transport = container_of(xprt, struct sock_xprt, xprt);
1479	rcu_read_lock();
1480	wq = rcu_dereference(sk->sk_wq);
1481	if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0)
1482		goto out;
1483
1484	xs_run_error_worker(transport, XPRT_SOCK_WAKE_WRITE);
1485	sk->sk_write_pending--;
1486out:
1487	rcu_read_unlock();
1488}
1489
1490/**
1491 * xs_udp_write_space - callback invoked when socket buffer space
1492 *                             becomes available
1493 * @sk: socket whose state has changed
1494 *
1495 * Called when more output buffer space is available for this socket.
1496 * We try not to wake our writers until they can make "significant"
1497 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1498 * with a bunch of small requests.
1499 */
1500static void xs_udp_write_space(struct sock *sk)
1501{
1502	read_lock_bh(&sk->sk_callback_lock);
1503
1504	/* from net/core/sock.c:sock_def_write_space */
1505	if (sock_writeable(sk))
1506		xs_write_space(sk);
1507
1508	read_unlock_bh(&sk->sk_callback_lock);
1509}
1510
1511/**
1512 * xs_tcp_write_space - callback invoked when socket buffer space
1513 *                             becomes available
1514 * @sk: socket whose state has changed
1515 *
1516 * Called when more output buffer space is available for this socket.
1517 * We try not to wake our writers until they can make "significant"
1518 * progress, otherwise we'll waste resources thrashing kernel_sendmsg
1519 * with a bunch of small requests.
1520 */
1521static void xs_tcp_write_space(struct sock *sk)
1522{
1523	read_lock_bh(&sk->sk_callback_lock);
1524
1525	/* from net/core/stream.c:sk_stream_write_space */
1526	if (sk_stream_is_writeable(sk))
1527		xs_write_space(sk);
1528
1529	read_unlock_bh(&sk->sk_callback_lock);
1530}
1531
1532static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
1533{
1534	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1535	struct sock *sk = transport->inet;
1536
1537	if (transport->rcvsize) {
1538		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
1539		sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
1540	}
1541	if (transport->sndsize) {
1542		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
1543		sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
1544		sk->sk_write_space(sk);
1545	}
1546}
1547
1548/**
1549 * xs_udp_set_buffer_size - set send and receive limits
1550 * @xprt: generic transport
1551 * @sndsize: requested size of send buffer, in bytes
1552 * @rcvsize: requested size of receive buffer, in bytes
1553 *
1554 * Set socket send and receive buffer size limits.
1555 */
1556static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
1557{
1558	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1559
1560	transport->sndsize = 0;
1561	if (sndsize)
1562		transport->sndsize = sndsize + 1024;
1563	transport->rcvsize = 0;
1564	if (rcvsize)
1565		transport->rcvsize = rcvsize + 1024;
1566
1567	xs_udp_do_set_buffer_size(xprt);
1568}
1569
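/*
 * Illustrative arithmetic (hypothetical helper, not part of this
 * file): a caller requesting an 8 KB send buffer on a transport with
 * 16 request slots ends up with (8192 + 1024) * 16 * 2 == 294912
 * bytes of socket send space, i.e. room for every slot twice over.
 */
static inline int example_udp_sndbuf(size_t sndsize, unsigned int max_reqs)
{
	return (sndsize + 1024) * max_reqs * 2;	/* 8192, 16 -> 294912 */
}
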
1570/**
1571 * xs_udp_timer - called when a retransmit timeout occurs on a UDP transport
1572 * @xprt: controlling transport
1573 * @task: task that timed out
1574 *
1575 * Adjust the congestion window after a retransmit timeout has occurred.
1576 */
1577static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
1578{
1579	spin_lock(&xprt->transport_lock);
1580	xprt_adjust_cwnd(xprt, task, -ETIMEDOUT);
1581	spin_unlock(&xprt->transport_lock);
1582}
1583
1584static int xs_get_random_port(void)
1585{
1586	unsigned short min = xprt_min_resvport, max = xprt_max_resvport;
1587	unsigned short range;
1588	unsigned short rand;
1589
1590	if (max < min)
1591		return -EADDRINUSE;
1592	range = max - min + 1;
1593	rand = (unsigned short) prandom_u32() % range;
1594	return rand + min;
1595}
1596
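/*
 * Illustrative sketch (hypothetical helper, not part of this file),
 * assuming the compiled-in defaults RPC_DEF_MIN_RESVPORT == 665 and
 * RPC_DEF_MAX_RESVPORT == 1023: the window holds 359 ports, so the
 * modulo-and-offset above always lands in [665, 1023].
 */
static inline unsigned short example_default_resvport(void)
{
	unsigned short range = 1023 - 665 + 1;	/* 359 candidates */

	return (unsigned short)prandom_u32() % range + 665;
}
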
1597static unsigned short xs_sock_getport(struct socket *sock)
1598{
1599	struct sockaddr_storage buf;
1600	unsigned short port = 0;
1601
1602	if (kernel_getsockname(sock, (struct sockaddr *)&buf) < 0)
1603		goto out;
1604	switch (buf.ss_family) {
1605	case AF_INET6:
1606		port = ntohs(((struct sockaddr_in6 *)&buf)->sin6_port);
1607		break;
1608	case AF_INET:
1609		port = ntohs(((struct sockaddr_in *)&buf)->sin_port);
1610	}
1611out:
1612	return port;
1613}
1614
1615/**
1616 * xs_set_port - reset the port number in the remote endpoint address
1617 * @xprt: generic transport
1618 * @port: new port number
1619 *
1620 */
1621static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
1622{
1623	dprintk("RPC:       setting port for xprt %p to %u\n", xprt, port);
1624
1625	rpc_set_port(xs_addr(xprt), port);
1626	xs_update_peer_port(xprt);
1627}
1628
1629static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
1630{
1631	if (transport->srcport == 0 && transport->xprt.reuseport)
1632		transport->srcport = xs_sock_getport(sock);
1633}
1634
1635static int xs_get_srcport(struct sock_xprt *transport)
1636{
1637	int port = transport->srcport;
1638
1639	if (port == 0 && transport->xprt.resvport)
1640		port = xs_get_random_port();
1641	return port;
1642}
1643
1644static unsigned short xs_next_srcport(struct sock_xprt *transport, unsigned short port)
1645{
1646	if (transport->srcport != 0)
1647		transport->srcport = 0;
1648	if (!transport->xprt.resvport)
1649		return 0;
1650	if (port <= xprt_min_resvport || port > xprt_max_resvport)
1651		return xprt_max_resvport;
1652	return --port;
1653}
1654static int xs_bind(struct sock_xprt *transport, struct socket *sock)
1655{
1656	struct sockaddr_storage myaddr;
1657	int err, nloop = 0;
1658	int port = xs_get_srcport(transport);
1659	unsigned short last;
1660
1661	/*
1662	 * If we are asking for any ephemeral port (i.e. port == 0 &&
1663	 * transport->xprt.resvport == 0), don't bind.  Let the local
1664	 * port selection happen implicitly when the socket is used
1665	 * (for example at connect time).
1666	 *
1667	 * This ensures that we can continue to establish TCP
1668	 * connections even when all local ephemeral ports are already
1669	 * a part of some TCP connection.  This makes no difference
 1670	 * for UDP sockets, but also doesn't harm them.
1671	 *
1672	 * If we're asking for any reserved port (i.e. port == 0 &&
1673	 * transport->xprt.resvport == 1) xs_get_srcport above will
1674	 * ensure that port is non-zero and we will bind as needed.
1675	 */
1676	if (port <= 0)
1677		return port;
1678
1679	memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
1680	do {
1681		rpc_set_port((struct sockaddr *)&myaddr, port);
1682		err = kernel_bind(sock, (struct sockaddr *)&myaddr,
1683				transport->xprt.addrlen);
1684		if (err == 0) {
1685			transport->srcport = port;
1686			break;
1687		}
1688		last = port;
1689		port = xs_next_srcport(transport, port);
1690		if (port > last)
1691			nloop++;
1692	} while (err == -EADDRINUSE && nloop != 2);
1693
1694	if (myaddr.ss_family == AF_INET)
1695		dprintk("RPC:       %s %pI4:%u: %s (%d)\n", __func__,
1696				&((struct sockaddr_in *)&myaddr)->sin_addr,
1697				port, err ? "failed" : "ok", err);
1698	else
1699		dprintk("RPC:       %s %pI6:%u: %s (%d)\n", __func__,
1700				&((struct sockaddr_in6 *)&myaddr)->sin6_addr,
1701				port, err ? "failed" : "ok", err);
1702	return err;
1703}
1704
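/*
 * Worked example (illustrative, not part of the transport): if
 * xs_bind() starts at source port 800 and every port is in use, the
 * search descends 799, 798, ..., 666, 665; xs_next_srcport() then
 * wraps it to xprt_max_resvport (1023), and since the new port
 * exceeds the last one, nloop becomes 1.  When the descent reaches
 * 665 again the second wrap makes nloop == 2 and the loop gives up
 * with -EADDRINUSE.
 */
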
1705/*
1706 * We don't support autobind on AF_LOCAL sockets
1707 */
1708static void xs_local_rpcbind(struct rpc_task *task)
1709{
1710	xprt_set_bound(task->tk_xprt);
1711}
1712
1713static void xs_local_set_port(struct rpc_xprt *xprt, unsigned short port)
1714{
1715}
1716
1717#ifdef CONFIG_DEBUG_LOCK_ALLOC
1718static struct lock_class_key xs_key[2];
1719static struct lock_class_key xs_slock_key[2];
1720
1721static inline void xs_reclassify_socketu(struct socket *sock)
1722{
1723	struct sock *sk = sock->sk;
1724
1725	sock_lock_init_class_and_name(sk, "slock-AF_LOCAL-RPC",
1726		&xs_slock_key[1], "sk_lock-AF_LOCAL-RPC", &xs_key[1]);
1727}
1728
1729static inline void xs_reclassify_socket4(struct socket *sock)
1730{
1731	struct sock *sk = sock->sk;
1732
1733	sock_lock_init_class_and_name(sk, "slock-AF_INET-RPC",
1734		&xs_slock_key[0], "sk_lock-AF_INET-RPC", &xs_key[0]);
1735}
1736
1737static inline void xs_reclassify_socket6(struct socket *sock)
1738{
1739	struct sock *sk = sock->sk;
1740
1741	sock_lock_init_class_and_name(sk, "slock-AF_INET6-RPC",
1742		&xs_slock_key[1], "sk_lock-AF_INET6-RPC", &xs_key[1]);
1743}
1744
1745static inline void xs_reclassify_socket(int family, struct socket *sock)
1746{
1747	if (WARN_ON_ONCE(!sock_allow_reclassification(sock->sk)))
1748		return;
1749
1750	switch (family) {
1751	case AF_LOCAL:
1752		xs_reclassify_socketu(sock);
1753		break;
1754	case AF_INET:
1755		xs_reclassify_socket4(sock);
1756		break;
1757	case AF_INET6:
1758		xs_reclassify_socket6(sock);
1759		break;
1760	}
1761}
1762#else
1763static inline void xs_reclassify_socket(int family, struct socket *sock)
1764{
1765}
1766#endif
1767
1768static void xs_dummy_setup_socket(struct work_struct *work)
1769{
1770}
1771
1772static struct socket *xs_create_sock(struct rpc_xprt *xprt,
1773		struct sock_xprt *transport, int family, int type,
1774		int protocol, bool reuseport)
1775{
1776	struct file *filp;
1777	struct socket *sock;
1778	int err;
1779
1780	err = __sock_create(xprt->xprt_net, family, type, protocol, &sock, 1);
1781	if (err < 0) {
1782		dprintk("RPC:       can't create %d transport socket (%d).\n",
1783				protocol, -err);
1784		goto out;
1785	}
1786	xs_reclassify_socket(family, sock);
1787
1788	if (reuseport)
1789		sock_set_reuseport(sock->sk);
1790
1791	err = xs_bind(transport, sock);
1792	if (err) {
1793		sock_release(sock);
1794		goto out;
1795	}
1796
1797	filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
1798	if (IS_ERR(filp))
1799		return ERR_CAST(filp);
1800	transport->file = filp;
1801
1802	return sock;
1803out:
1804	return ERR_PTR(err);
1805}
1806
1807static int xs_local_finish_connecting(struct rpc_xprt *xprt,
1808				      struct socket *sock)
1809{
1810	struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1811									xprt);
1812
1813	if (!transport->inet) {
1814		struct sock *sk = sock->sk;
1815
1816		write_lock_bh(&sk->sk_callback_lock);
1817
1818		xs_save_old_callbacks(transport, sk);
1819
1820		sk->sk_user_data = xprt;
1821		sk->sk_data_ready = xs_data_ready;
1822		sk->sk_write_space = xs_udp_write_space;
1823		sock_set_flag(sk, SOCK_FASYNC);
1824		sk->sk_error_report = xs_error_report;
1825
1826		xprt_clear_connected(xprt);
1827
1828		/* Reset to new socket */
1829		transport->sock = sock;
1830		transport->inet = sk;
1831
1832		write_unlock_bh(&sk->sk_callback_lock);
1833	}
1834
1835	xs_stream_start_connect(transport);
1836
1837	return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
1838}
1839
1840/**
1841 * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint
1842 * @transport: socket transport to connect
1843 */
1844static int xs_local_setup_socket(struct sock_xprt *transport)
1845{
1846	struct rpc_xprt *xprt = &transport->xprt;
1847	struct file *filp;
1848	struct socket *sock;
1849	int status;
1850
1851	status = __sock_create(xprt->xprt_net, AF_LOCAL,
1852					SOCK_STREAM, 0, &sock, 1);
1853	if (status < 0) {
1854		dprintk("RPC:       can't create AF_LOCAL "
1855			"transport socket (%d).\n", -status);
1856		goto out;
1857	}
1858	xs_reclassify_socket(AF_LOCAL, sock);
1859
1860	filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
1861	if (IS_ERR(filp)) {
1862		status = PTR_ERR(filp);
1863		goto out;
1864	}
1865	transport->file = filp;
1866
1867	dprintk("RPC:       worker connecting xprt %p via AF_LOCAL to %s\n",
1868			xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1869
1870	status = xs_local_finish_connecting(xprt, sock);
1871	trace_rpc_socket_connect(xprt, sock, status);
1872	switch (status) {
1873	case 0:
1874		dprintk("RPC:       xprt %p connected to %s\n",
1875				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1876		xprt->stat.connect_count++;
1877		xprt->stat.connect_time += (long)jiffies -
1878					   xprt->stat.connect_start;
 1879		xprt_set_connected(xprt);
		fallthrough;
 1880	case -ENOBUFS:
1881		break;
1882	case -ENOENT:
1883		dprintk("RPC:       xprt %p: socket %s does not exist\n",
1884				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1885		break;
1886	case -ECONNREFUSED:
1887		dprintk("RPC:       xprt %p: connection refused for %s\n",
1888				xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
1889		break;
1890	default:
1891		printk(KERN_ERR "%s: unhandled error (%d) connecting to %s\n",
1892				__func__, -status,
1893				xprt->address_strings[RPC_DISPLAY_ADDR]);
1894	}
1895
1896out:
1897	xprt_clear_connecting(xprt);
1898	xprt_wake_pending_tasks(xprt, status);
1899	return status;
1900}
1901
1902static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
1903{
1904	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1905	int ret;
1906
 1907	if (RPC_IS_ASYNC(task)) {
1908		/*
1909		 * We want the AF_LOCAL connect to be resolved in the
1910		 * filesystem namespace of the process making the rpc
1911		 * call.  Thus we connect synchronously.
1912		 *
1913		 * If we want to support asynchronous AF_LOCAL calls,
1914		 * we'll need to figure out how to pass a namespace to
1915		 * connect.
1916		 */
1917		task->tk_rpc_status = -ENOTCONN;
1918		rpc_exit(task, -ENOTCONN);
1919		return;
1920	}
1921	ret = xs_local_setup_socket(transport);
1922	if (ret && !RPC_IS_SOFTCONN(task))
1923		msleep_interruptible(15000);
1924}
1925
1926#if IS_ENABLED(CONFIG_SUNRPC_SWAP)
1927/*
1928 * Note that this should be called with XPRT_LOCKED held (or when we otherwise
1929 * know that we have exclusive access to the socket), to guard against
1930 * races with xs_reset_transport.
1931 */
1932static void xs_set_memalloc(struct rpc_xprt *xprt)
1933{
1934	struct sock_xprt *transport = container_of(xprt, struct sock_xprt,
1935			xprt);
1936
1937	/*
1938	 * If there's no sock, then we have nothing to set. The
1939	 * reconnecting process will get it for us.
1940	 */
1941	if (!transport->inet)
1942		return;
1943	if (atomic_read(&xprt->swapper))
1944		sk_set_memalloc(transport->inet);
1945}
1946
1947/**
1948 * xs_enable_swap - Tag this transport as being used for swap.
1949 * @xprt: transport to tag
1950 *
1951 * Take a reference to this transport on behalf of the rpc_clnt, and
1952 * optionally mark it for swapping if it wasn't already.
1953 */
1954static int
1955xs_enable_swap(struct rpc_xprt *xprt)
1956{
1957	struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
1958
1959	if (atomic_inc_return(&xprt->swapper) != 1)
1960		return 0;
1961	if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
1962		return -ERESTARTSYS;
1963	if (xs->inet)
1964		sk_set_memalloc(xs->inet);
1965	xprt_release_xprt(xprt, NULL);
1966	return 0;
1967}
1968
1969/**
1970 * xs_disable_swap - Untag this transport as being used for swap.
1971 * @xprt: transport to tag
1972 *
1973 * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the
1974 * swapper refcount goes to 0, untag the socket as a memalloc socket.
1975 */
1976static void
1977xs_disable_swap(struct rpc_xprt *xprt)
1978{
1979	struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt);
1980
1981	if (!atomic_dec_and_test(&xprt->swapper))
1982		return;
1983	if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE))
1984		return;
1985	if (xs->inet)
1986		sk_clear_memalloc(xs->inet);
1987	xprt_release_xprt(xprt, NULL);
1988}
1989#else
1990static void xs_set_memalloc(struct rpc_xprt *xprt)
1991{
1992}
1993
1994static int
1995xs_enable_swap(struct rpc_xprt *xprt)
1996{
1997	return -EINVAL;
1998}
1999
2000static void
2001xs_disable_swap(struct rpc_xprt *xprt)
2002{
2003}
2004#endif
2005
2006static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2007{
2008	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2009
2010	if (!transport->inet) {
2011		struct sock *sk = sock->sk;
2012
2013		write_lock_bh(&sk->sk_callback_lock);
2014
2015		xs_save_old_callbacks(transport, sk);
2016
2017		sk->sk_user_data = xprt;
2018		sk->sk_data_ready = xs_data_ready;
2019		sk->sk_write_space = xs_udp_write_space;
2020		sock_set_flag(sk, SOCK_FASYNC);
2021
2022		xprt_set_connected(xprt);
2023
2024		/* Reset to new socket */
2025		transport->sock = sock;
2026		transport->inet = sk;
2027
2028		xs_set_memalloc(xprt);
2029
2030		write_unlock_bh(&sk->sk_callback_lock);
2031	}
2032	xs_udp_do_set_buffer_size(xprt);
2033
2034	xprt->stat.connect_start = jiffies;
2035}
2036
2037static void xs_udp_setup_socket(struct work_struct *work)
2038{
2039	struct sock_xprt *transport =
2040		container_of(work, struct sock_xprt, connect_worker.work);
2041	struct rpc_xprt *xprt = &transport->xprt;
2042	struct socket *sock;
2043	int status = -EIO;
2044
2045	sock = xs_create_sock(xprt, transport,
2046			xs_addr(xprt)->sa_family, SOCK_DGRAM,
2047			IPPROTO_UDP, false);
2048	if (IS_ERR(sock))
2049		goto out;
2050
2051	dprintk("RPC:       worker connecting xprt %p via %s to "
2052				"%s (port %s)\n", xprt,
2053			xprt->address_strings[RPC_DISPLAY_PROTO],
2054			xprt->address_strings[RPC_DISPLAY_ADDR],
2055			xprt->address_strings[RPC_DISPLAY_PORT]);
2056
2057	xs_udp_finish_connecting(xprt, sock);
2058	trace_rpc_socket_connect(xprt, sock, 0);
2059	status = 0;
2060out:
2061	xprt_clear_connecting(xprt);
2062	xprt_unlock_connect(xprt, transport);
2063	xprt_wake_pending_tasks(xprt, status);
2064}
2065
2066/**
2067 * xs_tcp_shutdown - gracefully shut down a TCP socket
2068 * @xprt: transport
2069 *
2070 * Initiates a graceful shutdown of the TCP socket by calling the
2071 * equivalent of shutdown(SHUT_RDWR);
2072 */
2073static void xs_tcp_shutdown(struct rpc_xprt *xprt)
2074{
2075	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2076	struct socket *sock = transport->sock;
2077	int skst = transport->inet ? transport->inet->sk_state : TCP_CLOSE;
2078
2079	if (sock == NULL)
2080		return;
2081	switch (skst) {
2082	default:
2083		kernel_sock_shutdown(sock, SHUT_RDWR);
2084		trace_rpc_socket_shutdown(xprt, sock);
2085		break;
2086	case TCP_CLOSE:
2087	case TCP_TIME_WAIT:
2088		xs_reset_transport(transport);
2089	}
2090}
2091
2092static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
2093		struct socket *sock)
2094{
2095	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2096	unsigned int keepidle;
2097	unsigned int keepcnt;
2098	unsigned int timeo;
2099
2100	spin_lock(&xprt->transport_lock);
2101	keepidle = DIV_ROUND_UP(xprt->timeout->to_initval, HZ);
2102	keepcnt = xprt->timeout->to_retries + 1;
2103	timeo = jiffies_to_msecs(xprt->timeout->to_initval) *
2104		(xprt->timeout->to_retries + 1);
2105	clear_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
2106	spin_unlock(&xprt->transport_lock);
2107
2108	/* TCP Keepalive options */
2109	sock_set_keepalive(sock->sk);
2110	tcp_sock_set_keepidle(sock->sk, keepidle);
2111	tcp_sock_set_keepintvl(sock->sk, keepidle);
2112	tcp_sock_set_keepcnt(sock->sk, keepcnt);
2113
2114	/* TCP user timeout (see RFC5482) */
2115	tcp_sock_set_user_timeout(sock->sk, timeo);
2116}
2117
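/*
 * Worked example (illustrative, not part of the transport), assuming
 * the usual TCP defaults of to_initval == 60 * HZ and to_retries == 2:
 * keepidle and keepintvl become 60 seconds, keepcnt becomes 3 probes,
 * and the RFC 5482 user timeout is 60000 ms * 3 == 180000 ms, so an
 * unresponsive peer is abandoned after roughly three minutes.
 */
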
2118static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt,
2119		unsigned long connect_timeout,
2120		unsigned long reconnect_timeout)
2121{
2122	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2123	struct rpc_timeout to;
2124	unsigned long initval;
2125
2126	spin_lock(&xprt->transport_lock);
2127	if (reconnect_timeout < xprt->max_reconnect_timeout)
2128		xprt->max_reconnect_timeout = reconnect_timeout;
2129	if (connect_timeout < xprt->connect_timeout) {
2130		memcpy(&to, xprt->timeout, sizeof(to));
2131		initval = DIV_ROUND_UP(connect_timeout, to.to_retries + 1);
2132		/* Arbitrary lower limit */
2133		if (initval <  XS_TCP_INIT_REEST_TO << 1)
2134			initval = XS_TCP_INIT_REEST_TO << 1;
2135		to.to_initval = initval;
2136		to.to_maxval = initval;
2137		memcpy(&transport->tcp_timeout, &to,
2138				sizeof(transport->tcp_timeout));
2139		xprt->timeout = &transport->tcp_timeout;
2140		xprt->connect_timeout = connect_timeout;
2141	}
2142	set_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state);
2143	spin_unlock(&xprt->transport_lock);
2144}
2145
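/*
 * Worked example (illustrative, not part of the transport): with
 * to_retries == 2, a requested connect_timeout of 90 * HZ is split
 * into DIV_ROUND_UP(90 * HZ, 3) == 30 * HZ per attempt; a request
 * that would drop the per-attempt value below XS_TCP_INIT_REEST_TO
 * << 1 is clamped to that floor instead.
 */
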
2146static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
2147{
2148	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2149	int ret = -ENOTCONN;
2150
2151	if (!transport->inet) {
2152		struct sock *sk = sock->sk;
2153
 2154		/* Avoid temporary addresses; they are bad for long-lived
2155		 * connections such as NFS mounts.
2156		 * RFC4941, section 3.6 suggests that:
2157		 *    Individual applications, which have specific
2158		 *    knowledge about the normal duration of connections,
2159		 *    MAY override this as appropriate.
2160		 */
2161		if (xs_addr(xprt)->sa_family == PF_INET6) {
2162			ip6_sock_set_addr_preferences(sk,
2163				IPV6_PREFER_SRC_PUBLIC);
2164		}
2165
2166		xs_tcp_set_socket_timeouts(xprt, sock);
2167
2168		write_lock_bh(&sk->sk_callback_lock);
2169
2170		xs_save_old_callbacks(transport, sk);
2171
2172		sk->sk_user_data = xprt;
2173		sk->sk_data_ready = xs_data_ready;
2174		sk->sk_state_change = xs_tcp_state_change;
2175		sk->sk_write_space = xs_tcp_write_space;
2176		sock_set_flag(sk, SOCK_FASYNC);
2177		sk->sk_error_report = xs_error_report;
2178
2179		/* socket options */
2180		sock_reset_flag(sk, SOCK_LINGER);
2181		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
2182
2183		xprt_clear_connected(xprt);
2184
2185		/* Reset to new socket */
2186		transport->sock = sock;
2187		transport->inet = sk;
2188
2189		write_unlock_bh(&sk->sk_callback_lock);
2190	}
2191
2192	if (!xprt_bound(xprt))
2193		goto out;
2194
2195	xs_set_memalloc(xprt);
2196
2197	xs_stream_start_connect(transport);
2198
2199	/* Tell the socket layer to start connecting... */
2200	set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
2201	ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
2202	switch (ret) {
2203	case 0:
2204		xs_set_srcport(transport, sock);
2205		fallthrough;
2206	case -EINPROGRESS:
2207		/* SYN_SENT! */
2208		if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
2209			xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2210		break;
2211	case -EADDRNOTAVAIL:
2212		/* Source port number is unavailable. Try a new one! */
2213		transport->srcport = 0;
2214	}
2215out:
2216	return ret;
2217}
2218
2219/**
2220 * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint
2221 * @work: queued work item
2222 *
 2223 * Invoked from a work queue.
2224 */
2225static void xs_tcp_setup_socket(struct work_struct *work)
2226{
2227	struct sock_xprt *transport =
2228		container_of(work, struct sock_xprt, connect_worker.work);
2229	struct socket *sock = transport->sock;
2230	struct rpc_xprt *xprt = &transport->xprt;
2231	int status = -EIO;
2232
2233	if (!sock) {
2234		sock = xs_create_sock(xprt, transport,
2235				xs_addr(xprt)->sa_family, SOCK_STREAM,
2236				IPPROTO_TCP, true);
2237		if (IS_ERR(sock)) {
2238			status = PTR_ERR(sock);
2239			goto out;
2240		}
2241	}
2242
2243	dprintk("RPC:       worker connecting xprt %p via %s to "
2244				"%s (port %s)\n", xprt,
2245			xprt->address_strings[RPC_DISPLAY_PROTO],
2246			xprt->address_strings[RPC_DISPLAY_ADDR],
2247			xprt->address_strings[RPC_DISPLAY_PORT]);
2248
2249	status = xs_tcp_finish_connecting(xprt, sock);
2250	trace_rpc_socket_connect(xprt, sock, status);
2251	dprintk("RPC:       %p connect status %d connected %d sock state %d\n",
2252			xprt, -status, xprt_connected(xprt),
2253			sock->sk->sk_state);
2254	switch (status) {
2255	default:
2256		printk("%s: connect returned unhandled error %d\n",
2257			__func__, status);
2258		fallthrough;
2259	case -EADDRNOTAVAIL:
2260		/* We're probably in TIME_WAIT. Get rid of existing socket,
2261		 * and retry
2262		 */
2263		xs_tcp_force_close(xprt);
2264		break;
2265	case 0:
2266	case -EINPROGRESS:
2267	case -EALREADY:
2268		xprt_unlock_connect(xprt, transport);
2269		return;
2270	case -EINVAL:
2271		/* Happens, for instance, if the user specified a link
2272		 * local IPv6 address without a scope-id.
2273		 */
2274	case -ECONNREFUSED:
2275	case -ECONNRESET:
2276	case -ENETDOWN:
2277	case -ENETUNREACH:
2278	case -EHOSTUNREACH:
2279	case -EADDRINUSE:
2280	case -ENOBUFS:
2281		/*
2282		 * xs_tcp_force_close() wakes tasks with -EIO.
2283		 * We need to wake them first to ensure the
2284		 * correct error code.
2285		 */
2286		xprt_wake_pending_tasks(xprt, status);
2287		xs_tcp_force_close(xprt);
2288		goto out;
2289	}
2290	status = -EAGAIN;
2291out:
2292	xprt_clear_connecting(xprt);
2293	xprt_unlock_connect(xprt, transport);
2294	xprt_wake_pending_tasks(xprt, status);
2295}
2296
2297/**
2298 * xs_connect - connect a socket to a remote endpoint
2299 * @xprt: pointer to transport structure
2300 * @task: address of RPC task that manages state of connect request
2301 *
2302 * TCP: If the remote end dropped the connection, delay reconnecting.
2303 *
2304 * UDP socket connects are synchronous, but we use a work queue anyway
2305 * to guarantee that even unprivileged user processes can set up a
2306 * socket on a privileged port.
2307 *
2308 * If a UDP socket connect fails, the delay behavior here prevents
2309 * retry floods (hard mounts).
2310 */
2311static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
2312{
2313	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2314	unsigned long delay = 0;
2315
2316	WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
2317
2318	if (transport->sock != NULL) {
2319		dprintk("RPC:       xs_connect delayed xprt %p for %lu "
2320				"seconds\n",
2321				xprt, xprt->reestablish_timeout / HZ);
2322
2323		/* Start by resetting any existing state */
2324		xs_reset_transport(transport);
2325
2326		delay = xprt_reconnect_delay(xprt);
2327		xprt_reconnect_backoff(xprt, XS_TCP_INIT_REEST_TO);
2328
2329	} else
2330		dprintk("RPC:       xs_connect scheduled xprt %p\n", xprt);
2331
2332	queue_delayed_work(xprtiod_workqueue,
2333			&transport->connect_worker,
2334			delay);
2335}
2336
2337static void xs_wake_disconnect(struct sock_xprt *transport)
2338{
2339	if (test_and_clear_bit(XPRT_SOCK_WAKE_DISCONNECT, &transport->sock_state))
2340		xs_tcp_force_close(&transport->xprt);
2341}
2342
2343static void xs_wake_write(struct sock_xprt *transport)
2344{
2345	if (test_and_clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state))
2346		xprt_write_space(&transport->xprt);
2347}
2348
2349static void xs_wake_error(struct sock_xprt *transport)
2350{
2351	int sockerr;
2352
2353	if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
2354		return;
2355	mutex_lock(&transport->recv_mutex);
2356	if (transport->sock == NULL)
2357		goto out;
2358	if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
2359		goto out;
2360	sockerr = xchg(&transport->xprt_err, 0);
2361	if (sockerr < 0)
2362		xprt_wake_pending_tasks(&transport->xprt, sockerr);
2363out:
2364	mutex_unlock(&transport->recv_mutex);
2365}
2366
2367static void xs_wake_pending(struct sock_xprt *transport)
2368{
2369	if (test_and_clear_bit(XPRT_SOCK_WAKE_PENDING, &transport->sock_state))
2370		xprt_wake_pending_tasks(&transport->xprt, -EAGAIN);
2371}
2372
2373static void xs_error_handle(struct work_struct *work)
2374{
2375	struct sock_xprt *transport = container_of(work,
2376			struct sock_xprt, error_worker);
2377
2378	xs_wake_disconnect(transport);
2379	xs_wake_write(transport);
2380	xs_wake_error(transport);
2381	xs_wake_pending(transport);
2382}
2383
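/*
 * The XPRT_SOCK_WAKE_* bits consumed above are set from the socket
 * state/error callbacks, which run in contexts that cannot sleep;
 * queueing error_worker defers the actual wake-ups and socket teardown
 * to process context, where taking recv_mutex is safe.
 */
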
2384/**
2385 * xs_local_print_stats - display AF_LOCAL socket-specific stats
2386 * @xprt: rpc_xprt struct containing statistics
2387 * @seq: output file
2388 *
2389 */
2390static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2391{
2392	long idle_time = 0;
2393
2394	if (xprt_connected(xprt))
2395		idle_time = (long)(jiffies - xprt->last_used) / HZ;
2396
2397	seq_printf(seq, "\txprt:\tlocal %lu %lu %lu %ld %lu %lu %lu "
2398			"%llu %llu %lu %llu %llu\n",
2399			xprt->stat.bind_count,
2400			xprt->stat.connect_count,
2401			xprt->stat.connect_time / HZ,
2402			idle_time,
2403			xprt->stat.sends,
2404			xprt->stat.recvs,
2405			xprt->stat.bad_xids,
2406			xprt->stat.req_u,
2407			xprt->stat.bklog_u,
2408			xprt->stat.max_slots,
2409			xprt->stat.sending_u,
2410			xprt->stat.pending_u);
2411}
2412
2413/**
2414 * xs_udp_print_stats - display UDP socket-specific stats
2415 * @xprt: rpc_xprt struct containing statistics
2416 * @seq: output file
2417 *
2418 */
2419static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2420{
2421	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2422
2423	seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %llu %llu "
2424			"%lu %llu %llu\n",
2425			transport->srcport,
2426			xprt->stat.bind_count,
2427			xprt->stat.sends,
2428			xprt->stat.recvs,
2429			xprt->stat.bad_xids,
2430			xprt->stat.req_u,
2431			xprt->stat.bklog_u,
2432			xprt->stat.max_slots,
2433			xprt->stat.sending_u,
2434			xprt->stat.pending_u);
2435}
2436
2437/**
2438 * xs_tcp_print_stats - display TCP socket-specific stats
2439 * @xprt: rpc_xprt struct containing statistics
2440 * @seq: output file
2441 *
2442 */
2443static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
2444{
2445	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
2446	long idle_time = 0;
2447
2448	if (xprt_connected(xprt))
2449		idle_time = (long)(jiffies - xprt->last_used) / HZ;
2450
2451	seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu "
2452			"%llu %llu %lu %llu %llu\n",
2453			transport->srcport,
2454			xprt->stat.bind_count,
2455			xprt->stat.connect_count,
2456			xprt->stat.connect_time / HZ,
2457			idle_time,
2458			xprt->stat.sends,
2459			xprt->stat.recvs,
2460			xprt->stat.bad_xids,
2461			xprt->stat.req_u,
2462			xprt->stat.bklog_u,
2463			xprt->stat.max_slots,
2464			xprt->stat.sending_u,
2465			xprt->stat.pending_u);
2466}
2467
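/*
 * These seq_printf() lines are what shows up on the "xprt:" line of
 * /proc/self/mountstats for an NFS mount, e.g. (values illustrative):
 *
 *	xprt:	tcp 802 1 1 0 0 3 3 0 3 0 2 0 0
 *
 * in the order printed above for the TCP case: srcport, bind_count,
 * connect_count, connect_time, idle_time, sends, recvs, bad_xids,
 * req_u, bklog_u, max_slots, sending_u, pending_u.
 */
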
2468/*
2469 * Allocate a page for a scratch buffer for the rpc code.  The reason we
2470 * allocate a page instead of doing a kmalloc like rpc_malloc is that we
2471 * want to use the server-side send routines.
2472 */
2473static int bc_malloc(struct rpc_task *task)
2474{
2475	struct rpc_rqst *rqst = task->tk_rqstp;
2476	size_t size = rqst->rq_callsize;
2477	struct page *page;
2478	struct rpc_buffer *buf;
2479
2480	if (size > PAGE_SIZE - sizeof(struct rpc_buffer)) {
2481		WARN_ONCE(1, "xprtsock: large bc buffer request (size %zu)\n",
2482			  size);
2483		return -EINVAL;
2484	}
2485
2486	page = alloc_page(GFP_KERNEL);
2487	if (!page)
2488		return -ENOMEM;
2489
2490	buf = page_address(page);
2491	buf->len = PAGE_SIZE;
2492
2493	rqst->rq_buffer = buf->data;
2494	rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
2495	return 0;
2496}
2497
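/*
 * Resulting layout of the single page allocated above (sketch):
 *
 *	page_address(page)
 *	v
 *	+-------------------+--------------------+--------------------+
 *	| struct rpc_buffer | call buffer        | reply buffer       |
 *	+-------------------+--------------------+--------------------+
 *	                    ^ rq_buffer          ^ rq_rbuffer
 *	                      (rq_callsize bytes in between)
 */
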
2498/*
2499 * Free the space allocated in the bc_malloc routine
2500 */
2501static void bc_free(struct rpc_task *task)
2502{
2503	void *buffer = task->tk_rqstp->rq_buffer;
2504	struct rpc_buffer *buf;
2505
2506	buf = container_of(buffer, struct rpc_buffer, data);
2507	free_page((unsigned long)buf);
2508}
2509
2510static int bc_sendto(struct rpc_rqst *req)
2511{
2512	struct xdr_buf *xdr = &req->rq_snd_buf;
2513	struct sock_xprt *transport =
2514			container_of(req->rq_xprt, struct sock_xprt, xprt);
2515	struct msghdr msg = {
2516		.msg_flags	= 0,
2517	};
2518	rpc_fraghdr marker = cpu_to_be32(RPC_LAST_STREAM_FRAGMENT |
2519					 (u32)xdr->len);
2520	unsigned int sent = 0;
2521	int err;
2522
2523	req->rq_xtime = ktime_get();
2524	err = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, marker, &sent);
2525	xdr_free_bvec(xdr);
2526	if (err < 0 || sent != (xdr->len + sizeof(marker)))
2527		return -EAGAIN;
2528	return sent;
2529}
2530
2531/**
2532 * bc_send_request - Send a backchannel Call on a TCP socket
2533 * @req: rpc_rqst containing Call message to be sent
2534 *
2535 * xpt_mutex ensures @req's whole message is written to the socket
2536 * without interruption.
2537 *
2538 * Return values:
2539 *   %0 if the message was sent successfully
2540 *   %-ENOTCONN if the message was not sent
2541 */
2542static int bc_send_request(struct rpc_rqst *req)
2543{
2544	struct svc_xprt	*xprt;
2545	int len;
2546
2547	/*
2548	 * Get the server socket associated with this callback xprt
2549	 */
2550	xprt = req->rq_xprt->bc_xprt;
2551
2552	/*
2553	 * Grab the mutex to serialize data as the connection is shared
2554	 * with the fore channel
2555	 */
2556	mutex_lock(&xprt->xpt_mutex);
2557	if (test_bit(XPT_DEAD, &xprt->xpt_flags))
2558		len = -ENOTCONN;
2559	else
2560		len = bc_sendto(req);
2561	mutex_unlock(&xprt->xpt_mutex);
2562
2563	if (len > 0)
2564		len = 0;
2565
2566	return len;
2567}
2568
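/*
 * Note that bc_sendto() returns the byte count on success, while the
 * ->send_request() contract wants 0, hence the len > 0 collapse above.
 */
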
2569/*
2570 * The close routine. Since this is client-initiated, we do nothing here.
2571 */
2572
2573static void bc_close(struct rpc_xprt *xprt)
2574{
2575	xprt_disconnect_done(xprt);
2576}
2577
2578/*
2579 * The xprt destroy routine. Because this connection is client-initiated,
2580 * we only free the xprt itself; there is no connection to tear down.
2581 */
2582
2583static void bc_destroy(struct rpc_xprt *xprt)
2584{
2585	dprintk("RPC:       bc_destroy xprt %p\n", xprt);
2586
2587	xs_xprt_free(xprt);
2588	module_put(THIS_MODULE);
2589}
2590
2591static const struct rpc_xprt_ops xs_local_ops = {
2592	.reserve_xprt		= xprt_reserve_xprt,
2593	.release_xprt		= xprt_release_xprt,
2594	.alloc_slot		= xprt_alloc_slot,
2595	.free_slot		= xprt_free_slot,
2596	.rpcbind		= xs_local_rpcbind,
2597	.set_port		= xs_local_set_port,
2598	.connect		= xs_local_connect,
2599	.buf_alloc		= rpc_malloc,
2600	.buf_free		= rpc_free,
2601	.prepare_request	= xs_stream_prepare_request,
2602	.send_request		= xs_local_send_request,
2603	.wait_for_reply_request	= xprt_wait_for_reply_request_def,
2604	.close			= xs_close,
2605	.destroy		= xs_destroy,
2606	.print_stats		= xs_local_print_stats,
2607	.enable_swap		= xs_enable_swap,
2608	.disable_swap		= xs_disable_swap,
2609};
2610
2611static const struct rpc_xprt_ops xs_udp_ops = {
2612	.set_buffer_size	= xs_udp_set_buffer_size,
2613	.reserve_xprt		= xprt_reserve_xprt_cong,
2614	.release_xprt		= xprt_release_xprt_cong,
2615	.alloc_slot		= xprt_alloc_slot,
2616	.free_slot		= xprt_free_slot,
2617	.rpcbind		= rpcb_getport_async,
2618	.set_port		= xs_set_port,
2619	.connect		= xs_connect,
2620	.buf_alloc		= rpc_malloc,
2621	.buf_free		= rpc_free,
2622	.send_request		= xs_udp_send_request,
2623	.wait_for_reply_request	= xprt_wait_for_reply_request_rtt,
2624	.timer			= xs_udp_timer,
2625	.release_request	= xprt_release_rqst_cong,
2626	.close			= xs_close,
2627	.destroy		= xs_destroy,
2628	.print_stats		= xs_udp_print_stats,
2629	.enable_swap		= xs_enable_swap,
2630	.disable_swap		= xs_disable_swap,
2631	.inject_disconnect	= xs_inject_disconnect,
2632};
2633
2634static const struct rpc_xprt_ops xs_tcp_ops = {
2635	.reserve_xprt		= xprt_reserve_xprt,
2636	.release_xprt		= xprt_release_xprt,
2637	.alloc_slot		= xprt_alloc_slot,
2638	.free_slot		= xprt_free_slot,
2639	.rpcbind		= rpcb_getport_async,
2640	.set_port		= xs_set_port,
2641	.connect		= xs_connect,
2642	.buf_alloc		= rpc_malloc,
2643	.buf_free		= rpc_free,
2644	.prepare_request	= xs_stream_prepare_request,
2645	.send_request		= xs_tcp_send_request,
2646	.wait_for_reply_request	= xprt_wait_for_reply_request_def,
2647	.close			= xs_tcp_shutdown,
2648	.destroy		= xs_destroy,
2649	.set_connect_timeout	= xs_tcp_set_connect_timeout,
2650	.print_stats		= xs_tcp_print_stats,
2651	.enable_swap		= xs_enable_swap,
2652	.disable_swap		= xs_disable_swap,
2653	.inject_disconnect	= xs_inject_disconnect,
2654#ifdef CONFIG_SUNRPC_BACKCHANNEL
2655	.bc_setup		= xprt_setup_bc,
2656	.bc_maxpayload		= xs_tcp_bc_maxpayload,
2657	.bc_num_slots		= xprt_bc_max_slots,
2658	.bc_free_rqst		= xprt_free_bc_rqst,
2659	.bc_destroy		= xprt_destroy_bc,
2660#endif
2661};
2662
2663/*
2664 * The rpc_xprt_ops for the server backchannel
2665 */
2666
2667static const struct rpc_xprt_ops bc_tcp_ops = {
2668	.reserve_xprt		= xprt_reserve_xprt,
2669	.release_xprt		= xprt_release_xprt,
2670	.alloc_slot		= xprt_alloc_slot,
2671	.free_slot		= xprt_free_slot,
2672	.buf_alloc		= bc_malloc,
2673	.buf_free		= bc_free,
2674	.send_request		= bc_send_request,
2675	.wait_for_reply_request	= xprt_wait_for_reply_request_def,
2676	.close			= bc_close,
2677	.destroy		= bc_destroy,
2678	.print_stats		= xs_tcp_print_stats,
2679	.enable_swap		= xs_enable_swap,
2680	.disable_swap		= xs_disable_swap,
2681	.inject_disconnect	= xs_inject_disconnect,
2682};
2683
2684static int xs_init_anyaddr(const int family, struct sockaddr *sap)
2685{
2686	static const struct sockaddr_in sin = {
2687		.sin_family		= AF_INET,
2688		.sin_addr.s_addr	= htonl(INADDR_ANY),
2689	};
2690	static const struct sockaddr_in6 sin6 = {
2691		.sin6_family		= AF_INET6,
2692		.sin6_addr		= IN6ADDR_ANY_INIT,
2693	};
2694
2695	switch (family) {
2696	case AF_LOCAL:
2697		break;
2698	case AF_INET:
2699		memcpy(sap, &sin, sizeof(sin));
2700		break;
2701	case AF_INET6:
2702		memcpy(sap, &sin6, sizeof(sin6));
2703		break;
2704	default:
2705		dprintk("RPC:       %s: Bad address family\n", __func__);
2706		return -EAFNOSUPPORT;
2707	}
2708	return 0;
2709}
2710
2711static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
2712				      unsigned int slot_table_size,
2713				      unsigned int max_slot_table_size)
2714{
2715	struct rpc_xprt *xprt;
2716	struct sock_xprt *new;
2717
2718	if (args->addrlen > sizeof(xprt->addr)) {
2719		dprintk("RPC:       xs_setup_xprt: address too large\n");
2720		return ERR_PTR(-EBADF);
2721	}
2722
2723	xprt = xprt_alloc(args->net, sizeof(*new), slot_table_size,
2724			max_slot_table_size);
2725	if (xprt == NULL) {
2726		dprintk("RPC:       xs_setup_xprt: couldn't allocate "
2727				"rpc_xprt\n");
2728		return ERR_PTR(-ENOMEM);
2729	}
2730
2731	new = container_of(xprt, struct sock_xprt, xprt);
2732	mutex_init(&new->recv_mutex);
2733	memcpy(&xprt->addr, args->dstaddr, args->addrlen);
2734	xprt->addrlen = args->addrlen;
2735	if (args->srcaddr)
2736		memcpy(&new->srcaddr, args->srcaddr, args->addrlen);
2737	else {
2738		int err;
2739		err = xs_init_anyaddr(args->dstaddr->sa_family,
2740					(struct sockaddr *)&new->srcaddr);
2741		if (err != 0) {
2742			xprt_free(xprt);
2743			return ERR_PTR(err);
2744		}
2745	}
2746
2747	return xprt;
2748}
2749
2750static const struct rpc_timeout xs_local_default_timeout = {
2751	.to_initval = 10 * HZ,
2752	.to_maxval = 10 * HZ,
2753	.to_retries = 2,
2754};
2755
2756/**
2757 * xs_setup_local - Set up transport to use an AF_LOCAL socket
2758 * @args: rpc transport creation arguments
2759 *
2760 * AF_LOCAL is a "tpi_cots_ord" transport, just like TCP
2761 */
2762static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
2763{
2764	struct sockaddr_un *sun = (struct sockaddr_un *)args->dstaddr;
2765	struct sock_xprt *transport;
2766	struct rpc_xprt *xprt;
2767	struct rpc_xprt *ret;
2768
2769	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2770			xprt_max_tcp_slot_table_entries);
2771	if (IS_ERR(xprt))
2772		return xprt;
2773	transport = container_of(xprt, struct sock_xprt, xprt);
2774
2775	xprt->prot = 0;
2776	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2777
2778	xprt->bind_timeout = XS_BIND_TO;
2779	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2780	xprt->idle_timeout = XS_IDLE_DISC_TO;
2781
2782	xprt->ops = &xs_local_ops;
2783	xprt->timeout = &xs_local_default_timeout;
2784
2785	INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
2786	INIT_WORK(&transport->error_worker, xs_error_handle);
2787	INIT_DELAYED_WORK(&transport->connect_worker, xs_dummy_setup_socket);
2788
2789	switch (sun->sun_family) {
2790	case AF_LOCAL:
2791		if (sun->sun_path[0] != '/') {
2792			dprintk("RPC:       bad AF_LOCAL address: %s\n",
2793					sun->sun_path);
2794			ret = ERR_PTR(-EINVAL);
2795			goto out_err;
2796		}
2797		xprt_set_bound(xprt);
2798		xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
2799		ret = ERR_PTR(xs_local_setup_socket(transport));
2800		if (ret)
2801			goto out_err;
2802		break;
2803	default:
2804		ret = ERR_PTR(-EAFNOSUPPORT);
2805		goto out_err;
2806	}
2807
2808	dprintk("RPC:       set up xprt to %s via AF_LOCAL\n",
2809			xprt->address_strings[RPC_DISPLAY_ADDR]);
2810
2811	if (try_module_get(THIS_MODULE))
2812		return xprt;
2813	ret = ERR_PTR(-EINVAL);
2814out_err:
2815	xs_xprt_free(xprt);
2816	return ret;
2817}
2818
2819static const struct rpc_timeout xs_udp_default_timeout = {
2820	.to_initval = 5 * HZ,
2821	.to_maxval = 30 * HZ,
2822	.to_increment = 5 * HZ,
2823	.to_retries = 5,
2824};
2825
2826/**
2827 * xs_setup_udp - Set up transport to use a UDP socket
2828 * @args: rpc transport creation arguments
2829 *
2830 */
2831static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
2832{
2833	struct sockaddr *addr = args->dstaddr;
2834	struct rpc_xprt *xprt;
2835	struct sock_xprt *transport;
2836	struct rpc_xprt *ret;
2837
2838	xprt = xs_setup_xprt(args, xprt_udp_slot_table_entries,
2839			xprt_udp_slot_table_entries);
2840	if (IS_ERR(xprt))
2841		return xprt;
2842	transport = container_of(xprt, struct sock_xprt, xprt);
2843
2844	xprt->prot = IPPROTO_UDP;
2845	/* XXX: header size can vary due to auth type, IPv6, etc. */
2846	xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
2847
2848	xprt->bind_timeout = XS_BIND_TO;
2849	xprt->reestablish_timeout = XS_UDP_REEST_TO;
2850	xprt->idle_timeout = XS_IDLE_DISC_TO;
2851
2852	xprt->ops = &xs_udp_ops;
2853
2854	xprt->timeout = &xs_udp_default_timeout;
2855
2856	INIT_WORK(&transport->recv_worker, xs_udp_data_receive_workfn);
2857	INIT_WORK(&transport->error_worker, xs_error_handle);
2858	INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_setup_socket);
2859
2860	switch (addr->sa_family) {
2861	case AF_INET:
2862		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
2863			xprt_set_bound(xprt);
2864
2865		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP);
2866		break;
2867	case AF_INET6:
2868		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
2869			xprt_set_bound(xprt);
2870
2871		xs_format_peer_addresses(xprt, "udp", RPCBIND_NETID_UDP6);
2872		break;
2873	default:
2874		ret = ERR_PTR(-EAFNOSUPPORT);
2875		goto out_err;
2876	}
2877
2878	if (xprt_bound(xprt))
2879		dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
2880				xprt->address_strings[RPC_DISPLAY_ADDR],
2881				xprt->address_strings[RPC_DISPLAY_PORT],
2882				xprt->address_strings[RPC_DISPLAY_PROTO]);
2883	else
2884		dprintk("RPC:       set up xprt to %s (autobind) via %s\n",
2885				xprt->address_strings[RPC_DISPLAY_ADDR],
2886				xprt->address_strings[RPC_DISPLAY_PROTO]);
2887
2888	if (try_module_get(THIS_MODULE))
2889		return xprt;
2890	ret = ERR_PTR(-EINVAL);
2891out_err:
2892	xs_xprt_free(xprt);
2893	return ret;
2894}
2895
2896static const struct rpc_timeout xs_tcp_default_timeout = {
2897	.to_initval = 60 * HZ,
2898	.to_maxval = 60 * HZ,
2899	.to_retries = 2,
2900};
2901
2902/**
2903 * xs_setup_tcp - Set up transport to use a TCP socket
2904 * @args: rpc transport creation arguments
2905 *
2906 */
2907static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
2908{
2909	struct sockaddr *addr = args->dstaddr;
2910	struct rpc_xprt *xprt;
2911	struct sock_xprt *transport;
2912	struct rpc_xprt *ret;
2913	unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;
2914
2915	if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
2916		max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;
2917
2918	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2919			max_slot_table_size);
2920	if (IS_ERR(xprt))
2921		return xprt;
2922	transport = container_of(xprt, struct sock_xprt, xprt);
2923
2924	xprt->prot = IPPROTO_TCP;
2925	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2926
2927	xprt->bind_timeout = XS_BIND_TO;
2928	xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
2929	xprt->idle_timeout = XS_IDLE_DISC_TO;
2930
2931	xprt->ops = &xs_tcp_ops;
2932	xprt->timeout = &xs_tcp_default_timeout;
2933
2934	xprt->max_reconnect_timeout = xprt->timeout->to_maxval;
2935	xprt->connect_timeout = xprt->timeout->to_initval *
2936		(xprt->timeout->to_retries + 1);
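	/*
	 * With xs_tcp_default_timeout above, this works out to
	 * 60 * HZ * (2 + 1), i.e. a 180 second overall connect timeout;
	 * callers can adjust it later via the ->set_connect_timeout op.
	 */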
2937
2938	INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
2939	INIT_WORK(&transport->error_worker, xs_error_handle);
2940	INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);
2941
2942	switch (addr->sa_family) {
2943	case AF_INET:
2944		if (((struct sockaddr_in *)addr)->sin_port != htons(0))
2945			xprt_set_bound(xprt);
2946
2947		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP);
2948		break;
2949	case AF_INET6:
2950		if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
2951			xprt_set_bound(xprt);
2952
2953		xs_format_peer_addresses(xprt, "tcp", RPCBIND_NETID_TCP6);
2954		break;
2955	default:
2956		ret = ERR_PTR(-EAFNOSUPPORT);
2957		goto out_err;
2958	}
2959
2960	if (xprt_bound(xprt))
2961		dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
2962				xprt->address_strings[RPC_DISPLAY_ADDR],
2963				xprt->address_strings[RPC_DISPLAY_PORT],
2964				xprt->address_strings[RPC_DISPLAY_PROTO]);
2965	else
2966		dprintk("RPC:       set up xprt to %s (autobind) via %s\n",
2967				xprt->address_strings[RPC_DISPLAY_ADDR],
2968				xprt->address_strings[RPC_DISPLAY_PROTO]);
2969
2970	if (try_module_get(THIS_MODULE))
2971		return xprt;
2972	ret = ERR_PTR(-EINVAL);
2973out_err:
2974	xs_xprt_free(xprt);
2975	return ret;
2976}
2977
2978/**
2979 * xs_setup_bc_tcp - Set up transport to use a TCP backchannel socket
2980 * @args: rpc transport creation arguments
2981 *
2982 */
2983static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
2984{
2985	struct sockaddr *addr = args->dstaddr;
2986	struct rpc_xprt *xprt;
2987	struct sock_xprt *transport;
2988	struct svc_sock *bc_sock;
2989	struct rpc_xprt *ret;
2990
2991	xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
2992			xprt_tcp_slot_table_entries);
2993	if (IS_ERR(xprt))
2994		return xprt;
2995	transport = container_of(xprt, struct sock_xprt, xprt);
2996
2997	xprt->prot = IPPROTO_TCP;
2998	xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
2999	xprt->timeout = &xs_tcp_default_timeout;
3000
3001	/* backchannel */
3002	xprt_set_bound(xprt);
3003	xprt->bind_timeout = 0;
3004	xprt->reestablish_timeout = 0;
3005	xprt->idle_timeout = 0;
3006
3007	xprt->ops = &bc_tcp_ops;
3008
3009	switch (addr->sa_family) {
3010	case AF_INET:
3011		xs_format_peer_addresses(xprt, "tcp",
3012					 RPCBIND_NETID_TCP);
3013		break;
3014	case AF_INET6:
3015		xs_format_peer_addresses(xprt, "tcp",
3016				   RPCBIND_NETID_TCP6);
3017		break;
3018	default:
3019		ret = ERR_PTR(-EAFNOSUPPORT);
3020		goto out_err;
3021	}
3022
3023	dprintk("RPC:       set up xprt to %s (port %s) via %s\n",
3024			xprt->address_strings[RPC_DISPLAY_ADDR],
3025			xprt->address_strings[RPC_DISPLAY_PORT],
3026			xprt->address_strings[RPC_DISPLAY_PROTO]);
3027
3028	/*
3029	 * Once we've associated a backchannel xprt with a connection,
3030	 * we want to keep it around as long as the connection lasts,
3031	 * in case we need to start using it for a backchannel again;
3032	 * this reference won't be dropped until bc_xprt is destroyed.
3033	 */
3034	xprt_get(xprt);
3035	args->bc_xprt->xpt_bc_xprt = xprt;
3036	xprt->bc_xprt = args->bc_xprt;
3037	bc_sock = container_of(args->bc_xprt, struct svc_sock, sk_xprt);
3038	transport->sock = bc_sock->sk_sock;
3039	transport->inet = bc_sock->sk_sk;
3040
3041	/*
3042	 * Since we don't want connections for the backchannel, we set
3043	 * the xprt status to connected
3044	 */
3045	xprt_set_connected(xprt);
3046
3047	if (try_module_get(THIS_MODULE))
3048		return xprt;
3049
3050	args->bc_xprt->xpt_bc_xprt = NULL;
3051	args->bc_xprt->xpt_bc_xps = NULL;
3052	xprt_put(xprt);
3053	ret = ERR_PTR(-EINVAL);
3054out_err:
3055	xs_xprt_free(xprt);
3056	return ret;
3057}
3058
3059static struct xprt_class	xs_local_transport = {
3060	.list		= LIST_HEAD_INIT(xs_local_transport.list),
3061	.name		= "named UNIX socket",
3062	.owner		= THIS_MODULE,
3063	.ident		= XPRT_TRANSPORT_LOCAL,
3064	.setup		= xs_setup_local,
3065};
3066
3067static struct xprt_class	xs_udp_transport = {
3068	.list		= LIST_HEAD_INIT(xs_udp_transport.list),
3069	.name		= "udp",
3070	.owner		= THIS_MODULE,
3071	.ident		= XPRT_TRANSPORT_UDP,
3072	.setup		= xs_setup_udp,
3073};
3074
3075static struct xprt_class	xs_tcp_transport = {
3076	.list		= LIST_HEAD_INIT(xs_tcp_transport.list),
3077	.name		= "tcp",
3078	.owner		= THIS_MODULE,
3079	.ident		= XPRT_TRANSPORT_TCP,
3080	.setup		= xs_setup_tcp,
3081};
3082
3083static struct xprt_class	xs_bc_tcp_transport = {
3084	.list		= LIST_HEAD_INIT(xs_bc_tcp_transport.list),
3085	.name		= "tcp NFSv4.1 backchannel",
3086	.owner		= THIS_MODULE,
3087	.ident		= XPRT_TRANSPORT_BC_TCP,
3088	.setup		= xs_setup_bc_tcp,
3089};
3090
3091/**
3092 * init_socket_xprt - set up xprtsock's sysctls, register with RPC client
3093 *
3094 */
3095int init_socket_xprt(void)
3096{
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
3097	if (!sunrpc_table_header)
3098		sunrpc_table_header = register_sysctl_table(sunrpc_table);
#endif
3099
3100	xprt_register_transport(&xs_local_transport);
3101	xprt_register_transport(&xs_udp_transport);
3102	xprt_register_transport(&xs_tcp_transport);
3103	xprt_register_transport(&xs_bc_tcp_transport);
3104
3105	return 0;
3106}
3107
3108/**
3109 * cleanup_socket_xprt - remove xprtsock's sysctls, unregister
3110 *
3111 */
3112void cleanup_socket_xprt(void)
3113{
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
3114	if (sunrpc_table_header) {
3115		unregister_sysctl_table(sunrpc_table_header);
3116		sunrpc_table_header = NULL;
3117	}
#endif
3118
3119	xprt_unregister_transport(&xs_local_transport);
3120	xprt_unregister_transport(&xs_udp_transport);
3121	xprt_unregister_transport(&xs_tcp_transport);
3122	xprt_unregister_transport(&xs_bc_tcp_transport);
3123}
3124
3125static int param_set_uint_minmax(const char *val,
3126		const struct kernel_param *kp,
3127		unsigned int min, unsigned int max)
3128{
3129	unsigned int num;
3130	int ret;
3131
3132	if (!val)
3133		return -EINVAL;
3134	ret = kstrtouint(val, 0, &num);
3135	if (ret)
3136		return ret;
3137	if (num < min || num > max)
3138		return -EINVAL;
3139	*((unsigned int *)kp->arg) = num;
3140	return 0;
3141}
3142
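/*
 * The helpers below wire this range check into module parameters, so
 * that, for example (values illustrative):
 *
 *	echo 665 > /sys/module/sunrpc/parameters/min_resvport
 *
 * succeeds, while anything outside [RPC_MIN_RESVPORT, RPC_MAX_RESVPORT]
 * is rejected with -EINVAL; the slot-table parameters further down get
 * the same treatment with their own bounds.
 */
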
3143static int param_set_portnr(const char *val, const struct kernel_param *kp)
3144{
3145	return param_set_uint_minmax(val, kp,
3146			RPC_MIN_RESVPORT,
3147			RPC_MAX_RESVPORT);
3148}
3149
3150static const struct kernel_param_ops param_ops_portnr = {
3151	.set = param_set_portnr,
3152	.get = param_get_uint,
3153};
3154
3155#define param_check_portnr(name, p) \
3156	__param_check(name, p, unsigned int);
3157
3158module_param_named(min_resvport, xprt_min_resvport, portnr, 0644);
3159module_param_named(max_resvport, xprt_max_resvport, portnr, 0644);
3160
3161static int param_set_slot_table_size(const char *val,
3162				     const struct kernel_param *kp)
3163{
3164	return param_set_uint_minmax(val, kp,
3165			RPC_MIN_SLOT_TABLE,
3166			RPC_MAX_SLOT_TABLE);
3167}
3168
3169static const struct kernel_param_ops param_ops_slot_table_size = {
3170	.set = param_set_slot_table_size,
3171	.get = param_get_uint,
3172};
3173
3174#define param_check_slot_table_size(name, p) \
3175	__param_check(name, p, unsigned int);
3176
3177static int param_set_max_slot_table_size(const char *val,
3178				     const struct kernel_param *kp)
3179{
3180	return param_set_uint_minmax(val, kp,
3181			RPC_MIN_SLOT_TABLE,
3182			RPC_MAX_SLOT_TABLE_LIMIT);
3183}
3184
3185static const struct kernel_param_ops param_ops_max_slot_table_size = {
3186	.set = param_set_max_slot_table_size,
3187	.get = param_get_uint,
3188};
3189
3190#define param_check_max_slot_table_size(name, p) \
3191	__param_check(name, p, unsigned int);
3192
3193module_param_named(tcp_slot_table_entries, xprt_tcp_slot_table_entries,
3194		   slot_table_size, 0644);
3195module_param_named(tcp_max_slot_table_entries, xprt_max_tcp_slot_table_entries,
3196		   max_slot_table_size, 0644);
3197module_param_named(udp_slot_table_entries, xprt_udp_slot_table_entries,
3198		   slot_table_size, 0644);