   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Implementation of the Transmission Control Protocol(TCP).
   7 *
   8 * Authors:	Ross Biro
   9 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  10 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
  11 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
  12 *		Florian La Roche, <flla@stud.uni-sb.de>
  13 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
  14 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
  15 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  16 *		Matthew Dillon, <dillon@apollo.west.oic.com>
  17 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  18 *		Jorge Cwik, <jorge@laser.satlink.net>
  19 *
  20 * Fixes:
  21 *		Alan Cox	:	Numerous verify_area() calls
  22 *		Alan Cox	:	Set the ACK bit on a reset
  23 *		Alan Cox	:	Stopped it crashing if it closed while
  24 *					sk->inuse=1 and was trying to connect
  25 *					(tcp_err()).
  26 *		Alan Cox	:	All icmp error handling was broken
  27 *					pointers passed were wrong and the
  28 *					socket was looked up backwards. Nobody
  29 *					tested any icmp error code obviously.
  30 *		Alan Cox	:	tcp_err() now handled properly. It
  31 *					wakes people on errors. poll
  32 *					behaves and the icmp error race
  33 *					has gone by moving it into sock.c
  34 *		Alan Cox	:	tcp_send_reset() fixed to work for
  35 *					everything not just packets for
  36 *					unknown sockets.
  37 *		Alan Cox	:	tcp option processing.
  38 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
  39 *					syn rule wrong]
  40 *		Herp Rosmanith  :	More reset fixes
  41 *		Alan Cox	:	No longer acks invalid rst frames.
  42 *					Acking any kind of RST is right out.
  43 *		Alan Cox	:	Sets an ignore me flag on an rst
  44 *					receive otherwise odd bits of prattle
  45 *					escape still
  46 *		Alan Cox	:	Fixed another acking RST frame bug.
  47 *					Should stop LAN workplace lockups.
  48 *		Alan Cox	: 	Some tidyups using the new skb list
  49 *					facilities
  50 *		Alan Cox	:	sk->keepopen now seems to work
  51 *		Alan Cox	:	Pulls options out correctly on accepts
  52 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
  53 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
  54 *					bit to skb ops.
  55 *		Alan Cox	:	Tidied tcp_data to avoid a potential
  56 *					nasty.
  57 *		Alan Cox	:	Added some better commenting, as the
  58 *					tcp is hard to follow
  59 *		Alan Cox	:	Removed incorrect check for 20 * psh
  60 *	Michael O'Reilly	:	ack < copied bug fix.
  61 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
  62 *		Alan Cox	:	FIN with no memory -> CRASH
  63 *		Alan Cox	:	Added socket option proto entries.
  64 *					Also added awareness of them to accept.
  65 *		Alan Cox	:	Added TCP options (SOL_TCP)
  66 *		Alan Cox	:	Switched wakeup calls to callbacks,
  67 *					so the kernel can layer network
  68 *					sockets.
  69 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
  70 *		Alan Cox	:	Handle FIN (more) properly (we hope).
  71 *		Alan Cox	:	RST frames sent on unsynchronised
  72 *					state ack error.
  73 *		Alan Cox	:	Put in missing check for SYN bit.
  74 *		Alan Cox	:	Added tcp_select_window() aka NET2E
  75 *					window non shrink trick.
  76 *		Alan Cox	:	Added a couple of small NET2E timer
  77 *					fixes
  78 *		Charles Hedrick :	TCP fixes
  79 *		Toomas Tamm	:	TCP window fixes
  80 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
  81 *		Charles Hedrick	:	Rewrote most of it to actually work
  82 *		Linus		:	Rewrote tcp_read() and URG handling
  83 *					completely
  84 *		Gerhard Koerting:	Fixed some missing timer handling
  85 *		Matthew Dillon  :	Reworked TCP machine states as per RFC
  86 *		Gerhard Koerting:	PC/TCP workarounds
  87 *		Adam Caldwell	:	Assorted timer/timing errors
  88 *		Matthew Dillon	:	Fixed another RST bug
  89 *		Alan Cox	:	Move to kernel side addressing changes.
  90 *		Alan Cox	:	Beginning work on TCP fastpathing
  91 *					(not yet usable)
  92 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
  93 *		Alan Cox	:	TCP fast path debugging
  94 *		Alan Cox	:	Window clamping
  95 *		Michael Riepe	:	Bug in tcp_check()
  96 *		Matt Dillon	:	More TCP improvements and RST bug fixes
  97 *		Matt Dillon	:	Yet more small nasties removed from the
  98 *					TCP code (Be very nice to this man if
  99 *					tcp finally works 100%) 8)
 100 *		Alan Cox	:	BSD accept semantics.
 101 *		Alan Cox	:	Reset on closedown bug.
 102 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 103 *		Michael Pall	:	Handle poll() after URG properly in
 104 *					all cases.
 105 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 106 *					(multi URG PUSH broke rlogin).
 107 *		Michael Pall	:	Fix the multi URG PUSH problem in
 108 *					tcp_readable(), poll() after URG
 109 *					works now.
 110 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 111 *					BSD api.
 112 *		Alan Cox	:	Changed the semantics of sk->socket to
 113 *					fix a race and a signal problem with
 114 *					accept() and async I/O.
 115 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 116 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 117 *		Craig I. Hagan  :	Allow for BSD compatible TIME_WAIT for
 118 *					clients/servers which listen in on
 119 *					fixed ports.
 120 *		Alan Cox	:	Cleaned the above up and shrank it to
 121 *					a sensible code size.
 122 *		Alan Cox	:	Self connect lockup fix.
 123 *		Alan Cox	:	No connect to multicast.
 124 *		Ross Biro	:	Close unaccepted children on master
 125 *					socket close.
 126 *		Alan Cox	:	Reset tracing code.
 127 *		Alan Cox	:	Spurious resets on shutdown.
 128 *		Alan Cox	:	Giant 15 minute/60 second timer error
 129 *		Alan Cox	:	Small whoops in polling before an
 130 *					accept.
 131 *		Alan Cox	:	Kept the state trace facility since
 132 *					it's handy for debugging.
 133 *		Alan Cox	:	More reset handler fixes.
 134 *		Alan Cox	:	Started rewriting the code based on
 135 *					the RFC's for other useful protocol
 136 *					references see: Comer, KA9Q NOS, and
 137 *					for a reference on the difference
 138 *					between specifications and how BSD
 139 *					works see the 4.4lite source.
 140 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 141 *					close.
 142 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 143 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 144 *		Alan Cox	:	Reimplemented timers as per the RFC
 145 *					and using multiple timers for sanity.
 146 *		Alan Cox	:	Small bug fixes, and a lot of new
 147 *					comments.
 148 *		Alan Cox	:	Fixed dual reader crash by locking
 149 *					the buffers (much like datagram.c)
 150 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 151 *					now gets fed up of retrying without
 152 *					(even a no space) answer.
 153 *		Alan Cox	:	Extracted closing code better
 154 *		Alan Cox	:	Fixed the closing state machine to
 155 *					resemble the RFC.
 156 *		Alan Cox	:	More 'per spec' fixes.
 157 *		Jorge Cwik	:	Even faster checksumming.
 158 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 159 *					only frames. At least one pc tcp stack
 160 *					generates them.
 161 *		Alan Cox	:	Cache last socket.
 162 *		Alan Cox	:	Per route irtt.
 163 *		Matt Day	:	poll()->select() match BSD precisely on error
 164 *		Alan Cox	:	New buffers
 165 *		Marc Tamsky	:	Various sk->prot->retransmits and
 166 *					sk->retransmits misupdating fixed.
 167 *					Fixed tcp_write_timeout: stuck close,
 168 *					and TCP syn retries gets used now.
 169 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 170 *					ack if state is TCP_CLOSED.
 171 *		Alan Cox	:	Look up device on a retransmit - routes may
 172 *					change. Doesn't yet cope with MSS shrink right
 173 *					but it's a start!
 174 *		Marc Tamsky	:	Closing in closing fixes.
 175 *		Mike Shaver	:	RFC1122 verifications.
 176 *		Alan Cox	:	rcv_saddr errors.
 177 *		Alan Cox	:	Block double connect().
 178 *		Alan Cox	:	Small hooks for enSKIP.
 179 *		Alexey Kuznetsov:	Path MTU discovery.
 180 *		Alan Cox	:	Support soft errors.
 181 *		Alan Cox	:	Fix MTU discovery pathological case
 182 *					when the remote claims no mtu!
 183 *		Marc Tamsky	:	TCP_CLOSE fix.
 184 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 185 *					window but wrong (fixes NT lpd problems)
 186 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 187 *		Joerg Reuter	:	No modification of locked buffers in
 188 *					tcp_do_retransmit()
 189 *		Eric Schenk	:	Changed receiver side silly window
 190 *					avoidance algorithm to BSD style
 191 *					algorithm. This doubles throughput
 192 *					against machines running Solaris,
 193 *					and seems to result in general
 194 *					improvement.
 195 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 196 *	Willy Konynenberg	:	Transparent proxying support.
 197 *	Mike McLagan		:	Routing by source
 198 *		Keith Owens	:	Do proper merging with partial SKB's in
 199 *					tcp_do_sendmsg to avoid burstiness.
 200 *		Eric Schenk	:	Fix fast close down bug with
 201 *					shutdown() followed by close().
 202 *		Andi Kleen 	:	Make poll agree with SIGIO
 203 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 204 *					lingertime == 0 (RFC 793 ABORT Call)
 205 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 206 *					csum_and_copy_from_user() if possible.
 207 *
 208 *		This program is free software; you can redistribute it and/or
 209 *		modify it under the terms of the GNU General Public License
 210 *		as published by the Free Software Foundation; either version
 211 *		2 of the License, or(at your option) any later version.
 212 *
 213 * Description of States:
 214 *
 215 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 216 *
 217 *	TCP_SYN_RECV		received a connection request, sent ack,
 218 *				waiting for final ack in three-way handshake.
 219 *
 220 *	TCP_ESTABLISHED		connection established
 221 *
 222 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 223 *				transmission of remaining buffered data
 224 *
 225 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 226 *				to shutdown
 227 *
 228 *	TCP_CLOSING		both sides have shutdown but we still have
 229 *				data we have to finish sending
 230 *
 231 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 232 *				closed, can only be entered from FIN_WAIT2
 233 *				or CLOSING.  Required because the other end
 234 *				may not have gotten our last ACK causing it
 235 *				to retransmit the data packet (which we ignore)
 236 *
 237 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 238 *				us to finish writing our data and to shutdown
 239 *				(we have to close() to move on to LAST_ACK)
 240 *
 241 *	TCP_LAST_ACK		our side has shutdown after remote has
 242 *				shutdown.  There may still be data in our
 243 *				buffer that we have to finish sending
 244 *
 245 *	TCP_CLOSE		socket is finished
 246 */
 247
 248#include <linux/kernel.h>
 249#include <linux/module.h>
 250#include <linux/types.h>
 251#include <linux/fcntl.h>
 252#include <linux/poll.h>
 253#include <linux/init.h>
 254#include <linux/fs.h>
 255#include <linux/skbuff.h>
 256#include <linux/scatterlist.h>
 257#include <linux/splice.h>
 258#include <linux/net.h>
 259#include <linux/socket.h>
 260#include <linux/random.h>
 261#include <linux/bootmem.h>
 262#include <linux/highmem.h>
 263#include <linux/swap.h>
 264#include <linux/cache.h>
 265#include <linux/err.h>
 266#include <linux/crypto.h>
 267#include <linux/time.h>
 268#include <linux/slab.h>
 269
 270#include <net/icmp.h>
 271#include <net/tcp.h>
 272#include <net/xfrm.h>
 273#include <net/ip.h>
 274#include <net/netdma.h>
 275#include <net/sock.h>
 276
 277#include <asm/uaccess.h>
 278#include <asm/ioctls.h>
 279
 280int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
 281
 282struct percpu_counter tcp_orphan_count;
 283EXPORT_SYMBOL_GPL(tcp_orphan_count);
 284
 285long sysctl_tcp_mem[3] __read_mostly;
 286int sysctl_tcp_wmem[3] __read_mostly;
 287int sysctl_tcp_rmem[3] __read_mostly;
 288
 289EXPORT_SYMBOL(sysctl_tcp_mem);
 290EXPORT_SYMBOL(sysctl_tcp_rmem);
 291EXPORT_SYMBOL(sysctl_tcp_wmem);
 292
 293atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
 294EXPORT_SYMBOL(tcp_memory_allocated);
 295
 296/*
 297 * Current number of TCP sockets.
 298 */
 299struct percpu_counter tcp_sockets_allocated;
 300EXPORT_SYMBOL(tcp_sockets_allocated);
 301
 302/*
 303 * TCP splice context
 304 */
 305struct tcp_splice_state {
 306	struct pipe_inode_info *pipe;
 307	size_t len;
 308	unsigned int flags;
 309};
 310
 311/*
 312 * Pressure flag: try to collapse.
 313 * Technical note: it is used by multiple contexts non atomically.
 314 * All the __sk_mem_schedule() is of this nature: accounting
 315 * is strict, actions are advisory and have some latency.
 316 */
 317int tcp_memory_pressure __read_mostly;
 318EXPORT_SYMBOL(tcp_memory_pressure);
 319
 320void tcp_enter_memory_pressure(struct sock *sk)
 321{
 322	if (!tcp_memory_pressure) {
 323		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
 324		tcp_memory_pressure = 1;
 325	}
 326}
 327EXPORT_SYMBOL(tcp_enter_memory_pressure);
 328
 329/* Convert seconds to retransmits based on initial and max timeout */
 330static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
 331{
 332	u8 res = 0;
 333
 334	if (seconds > 0) {
 335		int period = timeout;
 336
 337		res = 1;
 338		while (seconds > period && res < 255) {
 339			res++;
 340			timeout <<= 1;
 341			if (timeout > rto_max)
 342				timeout = rto_max;
 343			period += timeout;
 344		}
 345	}
 346	return res;
 347}
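/* Worked example (illustrative, not from the original source): with
 * timeout = 3s and rto_max = 120s, the cumulative wait is 3s after one
 * retransmit, 3 + 6 = 9s after two, 3 + 6 + 12 = 21s after three, each
 * interval doubling until clamped at rto_max. secs_to_retrans(9, 3, 120)
 * therefore returns 2.
 */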
 348
 349/* Convert retransmits to seconds based on initial and max timeout */
 350static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
 351{
 352	int period = 0;
 353
 354	if (retrans > 0) {
 355		period = timeout;
 356		while (--retrans) {
 357			timeout <<= 1;
 358			if (timeout > rto_max)
 359				timeout = rto_max;
 360			period += timeout;
 361		}
 362	}
 363	return period;
 364}
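/* The inverse, with the same illustrative numbers:
 * retrans_to_secs(3, 3, 120) walks the identical backoff series and
 * returns 3 + 6 + 12 = 21 seconds.
 */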
 365
 366/*
 367 *	Wait for a TCP event.
 368 *
 369 *	Note that we don't need to lock the socket, as the upper poll layers
 370 *	take care of normal races (between the test and the event) and we don't
 371 *	go look at any of the socket buffers directly.
 372 */
 373unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 374{
 375	unsigned int mask;
 376	struct sock *sk = sock->sk;
 377	struct tcp_sock *tp = tcp_sk(sk);
 378
 379	sock_poll_wait(file, sk_sleep(sk), wait);
 380	if (sk->sk_state == TCP_LISTEN)
 381		return inet_csk_listen_poll(sk);
 382
 383	/* Socket is not locked. We are protected from async events
 384	 * by poll logic and correct handling of state changes
 385	 * made by other threads is impossible in any case.
 386	 */
 387
 388	mask = 0;
 389
 390	/*
 391	 * POLLHUP is certainly not done right. But poll() doesn't
 392	 * have a notion of HUP in just one direction, and for a
 393	 * socket the read side is more interesting.
 394	 *
 395	 * Some poll() documentation says that POLLHUP is incompatible
 396	 * with the POLLOUT/POLLWR flags, so somebody should check this
 397	 * all. But careful, it tends to be safer to return too many
 398	 * bits than too few, and you can easily break real applications
 399	 * if you don't tell them that something has hung up!
 400	 *
 401	 * Check-me.
 402	 *
 403	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
 404	 * our fs/select.c). It means that after we received EOF,
 405	 * poll always returns immediately, making impossible poll() on write()
 406	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
 407	 * if and only if shutdown has been made in both directions.
 408	 * Actually, it is interesting to look how Solaris and DUX
 409	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
 410	 * then we could set it on SND_SHUTDOWN. BTW examples given
 411	 * in Stevens' books assume exactly this behaviour, it explains
 412	 * why POLLHUP is incompatible with POLLOUT.	--ANK
 413	 *
 414	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
 415	 * blocking on fresh not-connected or disconnected socket. --ANK
 416	 */
 417	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
 418		mask |= POLLHUP;
 419	if (sk->sk_shutdown & RCV_SHUTDOWN)
 420		mask |= POLLIN | POLLRDNORM | POLLRDHUP;
 421
 422	/* Connected? */
 423	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 424		int target = sock_rcvlowat(sk, 0, INT_MAX);
 425
 426		if (tp->urg_seq == tp->copied_seq &&
 427		    !sock_flag(sk, SOCK_URGINLINE) &&
 428		    tp->urg_data)
 429			target++;
 430
 431	/* Potential race condition. If the read of tp below escapes
 432	 * above the sk->sk_state check, we can be illegally awakened
 433	 * in SYN_* states. */
 434		if (tp->rcv_nxt - tp->copied_seq >= target)
 435			mask |= POLLIN | POLLRDNORM;
 436
 437		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
 438			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
 439				mask |= POLLOUT | POLLWRNORM;
 440			} else {  /* send SIGIO later */
 441				set_bit(SOCK_ASYNC_NOSPACE,
 442					&sk->sk_socket->flags);
 443				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 444
 445				/* Race breaker. If space is freed after
 446				 * wspace test but before the flags are set,
 447				 * IO signal will be lost.
 448				 */
 449				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
 450					mask |= POLLOUT | POLLWRNORM;
 451			}
 452		} else
 453			mask |= POLLOUT | POLLWRNORM;
 454
 455		if (tp->urg_data & TCP_URG_VALID)
 456			mask |= POLLPRI;
 457	}
 458	/* This barrier is coupled with smp_wmb() in tcp_reset() */
 459	smp_rmb();
 460	if (sk->sk_err)
 461		mask |= POLLERR;
 462
 463	return mask;
 464}
 465EXPORT_SYMBOL(tcp_poll);
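/* Illustrative userspace counterpart (an assumption, not part of this
 * file): a poll(2) on a connected TCP fd ends up here, e.g.
 *
 *	struct pollfd pfd = { .fd = tcp_fd, .events = POLLIN | POLLOUT };
 *	poll(&pfd, 1, timeout_ms);
 *
 * with POLLHUP reported once both directions have been shut down, per
 * the discussion above. tcp_fd and timeout_ms are hypothetical names.
 */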
 466
 467int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 468{
 469	struct tcp_sock *tp = tcp_sk(sk);
 470	int answ;
 471
 472	switch (cmd) {
 473	case SIOCINQ:
 474		if (sk->sk_state == TCP_LISTEN)
 475			return -EINVAL;
 476
 477		lock_sock(sk);
 478		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
 479			answ = 0;
 480		else if (sock_flag(sk, SOCK_URGINLINE) ||
 481			 !tp->urg_data ||
 482			 before(tp->urg_seq, tp->copied_seq) ||
 483			 !before(tp->urg_seq, tp->rcv_nxt)) {
 484			struct sk_buff *skb;
 485
 486			answ = tp->rcv_nxt - tp->copied_seq;
 487
 488			/* Subtract 1, if FIN is in queue. */
 489			skb = skb_peek_tail(&sk->sk_receive_queue);
 490			if (answ && skb)
 491				answ -= tcp_hdr(skb)->fin;
 492		} else
 493			answ = tp->urg_seq - tp->copied_seq;
 494		release_sock(sk);
 495		break;
 496	case SIOCATMARK:
 497		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
 498		break;
 499	case SIOCOUTQ:
 500		if (sk->sk_state == TCP_LISTEN)
 501			return -EINVAL;
 502
 503		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
 504			answ = 0;
 505		else
 506			answ = tp->write_seq - tp->snd_una;
 507		break;
 508	case SIOCOUTQNSD:
 509		if (sk->sk_state == TCP_LISTEN)
 510			return -EINVAL;
 511
 512		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
 513			answ = 0;
 514		else
 515			answ = tp->write_seq - tp->snd_nxt;
 516		break;
 517	default:
 518		return -ENOIOCTLCMD;
 519	}
 520
 521	return put_user(answ, (int __user *)arg);
 522}
 523EXPORT_SYMBOL(tcp_ioctl);
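/* Illustrative userspace usage of the ioctls handled above (assumed
 * example; tcp_fd is a hypothetical connected socket):
 *
 *	int unread, unsent;
 *	ioctl(tcp_fd, SIOCINQ, &unread);   /. bytes queued, not yet read ./
 *	ioctl(tcp_fd, SIOCOUTQ, &unsent);  /. bytes sent, not yet acked ./
 */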
 524
 525static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
 526{
 527	TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
 528	tp->pushed_seq = tp->write_seq;
 529}
 530
 531static inline int forced_push(struct tcp_sock *tp)
 532{
 533	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
 534}
 535
 536static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
 537{
 538	struct tcp_sock *tp = tcp_sk(sk);
 539	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 540
 541	skb->csum    = 0;
 542	tcb->seq     = tcb->end_seq = tp->write_seq;
 543	tcb->flags   = TCPHDR_ACK;
 544	tcb->sacked  = 0;
 545	skb_header_release(skb);
 546	tcp_add_write_queue_tail(sk, skb);
 547	sk->sk_wmem_queued += skb->truesize;
 548	sk_mem_charge(sk, skb->truesize);
 549	if (tp->nonagle & TCP_NAGLE_PUSH)
 550		tp->nonagle &= ~TCP_NAGLE_PUSH;
 551}
 552
 553static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
 554{
 555	if (flags & MSG_OOB)
 556		tp->snd_up = tp->write_seq;
 557}
 558
 559static inline void tcp_push(struct sock *sk, int flags, int mss_now,
 560			    int nonagle)
 561{
 562	if (tcp_send_head(sk)) {
 563		struct tcp_sock *tp = tcp_sk(sk);
 564
 565		if (!(flags & MSG_MORE) || forced_push(tp))
 566			tcp_mark_push(tp, tcp_write_queue_tail(sk));
 567
 568		tcp_mark_urg(tp, flags);
 569		__tcp_push_pending_frames(sk, mss_now,
 570					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
 571	}
 572}
 573
 574static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
 575				unsigned int offset, size_t len)
 576{
 577	struct tcp_splice_state *tss = rd_desc->arg.data;
 578	int ret;
 579
 580	ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
 581			      tss->flags);
 582	if (ret > 0)
 583		rd_desc->count -= ret;
 584	return ret;
 585}
 586
 587static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
 588{
 589	/* Store TCP splice context information in read_descriptor_t. */
 590	read_descriptor_t rd_desc = {
 591		.arg.data = tss,
 592		.count	  = tss->len,
 593	};
 594
 595	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
 596}
 597
 598/**
 599 *  tcp_splice_read - splice data from TCP socket to a pipe
 600 * @sock:	socket to splice from
 601 * @ppos:	position (not valid)
 602 * @pipe:	pipe to splice to
 603 * @len:	number of bytes to splice
 604 * @flags:	splice modifier flags
 605 *
 606 * Description:
 607 *    Will read pages from given socket and fill them into a pipe.
 608 *
 609 **/
 610ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 611			struct pipe_inode_info *pipe, size_t len,
 612			unsigned int flags)
 613{
 614	struct sock *sk = sock->sk;
 615	struct tcp_splice_state tss = {
 616		.pipe = pipe,
 617		.len = len,
 618		.flags = flags,
 619	};
 620	long timeo;
 621	ssize_t spliced;
 622	int ret;
 623
 624	sock_rps_record_flow(sk);
 625	/*
 626	 * We can't seek on a socket input
 627	 */
 628	if (unlikely(*ppos))
 629		return -ESPIPE;
 630
 631	ret = spliced = 0;
 632
 633	lock_sock(sk);
 634
 635	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
 636	while (tss.len) {
 637		ret = __tcp_splice_read(sk, &tss);
 638		if (ret < 0)
 639			break;
 640		else if (!ret) {
 641			if (spliced)
 642				break;
 643			if (sock_flag(sk, SOCK_DONE))
 644				break;
 645			if (sk->sk_err) {
 646				ret = sock_error(sk);
 647				break;
 648			}
 649			if (sk->sk_shutdown & RCV_SHUTDOWN)
 650				break;
 651			if (sk->sk_state == TCP_CLOSE) {
 652				/*
  653				 * This occurs when the user tries to read
  654				 * from a socket that was never connected.
 655				 */
 656				if (!sock_flag(sk, SOCK_DONE))
 657					ret = -ENOTCONN;
 658				break;
 659			}
 660			if (!timeo) {
 661				ret = -EAGAIN;
 662				break;
 663			}
 664			sk_wait_data(sk, &timeo);
 665			if (signal_pending(current)) {
 666				ret = sock_intr_errno(timeo);
 667				break;
 668			}
 669			continue;
 670		}
 671		tss.len -= ret;
 672		spliced += ret;
 673
 674		if (!timeo)
 675			break;
 676		release_sock(sk);
 677		lock_sock(sk);
 678
 679		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
 680		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
 681		    signal_pending(current))
 682			break;
 683	}
 684
 685	release_sock(sk);
 686
 687	if (spliced)
 688		return spliced;
 689
 690	return ret;
 691}
 692EXPORT_SYMBOL(tcp_splice_read);
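/* Illustrative userspace counterpart (assumption): a splice(2) from a
 * TCP socket into a pipe reaches tcp_splice_read(), e.g.
 *
 *	splice(tcp_fd, NULL, pipefd[1], NULL, 65536, SPLICE_F_MOVE);
 *
 * where tcp_fd and pipefd are hypothetical descriptors.
 */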
 693
 694struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
 695{
 696	struct sk_buff *skb;
 697
 698	/* The TCP header must be at least 32-bit aligned.  */
 699	size = ALIGN(size, 4);
 700
 701	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
 702	if (skb) {
 703		if (sk_wmem_schedule(sk, skb->truesize)) {
 704			/*
 705			 * Make sure that we have exactly size bytes
 706			 * available to the caller, no more, no less.
 707			 */
 708			skb_reserve(skb, skb_tailroom(skb) - size);
 709			return skb;
 710		}
 711		__kfree_skb(skb);
 712	} else {
 713		sk->sk_prot->enter_memory_pressure(sk);
 714		sk_stream_moderate_sndbuf(sk);
 715	}
 716	return NULL;
 717}
 718
 719static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 720				       int large_allowed)
 721{
 722	struct tcp_sock *tp = tcp_sk(sk);
 723	u32 xmit_size_goal, old_size_goal;
 724
 725	xmit_size_goal = mss_now;
 726
 727	if (large_allowed && sk_can_gso(sk)) {
 728		xmit_size_goal = ((sk->sk_gso_max_size - 1) -
 729				  inet_csk(sk)->icsk_af_ops->net_header_len -
 730				  inet_csk(sk)->icsk_ext_hdr_len -
 731				  tp->tcp_header_len);
 732
 733		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
 734
 735		/* We try hard to avoid divides here */
 736		old_size_goal = tp->xmit_size_goal_segs * mss_now;
 737
 738		if (likely(old_size_goal <= xmit_size_goal &&
 739			   old_size_goal + mss_now > xmit_size_goal)) {
 740			xmit_size_goal = old_size_goal;
 741		} else {
 742			tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
 743			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
 744		}
 745	}
 746
 747	return max(xmit_size_goal, mss_now);
 748}
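/* Illustrative numbers (assumed, not from the source): with
 * mss_now = 1448 and a cached xmit_size_goal_segs of 44,
 * old_size_goal = 63712; as long as the freshly computed goal lies in
 * [old_size_goal, old_size_goal + mss_now), the cached value is reused
 * and the divide is skipped.
 */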
 749
 750static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
 751{
 752	int mss_now;
 753
 754	mss_now = tcp_current_mss(sk);
 755	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
 756
 757	return mss_now;
 758}
 759
 760static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
 761			 size_t psize, int flags)
 762{
 763	struct tcp_sock *tp = tcp_sk(sk);
 764	int mss_now, size_goal;
 765	int err;
 766	ssize_t copied;
 767	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
 768
 769	/* Wait for a connection to finish. */
 770	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
 771		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
 772			goto out_err;
 773
 774	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
 775
 776	mss_now = tcp_send_mss(sk, &size_goal, flags);
 777	copied = 0;
 778
 779	err = -EPIPE;
 780	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
 781		goto out_err;
 782
 783	while (psize > 0) {
 784		struct sk_buff *skb = tcp_write_queue_tail(sk);
 785		struct page *page = pages[poffset / PAGE_SIZE];
 786		int copy, i, can_coalesce;
 787		int offset = poffset % PAGE_SIZE;
 788		int size = min_t(size_t, psize, PAGE_SIZE - offset);
 789
 790		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
 791new_segment:
 792			if (!sk_stream_memory_free(sk))
 793				goto wait_for_sndbuf;
 794
 795			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
 796			if (!skb)
 797				goto wait_for_memory;
 798
 799			skb_entail(sk, skb);
 800			copy = size_goal;
 801		}
 802
 803		if (copy > size)
 804			copy = size;
 805
 806		i = skb_shinfo(skb)->nr_frags;
 807		can_coalesce = skb_can_coalesce(skb, i, page, offset);
 808		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
 809			tcp_mark_push(tp, skb);
 810			goto new_segment;
 811		}
 812		if (!sk_wmem_schedule(sk, copy))
 813			goto wait_for_memory;
 814
 815		if (can_coalesce) {
 816			skb_shinfo(skb)->frags[i - 1].size += copy;
 817		} else {
 818			get_page(page);
 819			skb_fill_page_desc(skb, i, page, offset, copy);
 820		}
 821
 822		skb->len += copy;
 823		skb->data_len += copy;
 824		skb->truesize += copy;
 825		sk->sk_wmem_queued += copy;
 826		sk_mem_charge(sk, copy);
 827		skb->ip_summed = CHECKSUM_PARTIAL;
 828		tp->write_seq += copy;
 829		TCP_SKB_CB(skb)->end_seq += copy;
 830		skb_shinfo(skb)->gso_segs = 0;
 831
 832		if (!copied)
 833			TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
 834
 835		copied += copy;
 836		poffset += copy;
 837		if (!(psize -= copy))
 838			goto out;
 839
 840		if (skb->len < size_goal || (flags & MSG_OOB))
 841			continue;
 842
 843		if (forced_push(tp)) {
 844			tcp_mark_push(tp, skb);
 845			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
 846		} else if (skb == tcp_send_head(sk))
 847			tcp_push_one(sk, mss_now);
 848		continue;
 849
 850wait_for_sndbuf:
 851		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 852wait_for_memory:
 853		if (copied)
 854			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
 855
 856		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
 857			goto do_error;
 858
 859		mss_now = tcp_send_mss(sk, &size_goal, flags);
 860	}
 861
 862out:
 863	if (copied)
 864		tcp_push(sk, flags, mss_now, tp->nonagle);
 865	return copied;
 866
 867do_error:
 868	if (copied)
 869		goto out;
 870out_err:
 871	return sk_stream_error(sk, flags, err);
 872}
 873
 874int tcp_sendpage(struct sock *sk, struct page *page, int offset,
 875		 size_t size, int flags)
 876{
 877	ssize_t res;
 878
 879	if (!(sk->sk_route_caps & NETIF_F_SG) ||
 880	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
 881		return sock_no_sendpage(sk->sk_socket, page, offset, size,
 882					flags);
 883
 884	lock_sock(sk);
 885	res = do_tcp_sendpages(sk, &page, offset, size, flags);
 886	release_sock(sk);
 887	return res;
 888}
 889EXPORT_SYMBOL(tcp_sendpage);
 890
 891#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
 892#define TCP_OFF(sk)	(sk->sk_sndmsg_off)
 893
 894static inline int select_size(struct sock *sk, int sg)
 895{
 896	struct tcp_sock *tp = tcp_sk(sk);
 897	int tmp = tp->mss_cache;
 898
 899	if (sg) {
 900		if (sk_can_gso(sk))
 901			tmp = 0;
 902		else {
 903			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
 904
 905			if (tmp >= pgbreak &&
 906			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
 907				tmp = pgbreak;
 908		}
 909	}
 910
 911	return tmp;
 912}
 913
 914int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 915		size_t size)
 916{
 917	struct iovec *iov;
 918	struct tcp_sock *tp = tcp_sk(sk);
 919	struct sk_buff *skb;
 920	int iovlen, flags;
 921	int mss_now, size_goal;
 922	int sg, err, copied;
 923	long timeo;
 924
 925	lock_sock(sk);
 926
 927	flags = msg->msg_flags;
 928	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
 929
 930	/* Wait for a connection to finish. */
 931	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
 932		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
 933			goto out_err;
 934
 935	/* This should be in poll */
 936	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
 937
 938	mss_now = tcp_send_mss(sk, &size_goal, flags);
 939
 940	/* Ok commence sending. */
 941	iovlen = msg->msg_iovlen;
 942	iov = msg->msg_iov;
 943	copied = 0;
 944
 945	err = -EPIPE;
 946	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
 947		goto out_err;
 948
 949	sg = sk->sk_route_caps & NETIF_F_SG;
 950
 951	while (--iovlen >= 0) {
 952		size_t seglen = iov->iov_len;
 953		unsigned char __user *from = iov->iov_base;
 954
 955		iov++;
 956
 957		while (seglen > 0) {
 958			int copy = 0;
 959			int max = size_goal;
 960
 961			skb = tcp_write_queue_tail(sk);
 962			if (tcp_send_head(sk)) {
 963				if (skb->ip_summed == CHECKSUM_NONE)
 964					max = mss_now;
 965				copy = max - skb->len;
 966			}
 967
 968			if (copy <= 0) {
 969new_segment:
 970				/* Allocate new segment. If the interface is SG,
  971				 * allocate an skb that fits in a single page.
 972				 */
 973				if (!sk_stream_memory_free(sk))
 974					goto wait_for_sndbuf;
 975
 976				skb = sk_stream_alloc_skb(sk,
 977							  select_size(sk, sg),
 978							  sk->sk_allocation);
 979				if (!skb)
 980					goto wait_for_memory;
 981
 982				/*
 983				 * Check whether we can use HW checksum.
 984				 */
 985				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
 986					skb->ip_summed = CHECKSUM_PARTIAL;
 987
 988				skb_entail(sk, skb);
 989				copy = size_goal;
 990				max = size_goal;
 991			}
 992
 993			/* Try to append data to the end of skb. */
 994			if (copy > seglen)
 995				copy = seglen;
 996
 997			/* Where to copy to? */
 998			if (skb_tailroom(skb) > 0) {
 999				/* We have some space in skb head. Superb! */
1000				if (copy > skb_tailroom(skb))
1001					copy = skb_tailroom(skb);
1002				err = skb_add_data_nocache(sk, skb, from, copy);
1003				if (err)
1004					goto do_fault;
1005			} else {
1006				int merge = 0;
1007				int i = skb_shinfo(skb)->nr_frags;
1008				struct page *page = TCP_PAGE(sk);
1009				int off = TCP_OFF(sk);
1010
1011				if (skb_can_coalesce(skb, i, page, off) &&
1012				    off != PAGE_SIZE) {
1013					/* We can extend the last page
1014					 * fragment. */
1015					merge = 1;
1016				} else if (i == MAX_SKB_FRAGS || !sg) {
1017					/* Need to add new fragment and cannot
1018					 * do this because interface is non-SG,
1019					 * or because all the page slots are
1020					 * busy. */
1021					tcp_mark_push(tp, skb);
1022					goto new_segment;
1023				} else if (page) {
1024					if (off == PAGE_SIZE) {
1025						put_page(page);
1026						TCP_PAGE(sk) = page = NULL;
1027						off = 0;
1028					}
1029				} else
1030					off = 0;
1031
1032				if (copy > PAGE_SIZE - off)
1033					copy = PAGE_SIZE - off;
1034
1035				if (!sk_wmem_schedule(sk, copy))
1036					goto wait_for_memory;
1037
1038				if (!page) {
1039					/* Allocate new cache page. */
1040					if (!(page = sk_stream_alloc_page(sk)))
1041						goto wait_for_memory;
1042				}
1043
1044				/* Time to copy data. We are close to
1045				 * the end! */
1046				err = skb_copy_to_page_nocache(sk, from, skb,
1047							       page, off, copy);
1048				if (err) {
1049					/* If this page was new, give it to the
1050					 * socket so it does not get leaked.
1051					 */
1052					if (!TCP_PAGE(sk)) {
1053						TCP_PAGE(sk) = page;
1054						TCP_OFF(sk) = 0;
1055					}
1056					goto do_error;
1057				}
1058
1059				/* Update the skb. */
1060				if (merge) {
1061					skb_shinfo(skb)->frags[i - 1].size +=
1062									copy;
1063				} else {
1064					skb_fill_page_desc(skb, i, page, off, copy);
1065					if (TCP_PAGE(sk)) {
1066						get_page(page);
1067					} else if (off + copy < PAGE_SIZE) {
1068						get_page(page);
1069						TCP_PAGE(sk) = page;
1070					}
1071				}
1072
1073				TCP_OFF(sk) = off + copy;
1074			}
1075
1076			if (!copied)
1077				TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
1078
1079			tp->write_seq += copy;
1080			TCP_SKB_CB(skb)->end_seq += copy;
1081			skb_shinfo(skb)->gso_segs = 0;
1082
1083			from += copy;
1084			copied += copy;
1085			if ((seglen -= copy) == 0 && iovlen == 0)
1086				goto out;
1087
1088			if (skb->len < max || (flags & MSG_OOB))
1089				continue;
1090
1091			if (forced_push(tp)) {
1092				tcp_mark_push(tp, skb);
1093				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
1094			} else if (skb == tcp_send_head(sk))
1095				tcp_push_one(sk, mss_now);
1096			continue;
1097
1098wait_for_sndbuf:
1099			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1100wait_for_memory:
1101			if (copied)
1102				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
1103
1104			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
1105				goto do_error;
1106
1107			mss_now = tcp_send_mss(sk, &size_goal, flags);
1108		}
1109	}
1110
1111out:
1112	if (copied)
1113		tcp_push(sk, flags, mss_now, tp->nonagle);
1114	release_sock(sk);
1115	return copied;
1116
1117do_fault:
1118	if (!skb->len) {
1119		tcp_unlink_write_queue(skb, sk);
1120		/* It is the one place in all of TCP, except connection
1121		 * reset, where we can be unlinking the send_head.
1122		 */
1123		tcp_check_send_head(sk, skb);
1124		sk_wmem_free_skb(sk, skb);
1125	}
1126
1127do_error:
1128	if (copied)
1129		goto out;
1130out_err:
1131	err = sk_stream_error(sk, flags, err);
1132	release_sock(sk);
1133	return err;
1134}
1135EXPORT_SYMBOL(tcp_sendmsg);
1136
1137/*
1138 *	Handle reading urgent data. BSD has very simple semantics for
1139 *	this, no blocking and very strange errors 8)
1140 */
1141
1142static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
1143{
1144	struct tcp_sock *tp = tcp_sk(sk);
1145
1146	/* No URG data to read. */
1147	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
1148	    tp->urg_data == TCP_URG_READ)
1149		return -EINVAL;	/* Yes this is right ! */
1150
1151	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
1152		return -ENOTCONN;
1153
1154	if (tp->urg_data & TCP_URG_VALID) {
1155		int err = 0;
1156		char c = tp->urg_data;
1157
1158		if (!(flags & MSG_PEEK))
1159			tp->urg_data = TCP_URG_READ;
1160
1161		/* Read urgent data. */
1162		msg->msg_flags |= MSG_OOB;
1163
1164		if (len > 0) {
1165			if (!(flags & MSG_TRUNC))
1166				err = memcpy_toiovec(msg->msg_iov, &c, 1);
1167			len = 1;
1168		} else
1169			msg->msg_flags |= MSG_TRUNC;
1170
1171		return err ? -EFAULT : len;
1172	}
1173
1174	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1175		return 0;
1176
1177	/* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
1178	 * the available implementations agree in this case:
1179	 * this call should never block, independent of the
1180	 * blocking state of the socket.
1181	 * Mike <pall@rz.uni-karlsruhe.de>
1182	 */
1183	return -EAGAIN;
1184}
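/* Illustrative userspace counterpart (assumption): the single byte of
 * urgent data is fetched with
 *
 *	char c;
 *	recv(tcp_fd, &c, 1, MSG_OOB);
 *
 * which, as noted above, never blocks regardless of the socket's
 * blocking mode. tcp_fd is a hypothetical descriptor.
 */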
1185
1186/* Clean up the receive buffer for full frames taken by the user,
1187 * then send an ACK if necessary.  COPIED is the number of bytes
1188 * tcp_recvmsg has given to the user so far, it speeds up the
1189 * calculation of whether or not we must ACK for the sake of
1190 * a window update.
1191 */
1192void tcp_cleanup_rbuf(struct sock *sk, int copied)
1193{
1194	struct tcp_sock *tp = tcp_sk(sk);
1195	int time_to_ack = 0;
1196
1197#if TCP_DEBUG
1198	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1199
1200	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1201	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1202	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1203#endif
1204
1205	if (inet_csk_ack_scheduled(sk)) {
1206		const struct inet_connection_sock *icsk = inet_csk(sk);
1207		   /* Delayed ACKs frequently hit locked sockets during bulk
1208		    * receive. */
1209		if (icsk->icsk_ack.blocked ||
1210		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
1211		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
1212		    /*
1213		     * If this read emptied read buffer, we send ACK, if
1214		     * connection is not bidirectional, user drained
1215		     * receive buffer and there was a small segment
1216		     * in queue.
1217		     */
1218		    (copied > 0 &&
1219		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
1220		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
1221		       !icsk->icsk_ack.pingpong)) &&
1222		      !atomic_read(&sk->sk_rmem_alloc)))
1223			time_to_ack = 1;
1224	}
1225
1226	/* We send an ACK if we can now advertise a non-zero window
1227	 * which has been raised "significantly".
1228	 *
1229	 * Even if window raised up to infinity, do not send window open ACK
1230	 * in states, where we will not receive more. It is useless.
1231	 */
1232	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1233		__u32 rcv_window_now = tcp_receive_window(tp);
1234
1235		/* Optimize, __tcp_select_window() is not cheap. */
1236		if (2*rcv_window_now <= tp->window_clamp) {
1237			__u32 new_window = __tcp_select_window(sk);
1238
1239			/* Send ACK now, if this read freed lots of space
1240			 * in our buffer. Certainly, new_window is the new window.
1241			 * We can advertise it now, if it is not less than the current one.
1242			 * "Lots" means "at least twice" here.
1243			 */
1244			if (new_window && new_window >= 2 * rcv_window_now)
1245				time_to_ack = 1;
1246		}
1247	}
1248	if (time_to_ack)
1249		tcp_send_ack(sk);
1250}
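/* Worked example of the window check above (illustrative figures):
 * with window_clamp = 64K and rcv_window_now = 16K, 2 * 16K <= 64K, so
 * __tcp_select_window() is consulted; a new_window of 32K or more then
 * forces an immediate ACK.
 */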
1251
1252static void tcp_prequeue_process(struct sock *sk)
1253{
1254	struct sk_buff *skb;
1255	struct tcp_sock *tp = tcp_sk(sk);
1256
1257	NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
1258
1259	/* RX process wants to run with disabled BHs, though it is not
1260	 * necessary */
1261	local_bh_disable();
1262	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1263		sk_backlog_rcv(sk, skb);
1264	local_bh_enable();
1265
1266	/* Clear memory counter. */
1267	tp->ucopy.memory = 0;
1268}
1269
1270#ifdef CONFIG_NET_DMA
1271static void tcp_service_net_dma(struct sock *sk, bool wait)
1272{
1273	dma_cookie_t done, used;
1274	dma_cookie_t last_issued;
1275	struct tcp_sock *tp = tcp_sk(sk);
1276
1277	if (!tp->ucopy.dma_chan)
1278		return;
1279
1280	last_issued = tp->ucopy.dma_cookie;
1281	dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1282
1283	do {
1284		if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1285					      last_issued, &done,
1286					      &used) == DMA_SUCCESS) {
1287			/* Safe to free early-copied skbs now */
1288			__skb_queue_purge(&sk->sk_async_wait_queue);
1289			break;
1290		} else {
1291			struct sk_buff *skb;
1292			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1293			       (dma_async_is_complete(skb->dma_cookie, done,
1294						      used) == DMA_SUCCESS)) {
1295				__skb_dequeue(&sk->sk_async_wait_queue);
1296				kfree_skb(skb);
1297			}
1298		}
1299	} while (wait);
1300}
1301#endif
1302
1303static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1304{
1305	struct sk_buff *skb;
1306	u32 offset;
1307
1308	skb_queue_walk(&sk->sk_receive_queue, skb) {
1309		offset = seq - TCP_SKB_CB(skb)->seq;
1310		if (tcp_hdr(skb)->syn)
1311			offset--;
1312		if (offset < skb->len || tcp_hdr(skb)->fin) {
1313			*off = offset;
1314			return skb;
1315		}
1316	}
1317	return NULL;
1318}
1319
1320/*
1321 * This routine provides an alternative to tcp_recvmsg() for routines
1322 * that would like to handle copying from skbuffs directly in 'sendfile'
1323 * fashion.
1324 * Note:
1325 *	- It is assumed that the socket was locked by the caller.
1326 *	- The routine does not block.
1327 *	- At present, there is no support for reading OOB data
1328 *	  or for 'peeking' the socket using this routine
1329 *	  (although both would be easy to implement).
1330 */
1331int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1332		  sk_read_actor_t recv_actor)
1333{
1334	struct sk_buff *skb;
1335	struct tcp_sock *tp = tcp_sk(sk);
1336	u32 seq = tp->copied_seq;
1337	u32 offset;
1338	int copied = 0;
1339
1340	if (sk->sk_state == TCP_LISTEN)
1341		return -ENOTCONN;
1342	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1343		if (offset < skb->len) {
1344			int used;
1345			size_t len;
1346
1347			len = skb->len - offset;
1348			/* Stop reading if we hit a patch of urgent data */
1349			if (tp->urg_data) {
1350				u32 urg_offset = tp->urg_seq - seq;
1351				if (urg_offset < len)
1352					len = urg_offset;
1353				if (!len)
1354					break;
1355			}
1356			used = recv_actor(desc, skb, offset, len);
1357			if (used < 0) {
1358				if (!copied)
1359					copied = used;
1360				break;
1361			} else if (used <= len) {
1362				seq += used;
1363				copied += used;
1364				offset += used;
1365			}
1366			/*
1367			 * If recv_actor drops the lock (e.g. TCP splice
1368			 * receive) the skb pointer might be invalid when
1369			 * getting here: tcp_collapse might have deleted it
1370			 * while aggregating skbs from the socket queue.
1371			 */
1372			skb = tcp_recv_skb(sk, seq-1, &offset);
1373			if (!skb || (offset+1 != skb->len))
1374				break;
1375		}
1376		if (tcp_hdr(skb)->fin) {
1377			sk_eat_skb(sk, skb, 0);
1378			++seq;
1379			break;
1380		}
1381		sk_eat_skb(sk, skb, 0);
1382		if (!desc->count)
1383			break;
1384		tp->copied_seq = seq;
1385	}
1386	tp->copied_seq = seq;
1387
1388	tcp_rcv_space_adjust(sk);
1389
1390	/* Clean up data we have read: This will do ACK frames. */
1391	if (copied > 0)
1392		tcp_cleanup_rbuf(sk, copied);
1393	return copied;
1394}
1395EXPORT_SYMBOL(tcp_read_sock);
1396
1397/*
1398 *	This routine copies from a sock struct into the user buffer.
1399 *
1400 *	Technical note: in 2.3 we work on _locked_ socket, so that
1401 *	tricks with *seq access order and skb->users are not required.
1402 *	Probably, code can be easily improved even more.
1403 */
1404
1405int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1406		size_t len, int nonblock, int flags, int *addr_len)
1407{
1408	struct tcp_sock *tp = tcp_sk(sk);
1409	int copied = 0;
1410	u32 peek_seq;
1411	u32 *seq;
1412	unsigned long used;
1413	int err;
1414	int target;		/* Read at least this many bytes */
1415	long timeo;
1416	struct task_struct *user_recv = NULL;
1417	int copied_early = 0;
1418	struct sk_buff *skb;
1419	u32 urg_hole = 0;
1420
1421	lock_sock(sk);
1422
1423	err = -ENOTCONN;
1424	if (sk->sk_state == TCP_LISTEN)
1425		goto out;
1426
1427	timeo = sock_rcvtimeo(sk, nonblock);
1428
1429	/* Urgent data needs to be handled specially. */
1430	if (flags & MSG_OOB)
1431		goto recv_urg;
1432
1433	seq = &tp->copied_seq;
1434	if (flags & MSG_PEEK) {
1435		peek_seq = tp->copied_seq;
1436		seq = &peek_seq;
1437	}
1438
1439	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1440
1441#ifdef CONFIG_NET_DMA
1442	tp->ucopy.dma_chan = NULL;
1443	preempt_disable();
1444	skb = skb_peek_tail(&sk->sk_receive_queue);
1445	{
1446		int available = 0;
1447
1448		if (skb)
1449			available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
1450		if ((available < target) &&
1451		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1452		    !sysctl_tcp_low_latency &&
1453		    dma_find_channel(DMA_MEMCPY)) {
1454			preempt_enable_no_resched();
1455			tp->ucopy.pinned_list =
1456					dma_pin_iovec_pages(msg->msg_iov, len);
1457		} else {
1458			preempt_enable_no_resched();
1459		}
1460	}
1461#endif
1462
1463	do {
1464		u32 offset;
1465
1466		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1467		if (tp->urg_data && tp->urg_seq == *seq) {
1468			if (copied)
1469				break;
1470			if (signal_pending(current)) {
1471				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1472				break;
1473			}
1474		}
1475
1476		/* Next get a buffer. */
1477
1478		skb_queue_walk(&sk->sk_receive_queue, skb) {
1479			/* Now that we have two receive queues this
1480			 * shouldn't happen.
1481			 */
1482			if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
1483				 "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
1484				 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
1485				 flags))
1486				break;
1487
1488			offset = *seq - TCP_SKB_CB(skb)->seq;
1489			if (tcp_hdr(skb)->syn)
1490				offset--;
1491			if (offset < skb->len)
1492				goto found_ok_skb;
1493			if (tcp_hdr(skb)->fin)
1494				goto found_fin_ok;
1495			WARN(!(flags & MSG_PEEK),
1496			     "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
1497			     *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
1498		}
1499
1500		/* Well, if we have backlog, try to process it now. */
1501
1502		if (copied >= target && !sk->sk_backlog.tail)
1503			break;
1504
1505		if (copied) {
1506			if (sk->sk_err ||
1507			    sk->sk_state == TCP_CLOSE ||
1508			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
1509			    !timeo ||
1510			    signal_pending(current))
1511				break;
1512		} else {
1513			if (sock_flag(sk, SOCK_DONE))
1514				break;
1515
1516			if (sk->sk_err) {
1517				copied = sock_error(sk);
1518				break;
1519			}
1520
1521			if (sk->sk_shutdown & RCV_SHUTDOWN)
1522				break;
1523
1524			if (sk->sk_state == TCP_CLOSE) {
1525				if (!sock_flag(sk, SOCK_DONE)) {
1526					/* This occurs when the user tries to read
1527					 * from a socket that was never connected.
1528					 */
1529					copied = -ENOTCONN;
1530					break;
1531				}
1532				break;
1533			}
1534
1535			if (!timeo) {
1536				copied = -EAGAIN;
1537				break;
1538			}
1539
1540			if (signal_pending(current)) {
1541				copied = sock_intr_errno(timeo);
1542				break;
1543			}
1544		}
1545
1546		tcp_cleanup_rbuf(sk, copied);
1547
1548		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1549			/* Install new reader */
1550			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1551				user_recv = current;
1552				tp->ucopy.task = user_recv;
1553				tp->ucopy.iov = msg->msg_iov;
1554			}
1555
1556			tp->ucopy.len = len;
1557
1558			WARN_ON(tp->copied_seq != tp->rcv_nxt &&
1559				!(flags & (MSG_PEEK | MSG_TRUNC)));
1560
1561			/* Ugly... If prequeue is not empty, we have to
1562			 * process it before releasing socket, otherwise
1563			 * order will be broken at second iteration.
1564			 * More elegant solution is required!!!
1565			 *
1566			 * Look: we have the following (pseudo)queues:
1567			 *
1568			 * 1. packets in flight
1569			 * 2. backlog
1570			 * 3. prequeue
1571			 * 4. receive_queue
1572			 *
1573			 * Each queue can be processed only if the next ones
1574			 * are empty. At this point we have empty receive_queue.
1575			 * But prequeue _can_ be not empty after 2nd iteration,
1576			 * when we jumped to start of loop because backlog
1577			 * processing added something to receive_queue.
1578			 * We cannot release_sock(), because backlog contains
1579			 * packets arrived _after_ prequeued ones.
1580			 *
1581			 * In short, the algorithm is clear --- process all
1582			 * the queues in order. We could do it more directly,
1583			 * requeueing packets from backlog to prequeue if it
1584			 * is not empty. That is more elegant, but eats cycles,
1585			 * unfortunately.
1586			 */
1587			if (!skb_queue_empty(&tp->ucopy.prequeue))
1588				goto do_prequeue;
1589
1590			/* __ Set realtime policy in scheduler __ */
1591		}
1592
1593#ifdef CONFIG_NET_DMA
1594		if (tp->ucopy.dma_chan)
1595			dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1596#endif
1597		if (copied >= target) {
1598			/* Do not sleep, just process backlog. */
1599			release_sock(sk);
1600			lock_sock(sk);
1601		} else
1602			sk_wait_data(sk, &timeo);
1603
1604#ifdef CONFIG_NET_DMA
1605		tcp_service_net_dma(sk, false);  /* Don't block */
1606		tp->ucopy.wakeup = 0;
1607#endif
1608
1609		if (user_recv) {
1610			int chunk;
1611
1612			/* __ Restore normal policy in scheduler __ */
1613
1614			if ((chunk = len - tp->ucopy.len) != 0) {
1615				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1616				len -= chunk;
1617				copied += chunk;
1618			}
1619
1620			if (tp->rcv_nxt == tp->copied_seq &&
1621			    !skb_queue_empty(&tp->ucopy.prequeue)) {
1622do_prequeue:
1623				tcp_prequeue_process(sk);
1624
1625				if ((chunk = len - tp->ucopy.len) != 0) {
1626					NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1627					len -= chunk;
1628					copied += chunk;
1629				}
1630			}
1631		}
1632		if ((flags & MSG_PEEK) &&
1633		    (peek_seq - copied - urg_hole != tp->copied_seq)) {
1634			if (net_ratelimit())
1635				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1636				       current->comm, task_pid_nr(current));
1637			peek_seq = tp->copied_seq;
1638		}
1639		continue;
1640
1641	found_ok_skb:
1642		/* Ok so how much can we use? */
1643		used = skb->len - offset;
1644		if (len < used)
1645			used = len;
1646
1647		/* Do we have urgent data here? */
1648		if (tp->urg_data) {
1649			u32 urg_offset = tp->urg_seq - *seq;
1650			if (urg_offset < used) {
1651				if (!urg_offset) {
1652					if (!sock_flag(sk, SOCK_URGINLINE)) {
1653						++*seq;
1654						urg_hole++;
1655						offset++;
1656						used--;
1657						if (!used)
1658							goto skip_copy;
1659					}
1660				} else
1661					used = urg_offset;
1662			}
1663		}
1664
1665		if (!(flags & MSG_TRUNC)) {
1666#ifdef CONFIG_NET_DMA
1667			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1668				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1669
1670			if (tp->ucopy.dma_chan) {
1671				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
1672					tp->ucopy.dma_chan, skb, offset,
1673					msg->msg_iov, used,
1674					tp->ucopy.pinned_list);
1675
1676				if (tp->ucopy.dma_cookie < 0) {
1677
1678					printk(KERN_ALERT "dma_cookie < 0\n");
1679
1680					/* Exception. Bailout! */
1681					if (!copied)
1682						copied = -EFAULT;
1683					break;
1684				}
1685
1686				dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1687
1688				if ((offset + used) == skb->len)
1689					copied_early = 1;
1690
1691			} else
1692#endif
1693			{
1694				err = skb_copy_datagram_iovec(skb, offset,
1695						msg->msg_iov, used);
1696				if (err) {
1697					/* Exception. Bailout! */
1698					if (!copied)
1699						copied = -EFAULT;
1700					break;
1701				}
1702			}
1703		}
1704
1705		*seq += used;
1706		copied += used;
1707		len -= used;
1708
1709		tcp_rcv_space_adjust(sk);
1710
1711skip_copy:
1712		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1713			tp->urg_data = 0;
1714			tcp_fast_path_check(sk);
1715		}
1716		if (used + offset < skb->len)
1717			continue;
1718
1719		if (tcp_hdr(skb)->fin)
1720			goto found_fin_ok;
1721		if (!(flags & MSG_PEEK)) {
1722			sk_eat_skb(sk, skb, copied_early);
1723			copied_early = 0;
1724		}
1725		continue;
1726
1727	found_fin_ok:
1728		/* Process the FIN. */
1729		++*seq;
1730		if (!(flags & MSG_PEEK)) {
1731			sk_eat_skb(sk, skb, copied_early);
1732			copied_early = 0;
1733		}
1734		break;
1735	} while (len > 0);
1736
1737	if (user_recv) {
1738		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1739			int chunk;
1740
1741			tp->ucopy.len = copied > 0 ? len : 0;
1742
1743			tcp_prequeue_process(sk);
1744
1745			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1746				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1747				len -= chunk;
1748				copied += chunk;
1749			}
1750		}
1751
1752		tp->ucopy.task = NULL;
1753		tp->ucopy.len = 0;
1754	}
1755
1756#ifdef CONFIG_NET_DMA
1757	tcp_service_net_dma(sk, true);  /* Wait for queue to drain */
1758	tp->ucopy.dma_chan = NULL;
1759
1760	if (tp->ucopy.pinned_list) {
1761		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1762		tp->ucopy.pinned_list = NULL;
1763	}
1764#endif
1765
1766	/* According to UNIX98, msg_name/msg_namelen are ignored
1767	 * on connected socket. I was just happy when found this 8) --ANK
1768	 */
1769
1770	/* Clean up data we have read: This will do ACK frames. */
1771	tcp_cleanup_rbuf(sk, copied);
1772
1773	release_sock(sk);
1774	return copied;
1775
1776out:
1777	release_sock(sk);
1778	return err;
1779
1780recv_urg:
1781	err = tcp_recv_urg(sk, msg, len, flags);
1782	goto out;
1783}
1784EXPORT_SYMBOL(tcp_recvmsg);
1785
1786void tcp_set_state(struct sock *sk, int state)
1787{
1788	int oldstate = sk->sk_state;
1789
1790	switch (state) {
1791	case TCP_ESTABLISHED:
1792		if (oldstate != TCP_ESTABLISHED)
1793			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
1794		break;
1795
1796	case TCP_CLOSE:
1797		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
1798			TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
1799
1800		sk->sk_prot->unhash(sk);
1801		if (inet_csk(sk)->icsk_bind_hash &&
1802		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
1803			inet_put_port(sk);
1804		/* fall through */
1805	default:
1806		if (oldstate == TCP_ESTABLISHED)
1807			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
1808	}
1809
1810	/* Change state AFTER socket is unhashed to avoid closed
1811	 * socket sitting in hash tables.
1812	 */
1813	sk->sk_state = state;
1814
1815#ifdef STATE_TRACE
1816	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
1817#endif
1818}
1819EXPORT_SYMBOL_GPL(tcp_set_state);
1820
1821/*
1822 *	State processing on a close. This implements the state shift for
1823 *	sending our FIN frame. Note that we only send a FIN for some
1824 *	states. A shutdown() may have already sent the FIN, or we may be
1825 *	closed.
1826 */
1827
1828static const unsigned char new_state[16] = {
1829  /* current state:        new state:      action:	*/
1830  /* (Invalid)		*/ TCP_CLOSE,
1831  /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1832  /* TCP_SYN_SENT	*/ TCP_CLOSE,
1833  /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1834  /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
1835  /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
1836  /* TCP_TIME_WAIT	*/ TCP_CLOSE,
1837  /* TCP_CLOSE		*/ TCP_CLOSE,
1838  /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK  | TCP_ACTION_FIN,
1839  /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
1840  /* TCP_LISTEN		*/ TCP_CLOSE,
1841  /* TCP_CLOSING	*/ TCP_CLOSING,
1842};
1843
1844static int tcp_close_state(struct sock *sk)
1845{
1846	int next = (int)new_state[sk->sk_state];
1847	int ns = next & TCP_STATE_MASK;
1848
1849	tcp_set_state(sk, ns);
1850
1851	return next & TCP_ACTION_FIN;
1852}
1853
1854/*
1855 *	Shutdown the sending side of a connection. Much like close except
1856 *	that we don't shut down the receive side or call sock_set_flag(sk, SOCK_DEAD).
1857 */
1858
1859void tcp_shutdown(struct sock *sk, int how)
1860{
1861	/*	We need to grab some memory, and put together a FIN,
1862	 *	and then put it into the queue to be sent.
1863	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1864	 */
1865	if (!(how & SEND_SHUTDOWN))
1866		return;
1867
1868	/* If we've already sent a FIN, or it's a closed state, skip this. */
1869	if ((1 << sk->sk_state) &
1870	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1871	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1872		/* Clear out any half completed packets.  FIN if needed. */
1873		if (tcp_close_state(sk))
1874			tcp_send_fin(sk);
1875	}
1876}
1877EXPORT_SYMBOL(tcp_shutdown);
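
/* Editor's note: a hedged user-space sketch of the half-close that
 * tcp_shutdown() implements: shutdown(fd, SHUT_WR) queues our FIN
 * (SEND_SHUTDOWN) but leaves the receive side usable until the peer sends
 * its own FIN. Illustrative only; excluded from the build.
 */
#if 0
#include <stdio.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>

static void half_close(int fd)	/* helper name is illustrative */
{
	char buf[512];
	ssize_t n;

	/* Send our FIN: ESTABLISHED -> FIN_WAIT1 via tcp_close_state(). */
	shutdown(fd, SHUT_WR);

	/* The read side still works; drain whatever the peer has left. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		printf("drained %zd trailing bytes\n", n);

	close(fd);
}
#endif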
1878
1879void tcp_close(struct sock *sk, long timeout)
1880{
1881	struct sk_buff *skb;
1882	int data_was_unread = 0;
1883	int state;
1884
1885	lock_sock(sk);
1886	sk->sk_shutdown = SHUTDOWN_MASK;
1887
1888	if (sk->sk_state == TCP_LISTEN) {
1889		tcp_set_state(sk, TCP_CLOSE);
1890
1891		/* Special case. */
1892		inet_csk_listen_stop(sk);
1893
1894		goto adjudge_to_death;
1895	}
1896
1897	/*  We need to flush the recv. buffs.  We do this only on the
1898	 *  descriptor close, not protocol-sourced closes, because the
1899	 *  reader process may not have drained the data yet!
1900	 */
1901	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1902		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
1903			  tcp_hdr(skb)->fin;
1904		data_was_unread += len;
1905		__kfree_skb(skb);
1906	}
1907
1908	sk_mem_reclaim(sk);
1909
1910	/* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
1911	if (sk->sk_state == TCP_CLOSE)
1912		goto adjudge_to_death;
1913
1914	/* As outlined in RFC 2525, section 2.17, we send a RST here because
1915	 * data was lost. To witness the awful effects of the old behavior of
1916	 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
1917	 * GET in an FTP client, suspend the process, wait for the client to
1918	 * advertise a zero window, then kill -9 the FTP client, wheee...
1919	 * Note: timeout is always zero in such a case.
1920	 */
1921	if (data_was_unread) {
1922		/* Unread data was tossed, zap the connection. */
1923		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
1924		tcp_set_state(sk, TCP_CLOSE);
1925		tcp_send_active_reset(sk, sk->sk_allocation);
1926	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1927		/* Check zero linger _after_ checking for unread data. */
1928		sk->sk_prot->disconnect(sk, 0);
1929		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
1930	} else if (tcp_close_state(sk)) {
1931		/* We FIN if the application ate all the data before
1932		 * zapping the connection.
1933		 */
1934
1935		/* RED-PEN. Formally speaking, we have broken the TCP state
1936		 * machine. State transitions:
1937		 *
1938		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1939		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
1940		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1941		 *
1942		 * are legal only when FIN has been sent (i.e. in window),
1943		 * rather than queued out of window. Purists blame.
1944		 *
1945		 * F.e. "RFC state" is ESTABLISHED,
1946		 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
1947		 *
1948		 * The visible deviations are that we sometimes enter the
1949		 * time-wait state when it is not really required (harmless),
1950		 * and do not send active resets when the specs require them
1951		 * (TCP_ESTABLISHED and TCP_CLOSE_WAIT, when they look like
1952		 * CLOSING or LAST_ACK to Linux).
1953		 * Probably, I missed some more holelets.
1954		 * 						--ANK
1955		 */
1956		tcp_send_fin(sk);
1957	}
1958
1959	sk_stream_wait_close(sk, timeout);
1960
1961adjudge_to_death:
1962	state = sk->sk_state;
1963	sock_hold(sk);
1964	sock_orphan(sk);
1965
1966	/* It is the last release_sock in its life. It will remove backlog. */
1967	release_sock(sk);
1968
1969
1970	/* Now the socket is owned by the kernel and we acquire the BH lock
1971	 * to finish the close. No need to check for user refs.
1972	 */
1973	local_bh_disable();
1974	bh_lock_sock(sk);
1975	WARN_ON(sock_owned_by_user(sk));
1976
1977	percpu_counter_inc(sk->sk_prot->orphan_count);
1978
1979	/* Have we already been destroyed by a softirq or backlog? */
1980	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
1981		goto out;
1982
1983	/*	This is a (useful) BSD violation of the RFC. There is a
1984	 *	problem with TCP as specified in that the other end could
1985	 *	keep a socket open forever with no application left at this end.
1986	 *	We use a 3 minute timeout (about the same as BSD) then kill
1987	 *	our end. If they send after that then tough - BUT: long enough
1988	 *	that we won't make the old 4*rto = almost no time - whoops
1989	 *	reset mistake.
1990	 *
1991	 *	Nope, it was not a mistake. It is really desired behaviour,
1992	 *	e.g. on HTTP servers, where such sockets are useless but
1993	 *	consume significant resources. Let's do it with the special
1994	 *	linger2 option.					--ANK
1995	 */
1996
1997	if (sk->sk_state == TCP_FIN_WAIT2) {
1998		struct tcp_sock *tp = tcp_sk(sk);
1999		if (tp->linger2 < 0) {
2000			tcp_set_state(sk, TCP_CLOSE);
2001			tcp_send_active_reset(sk, GFP_ATOMIC);
2002			NET_INC_STATS_BH(sock_net(sk),
2003					LINUX_MIB_TCPABORTONLINGER);
2004		} else {
2005			const int tmo = tcp_fin_time(sk);
2006
2007			if (tmo > TCP_TIMEWAIT_LEN) {
2008				inet_csk_reset_keepalive_timer(sk,
2009						tmo - TCP_TIMEWAIT_LEN);
2010			} else {
2011				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
2012				goto out;
2013			}
2014		}
2015	}
2016	if (sk->sk_state != TCP_CLOSE) {
2017		sk_mem_reclaim(sk);
2018		if (tcp_too_many_orphans(sk, 0)) {
2019			if (net_ratelimit())
2020				printk(KERN_INFO "TCP: too many orphaned "
2021				       "sockets\n");
2022			tcp_set_state(sk, TCP_CLOSE);
2023			tcp_send_active_reset(sk, GFP_ATOMIC);
2024			NET_INC_STATS_BH(sock_net(sk),
2025					LINUX_MIB_TCPABORTONMEMORY);
2026		}
2027	}
2028
2029	if (sk->sk_state == TCP_CLOSE)
2030		inet_csk_destroy_sock(sk);
2031	/* Otherwise, socket is reprieved until protocol close. */
2032
2033out:
2034	bh_unlock_sock(sk);
2035	local_bh_enable();
2036	sock_put(sk);
2037}
2038EXPORT_SYMBOL(tcp_close);
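
/* Editor's note: an illustrative user-space sketch (assumed, not from this
 * file) of two behaviours implemented by tcp_close() above: unread receive
 * data makes close() send an RST (RFC 2525, sec 2.17), and SO_LINGER with
 * l_linger == 0 takes the zero-linger branch and aborts the connection
 * instead of doing the FIN handshake.
 */
#if 0
#include <sys/socket.h>
#include <unistd.h>

static void abortive_close(int fd)	/* helper name is illustrative */
{
	struct linger lg = {
		.l_onoff  = 1,	/* sets SOCK_LINGER */
		.l_linger = 0,	/* zero linger time => disconnect, i.e. RST */
	};

	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
	close(fd);	/* hits the "zero linger" branch of tcp_close() */
}
#endif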
2039
2040/* These states need RST on ABORT according to RFC793 */
2041
2042static inline int tcp_need_reset(int state)
2043{
2044	return (1 << state) &
2045	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
2046		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
2047}
2048
2049int tcp_disconnect(struct sock *sk, int flags)
2050{
2051	struct inet_sock *inet = inet_sk(sk);
2052	struct inet_connection_sock *icsk = inet_csk(sk);
2053	struct tcp_sock *tp = tcp_sk(sk);
2054	int err = 0;
2055	int old_state = sk->sk_state;
2056
2057	if (old_state != TCP_CLOSE)
2058		tcp_set_state(sk, TCP_CLOSE);
2059
2060	/* ABORT function of RFC793 */
2061	if (old_state == TCP_LISTEN) {
2062		inet_csk_listen_stop(sk);
2063	} else if (tcp_need_reset(old_state) ||
2064		   (tp->snd_nxt != tp->write_seq &&
2065		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
2066		/* The last check adjusts for the discrepancy between Linux
2067		 * and the RFC states.
2068		 */
2069		tcp_send_active_reset(sk, gfp_any());
2070		sk->sk_err = ECONNRESET;
2071	} else if (old_state == TCP_SYN_SENT)
2072		sk->sk_err = ECONNRESET;
2073
2074	tcp_clear_xmit_timers(sk);
2075	__skb_queue_purge(&sk->sk_receive_queue);
2076	tcp_write_queue_purge(sk);
2077	__skb_queue_purge(&tp->out_of_order_queue);
2078#ifdef CONFIG_NET_DMA
2079	__skb_queue_purge(&sk->sk_async_wait_queue);
2080#endif
2081
2082	inet->inet_dport = 0;
2083
2084	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
2085		inet_reset_saddr(sk);
2086
2087	sk->sk_shutdown = 0;
2088	sock_reset_flag(sk, SOCK_DONE);
2089	tp->srtt = 0;
2090	if ((tp->write_seq += tp->max_window + 2) == 0)
2091		tp->write_seq = 1;
2092	icsk->icsk_backoff = 0;
2093	tp->snd_cwnd = 2;
2094	icsk->icsk_probes_out = 0;
2095	tp->packets_out = 0;
2096	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
2097	tp->snd_cwnd_cnt = 0;
2098	tp->bytes_acked = 0;
2099	tp->window_clamp = 0;
2100	tcp_set_ca_state(sk, TCP_CA_Open);
2101	tcp_clear_retrans(tp);
2102	inet_csk_delack_init(sk);
2103	tcp_init_send_head(sk);
2104	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2105	__sk_dst_reset(sk);
2106
2107	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
2108
2109	sk->sk_error_report(sk);
2110	return err;
2111}
2112EXPORT_SYMBOL(tcp_disconnect);
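
/* Editor's note: tcp_disconnect() is reachable from user space by calling
 * connect() with an AF_UNSPEC address, which detaches the socket from its
 * peer and resets it to CLOSE as above. A minimal sketch, assuming a
 * connected stream socket fd; illustrative only.
 */
#if 0
#include <string.h>
#include <sys/socket.h>

static int tcp_unconnect(int fd)	/* helper name is illustrative */
{
	struct sockaddr sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;	/* routes to sk->sk_prot->disconnect() */
	return connect(fd, &sa, sizeof(sa));
}
#endif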
2113
2114/*
2115 *	Socket option code for TCP.
2116 */
2117static int do_tcp_setsockopt(struct sock *sk, int level,
2118		int optname, char __user *optval, unsigned int optlen)
2119{
2120	struct tcp_sock *tp = tcp_sk(sk);
2121	struct inet_connection_sock *icsk = inet_csk(sk);
2122	int val;
2123	int err = 0;
2124
2125	/* These are data/string values, all the others are ints */
2126	switch (optname) {
2127	case TCP_CONGESTION: {
2128		char name[TCP_CA_NAME_MAX];
2129
2130		if (optlen < 1)
2131			return -EINVAL;
2132
2133		val = strncpy_from_user(name, optval,
2134					min_t(long, TCP_CA_NAME_MAX-1, optlen));
2135		if (val < 0)
2136			return -EFAULT;
2137		name[val] = 0;
2138
2139		lock_sock(sk);
2140		err = tcp_set_congestion_control(sk, name);
2141		release_sock(sk);
2142		return err;
2143	}
2144	case TCP_COOKIE_TRANSACTIONS: {
2145		struct tcp_cookie_transactions ctd;
2146		struct tcp_cookie_values *cvp = NULL;
2147
2148		if (sizeof(ctd) > optlen)
2149			return -EINVAL;
2150		if (copy_from_user(&ctd, optval, sizeof(ctd)))
2151			return -EFAULT;
2152
2153		if (ctd.tcpct_used > sizeof(ctd.tcpct_value) ||
2154		    ctd.tcpct_s_data_desired > TCP_MSS_DESIRED)
2155			return -EINVAL;
2156
2157		if (ctd.tcpct_cookie_desired == 0) {
2158			/* default to global value */
2159		} else if ((0x1 & ctd.tcpct_cookie_desired) ||
2160			   ctd.tcpct_cookie_desired > TCP_COOKIE_MAX ||
2161			   ctd.tcpct_cookie_desired < TCP_COOKIE_MIN) {
2162			return -EINVAL;
2163		}
2164
2165		if (TCP_COOKIE_OUT_NEVER & ctd.tcpct_flags) {
2166			/* Supersedes all other values */
2167			lock_sock(sk);
2168			if (tp->cookie_values != NULL) {
2169				kref_put(&tp->cookie_values->kref,
2170					 tcp_cookie_values_release);
2171				tp->cookie_values = NULL;
2172			}
2173			tp->rx_opt.cookie_in_always = 0; /* false */
2174			tp->rx_opt.cookie_out_never = 1; /* true */
2175			release_sock(sk);
2176			return err;
2177		}
2178
2179		/* Allocate ancillary memory before locking.
2180		 */
2181		if (ctd.tcpct_used > 0 ||
2182		    (tp->cookie_values == NULL &&
2183		     (sysctl_tcp_cookie_size > 0 ||
2184		      ctd.tcpct_cookie_desired > 0 ||
2185		      ctd.tcpct_s_data_desired > 0))) {
2186			cvp = kzalloc(sizeof(*cvp) + ctd.tcpct_used,
2187				      GFP_KERNEL);
2188			if (cvp == NULL)
2189				return -ENOMEM;
2190
2191			kref_init(&cvp->kref);
2192		}
2193		lock_sock(sk);
2194		tp->rx_opt.cookie_in_always =
2195			(TCP_COOKIE_IN_ALWAYS & ctd.tcpct_flags);
2196		tp->rx_opt.cookie_out_never = 0; /* false */
2197
2198		if (tp->cookie_values != NULL) {
2199			if (cvp != NULL) {
2200				/* Changed values are recorded by a changed
2201				 * pointer, ensuring the cookie will differ,
2202				 * without separately hashing each value later.
2203				 */
2204				kref_put(&tp->cookie_values->kref,
2205					 tcp_cookie_values_release);
2206			} else {
2207				cvp = tp->cookie_values;
2208			}
2209		}
2210
2211		if (cvp != NULL) {
2212			cvp->cookie_desired = ctd.tcpct_cookie_desired;
2213
2214			if (ctd.tcpct_used > 0) {
2215				memcpy(cvp->s_data_payload, ctd.tcpct_value,
2216				       ctd.tcpct_used);
2217				cvp->s_data_desired = ctd.tcpct_used;
2218				cvp->s_data_constant = 1; /* true */
2219			} else {
2220				/* No constant payload data. */
2221				cvp->s_data_desired = ctd.tcpct_s_data_desired;
2222				cvp->s_data_constant = 0; /* false */
2223			}
2224
2225			tp->cookie_values = cvp;
2226		}
2227		release_sock(sk);
2228		return err;
2229	}
2230	default:
2231		/* fallthru */
2232		break;
2233	}
2234
2235	if (optlen < sizeof(int))
2236		return -EINVAL;
2237
2238	if (get_user(val, (int __user *)optval))
2239		return -EFAULT;
2240
2241	lock_sock(sk);
2242
2243	switch (optname) {
2244	case TCP_MAXSEG:
2245		/* Values greater than interface MTU won't take effect. However
2246		 * at the point when this call is done we typically don't yet
2247		 * know which interface is going to be used */
2248		if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
2249			err = -EINVAL;
2250			break;
2251		}
2252		tp->rx_opt.user_mss = val;
2253		break;
2254
2255	case TCP_NODELAY:
2256		if (val) {
2257			/* TCP_NODELAY is weaker than TCP_CORK, so that
2258			 * this option on corked socket is remembered, but
2259			 * it is not activated until cork is cleared.
2260			 *
2261			 * However, when TCP_NODELAY is set we make
2262			 * an explicit push, which overrides even TCP_CORK
2263			 * for currently queued segments.
2264			 */
2265			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
2266			tcp_push_pending_frames(sk);
2267		} else {
2268			tp->nonagle &= ~TCP_NAGLE_OFF;
2269		}
2270		break;
2271
2272	case TCP_THIN_LINEAR_TIMEOUTS:
2273		if (val < 0 || val > 1)
2274			err = -EINVAL;
2275		else
2276			tp->thin_lto = val;
2277		break;
2278
2279	case TCP_THIN_DUPACK:
2280		if (val < 0 || val > 1)
2281			err = -EINVAL;
2282		else
2283			tp->thin_dupack = val;
2284		break;
2285
2286	case TCP_CORK:
2287		/* When set indicates to always queue non-full frames.
2288		 * Later the user clears this option and we transmit
2289		 * any pending partial frames in the queue.  This is
2290		 * meant to be used alongside sendfile() to get properly
2291		 * filled frames when the user (for example) must write
2292		 * out headers with a write() call first and then use
2293		 * sendfile to send out the data parts.
2294		 *
2295		 * TCP_CORK can be set together with TCP_NODELAY and it is
2296		 * stronger than TCP_NODELAY.
2297		 */
2298		if (val) {
2299			tp->nonagle |= TCP_NAGLE_CORK;
2300		} else {
2301			tp->nonagle &= ~TCP_NAGLE_CORK;
2302			if (tp->nonagle&TCP_NAGLE_OFF)
2303				tp->nonagle |= TCP_NAGLE_PUSH;
2304			tcp_push_pending_frames(sk);
2305		}
2306		break;
2307
2308	case TCP_KEEPIDLE:
2309		if (val < 1 || val > MAX_TCP_KEEPIDLE)
2310			err = -EINVAL;
2311		else {
2312			tp->keepalive_time = val * HZ;
2313			if (sock_flag(sk, SOCK_KEEPOPEN) &&
2314			    !((1 << sk->sk_state) &
2315			      (TCPF_CLOSE | TCPF_LISTEN))) {
2316				u32 elapsed = keepalive_time_elapsed(tp);
2317				if (tp->keepalive_time > elapsed)
2318					elapsed = tp->keepalive_time - elapsed;
2319				else
2320					elapsed = 0;
2321				inet_csk_reset_keepalive_timer(sk, elapsed);
2322			}
2323		}
2324		break;
2325	case TCP_KEEPINTVL:
2326		if (val < 1 || val > MAX_TCP_KEEPINTVL)
2327			err = -EINVAL;
2328		else
2329			tp->keepalive_intvl = val * HZ;
2330		break;
2331	case TCP_KEEPCNT:
2332		if (val < 1 || val > MAX_TCP_KEEPCNT)
2333			err = -EINVAL;
2334		else
2335			tp->keepalive_probes = val;
2336		break;
2337	case TCP_SYNCNT:
2338		if (val < 1 || val > MAX_TCP_SYNCNT)
2339			err = -EINVAL;
2340		else
2341			icsk->icsk_syn_retries = val;
2342		break;
2343
2344	case TCP_LINGER2:
2345		if (val < 0)
2346			tp->linger2 = -1;
2347		else if (val > sysctl_tcp_fin_timeout / HZ)
2348			tp->linger2 = 0;
2349		else
2350			tp->linger2 = val * HZ;
2351		break;
2352
2353	case TCP_DEFER_ACCEPT:
2354		/* Translate value in seconds to number of retransmits */
2355		icsk->icsk_accept_queue.rskq_defer_accept =
2356			secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
2357					TCP_RTO_MAX / HZ);
2358		break;
2359
2360	case TCP_WINDOW_CLAMP:
2361		if (!val) {
2362			if (sk->sk_state != TCP_CLOSE) {
2363				err = -EINVAL;
2364				break;
2365			}
2366			tp->window_clamp = 0;
2367		} else
2368			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
2369						SOCK_MIN_RCVBUF / 2 : val;
2370		break;
2371
2372	case TCP_QUICKACK:
2373		if (!val) {
2374			icsk->icsk_ack.pingpong = 1;
2375		} else {
2376			icsk->icsk_ack.pingpong = 0;
2377			if ((1 << sk->sk_state) &
2378			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
2379			    inet_csk_ack_scheduled(sk)) {
2380				icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
2381				tcp_cleanup_rbuf(sk, 1);
2382				if (!(val & 1))
2383					icsk->icsk_ack.pingpong = 1;
2384			}
2385		}
2386		break;
2387
2388#ifdef CONFIG_TCP_MD5SIG
2389	case TCP_MD5SIG:
2390		/* Read the IP->Key mappings from userspace */
2391		err = tp->af_specific->md5_parse(sk, optval, optlen);
2392		break;
2393#endif
2394	case TCP_USER_TIMEOUT:
2395		/* Cap the maximum time in ms that TCP will retry/retransmit
2396		 * before giving up and aborting (ETIMEDOUT) a connection.
2397		 */
2398		icsk->icsk_user_timeout = msecs_to_jiffies(val);
2399		break;
2400	default:
2401		err = -ENOPROTOOPT;
2402		break;
2403	}
2404
2405	release_sock(sk);
2406	return err;
2407}
2408
2409int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2410		   unsigned int optlen)
2411{
2412	struct inet_connection_sock *icsk = inet_csk(sk);
2413
2414	if (level != SOL_TCP)
2415		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
2416						     optval, optlen);
2417	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2418}
2419EXPORT_SYMBOL(tcp_setsockopt);
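
/* Editor's note: a hedged user-space sketch of option semantics handled by
 * do_tcp_setsockopt() above. TCP_CORK holds back partial frames until it is
 * cleared; TCP_NODELAY disables Nagle and pushes queued segments; the
 * keepalive knobs map to TCP_KEEPIDLE/TCP_KEEPINTVL/TCP_KEEPCNT (seconds,
 * seconds, probe count). Helper names are illustrative; not built here.
 */
#if 0
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <unistd.h>

static void send_corked(int fd, const void *hdr, size_t hlen,
			const void *body, size_t blen)
{
	int on = 1, off = 0;

	/* Cork: header and body coalesce into full-sized frames. */
	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
	write(fd, hdr, hlen);
	write(fd, body, blen);
	/* Uncork: any pending partial frame is pushed out now. */
	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
}

static void enable_keepalive(int fd)
{
	int on = 1, idle = 60, intvl = 10, cnt = 5;

	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
}
#endif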
2420
2421#ifdef CONFIG_COMPAT
2422int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
2423			  char __user *optval, unsigned int optlen)
2424{
2425	if (level != SOL_TCP)
2426		return inet_csk_compat_setsockopt(sk, level, optname,
2427						  optval, optlen);
2428	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2429}
2430EXPORT_SYMBOL(compat_tcp_setsockopt);
2431#endif
2432
2433/* Return information about state of tcp endpoint in API format. */
2434void tcp_get_info(struct sock *sk, struct tcp_info *info)
2435{
2436	struct tcp_sock *tp = tcp_sk(sk);
2437	const struct inet_connection_sock *icsk = inet_csk(sk);
2438	u32 now = tcp_time_stamp;
2439
2440	memset(info, 0, sizeof(*info));
2441
2442	info->tcpi_state = sk->sk_state;
2443	info->tcpi_ca_state = icsk->icsk_ca_state;
2444	info->tcpi_retransmits = icsk->icsk_retransmits;
2445	info->tcpi_probes = icsk->icsk_probes_out;
2446	info->tcpi_backoff = icsk->icsk_backoff;
2447
2448	if (tp->rx_opt.tstamp_ok)
2449		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2450	if (tcp_is_sack(tp))
2451		info->tcpi_options |= TCPI_OPT_SACK;
2452	if (tp->rx_opt.wscale_ok) {
2453		info->tcpi_options |= TCPI_OPT_WSCALE;
2454		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2455		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2456	}
2457
2458	if (tp->ecn_flags&TCP_ECN_OK)
2459		info->tcpi_options |= TCPI_OPT_ECN;
2460
2461	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2462	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
2463	info->tcpi_snd_mss = tp->mss_cache;
2464	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2465
2466	if (sk->sk_state == TCP_LISTEN) {
2467		info->tcpi_unacked = sk->sk_ack_backlog;
2468		info->tcpi_sacked = sk->sk_max_ack_backlog;
2469	} else {
2470		info->tcpi_unacked = tp->packets_out;
2471		info->tcpi_sacked = tp->sacked_out;
2472	}
2473	info->tcpi_lost = tp->lost_out;
2474	info->tcpi_retrans = tp->retrans_out;
2475	info->tcpi_fackets = tp->fackets_out;
2476
2477	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2478	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
2479	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2480
2481	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
2482	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2483	info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
2484	info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
2485	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2486	info->tcpi_snd_cwnd = tp->snd_cwnd;
2487	info->tcpi_advmss = tp->advmss;
2488	info->tcpi_reordering = tp->reordering;
2489
2490	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2491	info->tcpi_rcv_space = tp->rcvq_space.space;
2492
2493	info->tcpi_total_retrans = tp->total_retrans;
2494}
2495EXPORT_SYMBOL_GPL(tcp_get_info);
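
/* Editor's note: tcp_get_info() backs the TCP_INFO getsockopt. A minimal
 * user-space sketch reading a few of the fields filled in above; note the
 * units chosen here (rtt/rttvar in usec, the last_* fields in msec).
 * Illustrative only; excluded from the build.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void dump_tcp_info(int fd)	/* helper name is illustrative */
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	memset(&ti, 0, sizeof(ti));
	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
		printf("state %u rtt %uus rttvar %uus cwnd %u retrans %u\n",
		       ti.tcpi_state, ti.tcpi_rtt, ti.tcpi_rttvar,
		       ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
}
#endif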
2496
2497static int do_tcp_getsockopt(struct sock *sk, int level,
2498		int optname, char __user *optval, int __user *optlen)
2499{
2500	struct inet_connection_sock *icsk = inet_csk(sk);
2501	struct tcp_sock *tp = tcp_sk(sk);
2502	int val, len;
2503
2504	if (get_user(len, optlen))
2505		return -EFAULT;
2506
2507	len = min_t(unsigned int, len, sizeof(int));
2508
2509	if (len < 0)
2510		return -EINVAL;
2511
2512	switch (optname) {
2513	case TCP_MAXSEG:
2514		val = tp->mss_cache;
2515		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2516			val = tp->rx_opt.user_mss;
2517		break;
2518	case TCP_NODELAY:
2519		val = !!(tp->nonagle&TCP_NAGLE_OFF);
2520		break;
2521	case TCP_CORK:
2522		val = !!(tp->nonagle&TCP_NAGLE_CORK);
2523		break;
2524	case TCP_KEEPIDLE:
2525		val = keepalive_time_when(tp) / HZ;
2526		break;
2527	case TCP_KEEPINTVL:
2528		val = keepalive_intvl_when(tp) / HZ;
2529		break;
2530	case TCP_KEEPCNT:
2531		val = keepalive_probes(tp);
2532		break;
2533	case TCP_SYNCNT:
2534		val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
2535		break;
2536	case TCP_LINGER2:
2537		val = tp->linger2;
2538		if (val >= 0)
2539			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2540		break;
2541	case TCP_DEFER_ACCEPT:
2542		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
2543				      TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
2544		break;
2545	case TCP_WINDOW_CLAMP:
2546		val = tp->window_clamp;
2547		break;
2548	case TCP_INFO: {
2549		struct tcp_info info;
2550
2551		if (get_user(len, optlen))
2552			return -EFAULT;
2553
2554		tcp_get_info(sk, &info);
2555
2556		len = min_t(unsigned int, len, sizeof(info));
2557		if (put_user(len, optlen))
2558			return -EFAULT;
2559		if (copy_to_user(optval, &info, len))
2560			return -EFAULT;
2561		return 0;
2562	}
2563	case TCP_QUICKACK:
2564		val = !icsk->icsk_ack.pingpong;
2565		break;
2566
2567	case TCP_CONGESTION:
2568		if (get_user(len, optlen))
2569			return -EFAULT;
2570		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2571		if (put_user(len, optlen))
2572			return -EFAULT;
2573		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
2574			return -EFAULT;
2575		return 0;
2576
2577	case TCP_COOKIE_TRANSACTIONS: {
2578		struct tcp_cookie_transactions ctd;
2579		struct tcp_cookie_values *cvp = tp->cookie_values;
2580
2581		if (get_user(len, optlen))
2582			return -EFAULT;
2583		if (len < sizeof(ctd))
2584			return -EINVAL;
2585
2586		memset(&ctd, 0, sizeof(ctd));
2587		ctd.tcpct_flags = (tp->rx_opt.cookie_in_always ?
2588				   TCP_COOKIE_IN_ALWAYS : 0)
2589				| (tp->rx_opt.cookie_out_never ?
2590				   TCP_COOKIE_OUT_NEVER : 0);
2591
2592		if (cvp != NULL) {
2593			ctd.tcpct_flags |= (cvp->s_data_in ?
2594					    TCP_S_DATA_IN : 0)
2595					 | (cvp->s_data_out ?
2596					    TCP_S_DATA_OUT : 0);
2597
2598			ctd.tcpct_cookie_desired = cvp->cookie_desired;
2599			ctd.tcpct_s_data_desired = cvp->s_data_desired;
2600
2601			memcpy(&ctd.tcpct_value[0], &cvp->cookie_pair[0],
2602			       cvp->cookie_pair_size);
2603			ctd.tcpct_used = cvp->cookie_pair_size;
2604		}
2605
2606		if (put_user(sizeof(ctd), optlen))
2607			return -EFAULT;
2608		if (copy_to_user(optval, &ctd, sizeof(ctd)))
2609			return -EFAULT;
2610		return 0;
2611	}
2612	case TCP_THIN_LINEAR_TIMEOUTS:
2613		val = tp->thin_lto;
2614		break;
2615	case TCP_THIN_DUPACK:
2616		val = tp->thin_dupack;
2617		break;
2618
2619	case TCP_USER_TIMEOUT:
2620		val = jiffies_to_msecs(icsk->icsk_user_timeout);
2621		break;
2622	default:
2623		return -ENOPROTOOPT;
2624	}
2625
2626	if (put_user(len, optlen))
2627		return -EFAULT;
2628	if (copy_to_user(optval, &val, len))
2629		return -EFAULT;
2630	return 0;
2631}
2632
2633int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2634		   int __user *optlen)
2635{
2636	struct inet_connection_sock *icsk = inet_csk(sk);
2637
2638	if (level != SOL_TCP)
2639		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2640						     optval, optlen);
2641	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2642}
2643EXPORT_SYMBOL(tcp_getsockopt);
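
/* Editor's note: the TCP_CONGESTION string option round-trips through the
 * get/set paths above: reading returns icsk_ca_ops->name, writing goes to
 * tcp_set_congestion_control() and fails unless the algorithm is built in
 * or loadable. A hedged sketch; "reno" is always available.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void switch_cc(int fd)	/* helper name is illustrative */
{
	char name[16];	/* TCP_CA_NAME_MAX */
	socklen_t len = sizeof(name);

	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len) == 0)
		printf("current cc: %.*s\n", (int)len, name);

	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
		       "reno", strlen("reno")) != 0)
		perror("TCP_CONGESTION");
}
#endif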
2644
2645#ifdef CONFIG_COMPAT
2646int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2647			  char __user *optval, int __user *optlen)
2648{
2649	if (level != SOL_TCP)
2650		return inet_csk_compat_getsockopt(sk, level, optname,
2651						  optval, optlen);
2652	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2653}
2654EXPORT_SYMBOL(compat_tcp_getsockopt);
2655#endif
2656
2657struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features)
2658{
2659	struct sk_buff *segs = ERR_PTR(-EINVAL);
2660	struct tcphdr *th;
2661	unsigned thlen;
2662	unsigned int seq;
2663	__be32 delta;
2664	unsigned int oldlen;
2665	unsigned int mss;
2666
2667	if (!pskb_may_pull(skb, sizeof(*th)))
2668		goto out;
2669
2670	th = tcp_hdr(skb);
2671	thlen = th->doff * 4;
2672	if (thlen < sizeof(*th))
2673		goto out;
2674
2675	if (!pskb_may_pull(skb, thlen))
2676		goto out;
2677
2678	oldlen = (u16)~skb->len;
2679	__skb_pull(skb, thlen);
2680
2681	mss = skb_shinfo(skb)->gso_size;
2682	if (unlikely(skb->len <= mss))
2683		goto out;
2684
2685	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
2686		/* Packet is from an untrusted source, reset gso_segs. */
2687		int type = skb_shinfo(skb)->gso_type;
2688
2689		if (unlikely(type &
2690			     ~(SKB_GSO_TCPV4 |
2691			       SKB_GSO_DODGY |
2692			       SKB_GSO_TCP_ECN |
2693			       SKB_GSO_TCPV6 |
2694			       0) ||
2695			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
2696			goto out;
2697
2698		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
2699
2700		segs = NULL;
2701		goto out;
2702	}
2703
2704	segs = skb_segment(skb, features);
2705	if (IS_ERR(segs))
2706		goto out;
2707
2708	delta = htonl(oldlen + (thlen + mss));
2709
2710	skb = segs;
2711	th = tcp_hdr(skb);
2712	seq = ntohl(th->seq);
2713
2714	do {
2715		th->fin = th->psh = 0;
2716
2717		th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2718				       (__force u32)delta));
2719		if (skb->ip_summed != CHECKSUM_PARTIAL)
2720			th->check =
2721			     csum_fold(csum_partial(skb_transport_header(skb),
2722						    thlen, skb->csum));
2723
2724		seq += mss;
2725		skb = skb->next;
2726		th = tcp_hdr(skb);
2727
2728		th->seq = htonl(seq);
2729		th->cwr = 0;
2730	} while (skb->next);
2731
2732	delta = htonl(oldlen + (skb->tail - skb->transport_header) +
2733		      skb->data_len);
2734	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2735				(__force u32)delta));
2736	if (skb->ip_summed != CHECKSUM_PARTIAL)
2737		th->check = csum_fold(csum_partial(skb_transport_header(skb),
2738						   thlen, skb->csum));
2739
2740out:
2741	return segs;
2742}
2743EXPORT_SYMBOL(tcp_tso_segment);
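
/* Editor's note: the th->check fixups above are incremental one's-complement
 * updates in the style of RFC 1624: only the TCP length feeding the
 * pseudo-header changes per segment, so the checksum is patched with
 * ~old + new rather than recomputed in full. A standalone sketch of the
 * same arithmetic on 16-bit quantities, for illustration only:
 */
#if 0
#include <stdint.h>

/* Fold a 32-bit one's-complement sum down to 16 bits. */
static uint16_t csum_fold32(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* HC' = ~(~HC + ~m + m')  -- RFC 1624, equation 3 */
static uint16_t csum_update16(uint16_t check, uint16_t old, uint16_t new)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old;	/* subtract the old 16-bit word */
	sum += new;		/* add the new 16-bit word */
	return (uint16_t)~csum_fold32(sum);
}
#endif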
2744
2745struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2746{
2747	struct sk_buff **pp = NULL;
2748	struct sk_buff *p;
2749	struct tcphdr *th;
2750	struct tcphdr *th2;
2751	unsigned int len;
2752	unsigned int thlen;
2753	__be32 flags;
2754	unsigned int mss = 1;
2755	unsigned int hlen;
2756	unsigned int off;
2757	int flush = 1;
2758	int i;
2759
2760	off = skb_gro_offset(skb);
2761	hlen = off + sizeof(*th);
2762	th = skb_gro_header_fast(skb, off);
2763	if (skb_gro_header_hard(skb, hlen)) {
2764		th = skb_gro_header_slow(skb, hlen, off);
2765		if (unlikely(!th))
2766			goto out;
2767	}
2768
2769	thlen = th->doff * 4;
2770	if (thlen < sizeof(*th))
2771		goto out;
2772
2773	hlen = off + thlen;
2774	if (skb_gro_header_hard(skb, hlen)) {
2775		th = skb_gro_header_slow(skb, hlen, off);
2776		if (unlikely(!th))
2777			goto out;
2778	}
2779
2780	skb_gro_pull(skb, thlen);
2781
2782	len = skb_gro_len(skb);
2783	flags = tcp_flag_word(th);
2784
2785	for (; (p = *head); head = &p->next) {
2786		if (!NAPI_GRO_CB(p)->same_flow)
2787			continue;
2788
2789		th2 = tcp_hdr(p);
2790
2791		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
2792			NAPI_GRO_CB(p)->same_flow = 0;
2793			continue;
2794		}
2795
2796		goto found;
2797	}
2798
2799	goto out_check_final;
2800
2801found:
2802	flush = NAPI_GRO_CB(p)->flush;
2803	flush |= (__force int)(flags & TCP_FLAG_CWR);
2804	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
2805		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
2806	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
2807	for (i = sizeof(*th); i < thlen; i += 4)
2808		flush |= *(u32 *)((u8 *)th + i) ^
2809			 *(u32 *)((u8 *)th2 + i);
2810
2811	mss = skb_shinfo(p)->gso_size;
2812
2813	flush |= (len - 1) >= mss;
2814	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
2815
2816	if (flush || skb_gro_receive(head, skb)) {
2817		mss = 1;
2818		goto out_check_final;
2819	}
2820
2821	p = *head;
2822	th2 = tcp_hdr(p);
2823	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
2824
2825out_check_final:
2826	flush = len < mss;
2827	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
2828					TCP_FLAG_RST | TCP_FLAG_SYN |
2829					TCP_FLAG_FIN));
2830
2831	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
2832		pp = head;
2833
2834out:
2835	NAPI_GRO_CB(skb)->flush |= flush;
2836
2837	return pp;
2838}
2839EXPORT_SYMBOL(tcp_gro_receive);
2840
2841int tcp_gro_complete(struct sk_buff *skb)
2842{
2843	struct tcphdr *th = tcp_hdr(skb);
2844
2845	skb->csum_start = skb_transport_header(skb) - skb->head;
2846	skb->csum_offset = offsetof(struct tcphdr, check);
2847	skb->ip_summed = CHECKSUM_PARTIAL;
2848
2849	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
2850
2851	if (th->cwr)
2852		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
2853
2854	return 0;
2855}
2856EXPORT_SYMBOL(tcp_gro_complete);
2857
2858#ifdef CONFIG_TCP_MD5SIG
2859static unsigned long tcp_md5sig_users;
2860static struct tcp_md5sig_pool * __percpu *tcp_md5sig_pool;
2861static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2862
2863static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
2864{
2865	int cpu;
2866	for_each_possible_cpu(cpu) {
2867		struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
2868		if (p) {
2869			if (p->md5_desc.tfm)
2870				crypto_free_hash(p->md5_desc.tfm);
2871			kfree(p);
2872		}
2873	}
2874	free_percpu(pool);
2875}
2876
2877void tcp_free_md5sig_pool(void)
2878{
2879	struct tcp_md5sig_pool * __percpu *pool = NULL;
2880
2881	spin_lock_bh(&tcp_md5sig_pool_lock);
2882	if (--tcp_md5sig_users == 0) {
2883		pool = tcp_md5sig_pool;
2884		tcp_md5sig_pool = NULL;
2885	}
2886	spin_unlock_bh(&tcp_md5sig_pool_lock);
2887	if (pool)
2888		__tcp_free_md5sig_pool(pool);
2889}
2890EXPORT_SYMBOL(tcp_free_md5sig_pool);
2891
2892static struct tcp_md5sig_pool * __percpu *
2893__tcp_alloc_md5sig_pool(struct sock *sk)
2894{
2895	int cpu;
2896	struct tcp_md5sig_pool * __percpu *pool;
2897
2898	pool = alloc_percpu(struct tcp_md5sig_pool *);
2899	if (!pool)
2900		return NULL;
2901
2902	for_each_possible_cpu(cpu) {
2903		struct tcp_md5sig_pool *p;
2904		struct crypto_hash *hash;
2905
2906		p = kzalloc(sizeof(*p), sk->sk_allocation);
2907		if (!p)
2908			goto out_free;
2909		*per_cpu_ptr(pool, cpu) = p;
2910
2911		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2912		if (!hash || IS_ERR(hash))
2913			goto out_free;
2914
2915		p->md5_desc.tfm = hash;
2916	}
2917	return pool;
2918out_free:
2919	__tcp_free_md5sig_pool(pool);
2920	return NULL;
2921}
2922
2923struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
2924{
2925	struct tcp_md5sig_pool * __percpu *pool;
2926	int alloc = 0;
2927
2928retry:
2929	spin_lock_bh(&tcp_md5sig_pool_lock);
2930	pool = tcp_md5sig_pool;
2931	if (tcp_md5sig_users++ == 0) {
2932		alloc = 1;
2933		spin_unlock_bh(&tcp_md5sig_pool_lock);
2934	} else if (!pool) {
2935		tcp_md5sig_users--;
2936		spin_unlock_bh(&tcp_md5sig_pool_lock);
2937		cpu_relax();
2938		goto retry;
2939	} else
2940		spin_unlock_bh(&tcp_md5sig_pool_lock);
2941
2942	if (alloc) {
2943		/* We cannot hold the spinlock here because allocation may sleep. */
2944		struct tcp_md5sig_pool * __percpu *p;
2945
2946		p = __tcp_alloc_md5sig_pool(sk);
2947		spin_lock_bh(&tcp_md5sig_pool_lock);
2948		if (!p) {
2949			tcp_md5sig_users--;
2950			spin_unlock_bh(&tcp_md5sig_pool_lock);
2951			return NULL;
2952		}
2953		pool = tcp_md5sig_pool;
2954		if (pool) {
2955			/* oops, it has already been assigned. */
2956			spin_unlock_bh(&tcp_md5sig_pool_lock);
2957			__tcp_free_md5sig_pool(p);
2958		} else {
2959			tcp_md5sig_pool = pool = p;
2960			spin_unlock_bh(&tcp_md5sig_pool_lock);
2961		}
2962	}
2963	return pool;
2964}
2965EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2966
2967
2968/**
2969 *	tcp_get_md5sig_pool - get md5sig_pool for this user
2970 *
2971 *	We use a percpu structure, so if we succeed, we exit with preemption
2972 *	and BH disabled, to make sure another thread or softirq handler
2973 *	won't try to get the same context.
2974 */
2975struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
2976{
2977	struct tcp_md5sig_pool * __percpu *p;
2978
2979	local_bh_disable();
2980
2981	spin_lock(&tcp_md5sig_pool_lock);
2982	p = tcp_md5sig_pool;
2983	if (p)
2984		tcp_md5sig_users++;
2985	spin_unlock(&tcp_md5sig_pool_lock);
2986
2987	if (p)
2988		return *this_cpu_ptr(p);
2989
2990	local_bh_enable();
2991	return NULL;
2992}
2993EXPORT_SYMBOL(tcp_get_md5sig_pool);
2994
2995void tcp_put_md5sig_pool(void)
2996{
2997	local_bh_enable();
2998	tcp_free_md5sig_pool();
2999}
3000EXPORT_SYMBOL(tcp_put_md5sig_pool);
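
/* Editor's note: a hedged sketch of the calling pattern for the pool above.
 * tcp_get_md5sig_pool() returns with preemption and BH disabled on success,
 * so the window between get and put must stay short and must not sleep.
 * The exported helpers are real; the surrounding function is hypothetical
 * (modelled on the tcp_v4 MD5 hashing code). Excluded from the build.
 */
#if 0
static int example_md5_hash(u8 *md5_hash, struct tcp_md5sig_key *key,
			    struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;

	hp = tcp_get_md5sig_pool();	/* preemption + BH now disabled */
	if (!hp)
		goto clear_hash_noput;

	if (crypto_hash_init(&hp->md5_desc))
		goto clear_hash;
	if (tcp_md5_hash_header(hp, th))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	if (crypto_hash_final(&hp->md5_desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();		/* re-enables BH, drops the user ref */
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
#endif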
3001
3002int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
3003			struct tcphdr *th)
3004{
3005	struct scatterlist sg;
3006	int err;
3007
3008	__sum16 old_checksum = th->check;
3009	th->check = 0;
3010	/* options aren't included in the hash */
3011	sg_init_one(&sg, th, sizeof(struct tcphdr));
3012	err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(struct tcphdr));
3013	th->check = old_checksum;
3014	return err;
3015}
3016EXPORT_SYMBOL(tcp_md5_hash_header);
3017
3018int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
3019			  struct sk_buff *skb, unsigned header_len)
3020{
3021	struct scatterlist sg;
3022	const struct tcphdr *tp = tcp_hdr(skb);
3023	struct hash_desc *desc = &hp->md5_desc;
3024	unsigned i;
3025	const unsigned head_data_len = skb_headlen(skb) > header_len ?
3026				       skb_headlen(skb) - header_len : 0;
3027	const struct skb_shared_info *shi = skb_shinfo(skb);
3028	struct sk_buff *frag_iter;
3029
3030	sg_init_table(&sg, 1);
3031
3032	sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
3033	if (crypto_hash_update(desc, &sg, head_data_len))
3034		return 1;
3035
3036	for (i = 0; i < shi->nr_frags; ++i) {
3037		const struct skb_frag_struct *f = &shi->frags[i];
3038		sg_set_page(&sg, f->page, f->size, f->page_offset);
3039		if (crypto_hash_update(desc, &sg, f->size))
3040			return 1;
3041	}
3042
3043	skb_walk_frags(skb, frag_iter)
3044		if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
3045			return 1;
3046
3047	return 0;
3048}
3049EXPORT_SYMBOL(tcp_md5_hash_skb_data);
3050
3051int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key)
3052{
3053	struct scatterlist sg;
3054
3055	sg_init_one(&sg, key->key, key->keylen);
3056	return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
3057}
3058EXPORT_SYMBOL(tcp_md5_hash_key);
3059
3060#endif
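
/* Editor's note: user space feeds keys into the MD5 machinery above through
 * the TCP_MD5SIG socket option (the md5_parse hook in do_tcp_setsockopt()).
 * A minimal sketch using struct tcp_md5sig from <linux/tcp.h>; the peer
 * address and key bytes below are placeholders. Illustrative only.
 */
#if 0
#include <string.h>
#include <netinet/in.h>
#include <linux/tcp.h>
#include <sys/socket.h>

static int add_md5_peer(int fd, const struct sockaddr_in *peer)
{
	struct tcp_md5sig md5;

	memset(&md5, 0, sizeof(md5));
	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
	md5.tcpm_keylen = 6;
	memcpy(md5.tcpm_key, "secret", 6);	/* placeholder key */

	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
#endif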
3061
3062/**
3063 * Each Responder maintains up to two secret values concurrently for
3064 * efficient secret rollover.  Each secret value has 4 states:
3065 *
3066 * Generating.  (tcp_secret_generating != tcp_secret_primary)
3067 *    Generates new Responder-Cookies, but not yet used for primary
3068 *    verification.  This is a short-term state, typically lasting only
3069 *    one round trip time (RTT).
3070 *
3071 * Primary.  (tcp_secret_generating == tcp_secret_primary)
3072 *    Used both for generation and primary verification.
3073 *
3074 * Retiring.  (tcp_secret_retiring != tcp_secret_secondary)
3075 *    Used for verification, until the first failure that can be
3076 *    verified by the newer Generating secret.  At that time, this
3077 *    cookie's state is changed to Secondary, and the Generating
3078 *    cookie's state is changed to Primary.  This is a short-term state,
3079 *    typically lasting only one round trip time (RTT).
3080 *
3081 * Secondary.  (tcp_secret_retiring == tcp_secret_secondary)
3082 *    Used for secondary verification, after primary verification
3083 *    failures.  This state lasts no more than twice the Maximum Segment
3084 *    Lifetime (2MSL).  Then, the secret is discarded.
3085 */
3086struct tcp_cookie_secret {
3087	/* The secret is divided into two parts.  The digest part is the
3088	 * equivalent of previously hashing a secret and saving the state,
3089	 * and serves as an initialization vector (IV).  The message part
3090	 * serves as the trailing secret.
3091	 */
3092	u32				secrets[COOKIE_WORKSPACE_WORDS];
3093	unsigned long			expires;
3094};
3095
3096#define TCP_SECRET_1MSL (HZ * TCP_PAWS_MSL)
3097#define TCP_SECRET_2MSL (HZ * TCP_PAWS_MSL * 2)
3098#define TCP_SECRET_LIFE (HZ * 600)
3099
3100static struct tcp_cookie_secret tcp_secret_one;
3101static struct tcp_cookie_secret tcp_secret_two;
3102
3103/* Essentially a circular list, without dynamic allocation. */
3104static struct tcp_cookie_secret *tcp_secret_generating;
3105static struct tcp_cookie_secret *tcp_secret_primary;
3106static struct tcp_cookie_secret *tcp_secret_retiring;
3107static struct tcp_cookie_secret *tcp_secret_secondary;
3108
3109static DEFINE_SPINLOCK(tcp_secret_locker);
3110
3111/* Select a pseudo-random word in the cookie workspace.
3112 */
3113static inline u32 tcp_cookie_work(const u32 *ws, const int n)
3114{
3115	return ws[COOKIE_DIGEST_WORDS + ((COOKIE_MESSAGE_WORDS-1) & ws[n])];
3116}
3117
3118/* Fill bakery[COOKIE_WORKSPACE_WORDS] with generator, updating as needed.
3119 * Called in softirq context.
3120 * Returns: 0 for success.
3121 */
3122int tcp_cookie_generator(u32 *bakery)
3123{
3124	unsigned long jiffy = jiffies;
3125
3126	if (unlikely(time_after_eq(jiffy, tcp_secret_generating->expires))) {
3127		spin_lock_bh(&tcp_secret_locker);
3128		if (!time_after_eq(jiffy, tcp_secret_generating->expires)) {
3129			/* refreshed by another */
3130			memcpy(bakery,
3131			       &tcp_secret_generating->secrets[0],
3132			       COOKIE_WORKSPACE_WORDS);
3133		} else {
3134			/* still needs refreshing */
3135			get_random_bytes(bakery, COOKIE_WORKSPACE_WORDS);
3136
3137			/* The first time, paranoia assumes that the
3138			 * randomization function isn't as strong.  But,
3139			 * this secret initialization is delayed until
3140			 * the last possible moment (packet arrival).
3141			 * Although that time is observable, it is
3142			 * unpredictably variable.  Mash in the most
3143			 * volatile clock bits available, and expire the
3144			 * secret extra quickly.
3145			 */
3146			if (unlikely(tcp_secret_primary->expires ==
3147				     tcp_secret_secondary->expires)) {
3148				struct timespec tv;
3149
3150				getnstimeofday(&tv);
3151				bakery[COOKIE_DIGEST_WORDS+0] ^=
3152					(u32)tv.tv_nsec;
3153
3154				tcp_secret_secondary->expires = jiffy
3155					+ TCP_SECRET_1MSL
3156					+ (0x0f & tcp_cookie_work(bakery, 0));
3157			} else {
3158				tcp_secret_secondary->expires = jiffy
3159					+ TCP_SECRET_LIFE
3160					+ (0xff & tcp_cookie_work(bakery, 1));
3161				tcp_secret_primary->expires = jiffy
3162					+ TCP_SECRET_2MSL
3163					+ (0x1f & tcp_cookie_work(bakery, 2));
3164			}
3165			memcpy(&tcp_secret_secondary->secrets[0],
3166			       bakery, COOKIE_WORKSPACE_WORDS);
3167
3168			rcu_assign_pointer(tcp_secret_generating,
3169					   tcp_secret_secondary);
3170			rcu_assign_pointer(tcp_secret_retiring,
3171					   tcp_secret_primary);
3172			/*
3173			 * Neither call_rcu() nor synchronize_rcu() needed.
3174			 * Retiring data is not freed.  It is replaced after
3175			 * further (locked) pointer updates, and a quiet time
3176			 * (minimum 1MSL, maximum LIFE - 2MSL).
3177			 */
3178		}
3179		spin_unlock_bh(&tcp_secret_locker);
3180	} else {
3181		rcu_read_lock_bh();
3182		memcpy(bakery,
3183		       &rcu_dereference(tcp_secret_generating)->secrets[0],
3184		       COOKIE_WORKSPACE_WORDS);
3185		rcu_read_unlock_bh();
3186	}
3187	return 0;
3188}
3189EXPORT_SYMBOL(tcp_cookie_generator);
3190
3191void tcp_done(struct sock *sk)
3192{
3193	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
3194		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
3195
3196	tcp_set_state(sk, TCP_CLOSE);
3197	tcp_clear_xmit_timers(sk);
3198
3199	sk->sk_shutdown = SHUTDOWN_MASK;
3200
3201	if (!sock_flag(sk, SOCK_DEAD))
3202		sk->sk_state_change(sk);
3203	else
3204		inet_csk_destroy_sock(sk);
3205}
3206EXPORT_SYMBOL_GPL(tcp_done);
3207
3208extern struct tcp_congestion_ops tcp_reno;
3209
3210static __initdata unsigned long thash_entries;
3211static int __init set_thash_entries(char *str)
3212{
3213	if (!str)
3214		return 0;
3215	thash_entries = simple_strtoul(str, &str, 0);
3216	return 1;
3217}
3218__setup("thash_entries=", set_thash_entries);
3219
3220void __init tcp_init(void)
3221{
3222	struct sk_buff *skb = NULL;
3223	unsigned long limit;
3224	int i, max_share, cnt;
3225	unsigned long jiffy = jiffies;
3226
3227	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
3228
3229	percpu_counter_init(&tcp_sockets_allocated, 0);
3230	percpu_counter_init(&tcp_orphan_count, 0);
3231	tcp_hashinfo.bind_bucket_cachep =
3232		kmem_cache_create("tcp_bind_bucket",
3233				  sizeof(struct inet_bind_bucket), 0,
3234				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3235
3236	/* Size and allocate the main established and bind bucket
3237	 * hash tables.
3238	 *
3239	 * The methodology is similar to that of the buffer cache.
3240	 */
3241	tcp_hashinfo.ehash =
3242		alloc_large_system_hash("TCP established",
3243					sizeof(struct inet_ehash_bucket),
3244					thash_entries,
3245					(totalram_pages >= 128 * 1024) ?
3246					13 : 15,
3247					0,
3248					NULL,
3249					&tcp_hashinfo.ehash_mask,
3250					thash_entries ? 0 : 512 * 1024);
3251	for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) {
3252		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
3253		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i);
3254	}
3255	if (inet_ehash_locks_alloc(&tcp_hashinfo))
3256		panic("TCP: failed to alloc ehash_locks");
3257	tcp_hashinfo.bhash =
3258		alloc_large_system_hash("TCP bind",
3259					sizeof(struct inet_bind_hashbucket),
3260					tcp_hashinfo.ehash_mask + 1,
3261					(totalram_pages >= 128 * 1024) ?
3262					13 : 15,
3263					0,
3264					&tcp_hashinfo.bhash_size,
3265					NULL,
3266					64 * 1024);
3267	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
3268	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
3269		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
3270		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
3271	}
3272
3273
3274	cnt = tcp_hashinfo.ehash_mask + 1;
3275
3276	tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
3277	sysctl_tcp_max_orphans = cnt / 2;
3278	sysctl_max_syn_backlog = max(128, cnt / 256);
3279
3280	limit = nr_free_buffer_pages() / 8;
3281	limit = max(limit, 128UL);
3282	sysctl_tcp_mem[0] = limit / 4 * 3;
3283	sysctl_tcp_mem[1] = limit;
3284	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
3285
3286	/* Set per-socket limits to no more than 1/128 the pressure threshold */
3287	limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
3288	max_share = min(4UL*1024*1024, limit);
3289
3290	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
3291	sysctl_tcp_wmem[1] = 16*1024;
3292	sysctl_tcp_wmem[2] = max(64*1024, max_share);
3293
3294	sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
3295	sysctl_tcp_rmem[1] = 87380;
3296	sysctl_tcp_rmem[2] = max(87380, max_share);
3297
3298	printk(KERN_INFO "TCP: Hash tables configured "
3299	       "(established %u bind %u)\n",
3300	       tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
3301
3302	tcp_register_congestion_control(&tcp_reno);
3303
3304	memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets));
3305	memset(&tcp_secret_two.secrets[0], 0, sizeof(tcp_secret_two.secrets));
3306	tcp_secret_one.expires = jiffy; /* past due */
3307	tcp_secret_two.expires = jiffy; /* past due */
3308	tcp_secret_generating = &tcp_secret_one;
3309	tcp_secret_primary = &tcp_secret_one;
3310	tcp_secret_retiring = &tcp_secret_two;
3311	tcp_secret_secondary = &tcp_secret_two;
3312}
v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		Implementation of the Transmission Control Protocol(TCP).
   8 *
   9 * Authors:	Ross Biro
  10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  11 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
  12 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
  13 *		Florian La Roche, <flla@stud.uni-sb.de>
  14 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
  15 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
  16 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  17 *		Matthew Dillon, <dillon@apollo.west.oic.com>
  18 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  19 *		Jorge Cwik, <jorge@laser.satlink.net>
  20 *
  21 * Fixes:
  22 *		Alan Cox	:	Numerous verify_area() calls
  23 *		Alan Cox	:	Set the ACK bit on a reset
  24 *		Alan Cox	:	Stopped it crashing if it closed while
  25 *					sk->inuse=1 and was trying to connect
  26 *					(tcp_err()).
  27 *		Alan Cox	:	All icmp error handling was broken
  28 *					pointers passed where wrong and the
  29 *					socket was looked up backwards. Nobody
  30 *					tested any icmp error code obviously.
  31 *		Alan Cox	:	tcp_err() now handled properly. It
  32 *					wakes people on errors. poll
  33 *					behaves and the icmp error race
  34 *					has gone by moving it into sock.c
  35 *		Alan Cox	:	tcp_send_reset() fixed to work for
  36 *					everything not just packets for
  37 *					unknown sockets.
  38 *		Alan Cox	:	tcp option processing.
  39 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
  40 *					syn rule wrong]
  41 *		Herp Rosmanith  :	More reset fixes
  42 *		Alan Cox	:	No longer acks invalid rst frames.
  43 *					Acking any kind of RST is right out.
  44 *		Alan Cox	:	Sets an ignore me flag on an rst
  45 *					receive otherwise odd bits of prattle
  46 *					escape still
  47 *		Alan Cox	:	Fixed another acking RST frame bug.
  48 *					Should stop LAN workplace lockups.
  49 *		Alan Cox	: 	Some tidyups using the new skb list
  50 *					facilities
  51 *		Alan Cox	:	sk->keepopen now seems to work
  52 *		Alan Cox	:	Pulls options out correctly on accepts
  53 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
  54 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
  55 *					bit to skb ops.
  56 *		Alan Cox	:	Tidied tcp_data to avoid a potential
  57 *					nasty.
  58 *		Alan Cox	:	Added some better commenting, as the
  59 *					tcp is hard to follow
  60 *		Alan Cox	:	Removed incorrect check for 20 * psh
  61 *	Michael O'Reilly	:	ack < copied bug fix.
  62 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
  63 *		Alan Cox	:	FIN with no memory -> CRASH
  64 *		Alan Cox	:	Added socket option proto entries.
  65 *					Also added awareness of them to accept.
  66 *		Alan Cox	:	Added TCP options (SOL_TCP)
  67 *		Alan Cox	:	Switched wakeup calls to callbacks,
  68 *					so the kernel can layer network
  69 *					sockets.
  70 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
  71 *		Alan Cox	:	Handle FIN (more) properly (we hope).
  72 *		Alan Cox	:	RST frames sent on unsynchronised
  73 *					state ack error.
  74 *		Alan Cox	:	Put in missing check for SYN bit.
  75 *		Alan Cox	:	Added tcp_select_window() aka NET2E
  76 *					window non shrink trick.
  77 *		Alan Cox	:	Added a couple of small NET2E timer
  78 *					fixes
  79 *		Charles Hedrick :	TCP fixes
  80 *		Toomas Tamm	:	TCP window fixes
  81 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
  82 *		Charles Hedrick	:	Rewrote most of it to actually work
  83 *		Linus		:	Rewrote tcp_read() and URG handling
  84 *					completely
  85 *		Gerhard Koerting:	Fixed some missing timer handling
  86 *		Matthew Dillon  :	Reworked TCP machine states as per RFC
  87 *		Gerhard Koerting:	PC/TCP workarounds
  88 *		Adam Caldwell	:	Assorted timer/timing errors
  89 *		Matthew Dillon	:	Fixed another RST bug
  90 *		Alan Cox	:	Move to kernel side addressing changes.
  91 *		Alan Cox	:	Beginning work on TCP fastpathing
  92 *					(not yet usable)
  93 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
  94 *		Alan Cox	:	TCP fast path debugging
  95 *		Alan Cox	:	Window clamping
  96 *		Michael Riepe	:	Bug in tcp_check()
  97 *		Matt Dillon	:	More TCP improvements and RST bug fixes
  98 *		Matt Dillon	:	Yet more small nasties remove from the
  99 *					TCP code (Be very nice to this man if
 100 *					tcp finally works 100%) 8)
 101 *		Alan Cox	:	BSD accept semantics.
 102 *		Alan Cox	:	Reset on closedown bug.
 103 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 104 *		Michael Pall	:	Handle poll() after URG properly in
 105 *					all cases.
 106 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 107 *					(multi URG PUSH broke rlogin).
 108 *		Michael Pall	:	Fix the multi URG PUSH problem in
 109 *					tcp_readable(), poll() after URG
 110 *					works now.
 111 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 112 *					BSD api.
 113 *		Alan Cox	:	Changed the semantics of sk->socket to
 114 *					fix a race and a signal problem with
 115 *					accept() and async I/O.
 116 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 117 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 118 *		Craig I. Hagan  :	Allow for BSD compatible TIME_WAIT for
 119 *					clients/servers which listen in on
 120 *					fixed ports.
 121 *		Alan Cox	:	Cleaned the above up and shrank it to
 122 *					a sensible code size.
 123 *		Alan Cox	:	Self connect lockup fix.
 124 *		Alan Cox	:	No connect to multicast.
 125 *		Ross Biro	:	Close unaccepted children on master
 126 *					socket close.
 127 *		Alan Cox	:	Reset tracing code.
 128 *		Alan Cox	:	Spurious resets on shutdown.
 129 *		Alan Cox	:	Giant 15 minute/60 second timer error
 130 *		Alan Cox	:	Small whoops in polling before an
 131 *					accept.
 132 *		Alan Cox	:	Kept the state trace facility since
 133 *					it's handy for debugging.
 134 *		Alan Cox	:	More reset handler fixes.
 135 *		Alan Cox	:	Started rewriting the code based on
 136 *					the RFC's for other useful protocol
 137 *					references see: Comer, KA9Q NOS, and
 138 *					for a reference on the difference
 139 *					between specifications and how BSD
 140 *					works see the 4.4lite source.
 141 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 142 *					close.
 143 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 144 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 145 *		Alan Cox	:	Reimplemented timers as per the RFC
 146 *					and using multiple timers for sanity.
 147 *		Alan Cox	:	Small bug fixes, and a lot of new
 148 *					comments.
 149 *		Alan Cox	:	Fixed dual reader crash by locking
 150 *					the buffers (much like datagram.c)
 151 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 152 *					now gets fed up of retrying without
 153 *					(even a no space) answer.
 154 *		Alan Cox	:	Extracted closing code better
 155 *		Alan Cox	:	Fixed the closing state machine to
 156 *					resemble the RFC.
 157 *		Alan Cox	:	More 'per spec' fixes.
 158 *		Jorge Cwik	:	Even faster checksumming.
 159 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 160 *					only frames. At least one pc tcp stack
 161 *					generates them.
 162 *		Alan Cox	:	Cache last socket.
 163 *		Alan Cox	:	Per route irtt.
 164 *		Matt Day	:	poll()->select() match BSD precisely on error
 165 *		Alan Cox	:	New buffers
 166 *		Marc Tamsky	:	Various sk->prot->retransmits and
 167 *					sk->retransmits misupdating fixed.
 168 *					Fixed tcp_write_timeout: stuck close,
 169 *					and TCP syn retries gets used now.
 170 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 171 *					ack if state is TCP_CLOSED.
 172 *		Alan Cox	:	Look up device on a retransmit - routes may
 173 *					change. Doesn't yet cope with MSS shrink right
 174 *					but it's a start!
 175 *		Marc Tamsky	:	Closing in closing fixes.
 176 *		Mike Shaver	:	RFC1122 verifications.
 177 *		Alan Cox	:	rcv_saddr errors.
 178 *		Alan Cox	:	Block double connect().
 179 *		Alan Cox	:	Small hooks for enSKIP.
 180 *		Alexey Kuznetsov:	Path MTU discovery.
 181 *		Alan Cox	:	Support soft errors.
 182 *		Alan Cox	:	Fix MTU discovery pathological case
 183 *					when the remote claims no mtu!
 184 *		Marc Tamsky	:	TCP_CLOSE fix.
 185 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 186 *					window but wrong (fixes NT lpd problems)
 187 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 188 *		Joerg Reuter	:	No modification of locked buffers in
 189 *					tcp_do_retransmit()
 190 *		Eric Schenk	:	Changed receiver side silly window
 191 *					avoidance algorithm to BSD style
 192 *					algorithm. This doubles throughput
 193 *					against machines running Solaris,
 194 *					and seems to result in general
 195 *					improvement.
 196 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 197 *	Willy Konynenberg	:	Transparent proxying support.
 198 *	Mike McLagan		:	Routing by source
 199 *		Keith Owens	:	Do proper merging with partial SKB's in
 200 *					tcp_do_sendmsg to avoid burstiness.
 201 *		Eric Schenk	:	Fix fast close down bug with
 202 *					shutdown() followed by close().
 203 *		Andi Kleen 	:	Make poll agree with SIGIO
 204 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 205 *					lingertime == 0 (RFC 793 ABORT Call)
 206 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 207 *					csum_and_copy_from_user() if possible.
 208 *
 209 * Description of States:
 210 *
 211 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 212 *
 213 *	TCP_SYN_RECV		received a connection request, sent ack,
 214 *				waiting for final ack in three-way handshake.
 215 *
 216 *	TCP_ESTABLISHED		connection established
 217 *
 218 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 219 *				transmission of remaining buffered data
 220 *
 221 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 222 *				to shutdown
 223 *
 224 *	TCP_CLOSING		both sides have shutdown but we still have
 225 *				data we have to finish sending
 226 *
 227 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 228 *				closed, can only be entered from FIN_WAIT2
 229 *				or CLOSING.  Required because the other end
 230 *				may not have gotten our last ACK causing it
 231 *				to retransmit the data packet (which we ignore)
 232 *
 233 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 234 *				us to finish writing our data and to shutdown
 235 *				(we have to close() to move on to LAST_ACK)
 236 *
 237 *	TCP_LAST_ACK		our side has shutdown after remote has
 238 *				shutdown.  There may still be data in our
 239 *				buffer that we have to finish sending
 240 *
 241 *	TCP_CLOSE		socket is finished
 242 */
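/*
 * Editor's note: a minimal user-space sketch (not part of this file) of
 * how the states above surface to applications: getsockopt(TCP_INFO)
 * reports the current state in tcpi_state, using the same TCP_* values.
 * The helper name is illustrative.
 */
#include <netinet/in.h>		/* IPPROTO_TCP */
#include <netinet/tcp.h>	/* TCP_INFO, struct tcp_info */
#include <sys/socket.h>

static int query_tcp_state(int fd)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) < 0)
		return -1;
	return info.tcpi_state;	/* e.g. TCP_ESTABLISHED, TCP_FIN_WAIT1, ... */
}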
 243
 244#define pr_fmt(fmt) "TCP: " fmt
 245
 246#include <crypto/hash.h>
 247#include <linux/kernel.h>
 248#include <linux/module.h>
 249#include <linux/types.h>
 250#include <linux/fcntl.h>
 251#include <linux/poll.h>
 252#include <linux/inet_diag.h>
 253#include <linux/init.h>
 254#include <linux/fs.h>
 255#include <linux/skbuff.h>
 256#include <linux/scatterlist.h>
 257#include <linux/splice.h>
 258#include <linux/net.h>
 259#include <linux/socket.h>
 260#include <linux/random.h>
 261#include <linux/memblock.h>
 262#include <linux/highmem.h>
 263#include <linux/swap.h>
 264#include <linux/cache.h>
 265#include <linux/err.h>
 266#include <linux/time.h>
 267#include <linux/slab.h>
 268#include <linux/errqueue.h>
 269#include <linux/static_key.h>
 270
 271#include <net/icmp.h>
 272#include <net/inet_common.h>
 273#include <net/tcp.h>
 274#include <net/mptcp.h>
 275#include <net/xfrm.h>
 276#include <net/ip.h>
 277#include <net/sock.h>
 278
 279#include <linux/uaccess.h>
 280#include <asm/ioctls.h>
 281#include <net/busy_poll.h>
 282
 283struct percpu_counter tcp_orphan_count;
 284EXPORT_SYMBOL_GPL(tcp_orphan_count);
 285
 286long sysctl_tcp_mem[3] __read_mostly;
 287EXPORT_SYMBOL(sysctl_tcp_mem);
 288
 289atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
 290EXPORT_SYMBOL(tcp_memory_allocated);
 291
 292#if IS_ENABLED(CONFIG_SMC)
 293DEFINE_STATIC_KEY_FALSE(tcp_have_smc);
 294EXPORT_SYMBOL(tcp_have_smc);
 295#endif
 296
 297/*
 298 * Current number of TCP sockets.
 299 */
 300struct percpu_counter tcp_sockets_allocated;
 301EXPORT_SYMBOL(tcp_sockets_allocated);
 302
 303/*
 304 * TCP splice context
 305 */
 306struct tcp_splice_state {
 307	struct pipe_inode_info *pipe;
 308	size_t len;
 309	unsigned int flags;
 310};
 311
 312/*
 313 * Pressure flag: try to collapse.
 314 * Technical note: it is used by multiple contexts non-atomically.
 315 * Everything around __sk_mem_schedule() is of this nature: accounting
 316 * is strict, actions are advisory and have some latency.
 317 */
 318unsigned long tcp_memory_pressure __read_mostly;
 319EXPORT_SYMBOL_GPL(tcp_memory_pressure);
 320
 321DEFINE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
 322EXPORT_SYMBOL(tcp_rx_skb_cache_key);
 323
 324DEFINE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
 325
 326void tcp_enter_memory_pressure(struct sock *sk)
 327{
 328	unsigned long val;
 329
 330	if (READ_ONCE(tcp_memory_pressure))
 331		return;
 332	val = jiffies;
 333
 334	if (!val)
 335		val--;
 336	if (!cmpxchg(&tcp_memory_pressure, 0, val))
 337		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
 338}
 339EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure);
 340
 341void tcp_leave_memory_pressure(struct sock *sk)
 342{
 343	unsigned long val;
 344
 345	if (!READ_ONCE(tcp_memory_pressure))
 346		return;
 347	val = xchg(&tcp_memory_pressure, 0);
 348	if (val)
 349		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
 350			      jiffies_to_msecs(jiffies - val));
 351}
 352EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure);
 353
 354/* Convert seconds to retransmits based on initial and max timeout */
 355static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
 356{
 357	u8 res = 0;
 358
 359	if (seconds > 0) {
 360		int period = timeout;
 361
 362		res = 1;
 363		while (seconds > period && res < 255) {
 364			res++;
 365			timeout <<= 1;
 366			if (timeout > rto_max)
 367				timeout = rto_max;
 368			period += timeout;
 369		}
 370	}
 371	return res;
 372}
 373
 374/* Convert retransmits to seconds based on initial and max timeout */
 375static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
 376{
 377	int period = 0;
 378
 379	if (retrans > 0) {
 380		period = timeout;
 381		while (--retrans) {
 382			timeout <<= 1;
 383			if (timeout > rto_max)
 384				timeout = rto_max;
 385			period += timeout;
 386		}
 387	}
 388	return period;
 389}
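/*
 * Editor's note: a worked example of the inverse pair above, with
 * illustrative values (timeout = 2s, rto_max = 60s, not taken from any
 * particular sysctl). retrans_to_secs(5, 2, 60) sums the doubling
 * series 2 + 4 + 8 + 16 + 32 = 62s, and secs_to_retrans(62, 2, 60)
 * walks the same series back up to 5.
 */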
 390
 391static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
 392{
 393	u32 rate = READ_ONCE(tp->rate_delivered);
 394	u32 intv = READ_ONCE(tp->rate_interval_us);
 395	u64 rate64 = 0;
 396
 397	if (rate && intv) {
 398		rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
 399		do_div(rate64, intv);
 400	}
 401	return rate64;
 402}
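/*
 * Editor's note: in units, the computation above is
 *   rate64 [bytes/sec] = delivered [pkts] * mss_cache [bytes/pkt]
 *                        * USEC_PER_SEC / interval [usec];
 * e.g. 100 packets of MSS 1448 over a 10000 usec interval gives
 * 14,480,000 bytes/sec. Numbers are illustrative only.
 */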
 403
 404/* Address-family independent initialization for a tcp_sock.
 405 *
 406 * NOTE: A lot of things are set to zero explicitly by the call to
 407 *       sk_alloc(), so they need not be done here.
 408 */
 409void tcp_init_sock(struct sock *sk)
 410{
 411	struct inet_connection_sock *icsk = inet_csk(sk);
 412	struct tcp_sock *tp = tcp_sk(sk);
 413
 414	tp->out_of_order_queue = RB_ROOT;
 415	sk->tcp_rtx_queue = RB_ROOT;
 416	tcp_init_xmit_timers(sk);
 417	INIT_LIST_HEAD(&tp->tsq_node);
 418	INIT_LIST_HEAD(&tp->tsorted_sent_queue);
 419
 420	icsk->icsk_rto = TCP_TIMEOUT_INIT;
 421	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
 422	minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);
 423
 424	/* So many TCP implementations out there (incorrectly) count the
 425	 * initial SYN frame in their delayed-ACK and congestion control
 426	 * algorithms that we must have the following bandaid to talk
 427	 * efficiently to them.  -DaveM
 428	 */
 429	tp->snd_cwnd = TCP_INIT_CWND;
 430
 431	/* There's a bubble in the pipe until at least the first ACK. */
 432	tp->app_limited = ~0U;
 433
 434	/* See draft-stevens-tcpca-spec-01 for discussion of the
 435	 * initialization of these values.
 436	 */
 437	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 438	tp->snd_cwnd_clamp = ~0;
 439	tp->mss_cache = TCP_MSS_DEFAULT;
 440
 441	tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
 442	tcp_assign_congestion_control(sk);
 443
 444	tp->tsoffset = 0;
 445	tp->rack.reo_wnd_steps = 1;
 446
 447	sk->sk_write_space = sk_stream_write_space;
 448	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
 449
 450	icsk->icsk_sync_mss = tcp_sync_mss;
 451
 452	WRITE_ONCE(sk->sk_sndbuf, sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
 453	WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
 454
 455	sk_sockets_allocated_inc(sk);
 456	sk->sk_route_forced_caps = NETIF_F_GSO;
 457}
 458EXPORT_SYMBOL(tcp_init_sock);
 459
 460static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
 461{
 462	struct sk_buff *skb = tcp_write_queue_tail(sk);
 463
 464	if (tsflags && skb) {
 465		struct skb_shared_info *shinfo = skb_shinfo(skb);
 466		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 467
 468		sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
 469		if (tsflags & SOF_TIMESTAMPING_TX_ACK)
 470			tcb->txstamp_ack = 1;
 471		if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
 472			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
 473	}
 474}
 475
 476static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
 477					  int target, struct sock *sk)
 478{
 479	int avail = READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq);
 480
 481	if (avail > 0) {
 482		if (avail >= target)
 483			return true;
 484		if (tcp_rmem_pressure(sk))
 485			return true;
 486	}
 487	if (sk->sk_prot->stream_memory_read)
 488		return sk->sk_prot->stream_memory_read(sk);
 489	return false;
 490}
 491
 492/*
 493 *	Wait for a TCP event.
 494 *
 495 *	Note that we don't need to lock the socket, as the upper poll layers
 496 *	take care of normal races (between the test and the event) and we don't
 497 *	go look at any of the socket buffers directly.
 498 */
 499__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 500{
 501	__poll_t mask;
 502	struct sock *sk = sock->sk;
 503	const struct tcp_sock *tp = tcp_sk(sk);
 504	int state;
 505
 506	sock_poll_wait(file, sock, wait);
 507
 508	state = inet_sk_state_load(sk);
 509	if (state == TCP_LISTEN)
 510		return inet_csk_listen_poll(sk);
 511
 512	/* Socket is not locked. We are protected from async events
 513	 * by poll logic and correct handling of state changes
 514	 * made by other threads is impossible in any case.
 515	 */
 516
 517	mask = 0;
 518
 519	/*
 520	 * EPOLLHUP is certainly not done right. But poll() doesn't
 521	 * have a notion of HUP in just one direction, and for a
 522	 * socket the read side is more interesting.
 523	 *
 524	 * Some poll() documentation says that EPOLLHUP is incompatible
 525	 * with the EPOLLOUT/POLLWR flags, so somebody should check all
 526	 * of this. But be careful: it tends to be safer to return too many
 527	 * bits than too few, and you can easily break real applications
 528	 * if you don't tell them that something has hung up!
 529	 *
 530	 * Check-me.
 531	 *
 532	 * Check number 1. EPOLLHUP is an _UNMASKABLE_ event (see UNIX98 and
 533	 * our fs/select.c). It means that after we received EOF,
 534	 * poll always returns immediately, making it impossible to poll()
 535	 * for write() in state CLOSE_WAIT. One solution is evident: set
 536	 * EPOLLHUP if and only if shutdown has been made in both directions.
 537	 * Actually, it is interesting to look at how Solaris and DUX
 538	 * solve this dilemma. I would prefer it if EPOLLHUP were maskable;
 539	 * then we could set it on SND_SHUTDOWN. BTW the examples given
 540	 * in Stevens' books assume exactly this behaviour; it explains
 541	 * why EPOLLHUP is incompatible with EPOLLOUT.	--ANK
 542	 *
 543	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
 544	 * blocking on fresh not-connected or disconnected socket. --ANK
 545	 */
 546	if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
 547		mask |= EPOLLHUP;
 548	if (sk->sk_shutdown & RCV_SHUTDOWN)
 549		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
 550
 551	/* Connected or passive Fast Open socket? */
 552	if (state != TCP_SYN_SENT &&
 553	    (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
 554		int target = sock_rcvlowat(sk, 0, INT_MAX);
 555
 556		if (READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
 557		    !sock_flag(sk, SOCK_URGINLINE) &&
 558		    tp->urg_data)
 559			target++;
 560
 561		if (tcp_stream_is_readable(tp, target, sk))
 562			mask |= EPOLLIN | EPOLLRDNORM;
 563
 564		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
 565			if (sk_stream_is_writeable(sk)) {
 566				mask |= EPOLLOUT | EPOLLWRNORM;
 567			} else {  /* send SIGIO later */
 568				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 569				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 570
 571				/* Race breaker. If space is freed after
 572				 * wspace test but before the flags are set,
 573				 * IO signal will be lost. Memory barrier
 574				 * pairs with the input side.
 575				 */
 576				smp_mb__after_atomic();
 577				if (sk_stream_is_writeable(sk))
 578					mask |= EPOLLOUT | EPOLLWRNORM;
 579			}
 580		} else
 581			mask |= EPOLLOUT | EPOLLWRNORM;
 582
 583		if (tp->urg_data & TCP_URG_VALID)
 584			mask |= EPOLLPRI;
 585	} else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
 586		/* Active TCP fastopen socket with defer_connect
 587		 * Return EPOLLOUT so application can call write()
 588		 * in order for kernel to generate SYN+data
 589		 */
 590		mask |= EPOLLOUT | EPOLLWRNORM;
 591	}
 592	/* This barrier is coupled with smp_wmb() in tcp_reset() */
 593	smp_rmb();
 594	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
 595		mask |= EPOLLERR;
 596
 597	return mask;
 598}
 599EXPORT_SYMBOL(tcp_poll);
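/*
 * Editor's note: a hedged user-space sketch of consuming the event mask
 * computed above. POLLRDHUP (which needs _GNU_SOURCE) reports the
 * peer's FIN separately from the full POLLHUP "both directions shut"
 * case; the helper name is illustrative.
 */
#define _GNU_SOURCE
#include <poll.h>

static void wait_for_socket(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT | POLLRDHUP };

	if (poll(&pfd, 1, -1) > 0) {
		if (pfd.revents & POLLRDHUP)
			;	/* peer shut down its write side (RCV_SHUTDOWN above) */
		if (pfd.revents & POLLHUP)
			;	/* both directions shut down, or socket is CLOSEd */
	}
}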
 600
 601int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 602{
 603	struct tcp_sock *tp = tcp_sk(sk);
 604	int answ;
 605	bool slow;
 606
 607	switch (cmd) {
 608	case SIOCINQ:
 609		if (sk->sk_state == TCP_LISTEN)
 610			return -EINVAL;
 611
 612		slow = lock_sock_fast(sk);
 613		answ = tcp_inq(sk);
 614		unlock_sock_fast(sk, slow);
 615		break;
 616	case SIOCATMARK:
 617		answ = tp->urg_data &&
 618		       READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
 619		break;
 620	case SIOCOUTQ:
 621		if (sk->sk_state == TCP_LISTEN)
 622			return -EINVAL;
 623
 624		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
 625			answ = 0;
 626		else
 627			answ = READ_ONCE(tp->write_seq) - tp->snd_una;
 628		break;
 629	case SIOCOUTQNSD:
 630		if (sk->sk_state == TCP_LISTEN)
 631			return -EINVAL;
 632
 633		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
 634			answ = 0;
 635		else
 636			answ = READ_ONCE(tp->write_seq) -
 637			       READ_ONCE(tp->snd_nxt);
 638		break;
 639	default:
 640		return -ENOIOCTLCMD;
 641	}
 642
 643	return put_user(answ, (int __user *)arg);
 644}
 645EXPORT_SYMBOL(tcp_ioctl);
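/*
 * Editor's note: the user-space view of the ioctls handled above; a
 * sketch with error handling elided. SIOCINQ reports unread bytes,
 * SIOCOUTQ unacked bytes, SIOCOUTQNSD queued-but-unsent bytes.
 */
#include <sys/ioctl.h>
#include <linux/sockios.h>	/* SIOCINQ, SIOCOUTQ, SIOCOUTQNSD */

static void queue_depths(int fd, int *in, int *out, int *notsent)
{
	ioctl(fd, SIOCINQ, in);		/* bytes readable without blocking */
	ioctl(fd, SIOCOUTQ, out);	/* bytes written but not yet ACKed */
	ioctl(fd, SIOCOUTQNSD, notsent);	/* bytes queued but not yet sent */
}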
 646
 647static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
 648{
 649	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
 650	tp->pushed_seq = tp->write_seq;
 651}
 652
 653static inline bool forced_push(const struct tcp_sock *tp)
 654{
 655	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
 656}
 657
 658static void skb_entail(struct sock *sk, struct sk_buff *skb)
 659{
 660	struct tcp_sock *tp = tcp_sk(sk);
 661	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 662
 663	skb->csum    = 0;
 664	tcb->seq     = tcb->end_seq = tp->write_seq;
 665	tcb->tcp_flags = TCPHDR_ACK;
 666	tcb->sacked  = 0;
 667	__skb_header_release(skb);
 668	tcp_add_write_queue_tail(sk, skb);
 669	sk_wmem_queued_add(sk, skb->truesize);
 670	sk_mem_charge(sk, skb->truesize);
 671	if (tp->nonagle & TCP_NAGLE_PUSH)
 672		tp->nonagle &= ~TCP_NAGLE_PUSH;
 673
 674	tcp_slow_start_after_idle_check(sk);
 675}
 676
 677static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
 678{
 679	if (flags & MSG_OOB)
 680		tp->snd_up = tp->write_seq;
 681}
 682
 683/* If a not-yet-filled skb is pushed, do not send it if
 684 * we have data packets in Qdisc or NIC queues:
 685 * because TX completion will happen shortly, this gives a chance
 686 * to coalesce future sendmsg() payload into this skb, without
 687 * need for a timer, and with no latency trade-off.
 688 * As packets containing a data payload have a bigger truesize
 689 * than pure ACK (dataless) packets, the last checks prevent
 690 * autocorking if we only have an ACK in the Qdisc/NIC queues,
 691 * or if TX completion was delayed after we processed the ACK packet.
 692 */
 693static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
 694				int size_goal)
 695{
 696	return skb->len < size_goal &&
 697	       sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
 698	       !tcp_rtx_queue_empty(sk) &&
 699	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
 700}
 701
 702void tcp_push(struct sock *sk, int flags, int mss_now,
 703	      int nonagle, int size_goal)
 704{
 705	struct tcp_sock *tp = tcp_sk(sk);
 706	struct sk_buff *skb;
 707
 708	skb = tcp_write_queue_tail(sk);
 709	if (!skb)
 710		return;
 711	if (!(flags & MSG_MORE) || forced_push(tp))
 712		tcp_mark_push(tp, skb);
 713
 714	tcp_mark_urg(tp, flags);
 715
 716	if (tcp_should_autocork(sk, skb, size_goal)) {
 717
 718		/* avoid atomic op if TSQ_THROTTLED bit is already set */
 719		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
 720			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
 721			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
 722		}
 723		/* It is possible TX completion already happened
 724		 * before we set TSQ_THROTTLED.
 725		 */
 726		if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
 727			return;
 728	}
 729
 730	if (flags & MSG_MORE)
 731		nonagle = TCP_NAGLE_CORK;
 732
 733	__tcp_push_pending_frames(sk, mss_now, nonagle);
 734}
 735
 736static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
 737				unsigned int offset, size_t len)
 738{
 739	struct tcp_splice_state *tss = rd_desc->arg.data;
 740	int ret;
 741
 742	ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
 743			      min(rd_desc->count, len), tss->flags);
 744	if (ret > 0)
 745		rd_desc->count -= ret;
 746	return ret;
 747}
 748
 749static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
 750{
 751	/* Store TCP splice context information in read_descriptor_t. */
 752	read_descriptor_t rd_desc = {
 753		.arg.data = tss,
 754		.count	  = tss->len,
 755	};
 756
 757	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
 758}
 759
 760/**
 761 *  tcp_splice_read - splice data from TCP socket to a pipe
 762 * @sock:	socket to splice from
 763 * @ppos:	position (not valid)
 764 * @pipe:	pipe to splice to
 765 * @len:	number of bytes to splice
 766 * @flags:	splice modifier flags
 767 *
 768 * Description:
 769 *    Will read pages from given socket and fill them into a pipe.
 770 *
 771 **/
 772ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 773			struct pipe_inode_info *pipe, size_t len,
 774			unsigned int flags)
 775{
 776	struct sock *sk = sock->sk;
 777	struct tcp_splice_state tss = {
 778		.pipe = pipe,
 779		.len = len,
 780		.flags = flags,
 781	};
 782	long timeo;
 783	ssize_t spliced;
 784	int ret;
 785
 786	sock_rps_record_flow(sk);
 787	/*
 788	 * We can't seek on a socket input
 789	 */
 790	if (unlikely(*ppos))
 791		return -ESPIPE;
 792
 793	ret = spliced = 0;
 794
 795	lock_sock(sk);
 796
 797	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
 798	while (tss.len) {
 799		ret = __tcp_splice_read(sk, &tss);
 800		if (ret < 0)
 801			break;
 802		else if (!ret) {
 803			if (spliced)
 804				break;
 805			if (sock_flag(sk, SOCK_DONE))
 806				break;
 807			if (sk->sk_err) {
 808				ret = sock_error(sk);
 809				break;
 810			}
 811			if (sk->sk_shutdown & RCV_SHUTDOWN)
 812				break;
 813			if (sk->sk_state == TCP_CLOSE) {
 814				/*
 815				 * This occurs when the user tries to read
 816				 * from a never-connected socket.
 817				 */
 818				ret = -ENOTCONN;
 819				break;
 820			}
 821			if (!timeo) {
 822				ret = -EAGAIN;
 823				break;
 824			}
 825			/* if __tcp_splice_read() got nothing while we have
 826			 * an skb in receive queue, we do not want to loop.
 827			 * This might happen with URG data.
 828			 */
 829			if (!skb_queue_empty(&sk->sk_receive_queue))
 830				break;
 831			sk_wait_data(sk, &timeo, NULL);
 832			if (signal_pending(current)) {
 833				ret = sock_intr_errno(timeo);
 834				break;
 835			}
 836			continue;
 837		}
 838		tss.len -= ret;
 839		spliced += ret;
 840
 841		if (!timeo)
 842			break;
 843		release_sock(sk);
 844		lock_sock(sk);
 845
 846		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
 847		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
 848		    signal_pending(current))
 849			break;
 850	}
 851
 852	release_sock(sk);
 853
 854	if (spliced)
 855		return spliced;
 856
 857	return ret;
 858}
 859EXPORT_SYMBOL(tcp_splice_read);
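/*
 * Editor's note: tcp_splice_read() backs the splice(2) syscall; a
 * minimal user-space sketch moving up to 64KB from a TCP socket into a
 * pipe without passing through a user buffer. Assumes _GNU_SOURCE; the
 * helper name is illustrative.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static ssize_t sock_to_pipe(int sockfd, int pipe_wr)
{
	/* Offsets must be NULL for sockets; SPLICE_F_MOVE is a hint only. */
	return splice(sockfd, NULL, pipe_wr, NULL, 65536, SPLICE_F_MOVE);
}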
 860
 861struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
 862				    bool force_schedule)
 863{
 864	struct sk_buff *skb;
 865
 866	if (likely(!size)) {
 867		skb = sk->sk_tx_skb_cache;
 868		if (skb) {
 869			skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
 870			sk->sk_tx_skb_cache = NULL;
 871			pskb_trim(skb, 0);
 872			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
 873			skb_shinfo(skb)->tx_flags = 0;
 874			memset(TCP_SKB_CB(skb), 0, sizeof(struct tcp_skb_cb));
 875			return skb;
 876		}
 877	}
 878	/* The TCP header must be at least 32-bit aligned.  */
 879	size = ALIGN(size, 4);
 880
 881	if (unlikely(tcp_under_memory_pressure(sk)))
 882		sk_mem_reclaim_partial(sk);
 883
 884	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
 885	if (likely(skb)) {
 886		bool mem_scheduled;
 887
 888		if (force_schedule) {
 889			mem_scheduled = true;
 890			sk_forced_mem_schedule(sk, skb->truesize);
 891		} else {
 892			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
 893		}
 894		if (likely(mem_scheduled)) {
 895			skb_reserve(skb, sk->sk_prot->max_header);
 896			/*
 897			 * Make sure that we have exactly size bytes
 898			 * available to the caller, no more, no less.
 899			 */
 900			skb->reserved_tailroom = skb->end - skb->tail - size;
 901			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
 902			return skb;
 903		}
 904		__kfree_skb(skb);
 905	} else {
 906		sk->sk_prot->enter_memory_pressure(sk);
 907		sk_stream_moderate_sndbuf(sk);
 908	}
 909	return NULL;
 910}
 911
 912static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 913				       int large_allowed)
 914{
 915	struct tcp_sock *tp = tcp_sk(sk);
 916	u32 new_size_goal, size_goal;
 917
 918	if (!large_allowed)
 919		return mss_now;
 920
 921	/* Note: tcp_tso_autosize() will eventually split this later */
 922	new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER;
 923	new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal);
 924
 925	/* We try hard to avoid divides here */
 926	size_goal = tp->gso_segs * mss_now;
 927	if (unlikely(new_size_goal < size_goal ||
 928		     new_size_goal >= size_goal + mss_now)) {
 929		tp->gso_segs = min_t(u16, new_size_goal / mss_now,
 930				     sk->sk_gso_max_segs);
 931		size_goal = tp->gso_segs * mss_now;
 932	}
 933
 934	return max(size_goal, mss_now);
 935}
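/*
 * Editor's note: with illustrative numbers mss_now = 1448 and
 * sk_gso_max_size = 64KB, new_size_goal comes to roughly 65200 bytes,
 * so gso_segs becomes about 45 and size_goal snaps to a whole multiple
 * of the MSS (ignoring the half-window clamp and the sk_gso_max_segs
 * cap, which may reduce it further).
 */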
 936
 937int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
 938{
 939	int mss_now;
 940
 941	mss_now = tcp_current_mss(sk);
 942	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
 943
 944	return mss_now;
 945}
 946
 947/* In some cases, both sendpage() and sendmsg() could have added
 948 * an skb to the write queue, but failed to add any payload to it.
 949 * We need to remove it to consume less memory, but more
 950 * importantly to be able to generate EPOLLOUT for Edge-Triggered epoll()
 951 * users.
 952 */
 953static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
 954{
 955	if (skb && !skb->len) {
 956		tcp_unlink_write_queue(skb, sk);
 957		if (tcp_write_queue_empty(sk))
 958			tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
 959		sk_wmem_free_skb(sk, skb);
 960	}
 961}
 962
 963ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
 964			 size_t size, int flags)
 965{
 966	struct tcp_sock *tp = tcp_sk(sk);
 967	int mss_now, size_goal;
 968	int err;
 969	ssize_t copied;
 970	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
 971
 972	if (IS_ENABLED(CONFIG_DEBUG_VM) &&
 973	    WARN_ONCE(!sendpage_ok(page),
 974		      "page must not be a Slab one and have page_count > 0"))
 975		return -EINVAL;
 976
 977	/* Wait for a connection to finish. One exception is TCP Fast Open
 978	 * (passive side) where data is allowed to be sent before a connection
 979	 * is fully established.
 980	 */
 981	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
 982	    !tcp_passive_fastopen(sk)) {
 983		err = sk_stream_wait_connect(sk, &timeo);
 984		if (err != 0)
 985			goto out_err;
 986	}
 987
 988	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 989
 990	mss_now = tcp_send_mss(sk, &size_goal, flags);
 991	copied = 0;
 992
 993	err = -EPIPE;
 994	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
 995		goto out_err;
 996
 997	while (size > 0) {
 998		struct sk_buff *skb = tcp_write_queue_tail(sk);
 999		int copy, i;
1000		bool can_coalesce;
1001
1002		if (!skb || (copy = size_goal - skb->len) <= 0 ||
1003		    !tcp_skb_can_collapse_to(skb)) {
1004new_segment:
1005			if (!sk_stream_memory_free(sk))
1006				goto wait_for_sndbuf;
1007
1008			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
1009					tcp_rtx_and_write_queues_empty(sk));
1010			if (!skb)
1011				goto wait_for_memory;
1012
1013#ifdef CONFIG_TLS_DEVICE
1014			skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
1015#endif
1016			skb_entail(sk, skb);
1017			copy = size_goal;
1018		}
1019
1020		if (copy > size)
1021			copy = size;
1022
1023		i = skb_shinfo(skb)->nr_frags;
1024		can_coalesce = skb_can_coalesce(skb, i, page, offset);
1025		if (!can_coalesce && i >= sysctl_max_skb_frags) {
1026			tcp_mark_push(tp, skb);
1027			goto new_segment;
1028		}
1029		if (!sk_wmem_schedule(sk, copy))
1030			goto wait_for_memory;
1031
1032		if (can_coalesce) {
1033			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1034		} else {
1035			get_page(page);
1036			skb_fill_page_desc(skb, i, page, offset, copy);
1037		}
1038
1039		if (!(flags & MSG_NO_SHARED_FRAGS))
1040			skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1041
1042		skb->len += copy;
1043		skb->data_len += copy;
1044		skb->truesize += copy;
1045		sk_wmem_queued_add(sk, copy);
1046		sk_mem_charge(sk, copy);
1047		skb->ip_summed = CHECKSUM_PARTIAL;
1048		WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
1049		TCP_SKB_CB(skb)->end_seq += copy;
1050		tcp_skb_pcount_set(skb, 0);
1051
1052		if (!copied)
1053			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
1054
1055		copied += copy;
1056		offset += copy;
1057		size -= copy;
1058		if (!size)
1059			goto out;
1060
1061		if (skb->len < size_goal || (flags & MSG_OOB))
1062			continue;
1063
1064		if (forced_push(tp)) {
1065			tcp_mark_push(tp, skb);
1066			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
1067		} else if (skb == tcp_send_head(sk))
1068			tcp_push_one(sk, mss_now);
1069		continue;
1070
1071wait_for_sndbuf:
1072		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1073wait_for_memory:
1074		tcp_push(sk, flags & ~MSG_MORE, mss_now,
1075			 TCP_NAGLE_PUSH, size_goal);
1076
1077		err = sk_stream_wait_memory(sk, &timeo);
1078		if (err != 0)
1079			goto do_error;
1080
1081		mss_now = tcp_send_mss(sk, &size_goal, flags);
1082	}
1083
1084out:
1085	if (copied) {
1086		tcp_tx_timestamp(sk, sk->sk_tsflags);
1087		if (!(flags & MSG_SENDPAGE_NOTLAST))
1088			tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
1089	}
1090	return copied;
1091
1092do_error:
1093	tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk));
1094	if (copied)
1095		goto out;
1096out_err:
1097	/* make sure we wake any epoll edge trigger waiter */
1098	if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) {
1099		sk->sk_write_space(sk);
1100		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
1101	}
1102	return sk_stream_error(sk, flags, err);
1103}
1104EXPORT_SYMBOL_GPL(do_tcp_sendpages);
1105
1106int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
1107			size_t size, int flags)
1108{
1109	if (!(sk->sk_route_caps & NETIF_F_SG))
1110		return sock_no_sendpage_locked(sk, page, offset, size, flags);
1111
1112	tcp_rate_check_app_limited(sk);  /* is sending application-limited? */
1113
1114	return do_tcp_sendpages(sk, page, offset, size, flags);
1115}
1116EXPORT_SYMBOL_GPL(tcp_sendpage_locked);
1117
1118int tcp_sendpage(struct sock *sk, struct page *page, int offset,
1119		 size_t size, int flags)
1120{
1121	int ret;
1122
1123	lock_sock(sk);
1124	ret = tcp_sendpage_locked(sk, page, offset, size, flags);
1125	release_sock(sk);
1126
1127	return ret;
1128}
1129EXPORT_SYMBOL(tcp_sendpage);
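/*
 * Editor's note: sendfile(2) on a TCP socket ends up in tcp_sendpage();
 * a user-space sketch, error handling elided and the helper name
 * illustrative.
 */
#include <sys/sendfile.h>

static ssize_t file_to_sock(int sockfd, int filefd, off_t *off, size_t count)
{
	/* Pushes page-cache pages into the socket write queue. */
	return sendfile(sockfd, filefd, off, count);
}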
1130
1131void tcp_free_fastopen_req(struct tcp_sock *tp)
1132{
1133	if (tp->fastopen_req) {
1134		kfree(tp->fastopen_req);
1135		tp->fastopen_req = NULL;
1136	}
1137}
1138
1139static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
1140				int *copied, size_t size,
1141				struct ubuf_info *uarg)
1142{
1143	struct tcp_sock *tp = tcp_sk(sk);
1144	struct inet_sock *inet = inet_sk(sk);
1145	struct sockaddr *uaddr = msg->msg_name;
1146	int err, flags;
1147
1148	if (!(sock_net(sk)->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
1149	    (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
1150	     uaddr->sa_family == AF_UNSPEC))
1151		return -EOPNOTSUPP;
1152	if (tp->fastopen_req)
1153		return -EALREADY; /* Another Fast Open is in progress */
1154
1155	tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
1156				   sk->sk_allocation);
1157	if (unlikely(!tp->fastopen_req))
1158		return -ENOBUFS;
1159	tp->fastopen_req->data = msg;
1160	tp->fastopen_req->size = size;
1161	tp->fastopen_req->uarg = uarg;
1162
1163	if (inet->defer_connect) {
1164		err = tcp_connect(sk);
1165		/* Same failure procedure as in tcp_v4/6_connect */
1166		if (err) {
1167			tcp_set_state(sk, TCP_CLOSE);
1168			inet->inet_dport = 0;
1169			sk->sk_route_caps = 0;
1170		}
1171	}
1172	flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
1173	err = __inet_stream_connect(sk->sk_socket, uaddr,
1174				    msg->msg_namelen, flags, 1);
1175	/* fastopen_req could already be freed in __inet_stream_connect
1176	 * if the connection times out or gets rst
1177	 */
1178	if (tp->fastopen_req) {
1179		*copied = tp->fastopen_req->copied;
1180		tcp_free_fastopen_req(tp);
1181		inet->defer_connect = 0;
1182	}
1183	return err;
1184}
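/*
 * Editor's note: the user-space trigger for the path above; with the
 * client side of net.ipv4.tcp_fastopen enabled, MSG_FASTOPEN on an
 * unconnected socket carries data in the SYN (or falls back to a normal
 * handshake). A sketch; address setup and error handling elided.
 */
#include <sys/socket.h>

static ssize_t tfo_send(int fd, const void *buf, size_t len,
			const struct sockaddr *addr, socklen_t alen)
{
	/* connect() plus first write in one call; non-blocking sockets
	 * may fail with EINPROGRESS.
	 */
	return sendto(fd, buf, len, MSG_FASTOPEN, addr, alen);
}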
1185
1186int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
1187{
1188	struct tcp_sock *tp = tcp_sk(sk);
1189	struct ubuf_info *uarg = NULL;
1190	struct sk_buff *skb;
1191	struct sockcm_cookie sockc;
1192	int flags, err, copied = 0;
1193	int mss_now = 0, size_goal, copied_syn = 0;
1194	int process_backlog = 0;
1195	bool zc = false;
1196	long timeo;
1197
1198	flags = msg->msg_flags;
1199
1200	if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
1201		skb = tcp_write_queue_tail(sk);
1202		uarg = sock_zerocopy_realloc(sk, size, skb_zcopy(skb));
1203		if (!uarg) {
1204			err = -ENOBUFS;
1205			goto out_err;
1206		}
1207
1208		zc = sk->sk_route_caps & NETIF_F_SG;
1209		if (!zc)
1210			uarg->zerocopy = 0;
1211	}
1212
1213	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
1214	    !tp->repair) {
1215		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg);
1216		if (err == -EINPROGRESS && copied_syn > 0)
1217			goto out;
1218		else if (err)
1219			goto out_err;
1220	}
1221
1222	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1223
1224	tcp_rate_check_app_limited(sk);  /* is sending application-limited? */
1225
1226	/* Wait for a connection to finish. One exception is TCP Fast Open
1227	 * (passive side) where data is allowed to be sent before a connection
1228	 * is fully established.
1229	 */
1230	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
1231	    !tcp_passive_fastopen(sk)) {
1232		err = sk_stream_wait_connect(sk, &timeo);
1233		if (err != 0)
1234			goto do_error;
1235	}
1236
1237	if (unlikely(tp->repair)) {
1238		if (tp->repair_queue == TCP_RECV_QUEUE) {
1239			copied = tcp_send_rcvq(sk, msg, size);
1240			goto out_nopush;
1241		}
1242
1243		err = -EINVAL;
1244		if (tp->repair_queue == TCP_NO_QUEUE)
1245			goto out_err;
1246
1247		/* 'common' sending to sendq */
1248	}
1249
1250	sockcm_init(&sockc, sk);
1251	if (msg->msg_controllen) {
1252		err = sock_cmsg_send(sk, msg, &sockc);
1253		if (unlikely(err)) {
1254			err = -EINVAL;
1255			goto out_err;
1256		}
1257	}
1258
1259	/* This should be in poll */
1260	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1261
1262	/* Ok commence sending. */
1263	copied = 0;
1264
1265restart:
1266	mss_now = tcp_send_mss(sk, &size_goal, flags);
1267
1268	err = -EPIPE;
1269	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
1270		goto do_error;
1271
1272	while (msg_data_left(msg)) {
1273		int copy = 0;
1274
1275		skb = tcp_write_queue_tail(sk);
1276		if (skb)
1277			copy = size_goal - skb->len;
1278
1279		if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
1280			bool first_skb;
1281
1282new_segment:
1283			if (!sk_stream_memory_free(sk))
1284				goto wait_for_sndbuf;
1285
1286			if (unlikely(process_backlog >= 16)) {
1287				process_backlog = 0;
1288				if (sk_flush_backlog(sk))
1289					goto restart;
1290			}
1291			first_skb = tcp_rtx_and_write_queues_empty(sk);
1292			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
1293						  first_skb);
1294			if (!skb)
1295				goto wait_for_memory;
1296
1297			process_backlog++;
1298			skb->ip_summed = CHECKSUM_PARTIAL;
1299
1300			skb_entail(sk, skb);
1301			copy = size_goal;
1302
1303			/* All packets are restored as if they have
1304			 * already been sent. skb_mstamp_ns isn't set to
1305			 * avoid wrong rtt estimation.
1306			 */
1307			if (tp->repair)
1308				TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
1309		}
1310
1311		/* Try to append data to the end of skb. */
1312		if (copy > msg_data_left(msg))
1313			copy = msg_data_left(msg);
1314
1315		/* Where to copy to? */
1316		if (skb_availroom(skb) > 0 && !zc) {
1317			/* We have some space in skb head. Superb! */
1318			copy = min_t(int, copy, skb_availroom(skb));
1319			err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
1320			if (err)
1321				goto do_fault;
1322		} else if (!zc) {
1323			bool merge = true;
1324			int i = skb_shinfo(skb)->nr_frags;
1325			struct page_frag *pfrag = sk_page_frag(sk);
1326
1327			if (!sk_page_frag_refill(sk, pfrag))
1328				goto wait_for_memory;
1329
1330			if (!skb_can_coalesce(skb, i, pfrag->page,
1331					      pfrag->offset)) {
1332				if (i >= sysctl_max_skb_frags) {
1333					tcp_mark_push(tp, skb);
1334					goto new_segment;
1335				}
1336				merge = false;
1337			}
1338
1339			copy = min_t(int, copy, pfrag->size - pfrag->offset);
1340
1341			if (!sk_wmem_schedule(sk, copy))
1342				goto wait_for_memory;
1343
1344			err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
1345						       pfrag->page,
1346						       pfrag->offset,
1347						       copy);
1348			if (err)
1349				goto do_error;
1350
1351			/* Update the skb. */
1352			if (merge) {
1353				skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1354			} else {
1355				skb_fill_page_desc(skb, i, pfrag->page,
1356						   pfrag->offset, copy);
1357				page_ref_inc(pfrag->page);
1358			}
1359			pfrag->offset += copy;
1360		} else {
1361			err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
1362			if (err == -EMSGSIZE || err == -EEXIST) {
1363				tcp_mark_push(tp, skb);
1364				goto new_segment;
1365			}
1366			if (err < 0)
1367				goto do_error;
1368			copy = err;
1369		}
1370
1371		if (!copied)
1372			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
1373
1374		WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
1375		TCP_SKB_CB(skb)->end_seq += copy;
1376		tcp_skb_pcount_set(skb, 0);
1377
1378		copied += copy;
1379		if (!msg_data_left(msg)) {
1380			if (unlikely(flags & MSG_EOR))
1381				TCP_SKB_CB(skb)->eor = 1;
1382			goto out;
1383		}
1384
1385		if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair))
1386			continue;
1387
1388		if (forced_push(tp)) {
1389			tcp_mark_push(tp, skb);
1390			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
1391		} else if (skb == tcp_send_head(sk))
1392			tcp_push_one(sk, mss_now);
1393		continue;
1394
1395wait_for_sndbuf:
1396		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1397wait_for_memory:
1398		if (copied)
1399			tcp_push(sk, flags & ~MSG_MORE, mss_now,
1400				 TCP_NAGLE_PUSH, size_goal);
1401
1402		err = sk_stream_wait_memory(sk, &timeo);
1403		if (err != 0)
1404			goto do_error;
1405
1406		mss_now = tcp_send_mss(sk, &size_goal, flags);
1407	}
1408
1409out:
1410	if (copied) {
1411		tcp_tx_timestamp(sk, sockc.tsflags);
1412		tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
1413	}
1414out_nopush:
1415	sock_zerocopy_put(uarg);
1416	return copied + copied_syn;
1417
1418do_error:
1419	skb = tcp_write_queue_tail(sk);
1420do_fault:
1421	tcp_remove_empty_skb(sk, skb);
1422
1423	if (copied + copied_syn)
1424		goto out;
1425out_err:
1426	sock_zerocopy_put_abort(uarg, true);
1427	err = sk_stream_error(sk, flags, err);
1428	/* make sure we wake any epoll edge trigger waiter */
1429	if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) {
1430		sk->sk_write_space(sk);
1431		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
1432	}
1433	return err;
1434}
1435EXPORT_SYMBOL_GPL(tcp_sendmsg_locked);
1436
1437int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
1438{
1439	int ret;
1440
1441	lock_sock(sk);
1442	ret = tcp_sendmsg_locked(sk, msg, size);
1443	release_sock(sk);
1444
1445	return ret;
1446}
1447EXPORT_SYMBOL(tcp_sendmsg);
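/*
 * Editor's note: a sketch of driving the MSG_ZEROCOPY path handled in
 * tcp_sendmsg_locked() above. The buffer must stay untouched until the
 * kernel posts a completion on the error queue; completion parsing
 * (struct sock_extended_err, <linux/errqueue.h>) is elided and the
 * helper name is illustrative.
 */
#include <sys/socket.h>

static ssize_t zc_send(int fd, const void *buf, size_t len)
{
	int one = 1;

	setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
	/* Reap completions later with recvmsg(fd, ..., MSG_ERRQUEUE). */
	return send(fd, buf, len, MSG_ZEROCOPY);
}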
1448
1449/*
1450 *	Handle reading urgent data. BSD has very simple semantics for
1451 *	this, no blocking and very strange errors 8)
1452 */
1453
1454static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
1455{
1456	struct tcp_sock *tp = tcp_sk(sk);
1457
1458	/* No URG data to read. */
1459	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
1460	    tp->urg_data == TCP_URG_READ)
1461		return -EINVAL;	/* Yes this is right ! */
1462
1463	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
1464		return -ENOTCONN;
1465
1466	if (tp->urg_data & TCP_URG_VALID) {
1467		int err = 0;
1468		char c = tp->urg_data;
1469
1470		if (!(flags & MSG_PEEK))
1471			tp->urg_data = TCP_URG_READ;
1472
1473		/* Read urgent data. */
1474		msg->msg_flags |= MSG_OOB;
1475
1476		if (len > 0) {
1477			if (!(flags & MSG_TRUNC))
1478				err = memcpy_to_msg(msg, &c, 1);
1479			len = 1;
1480		} else
1481			msg->msg_flags |= MSG_TRUNC;
1482
1483		return err ? -EFAULT : len;
1484	}
1485
1486	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1487		return 0;
1488
1489	/* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
1490	 * the available implementations agree in this case:
1491	 * this call should never block, independent of the
1492	 * blocking state of the socket.
1493	 * Mike <pall@rz.uni-karlsruhe.de>
1494	 */
1495	return -EAGAIN;
1496}
1497
1498static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
1499{
1500	struct sk_buff *skb;
1501	int copied = 0, err = 0;
1502
1503	/* XXX -- need to support SO_PEEK_OFF */
1504
1505	skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
1506		err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
1507		if (err)
1508			return err;
1509		copied += skb->len;
1510	}
1511
1512	skb_queue_walk(&sk->sk_write_queue, skb) {
1513		err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
1514		if (err)
1515			break;
1516
1517		copied += skb->len;
1518	}
1519
1520	return err ?: copied;
1521}
1522
1523/* Clean up the receive buffer for full frames taken by the user,
1524 * then send an ACK if necessary.  COPIED is the number of bytes
1525 * tcp_recvmsg has given to the user so far; it speeds up the
1526 * calculation of whether or not we must ACK for the sake of
1527 * a window update.
1528 */
1529static void tcp_cleanup_rbuf(struct sock *sk, int copied)
1530{
1531	struct tcp_sock *tp = tcp_sk(sk);
1532	bool time_to_ack = false;
1533
1534	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1535
1536	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1537	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1538	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1539
1540	if (inet_csk_ack_scheduled(sk)) {
1541		const struct inet_connection_sock *icsk = inet_csk(sk);
1542		   /* Delayed ACKs frequently hit locked sockets during bulk
1543		    * receive. */
1544		if (icsk->icsk_ack.blocked ||
1545		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
1546		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
1547		    /*
1548		     * If this read emptied the read buffer, we send an ACK when
1549		     * the connection is not bidirectional: the user drained the
1550		     * receive buffer and there was a small segment
1551		     * in the queue.
1552		     */
1553		    (copied > 0 &&
1554		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
1555		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
1556		       !inet_csk_in_pingpong_mode(sk))) &&
1557		      !atomic_read(&sk->sk_rmem_alloc)))
1558			time_to_ack = true;
1559	}
1560
1561	/* We send an ACK if we can now advertise a non-zero window
1562	 * which has been raised "significantly".
1563	 *
1564 * Even if the window is raised up to infinity, do not send a window-open
1565 * ACK in states where we will not receive more; it is useless.
1566	 */
1567	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1568		__u32 rcv_window_now = tcp_receive_window(tp);
1569
1570		/* Optimize, __tcp_select_window() is not cheap. */
1571		if (2*rcv_window_now <= tp->window_clamp) {
1572			__u32 new_window = __tcp_select_window(sk);
1573
1574			/* Send ACK now, if this read freed lots of space
1575			 * in our buffer. Certainly, new_window is new window.
1576			 * We can advertise it now, if it is not less than current one.
1577			 * "Lots" means "at least twice" here.
1578			 */
1579			if (new_window && new_window >= 2 * rcv_window_now)
1580				time_to_ack = true;
1581		}
1582	}
1583	if (time_to_ack)
1584		tcp_send_ack(sk);
1585}
1586
1587static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1588{
1589	struct sk_buff *skb;
1590	u32 offset;
1591
1592	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
1593		offset = seq - TCP_SKB_CB(skb)->seq;
1594		if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
1595			pr_err_once("%s: found a SYN, please report !\n", __func__);
1596			offset--;
1597		}
1598		if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) {
1599			*off = offset;
1600			return skb;
1601		}
1602		/* This looks weird, but it can happen if TCP collapsing
1603		 * split a fat GRO packet while we released the socket lock
1604		 * in skb_splice_bits().
1605		 */
1606		sk_eat_skb(sk, skb);
1607	}
1608	return NULL;
1609}
1610
1611/*
1612 * This routine provides an alternative to tcp_recvmsg() for routines
1613 * that would like to handle copying from skbuffs directly in 'sendfile'
1614 * fashion.
1615 * Note:
1616 *	- It is assumed that the socket was locked by the caller.
1617 *	- The routine does not block.
1618 *	- At present, there is no support for reading OOB data
1619 *	  or for 'peeking' the socket using this routine
1620 *	  (although both would be easy to implement).
1621 */
1622int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1623		  sk_read_actor_t recv_actor)
1624{
1625	struct sk_buff *skb;
1626	struct tcp_sock *tp = tcp_sk(sk);
1627	u32 seq = tp->copied_seq;
1628	u32 offset;
1629	int copied = 0;
1630
1631	if (sk->sk_state == TCP_LISTEN)
1632		return -ENOTCONN;
1633	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1634		if (offset < skb->len) {
1635			int used;
1636			size_t len;
1637
1638			len = skb->len - offset;
1639			/* Stop reading if we hit a patch of urgent data */
1640			if (tp->urg_data) {
1641				u32 urg_offset = tp->urg_seq - seq;
1642				if (urg_offset < len)
1643					len = urg_offset;
1644				if (!len)
1645					break;
1646			}
1647			used = recv_actor(desc, skb, offset, len);
1648			if (used <= 0) {
1649				if (!copied)
1650					copied = used;
1651				break;
1652			} else if (used <= len) {
1653				seq += used;
1654				copied += used;
1655				offset += used;
1656			}
1657			/* If recv_actor drops the lock (e.g. TCP splice
1658			 * receive) the skb pointer might be invalid when
1659			 * getting here: tcp_collapse might have deleted it
1660			 * while aggregating skbs from the socket queue.
1661			 */
1662			skb = tcp_recv_skb(sk, seq - 1, &offset);
1663			if (!skb)
1664				break;
1665			/* TCP coalescing might have appended data to the skb.
1666			 * Try to splice more frags
1667			 */
1668			if (offset + 1 != skb->len)
1669				continue;
1670		}
1671		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
1672			sk_eat_skb(sk, skb);
1673			++seq;
1674			break;
1675		}
1676		sk_eat_skb(sk, skb);
1677		if (!desc->count)
1678			break;
1679		WRITE_ONCE(tp->copied_seq, seq);
1680	}
1681	WRITE_ONCE(tp->copied_seq, seq);
1682
1683	tcp_rcv_space_adjust(sk);
1684
1685	/* Clean up data we have read: This will do ACK frames. */
1686	if (copied > 0) {
1687		tcp_recv_skb(sk, seq, &offset);
1688		tcp_cleanup_rbuf(sk, copied);
1689	}
1690	return copied;
1691}
1692EXPORT_SYMBOL(tcp_read_sock);
1693
1694int tcp_peek_len(struct socket *sock)
1695{
1696	return tcp_inq(sock->sk);
1697}
1698EXPORT_SYMBOL(tcp_peek_len);
1699
1700/* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */
1701int tcp_set_rcvlowat(struct sock *sk, int val)
1702{
1703	int cap;
1704
1705	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
1706		cap = sk->sk_rcvbuf >> 1;
1707	else
1708		cap = sock_net(sk)->ipv4.sysctl_tcp_rmem[2] >> 1;
1709	val = min(val, cap);
1710	WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
1711
1712	/* Check if we need to signal EPOLLIN right now */
1713	tcp_data_ready(sk);
1714
1715	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
1716		return 0;
1717
1718	val <<= 1;
1719	if (val > sk->sk_rcvbuf) {
1720		WRITE_ONCE(sk->sk_rcvbuf, val);
1721		tcp_sk(sk)->window_clamp = tcp_win_from_space(sk, val);
1722	}
1723	return 0;
1724}
1725EXPORT_SYMBOL(tcp_set_rcvlowat);
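/*
 * Editor's note: the user-space side of the function above; after this,
 * poll()/select() will not report the socket readable until at least
 * lowat bytes are queued. A sketch, return value unchecked.
 */
#include <sys/socket.h>

static void set_low_watermark(int fd, int lowat)
{
	setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
}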
1726
1727#ifdef CONFIG_MMU
1728static const struct vm_operations_struct tcp_vm_ops = {
1729};
1730
1731int tcp_mmap(struct file *file, struct socket *sock,
1732	     struct vm_area_struct *vma)
1733{
1734	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
1735		return -EPERM;
1736	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
1737
1738	/* Instruct vm_insert_page() to not mmap_read_lock(mm) */
1739	vma->vm_flags |= VM_MIXEDMAP;
1740
1741	vma->vm_ops = &tcp_vm_ops;
1742	return 0;
1743}
1744EXPORT_SYMBOL(tcp_mmap);
1745
1746static int tcp_zerocopy_vm_insert_batch(struct vm_area_struct *vma,
1747					struct page **pages,
1748					unsigned long pages_to_map,
1749					unsigned long *insert_addr,
1750					u32 *length_with_pending,
1751					u32 *seq,
1752					struct tcp_zerocopy_receive *zc)
1753{
1754	unsigned long pages_remaining = pages_to_map;
1755	int bytes_mapped;
1756	int ret;
1757
1758	ret = vm_insert_pages(vma, *insert_addr, pages, &pages_remaining);
1759	bytes_mapped = PAGE_SIZE * (pages_to_map - pages_remaining);
1760	/* Even if vm_insert_pages fails, it may have partially succeeded in
1761	 * mapping (some but not all of the pages).
1762	 */
1763	*seq += bytes_mapped;
1764	*insert_addr += bytes_mapped;
1765	if (ret) {
1766		/* But if vm_insert_pages did fail, we have to unroll some state
1767		 * we speculatively touched before.
1768		 */
1769		const int bytes_not_mapped = PAGE_SIZE * pages_remaining;
1770		*length_with_pending -= bytes_not_mapped;
1771		zc->recv_skip_hint += bytes_not_mapped;
1772	}
1773	return ret;
1774}
1775
1776static int tcp_zerocopy_receive(struct sock *sk,
1777				struct tcp_zerocopy_receive *zc)
1778{
1779	unsigned long address = (unsigned long)zc->address;
1780	u32 length = 0, seq, offset, zap_len;
1781	#define PAGE_BATCH_SIZE 8
1782	struct page *pages[PAGE_BATCH_SIZE];
1783	const skb_frag_t *frags = NULL;
1784	struct vm_area_struct *vma;
1785	struct sk_buff *skb = NULL;
1786	unsigned long pg_idx = 0;
1787	unsigned long curr_addr;
1788	struct tcp_sock *tp;
1789	int inq;
1790	int ret;
1791
1792	if (address & (PAGE_SIZE - 1) || address != zc->address)
1793		return -EINVAL;
1794
1795	if (sk->sk_state == TCP_LISTEN)
1796		return -ENOTCONN;
1797
1798	sock_rps_record_flow(sk);
1799
1800	tp = tcp_sk(sk);
1801
1802	mmap_read_lock(current->mm);
1803
1804	vma = find_vma(current->mm, address);
1805	if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops) {
1806		mmap_read_unlock(current->mm);
1807		return -EINVAL;
1808	}
1809	zc->length = min_t(unsigned long, zc->length, vma->vm_end - address);
1810
1811	seq = tp->copied_seq;
1812	inq = tcp_inq(sk);
1813	zc->length = min_t(u32, zc->length, inq);
1814	zap_len = zc->length & ~(PAGE_SIZE - 1);
1815	if (zap_len) {
1816		zap_page_range(vma, address, zap_len);
1817		zc->recv_skip_hint = 0;
1818	} else {
1819		zc->recv_skip_hint = zc->length;
1820	}
1821	ret = 0;
1822	curr_addr = address;
1823	while (length + PAGE_SIZE <= zc->length) {
1824		if (zc->recv_skip_hint < PAGE_SIZE) {
1825			/* If we're here, finish the current batch. */
1826			if (pg_idx) {
1827				ret = tcp_zerocopy_vm_insert_batch(vma, pages,
1828								   pg_idx,
1829								   &curr_addr,
1830								   &length,
1831								   &seq, zc);
1832				if (ret)
1833					goto out;
1834				pg_idx = 0;
1835			}
1836			if (skb) {
1837				if (zc->recv_skip_hint > 0)
1838					break;
1839				skb = skb->next;
1840				offset = seq - TCP_SKB_CB(skb)->seq;
1841			} else {
1842				skb = tcp_recv_skb(sk, seq, &offset);
1843			}
1844			zc->recv_skip_hint = skb->len - offset;
1845			offset -= skb_headlen(skb);
1846			if ((int)offset < 0 || skb_has_frag_list(skb))
1847				break;
1848			frags = skb_shinfo(skb)->frags;
1849			while (offset) {
1850				if (skb_frag_size(frags) > offset)
1851					goto out;
1852				offset -= skb_frag_size(frags);
1853				frags++;
1854			}
1855		}
1856		if (skb_frag_size(frags) != PAGE_SIZE || skb_frag_off(frags)) {
1857			int remaining = zc->recv_skip_hint;
1858
1859			while (remaining && (skb_frag_size(frags) != PAGE_SIZE ||
1860					     skb_frag_off(frags))) {
1861				remaining -= skb_frag_size(frags);
1862				frags++;
1863			}
1864			zc->recv_skip_hint -= remaining;
1865			break;
1866		}
1867		pages[pg_idx] = skb_frag_page(frags);
1868		pg_idx++;
1869		length += PAGE_SIZE;
1870		zc->recv_skip_hint -= PAGE_SIZE;
1871		frags++;
1872		if (pg_idx == PAGE_BATCH_SIZE) {
1873			ret = tcp_zerocopy_vm_insert_batch(vma, pages, pg_idx,
1874							   &curr_addr, &length,
1875							   &seq, zc);
1876			if (ret)
1877				goto out;
1878			pg_idx = 0;
1879		}
1880	}
1881	if (pg_idx) {
1882		ret = tcp_zerocopy_vm_insert_batch(vma, pages, pg_idx,
1883						   &curr_addr, &length, &seq,
1884						   zc);
1885	}
1886out:
1887	mmap_read_unlock(current->mm);
1888	if (length) {
1889		WRITE_ONCE(tp->copied_seq, seq);
1890		tcp_rcv_space_adjust(sk);
1891
1892		/* Clean up data we have read: This will do ACK frames. */
1893		tcp_recv_skb(sk, seq, &offset);
1894		tcp_cleanup_rbuf(sk, length);
1895		ret = 0;
1896		if (length == zc->length)
1897			zc->recv_skip_hint = 0;
1898	} else {
1899		if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE))
1900			ret = -EIO;
1901	}
1902	zc->length = length;
1903	return ret;
1904}
1905#endif
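/*
 * Editor's note: a hedged user-space sketch of the receive zerocopy API
 * implemented above. The application mmap()s the socket (PROT_READ, see
 * tcp_mmap()), then getsockopt(TCP_ZEROCOPY_RECEIVE) maps received
 * pages into that region; any remainder flagged by recv_skip_hint must
 * be read() normally. The field set matches this kernel's struct; it
 * has grown in later versions.
 */
#include <linux/tcp.h>		/* struct tcp_zerocopy_receive */
#include <netinet/in.h>		/* IPPROTO_TCP */
#include <sys/socket.h>

static int zc_receive(int fd, void *map, unsigned int len)
{
	struct tcp_zerocopy_receive zc = {
		.address = (unsigned long)map,	/* page-aligned mmap of fd */
		.length	 = len,
	};
	socklen_t zc_len = sizeof(zc);

	return getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &zc_len);
}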
1906
1907static void tcp_update_recv_tstamps(struct sk_buff *skb,
1908				    struct scm_timestamping_internal *tss)
1909{
1910	if (skb->tstamp)
1911		tss->ts[0] = ktime_to_timespec64(skb->tstamp);
1912	else
1913		tss->ts[0] = (struct timespec64) {0};
1914
1915	if (skb_hwtstamps(skb)->hwtstamp)
1916		tss->ts[2] = ktime_to_timespec64(skb_hwtstamps(skb)->hwtstamp);
1917	else
1918		tss->ts[2] = (struct timespec64) {0};
1919}
1920
1921/* Similar to __sock_recv_timestamp, but does not require an skb */
1922static void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
1923			       struct scm_timestamping_internal *tss)
1924{
1925	int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
1926	bool has_timestamping = false;
1927
1928	if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) {
1929		if (sock_flag(sk, SOCK_RCVTSTAMP)) {
1930			if (sock_flag(sk, SOCK_RCVTSTAMPNS)) {
1931				if (new_tstamp) {
1932					struct __kernel_timespec kts = {
1933						.tv_sec = tss->ts[0].tv_sec,
1934						.tv_nsec = tss->ts[0].tv_nsec,
1935					};
1936					put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
1937						 sizeof(kts), &kts);
1938				} else {
1939					struct __kernel_old_timespec ts_old = {
1940						.tv_sec = tss->ts[0].tv_sec,
1941						.tv_nsec = tss->ts[0].tv_nsec,
1942					};
1943					put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD,
1944						 sizeof(ts_old), &ts_old);
1945				}
1946			} else {
1947				if (new_tstamp) {
1948					struct __kernel_sock_timeval stv = {
1949						.tv_sec = tss->ts[0].tv_sec,
1950						.tv_usec = tss->ts[0].tv_nsec / 1000,
1951					};
1952					put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW,
1953						 sizeof(stv), &stv);
1954				} else {
1955					struct __kernel_old_timeval tv = {
1956						.tv_sec = tss->ts[0].tv_sec,
1957						.tv_usec = tss->ts[0].tv_nsec / 1000,
1958					};
1959					put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD,
1960						 sizeof(tv), &tv);
1961				}
1962			}
1963		}
1964
1965		if (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE)
1966			has_timestamping = true;
1967		else
1968			tss->ts[0] = (struct timespec64) {0};
1969	}
1970
1971	if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) {
1972		if (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)
1973			has_timestamping = true;
1974		else
1975			tss->ts[2] = (struct timespec64) {0};
1976	}
1977
1978	if (has_timestamping) {
1979		tss->ts[1] = (struct timespec64) {0};
1980		if (sock_flag(sk, SOCK_TSTAMP_NEW))
1981			put_cmsg_scm_timestamping64(msg, tss);
1982		else
1983			put_cmsg_scm_timestamping(msg, tss);
1984	}
1985}
1986
1987static int tcp_inq_hint(struct sock *sk)
1988{
1989	const struct tcp_sock *tp = tcp_sk(sk);
1990	u32 copied_seq = READ_ONCE(tp->copied_seq);
1991	u32 rcv_nxt = READ_ONCE(tp->rcv_nxt);
1992	int inq;
1993
1994	inq = rcv_nxt - copied_seq;
1995	if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) {
1996		lock_sock(sk);
1997		inq = tp->rcv_nxt - tp->copied_seq;
1998		release_sock(sk);
1999	}
2000	/* After receiving a FIN, tell user-space to continue reading
2001	 * by returning a non-zero inq.
2002	 */
2003	if (inq == 0 && sock_flag(sk, SOCK_DONE))
2004		inq = 1;
2005	return inq;
2006}
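/*
 * Editor's note: tcp_inq_hint() feeds the TCP_INQ ancillary message; a
 * user-space sketch that assumes the option was enabled beforehand with
 * setsockopt(fd, IPPROTO_TCP, TCP_INQ, &one, sizeof(one)). The helper
 * name is illustrative.
 */
#include <linux/tcp.h>		/* TCP_INQ, TCP_CM_INQ */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t recv_with_inq(int fd, void *buf, size_t len, int *inq)
{
	char cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	ssize_t n = recvmsg(fd, &msg, 0);
	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

	if (n >= 0 && cm && cm->cmsg_level == IPPROTO_TCP &&
	    cm->cmsg_type == TCP_CM_INQ)
		memcpy(inq, CMSG_DATA(cm), sizeof(*inq));
	return n;	/* *inq holds the bytes still queued after this read */
}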
2007
2008/*
2009 *	This routine copies from a sock struct into the user buffer.
2010 *
2011 *	Technical note: in 2.3 we work on _locked_ socket, so that
2012 *	tricks with *seq access order and skb->users are not required.
2013 *	Probably, code can be easily improved even more.
2014 */
2015
2016int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
2017		int flags, int *addr_len)
2018{
2019	struct tcp_sock *tp = tcp_sk(sk);
2020	int copied = 0;
2021	u32 peek_seq;
2022	u32 *seq;
2023	unsigned long used;
2024	int err, inq;
2025	int target;		/* Read at least this many bytes */
2026	long timeo;
2027	struct sk_buff *skb, *last;
2028	u32 urg_hole = 0;
2029	struct scm_timestamping_internal tss;
2030	int cmsg_flags;
2031
2032	if (unlikely(flags & MSG_ERRQUEUE))
2033		return inet_recv_error(sk, msg, len, addr_len);
2034
2035	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue) &&
2036	    (sk->sk_state == TCP_ESTABLISHED))
2037		sk_busy_loop(sk, nonblock);
2038
2039	lock_sock(sk);
2040
2041	err = -ENOTCONN;
2042	if (sk->sk_state == TCP_LISTEN)
2043		goto out;
2044
2045	cmsg_flags = tp->recvmsg_inq ? 1 : 0;
2046	timeo = sock_rcvtimeo(sk, nonblock);
2047
2048	/* Urgent data needs to be handled specially. */
2049	if (flags & MSG_OOB)
2050		goto recv_urg;
2051
2052	if (unlikely(tp->repair)) {
2053		err = -EPERM;
2054		if (!(flags & MSG_PEEK))
2055			goto out;
2056
2057		if (tp->repair_queue == TCP_SEND_QUEUE)
2058			goto recv_sndq;
2059
2060		err = -EINVAL;
2061		if (tp->repair_queue == TCP_NO_QUEUE)
2062			goto out;
2063
2064		/* 'common' recv queue MSG_PEEK-ing */
2065	}
2066
2067	seq = &tp->copied_seq;
2068	if (flags & MSG_PEEK) {
2069		peek_seq = tp->copied_seq;
2070		seq = &peek_seq;
2071	}
2072
2073	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
2074
2075	do {
2076		u32 offset;
2077
2078		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
2079		if (tp->urg_data && tp->urg_seq == *seq) {
2080			if (copied)
2081				break;
2082			if (signal_pending(current)) {
2083				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
2084				break;
2085			}
2086		}
2087
2088		/* Next get a buffer. */
2089
2090		last = skb_peek_tail(&sk->sk_receive_queue);
2091		skb_queue_walk(&sk->sk_receive_queue, skb) {
2092			last = skb;
2093			/* Now that we have two receive queues this
2094			 * shouldn't happen.
2095			 */
2096			if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
2097				 "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n",
2098				 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
2099				 flags))
2100				break;
2101
2102			offset = *seq - TCP_SKB_CB(skb)->seq;
2103			if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
2104			pr_err_once("%s: found a SYN, please report!\n", __func__);
2105				offset--;
2106			}
2107			if (offset < skb->len)
2108				goto found_ok_skb;
2109			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
2110				goto found_fin_ok;
2111			WARN(!(flags & MSG_PEEK),
2112			     "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n",
2113			     *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
2114		}
2115
2116		/* Well, if we have backlog, try to process it now. */
2117
2118		if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
2119			break;
2120
2121		if (copied) {
2122			if (sk->sk_err ||
2123			    sk->sk_state == TCP_CLOSE ||
2124			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
2125			    !timeo ||
2126			    signal_pending(current))
2127				break;
2128		} else {
2129			if (sock_flag(sk, SOCK_DONE))
2130				break;
2131
2132			if (sk->sk_err) {
2133				copied = sock_error(sk);
2134				break;
2135			}
2136
2137			if (sk->sk_shutdown & RCV_SHUTDOWN)
2138				break;
2139
2140			if (sk->sk_state == TCP_CLOSE) {
2141				/* This occurs when user tries to read
2142			 * from a never-connected socket.
2143				 */
2144				copied = -ENOTCONN;
2145				break;
2146			}
2147
2148			if (!timeo) {
2149				copied = -EAGAIN;
2150				break;
2151			}
2152
2153			if (signal_pending(current)) {
2154				copied = sock_intr_errno(timeo);
2155				break;
2156			}
2157		}
2158
2159		tcp_cleanup_rbuf(sk, copied);
2160
2161		if (copied >= target) {
2162			/* Do not sleep, just process backlog. */
2163			release_sock(sk);
2164			lock_sock(sk);
2165		} else {
2166			sk_wait_data(sk, &timeo, last);
2167		}
2168
2169		if ((flags & MSG_PEEK) &&
2170		    (peek_seq - copied - urg_hole != tp->copied_seq)) {
2171			net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
2172					    current->comm,
2173					    task_pid_nr(current));
2174			peek_seq = tp->copied_seq;
2175		}
2176		continue;
2177
2178found_ok_skb:
2179		/* Ok so how much can we use? */
2180		used = skb->len - offset;
2181		if (len < used)
2182			used = len;
2183
2184		/* Do we have urgent data here? */
2185		if (tp->urg_data) {
2186			u32 urg_offset = tp->urg_seq - *seq;
2187			if (urg_offset < used) {
2188				if (!urg_offset) {
2189					if (!sock_flag(sk, SOCK_URGINLINE)) {
2190						WRITE_ONCE(*seq, *seq + 1);
2191						urg_hole++;
2192						offset++;
2193						used--;
2194						if (!used)
2195							goto skip_copy;
2196					}
2197				} else
2198					used = urg_offset;
2199			}
2200		}
2201
2202		if (!(flags & MSG_TRUNC)) {
2203			err = skb_copy_datagram_msg(skb, offset, msg, used);
2204			if (err) {
2205				/* Exception. Bailout! */
2206				if (!copied)
2207					copied = -EFAULT;
2208				break;
2209			}
2210		}
2211
2212		WRITE_ONCE(*seq, *seq + used);
2213		copied += used;
2214		len -= used;
2215
2216		tcp_rcv_space_adjust(sk);
2217
2218skip_copy:
2219		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
2220			tp->urg_data = 0;
2221			tcp_fast_path_check(sk);
2222		}
2223
2224		if (TCP_SKB_CB(skb)->has_rxtstamp) {
2225			tcp_update_recv_tstamps(skb, &tss);
2226			cmsg_flags |= 2;
2227		}
2228
2229		if (used + offset < skb->len)
2230			continue;
2231
2232		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
2233			goto found_fin_ok;
2234		if (!(flags & MSG_PEEK))
2235			sk_eat_skb(sk, skb);
2236		continue;
2237
2238found_fin_ok:
2239		/* Process the FIN. */
2240		WRITE_ONCE(*seq, *seq + 1);
2241		if (!(flags & MSG_PEEK))
2242			sk_eat_skb(sk, skb);
2243		break;
2244	} while (len > 0);
2245
2246	/* According to UNIX98, msg_name/msg_namelen are ignored
2247 * on a connected socket. I was just happy when I found this 8) --ANK
2248	 */
2249
2250	/* Clean up data we have read: This will do ACK frames. */
2251	tcp_cleanup_rbuf(sk, copied);
2252
2253	release_sock(sk);
2254
2255	if (cmsg_flags) {
2256		if (cmsg_flags & 2)
2257			tcp_recv_timestamp(msg, sk, &tss);
2258		if (cmsg_flags & 1) {
2259			inq = tcp_inq_hint(sk);
2260			put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq);
2261		}
2262	}
2263
2264	return copied;
2265
2266out:
2267	release_sock(sk);
2268	return err;
2269
2270recv_urg:
2271	err = tcp_recv_urg(sk, msg, len, flags);
2272	goto out;
2273
2274recv_sndq:
2275	err = tcp_peek_sndq(sk, msg, len);
2276	goto out;
2277}
2278EXPORT_SYMBOL(tcp_recvmsg);
2279
2280void tcp_set_state(struct sock *sk, int state)
2281{
2282	int oldstate = sk->sk_state;
2283
2284	/* We defined a new enum for TCP states that are exported in BPF
2285	 * so as not to force the internal TCP states to be frozen. The
2286	 * following checks will detect if an internal state value ever
2287	 * differs from the BPF value. If this ever happens, then we will
2288	 * need to remap the internal value to the BPF value before calling
2289	 * tcp_call_bpf_2arg.
2290	 */
2291	BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED);
2292	BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT);
2293	BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV);
2294	BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1);
2295	BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2);
2296	BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT);
2297	BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE);
2298	BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT);
2299	BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK);
2300	BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN);
2301	BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING);
2302	BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV);
2303	BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES);
2304
2305	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG))
2306		tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state);
2307
2308	switch (state) {
2309	case TCP_ESTABLISHED:
2310		if (oldstate != TCP_ESTABLISHED)
2311			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2312		break;
2313
2314	case TCP_CLOSE:
2315		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
2316			TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
2317
2318		sk->sk_prot->unhash(sk);
2319		if (inet_csk(sk)->icsk_bind_hash &&
2320		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
2321			inet_put_port(sk);
2322		fallthrough;
2323	default:
2324		if (oldstate == TCP_ESTABLISHED)
2325			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2326	}
2327
2328	/* Change state AFTER socket is unhashed to avoid closed
2329	 * socket sitting in hash tables.
2330	 */
2331	inet_sk_state_store(sk, state);
2332}
2333EXPORT_SYMBOL_GPL(tcp_set_state);
2334
2335/*
2336 *	State processing on a close. This implements the state shift for
2337 *	sending our FIN frame. Note that we only send a FIN for some
2338 *	states. A shutdown() may have already sent the FIN, or we may be
2339 *	closed.
2340 */
2341
2342static const unsigned char new_state[16] = {
2343  /* current state:        new state:      action:	*/
2344  [0 /* (Invalid) */]	= TCP_CLOSE,
2345  [TCP_ESTABLISHED]	= TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2346  [TCP_SYN_SENT]	= TCP_CLOSE,
2347  [TCP_SYN_RECV]	= TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2348  [TCP_FIN_WAIT1]	= TCP_FIN_WAIT1,
2349  [TCP_FIN_WAIT2]	= TCP_FIN_WAIT2,
2350  [TCP_TIME_WAIT]	= TCP_CLOSE,
2351  [TCP_CLOSE]		= TCP_CLOSE,
2352  [TCP_CLOSE_WAIT]	= TCP_LAST_ACK  | TCP_ACTION_FIN,
2353  [TCP_LAST_ACK]	= TCP_LAST_ACK,
2354  [TCP_LISTEN]		= TCP_CLOSE,
2355  [TCP_CLOSING]		= TCP_CLOSING,
2356  [TCP_NEW_SYN_RECV]	= TCP_CLOSE,	/* should not happen ! */
2357};
2358
2359static int tcp_close_state(struct sock *sk)
2360{
2361	int next = (int)new_state[sk->sk_state];
2362	int ns = next & TCP_STATE_MASK;
2363
2364	tcp_set_state(sk, ns);
2365
2366	return next & TCP_ACTION_FIN;
2367}
2368
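/* Editor's worked example of the table above: tcp_close_state() indexes it
 * by the current state and splits the entry into a new state plus the
 * TCP_ACTION_FIN bit. Closing in TCP_ESTABLISHED reads
 * TCP_FIN_WAIT1 | TCP_ACTION_FIN - move to FIN-WAIT-1 and emit a FIN -
 * whereas closing in TCP_SYN_SENT maps to TCP_CLOSE and no FIN is sent.
 */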
2369/*
2370 *	Shutdown the sending side of a connection. Much like close, except
2371 *	that we don't shut down the receive side or set SOCK_DEAD.
2372 */
2373
2374void tcp_shutdown(struct sock *sk, int how)
2375{
2376	/*	We need to grab some memory, and put together a FIN,
2377	 *	and then put it into the queue to be sent.
2378	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
2379	 */
2380	if (!(how & SEND_SHUTDOWN))
2381		return;
2382
2383	/* If we've already sent a FIN, or it's a closed state, skip this. */
2384	if ((1 << sk->sk_state) &
2385	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
2386	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
2387		/* Clear out any half completed packets.  FIN if needed. */
2388		if (tcp_close_state(sk))
2389			tcp_send_fin(sk);
2390	}
2391}
2392EXPORT_SYMBOL(tcp_shutdown);
2393
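/* Usage sketch (userspace, not part of this file) of the half-close that
 * lands in tcp_shutdown() above: shutdown(SHUT_WR) sets SEND_SHUTDOWN, so a
 * FIN is queued while the receive side stays open for whatever the peer
 * still has to send. Error handling omitted.
 */
#include <sys/socket.h>
#include <unistd.h>

static void half_close_and_drain(int fd)
{
	char buf[4096];

	shutdown(fd, SHUT_WR);			/* queue our FIN */
	while (read(fd, buf, sizeof(buf)) > 0)	/* drain until peer's FIN */
		;
	close(fd);
}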
2394bool tcp_check_oom(struct sock *sk, int shift)
2395{
2396	bool too_many_orphans, out_of_socket_memory;
2397
2398	too_many_orphans = tcp_too_many_orphans(sk, shift);
2399	out_of_socket_memory = tcp_out_of_memory(sk);
2400
2401	if (too_many_orphans)
2402		net_info_ratelimited("too many orphaned sockets\n");
2403	if (out_of_socket_memory)
2404		net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
2405	return too_many_orphans || out_of_socket_memory;
2406}
2407
2408void tcp_close(struct sock *sk, long timeout)
2409{
2410	struct sk_buff *skb;
2411	int data_was_unread = 0;
2412	int state;
2413
2414	lock_sock(sk);
2415	sk->sk_shutdown = SHUTDOWN_MASK;
2416
2417	if (sk->sk_state == TCP_LISTEN) {
2418		tcp_set_state(sk, TCP_CLOSE);
2419
2420		/* Special case. */
2421		inet_csk_listen_stop(sk);
2422
2423		goto adjudge_to_death;
2424	}
2425
2426	/*  We need to flush the recv. buffs.  We do this only on the
2427	 *  descriptor close, not protocol-sourced closes, because the
2428	 *  reader process may not have drained the data yet!
2429	 */
2430	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
2431		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
2432
2433		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
2434			len--;
2435		data_was_unread += len;
2436		__kfree_skb(skb);
2437	}
2438
2439	sk_mem_reclaim(sk);
2440
2441	/* If the socket has already been reset (e.g. in tcp_reset()) - kill it. */
2442	if (sk->sk_state == TCP_CLOSE)
2443		goto adjudge_to_death;
2444
2445	/* As outlined in RFC 2525, section 2.17, we send a RST here because
2446	 * data was lost. To witness the awful effects of the old behavior of
2447	 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
2448	 * GET in an FTP client, suspend the process, wait for the client to
2449	 * advertise a zero window, then kill -9 the FTP client, wheee...
2450	 * Note: timeout is always zero in such a case.
2451	 */
2452	if (unlikely(tcp_sk(sk)->repair)) {
2453		sk->sk_prot->disconnect(sk, 0);
2454	} else if (data_was_unread) {
2455		/* Unread data was tossed, zap the connection. */
2456		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
2457		tcp_set_state(sk, TCP_CLOSE);
2458		tcp_send_active_reset(sk, sk->sk_allocation);
2459	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
2460		/* Check zero linger _after_ checking for unread data. */
2461		sk->sk_prot->disconnect(sk, 0);
2462		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
2463	} else if (tcp_close_state(sk)) {
2464		/* We FIN if the application ate all the data before
2465		 * zapping the connection.
2466		 */
2467
2468		/* RED-PEN. Formally speaking, we have broken TCP state
2469		 * machine. State transitions:
2470		 *
2471		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
2472		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
2473		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
2474		 *
2475		 * are legal only when FIN has been sent (i.e. in window),
2476		 * rather than queued out of window. Purists blame.
2477		 *
2478		 * F.e. "RFC state" is ESTABLISHED,
2479		 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
2480		 *
2481		 * The visible deviations are that sometimes
2482		 * we enter the time-wait state when it is not really required
2483		 * (harmless), and do not send active resets when they are
2484		 * required by the specs (TCP_ESTABLISHED and TCP_CLOSE_WAIT, when
2485		 * they look like CLOSING or LAST_ACK to Linux).
2486		 * I have probably missed some more small holes.
2487		 * 						--ANK
2488		 * XXX (TFO) - To start off we don't support SYN+ACK+FIN
2489		 * in a single packet! (May consider it later but will
2490		 * probably need API support or TCP_CORK SYN-ACK until
2491		 * data is written and socket is closed.)
2492		 */
2493		tcp_send_fin(sk);
2494	}
2495
2496	sk_stream_wait_close(sk, timeout);
2497
2498adjudge_to_death:
2499	state = sk->sk_state;
2500	sock_hold(sk);
2501	sock_orphan(sk);
2502
2503	local_bh_disable();
2504	bh_lock_sock(sk);
2505	/* remove backlog if any, without releasing ownership. */
2506	__release_sock(sk);
2507
2508	percpu_counter_inc(sk->sk_prot->orphan_count);
2509
2510	/* Have we already been destroyed by a softirq or backlog? */
2511	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
2512		goto out;
2513
2514	/*	This is a (useful) BSD violation of the RFC. There is a
2515	 *	problem with TCP as specified in that the other end could
2516	 *	keep a socket open forever with no application left at this end.
2517	 *	We use a 1 minute timeout (about the same as BSD) then kill
2518	 *	our end. If they send after that then tough - BUT: the timeout
2519	 *	is long enough that we won't repeat the old "4*rto = almost no
2520	 *	time - whoops reset" mistake.
2521	 *
2522	 *	Nope, it was not a mistake. It is really desired behaviour,
2523	 *	e.g. on http servers, where such sockets are useless but
2524	 *	consume significant resources. Let's do it with the special
2525	 *	linger2 option.					--ANK
2526	 */
2527
2528	if (sk->sk_state == TCP_FIN_WAIT2) {
2529		struct tcp_sock *tp = tcp_sk(sk);
2530		if (tp->linger2 < 0) {
2531			tcp_set_state(sk, TCP_CLOSE);
2532			tcp_send_active_reset(sk, GFP_ATOMIC);
2533			__NET_INC_STATS(sock_net(sk),
2534					LINUX_MIB_TCPABORTONLINGER);
2535		} else {
2536			const int tmo = tcp_fin_time(sk);
2537
2538			if (tmo > TCP_TIMEWAIT_LEN) {
2539				inet_csk_reset_keepalive_timer(sk,
2540						tmo - TCP_TIMEWAIT_LEN);
2541			} else {
2542				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
2543				goto out;
2544			}
2545		}
2546	}
2547	if (sk->sk_state != TCP_CLOSE) {
2548		sk_mem_reclaim(sk);
2549		if (tcp_check_oom(sk, 0)) {
2550			tcp_set_state(sk, TCP_CLOSE);
2551			tcp_send_active_reset(sk, GFP_ATOMIC);
2552			__NET_INC_STATS(sock_net(sk),
2553					LINUX_MIB_TCPABORTONMEMORY);
2554		} else if (!check_net(sock_net(sk))) {
2555			/* Not possible to send reset; just close */
2556			tcp_set_state(sk, TCP_CLOSE);
2557		}
2558	}
2559
2560	if (sk->sk_state == TCP_CLOSE) {
2561		struct request_sock *req;
2562
2563		req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
2564						lockdep_sock_is_held(sk));
2565		/* We could get here with a non-NULL req if the socket is
2566		 * aborted (e.g., closed with unread data) before 3WHS
2567		 * finishes.
2568		 */
2569		if (req)
2570			reqsk_fastopen_remove(sk, req, false);
2571		inet_csk_destroy_sock(sk);
2572	}
2573	/* Otherwise, socket is reprieved until protocol close. */
2574
2575out:
2576	bh_unlock_sock(sk);
2577	local_bh_enable();
2578	release_sock(sk);
2579	sock_put(sk);
2580}
2581EXPORT_SYMBOL(tcp_close);
2582
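/* Usage sketch (userspace, not part of this file): the zero-linger branch
 * in tcp_close() above can be exercised with SO_LINGER, making close() send
 * a RST instead of a FIN and skipping TIME-WAIT. Hedged sketch, error
 * handling omitted.
 */
#include <sys/socket.h>
#include <unistd.h>

static void abortive_close(int fd)
{
	struct linger lg = { .l_onoff = 1, .l_linger = 0 };

	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
	close(fd);	/* RST path, no FIN */
}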
2583/* These states need RST on ABORT according to RFC793 */
2584
2585static inline bool tcp_need_reset(int state)
2586{
2587	return (1 << state) &
2588	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
2589		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
2590}
2591
2592static void tcp_rtx_queue_purge(struct sock *sk)
2593{
2594	struct rb_node *p = rb_first(&sk->tcp_rtx_queue);
2595
2596	tcp_sk(sk)->highest_sack = NULL;
2597	while (p) {
2598		struct sk_buff *skb = rb_to_skb(p);
2599
2600		p = rb_next(p);
2601		/* Since we are deleting the whole queue, there is no need
2602		 * to list_del(&skb->tcp_tsorted_anchor)
2603		 */
2604		tcp_rtx_queue_unlink(skb, sk);
2605		sk_wmem_free_skb(sk, skb);
2606	}
2607}
2608
2609void tcp_write_queue_purge(struct sock *sk)
2610{
2611	struct sk_buff *skb;
2612
2613	tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
2614	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
2615		tcp_skb_tsorted_anchor_cleanup(skb);
2616		sk_wmem_free_skb(sk, skb);
2617	}
2618	tcp_rtx_queue_purge(sk);
2619	skb = sk->sk_tx_skb_cache;
2620	if (skb) {
2621		__kfree_skb(skb);
2622		sk->sk_tx_skb_cache = NULL;
2623	}
2624	INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
2625	sk_mem_reclaim(sk);
2626	tcp_clear_all_retrans_hints(tcp_sk(sk));
2627	tcp_sk(sk)->packets_out = 0;
2628	inet_csk(sk)->icsk_backoff = 0;
2629}
2630
2631int tcp_disconnect(struct sock *sk, int flags)
2632{
2633	struct inet_sock *inet = inet_sk(sk);
2634	struct inet_connection_sock *icsk = inet_csk(sk);
2635	struct tcp_sock *tp = tcp_sk(sk);
2636	int old_state = sk->sk_state;
2637	u32 seq;
2638
2639	if (old_state != TCP_CLOSE)
2640		tcp_set_state(sk, TCP_CLOSE);
2641
2642	/* ABORT function of RFC793 */
2643	if (old_state == TCP_LISTEN) {
2644		inet_csk_listen_stop(sk);
2645	} else if (unlikely(tp->repair)) {
2646		sk->sk_err = ECONNABORTED;
2647	} else if (tcp_need_reset(old_state) ||
2648		   (tp->snd_nxt != tp->write_seq &&
2649		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
2650		/* The last check adjusts for the discrepancy between Linux
2651		 * and the RFC states
2652		 */
2653		tcp_send_active_reset(sk, gfp_any());
2654		sk->sk_err = ECONNRESET;
2655	} else if (old_state == TCP_SYN_SENT)
2656		sk->sk_err = ECONNRESET;
2657
2658	tcp_clear_xmit_timers(sk);
2659	__skb_queue_purge(&sk->sk_receive_queue);
2660	if (sk->sk_rx_skb_cache) {
2661		__kfree_skb(sk->sk_rx_skb_cache);
2662		sk->sk_rx_skb_cache = NULL;
2663	}
2664	WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
2665	tp->urg_data = 0;
2666	tcp_write_queue_purge(sk);
2667	tcp_fastopen_active_disable_ofo_check(sk);
2668	skb_rbtree_purge(&tp->out_of_order_queue);
2669
2670	inet->inet_dport = 0;
2671
2672	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
2673		inet_reset_saddr(sk);
2674
2675	sk->sk_shutdown = 0;
2676	sock_reset_flag(sk, SOCK_DONE);
2677	tp->srtt_us = 0;
2678	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
2679	tp->rcv_rtt_last_tsecr = 0;
2680
2681	seq = tp->write_seq + tp->max_window + 2;
2682	if (!seq)
2683		seq = 1;
2684	WRITE_ONCE(tp->write_seq, seq);
2685
2686	icsk->icsk_backoff = 0;
2687	icsk->icsk_probes_out = 0;
2688	icsk->icsk_rto = TCP_TIMEOUT_INIT;
2689	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
2690	tp->snd_cwnd = TCP_INIT_CWND;
2691	tp->snd_cwnd_cnt = 0;
2692	tp->window_clamp = 0;
2693	tp->delivered = 0;
2694	tp->delivered_ce = 0;
2695	if (icsk->icsk_ca_ops->release)
2696		icsk->icsk_ca_ops->release(sk);
2697	memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
2698	tcp_set_ca_state(sk, TCP_CA_Open);
2699	tp->is_sack_reneg = 0;
2700	tcp_clear_retrans(tp);
2701	tp->total_retrans = 0;
2702	inet_csk_delack_init(sk);
2703	/* Initialize rcv_mss to TCP_MIN_MSS to avoid a division-by-0
2704	 * issue in __tcp_select_window()
2705	 */
2706	icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
2707	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2708	__sk_dst_reset(sk);
2709	dst_release(sk->sk_rx_dst);
2710	sk->sk_rx_dst = NULL;
2711	tcp_saved_syn_free(tp);
2712	tp->compressed_ack = 0;
2713	tp->segs_in = 0;
2714	tp->segs_out = 0;
2715	tp->bytes_sent = 0;
2716	tp->bytes_acked = 0;
2717	tp->bytes_received = 0;
2718	tp->bytes_retrans = 0;
2719	tp->data_segs_in = 0;
2720	tp->data_segs_out = 0;
2721	tp->duplicate_sack[0].start_seq = 0;
2722	tp->duplicate_sack[0].end_seq = 0;
2723	tp->dsack_dups = 0;
2724	tp->reord_seen = 0;
2725	tp->retrans_out = 0;
2726	tp->sacked_out = 0;
2727	tp->tlp_high_seq = 0;
2728	tp->last_oow_ack_time = 0;
2729	/* There's a bubble in the pipe until at least the first ACK. */
2730	tp->app_limited = ~0U;
2731	tp->rack.mstamp = 0;
2732	tp->rack.advanced = 0;
2733	tp->rack.reo_wnd_steps = 1;
2734	tp->rack.last_delivered = 0;
2735	tp->rack.reo_wnd_persist = 0;
2736	tp->rack.dsack_seen = 0;
2737	tp->syn_data_acked = 0;
2738	tp->rx_opt.saw_tstamp = 0;
2739	tp->rx_opt.dsack = 0;
2740	tp->rx_opt.num_sacks = 0;
2741	tp->rcv_ooopack = 0;
2742
2743
2744	/* Clean up fastopen related fields */
2745	tcp_free_fastopen_req(tp);
2746	inet->defer_connect = 0;
2747	tp->fastopen_client_fail = 0;
2748
2749	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
2750
2751	if (sk->sk_frag.page) {
2752		put_page(sk->sk_frag.page);
2753		sk->sk_frag.page = NULL;
2754		sk->sk_frag.offset = 0;
2755	}
2756
2757	sk->sk_error_report(sk);
2758	return 0;
2759}
2760EXPORT_SYMBOL(tcp_disconnect);
2761
2762static inline bool tcp_can_repair_sock(const struct sock *sk)
2763{
2764	return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
2765		(sk->sk_state != TCP_LISTEN);
2766}
2767
2768static int tcp_repair_set_window(struct tcp_sock *tp, sockptr_t optbuf, int len)
2769{
2770	struct tcp_repair_window opt;
2771
2772	if (!tp->repair)
2773		return -EPERM;
2774
2775	if (len != sizeof(opt))
2776		return -EINVAL;
2777
2778	if (copy_from_sockptr(&opt, optbuf, sizeof(opt)))
2779		return -EFAULT;
2780
2781	if (opt.max_window < opt.snd_wnd)
2782		return -EINVAL;
2783
2784	if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd))
2785		return -EINVAL;
2786
2787	if (after(opt.rcv_wup, tp->rcv_nxt))
2788		return -EINVAL;
2789
2790	tp->snd_wl1	= opt.snd_wl1;
2791	tp->snd_wnd	= opt.snd_wnd;
2792	tp->max_window	= opt.max_window;
2793
2794	tp->rcv_wnd	= opt.rcv_wnd;
2795	tp->rcv_wup	= opt.rcv_wup;
2796
2797	return 0;
2798}
2799
2800static int tcp_repair_options_est(struct sock *sk, sockptr_t optbuf,
2801		unsigned int len)
2802{
2803	struct tcp_sock *tp = tcp_sk(sk);
2804	struct tcp_repair_opt opt;
2805	size_t offset = 0;
2806
2807	while (len >= sizeof(opt)) {
2808		if (copy_from_sockptr_offset(&opt, optbuf, offset, sizeof(opt)))
2809			return -EFAULT;
2810
2811		offset += sizeof(opt);
2812		len -= sizeof(opt);
2813
2814		switch (opt.opt_code) {
2815		case TCPOPT_MSS:
2816			tp->rx_opt.mss_clamp = opt.opt_val;
2817			tcp_mtup_init(sk);
2818			break;
2819		case TCPOPT_WINDOW:
2820			{
2821				u16 snd_wscale = opt.opt_val & 0xFFFF;
2822				u16 rcv_wscale = opt.opt_val >> 16;
2823
2824				if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE)
2825					return -EFBIG;
2826
2827				tp->rx_opt.snd_wscale = snd_wscale;
2828				tp->rx_opt.rcv_wscale = rcv_wscale;
2829				tp->rx_opt.wscale_ok = 1;
2830			}
2831			break;
2832		case TCPOPT_SACK_PERM:
2833			if (opt.opt_val != 0)
2834				return -EINVAL;
2835
2836			tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
2837			break;
2838		case TCPOPT_TIMESTAMP:
2839			if (opt.opt_val != 0)
2840				return -EINVAL;
2841
2842			tp->rx_opt.tstamp_ok = 1;
2843			break;
2844		}
2845	}
2846
2847	return 0;
2848}
2849
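/* Usage sketch (userspace, not part of this file) of the repair-mode
 * interface implemented above, as used by checkpoint/restore tools such as
 * CRIU. Requires CAP_NET_ADMIN; error handling is omitted, and the option
 * code 2 (MSS) mirrors the kernel-internal TCPOPT_MSS constant, which is
 * not exported in the uapi headers.
 */
#include <netinet/in.h>
#include <linux/tcp.h>		/* TCP_REPAIR*, struct tcp_repair_opt */
#include <sys/socket.h>

static void repair_restore_mss(int fd, __u32 mss)
{
	int on = TCP_REPAIR_ON, off = TCP_REPAIR_OFF;
	struct tcp_repair_opt opt = {
		.opt_code = 2,	/* TCPOPT_MSS (kernel-internal value) */
		.opt_val  = mss,
	};

	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on));
	/* tcp_repair_options_est() above walks an array of these */
	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_OPTIONS, &opt, sizeof(opt));
	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &off, sizeof(off));
}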
2850DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
2851EXPORT_SYMBOL(tcp_tx_delay_enabled);
2852
2853static void tcp_enable_tx_delay(void)
2854{
2855	if (!static_branch_unlikely(&tcp_tx_delay_enabled)) {
2856		static int __tcp_tx_delay_enabled = 0;
2857
2858		if (cmpxchg(&__tcp_tx_delay_enabled, 0, 1) == 0) {
2859			static_branch_enable(&tcp_tx_delay_enabled);
2860			pr_info("TCP_TX_DELAY enabled\n");
2861		}
2862	}
2863}
2864
2865 /* When set, this option tells TCP to always queue non-full frames.  Later the user
2866 * clears the option and we transmit any pending partial frames in the queue.  This is
2867 * meant to be used alongside sendfile() to get properly filled frames when the
2868 * user (for example) must write out headers with a write() call first and then
2869 * use sendfile to send out the data parts.
2870 *
2871 * TCP_CORK can be set together with TCP_NODELAY and it is stronger than
2872 * TCP_NODELAY.
2873 */
2874static void __tcp_sock_set_cork(struct sock *sk, bool on)
2875{
2876	struct tcp_sock *tp = tcp_sk(sk);
2877
2878	if (on) {
2879		tp->nonagle |= TCP_NAGLE_CORK;
2880	} else {
2881		tp->nonagle &= ~TCP_NAGLE_CORK;
2882		if (tp->nonagle & TCP_NAGLE_OFF)
2883			tp->nonagle |= TCP_NAGLE_PUSH;
2884		tcp_push_pending_frames(sk);
2885	}
2886}
2887
2888void tcp_sock_set_cork(struct sock *sk, bool on)
2889{
2890	lock_sock(sk);
2891	__tcp_sock_set_cork(sk, on);
2892	release_sock(sk);
2893}
2894EXPORT_SYMBOL(tcp_sock_set_cork);
2895
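/* Usage sketch (userspace, not part of this file) of the header-plus-
 * sendfile() pattern described in the comment above. Error handling and
 * short-write loops are omitted for brevity.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <unistd.h>

static void send_corked(int sock, int filefd, const char *hdr,
			size_t hdrlen, size_t filelen)
{
	int on = 1, off = 0;

	setsockopt(sock, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
	write(sock, hdr, hdrlen);		/* queued, not pushed yet */
	sendfile(sock, filefd, NULL, filelen);	/* fills the same frames */
	setsockopt(sock, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
						/* uncork: flush partial frame */
}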
2896 /* TCP_NODELAY is weaker than TCP_CORK, so setting this option on a corked
2897 * socket is remembered, but it is not activated until the cork is cleared.
2898 *
2899 * However, when TCP_NODELAY is set we make an explicit push, which overrides
2900 * even TCP_CORK for currently queued segments.
2901 */
2902static void __tcp_sock_set_nodelay(struct sock *sk, bool on)
2903{
2904	if (on) {
2905		tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
2906		tcp_push_pending_frames(sk);
2907	} else {
2908		tcp_sk(sk)->nonagle &= ~TCP_NAGLE_OFF;
2909	}
2910}
2911
2912void tcp_sock_set_nodelay(struct sock *sk)
2913{
2914	lock_sock(sk);
2915	__tcp_sock_set_nodelay(sk, true);
2916	release_sock(sk);
2917}
2918EXPORT_SYMBOL(tcp_sock_set_nodelay);
2919
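/* Usage sketch (userspace, not part of this file): the equivalent of
 * tcp_sock_set_nodelay() above. Per the comment, this also pushes out
 * segments already queued, even on a corked socket.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int disable_nagle(int fd)
{
	int one = 1;

	return setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
}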
2920static void __tcp_sock_set_quickack(struct sock *sk, int val)
2921{
2922	if (!val) {
2923		inet_csk_enter_pingpong_mode(sk);
2924		return;
2925	}
2926
2927	inet_csk_exit_pingpong_mode(sk);
2928	if ((1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
2929	    inet_csk_ack_scheduled(sk)) {
2930		inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_PUSHED;
2931		tcp_cleanup_rbuf(sk, 1);
2932		if (!(val & 1))
2933			inet_csk_enter_pingpong_mode(sk);
2934	}
2935}
2936
2937void tcp_sock_set_quickack(struct sock *sk, int val)
2938{
2939	lock_sock(sk);
2940	__tcp_sock_set_quickack(sk, val);
2941	release_sock(sk);
2942}
2943EXPORT_SYMBOL(tcp_sock_set_quickack);
2944
2945int tcp_sock_set_syncnt(struct sock *sk, int val)
2946{
2947	if (val < 1 || val > MAX_TCP_SYNCNT)
2948		return -EINVAL;
2949
2950	lock_sock(sk);
2951	inet_csk(sk)->icsk_syn_retries = val;
2952	release_sock(sk);
2953	return 0;
2954}
2955EXPORT_SYMBOL(tcp_sock_set_syncnt);
2956
2957void tcp_sock_set_user_timeout(struct sock *sk, u32 val)
2958{
2959	lock_sock(sk);
2960	inet_csk(sk)->icsk_user_timeout = val;
2961	release_sock(sk);
2962}
2963EXPORT_SYMBOL(tcp_sock_set_user_timeout);
2964
2965int tcp_sock_set_keepidle_locked(struct sock *sk, int val)
2966{
2967	struct tcp_sock *tp = tcp_sk(sk);
2968
2969	if (val < 1 || val > MAX_TCP_KEEPIDLE)
2970		return -EINVAL;
2971
2972	tp->keepalive_time = val * HZ;
2973	if (sock_flag(sk, SOCK_KEEPOPEN) &&
2974	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
2975		u32 elapsed = keepalive_time_elapsed(tp);
2976
2977		if (tp->keepalive_time > elapsed)
2978			elapsed = tp->keepalive_time - elapsed;
2979		else
2980			elapsed = 0;
2981		inet_csk_reset_keepalive_timer(sk, elapsed);
2982	}
2983
2984	return 0;
2985}
2986
2987int tcp_sock_set_keepidle(struct sock *sk, int val)
2988{
2989	int err;
2990
2991	lock_sock(sk);
2992	err = tcp_sock_set_keepidle_locked(sk, val);
2993	release_sock(sk);
2994	return err;
2995}
2996EXPORT_SYMBOL(tcp_sock_set_keepidle);
2997
2998int tcp_sock_set_keepintvl(struct sock *sk, int val)
2999{
3000	if (val < 1 || val > MAX_TCP_KEEPINTVL)
3001		return -EINVAL;
3002
3003	lock_sock(sk);
3004	tcp_sk(sk)->keepalive_intvl = val * HZ;
3005	release_sock(sk);
3006	return 0;
3007}
3008EXPORT_SYMBOL(tcp_sock_set_keepintvl);
3009
3010int tcp_sock_set_keepcnt(struct sock *sk, int val)
3011{
3012	if (val < 1 || val > MAX_TCP_KEEPCNT)
3013		return -EINVAL;
3014
3015	lock_sock(sk);
3016	tcp_sk(sk)->keepalive_probes = val;
3017	release_sock(sk);
3018	return 0;
3019}
3020EXPORT_SYMBOL(tcp_sock_set_keepcnt);
3021
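/* Usage sketch (userspace, not part of this file): the counterparts of the
 * keepalive helpers above. Values are illustrative; error handling omitted.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void tune_keepalive(int fd)
{
	int on = 1, idle = 60, intvl = 10, cnt = 5;

	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));	/* idle secs before probing */
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)); /* secs between probes */
	setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));	/* failed probes before drop */
}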
3022/*
3023 *	Socket option code for TCP.
3024 */
3025static int do_tcp_setsockopt(struct sock *sk, int level, int optname,
3026		sockptr_t optval, unsigned int optlen)
3027{
3028	struct tcp_sock *tp = tcp_sk(sk);
3029	struct inet_connection_sock *icsk = inet_csk(sk);
3030	struct net *net = sock_net(sk);
3031	int val;
3032	int err = 0;
3033
3034	/* These are data/string values, all the others are ints */
3035	switch (optname) {
3036	case TCP_CONGESTION: {
3037		char name[TCP_CA_NAME_MAX];
3038
3039		if (optlen < 1)
3040			return -EINVAL;
3041
3042		val = strncpy_from_sockptr(name, optval,
3043					min_t(long, TCP_CA_NAME_MAX-1, optlen));
3044		if (val < 0)
3045			return -EFAULT;
3046		name[val] = 0;
3047
3048		lock_sock(sk);
3049		err = tcp_set_congestion_control(sk, name, true, true,
3050						 ns_capable(sock_net(sk)->user_ns,
3051							    CAP_NET_ADMIN));
3052		release_sock(sk);
3053		return err;
3054	}
3055	case TCP_ULP: {
3056		char name[TCP_ULP_NAME_MAX];
3057
3058		if (optlen < 1)
3059			return -EINVAL;
3060
3061		val = strncpy_from_sockptr(name, optval,
3062					min_t(long, TCP_ULP_NAME_MAX - 1,
3063					      optlen));
3064		if (val < 0)
3065			return -EFAULT;
3066		name[val] = 0;
3067
3068		lock_sock(sk);
3069		err = tcp_set_ulp(sk, name);
3070		release_sock(sk);
3071		return err;
3072	}
3073	case TCP_FASTOPEN_KEY: {
3074		__u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH];
3075		__u8 *backup_key = NULL;
3076
3077		/* Allow a backup key as well to facilitate key rotation.
3078		 * The first key is the active one.
3079		 */
3080		if (optlen != TCP_FASTOPEN_KEY_LENGTH &&
3081		    optlen != TCP_FASTOPEN_KEY_BUF_LENGTH)
3082			return -EINVAL;
3083
3084		if (copy_from_sockptr(key, optval, optlen))
3085			return -EFAULT;
3086
3087		if (optlen == TCP_FASTOPEN_KEY_BUF_LENGTH)
3088			backup_key = key + TCP_FASTOPEN_KEY_LENGTH;
3089
3090		return tcp_fastopen_reset_cipher(net, sk, key, backup_key);
3091	}
3092	default:
3093		/* fallthru */
3094		break;
3095	}
3096
3097	if (optlen < sizeof(int))
3098		return -EINVAL;
3099
3100	if (copy_from_sockptr(&val, optval, sizeof(val)))
3101		return -EFAULT;
3102
3103	lock_sock(sk);
3104
3105	switch (optname) {
3106	case TCP_MAXSEG:
3107		/* Values greater than the interface MTU won't take effect. However,
3108		 * at the point when this call is made we typically don't yet
3109		 * know which interface is going to be used.
3110		 */
3111		if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) {
3112			err = -EINVAL;
3113			break;
3114		}
3115		tp->rx_opt.user_mss = val;
3116		break;
3117
3118	case TCP_NODELAY:
3119		__tcp_sock_set_nodelay(sk, val);
3120		break;
3121
3122	case TCP_THIN_LINEAR_TIMEOUTS:
3123		if (val < 0 || val > 1)
3124			err = -EINVAL;
3125		else
3126			tp->thin_lto = val;
3127		break;
3128
3129	case TCP_THIN_DUPACK:
3130		if (val < 0 || val > 1)
3131			err = -EINVAL;
3132		break;
3133
3134	case TCP_REPAIR:
3135		if (!tcp_can_repair_sock(sk))
3136			err = -EPERM;
3137		else if (val == TCP_REPAIR_ON) {
3138			tp->repair = 1;
3139			sk->sk_reuse = SK_FORCE_REUSE;
3140			tp->repair_queue = TCP_NO_QUEUE;
3141		} else if (val == TCP_REPAIR_OFF) {
3142			tp->repair = 0;
3143			sk->sk_reuse = SK_NO_REUSE;
3144			tcp_send_window_probe(sk);
3145		} else if (val == TCP_REPAIR_OFF_NO_WP) {
3146			tp->repair = 0;
3147			sk->sk_reuse = SK_NO_REUSE;
3148		} else
3149			err = -EINVAL;
3150
3151		break;
3152
3153	case TCP_REPAIR_QUEUE:
3154		if (!tp->repair)
3155			err = -EPERM;
3156		else if ((unsigned int)val < TCP_QUEUES_NR)
3157			tp->repair_queue = val;
3158		else
3159			err = -EINVAL;
3160		break;
3161
3162	case TCP_QUEUE_SEQ:
3163		if (sk->sk_state != TCP_CLOSE)
3164			err = -EPERM;
3165		else if (tp->repair_queue == TCP_SEND_QUEUE)
3166			WRITE_ONCE(tp->write_seq, val);
3167		else if (tp->repair_queue == TCP_RECV_QUEUE) {
3168			WRITE_ONCE(tp->rcv_nxt, val);
3169			WRITE_ONCE(tp->copied_seq, val);
3170		}
3171		else
3172			err = -EINVAL;
3173		break;
3174
3175	case TCP_REPAIR_OPTIONS:
3176		if (!tp->repair)
3177			err = -EINVAL;
3178		else if (sk->sk_state == TCP_ESTABLISHED)
3179			err = tcp_repair_options_est(sk, optval, optlen);
3180		else
3181			err = -EPERM;
3182		break;
3183
3184	case TCP_CORK:
3185		__tcp_sock_set_cork(sk, val);
3186		break;
3187
3188	case TCP_KEEPIDLE:
3189		err = tcp_sock_set_keepidle_locked(sk, val);
3190		break;
3191	case TCP_KEEPINTVL:
3192		if (val < 1 || val > MAX_TCP_KEEPINTVL)
3193			err = -EINVAL;
3194		else
3195			tp->keepalive_intvl = val * HZ;
3196		break;
3197	case TCP_KEEPCNT:
3198		if (val < 1 || val > MAX_TCP_KEEPCNT)
3199			err = -EINVAL;
3200		else
3201			tp->keepalive_probes = val;
3202		break;
3203	case TCP_SYNCNT:
3204		if (val < 1 || val > MAX_TCP_SYNCNT)
3205			err = -EINVAL;
3206		else
3207			icsk->icsk_syn_retries = val;
3208		break;
3209
3210	case TCP_SAVE_SYN:
3211		if (val < 0 || val > 1)
3212			err = -EINVAL;
3213		else
3214			tp->save_syn = val;
3215		break;
3216
3217	case TCP_LINGER2:
3218		if (val < 0)
3219			tp->linger2 = -1;
3220		else if (val > TCP_FIN_TIMEOUT_MAX / HZ)
3221			tp->linger2 = TCP_FIN_TIMEOUT_MAX;
3222		else
3223			tp->linger2 = val * HZ;
3224		break;
3225
3226	case TCP_DEFER_ACCEPT:
3227		/* Translate value in seconds to number of retransmits */
3228		icsk->icsk_accept_queue.rskq_defer_accept =
3229			secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
3230					TCP_RTO_MAX / HZ);
3231		break;
3232
3233	case TCP_WINDOW_CLAMP:
3234		if (!val) {
3235			if (sk->sk_state != TCP_CLOSE) {
3236				err = -EINVAL;
3237				break;
3238			}
3239			tp->window_clamp = 0;
3240		} else
3241			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
3242						SOCK_MIN_RCVBUF / 2 : val;
3243		break;
3244
3245	case TCP_QUICKACK:
3246		__tcp_sock_set_quickack(sk, val);
3247		break;
3248
3249#ifdef CONFIG_TCP_MD5SIG
3250	case TCP_MD5SIG:
3251	case TCP_MD5SIG_EXT:
3252		err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
3253		break;
3254#endif
3255	case TCP_USER_TIMEOUT:
3256		/* Cap the max time in ms TCP will retry or probe the window
3257		 * before giving up and aborting (ETIMEDOUT) a connection.
3258		 */
3259		if (val < 0)
3260			err = -EINVAL;
3261		else
3262			icsk->icsk_user_timeout = val;
3263		break;
3264
3265	case TCP_FASTOPEN:
3266		if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
3267		    TCPF_LISTEN))) {
3268			tcp_fastopen_init_key_once(net);
3269
3270			fastopen_queue_tune(sk, val);
3271		} else {
3272			err = -EINVAL;
3273		}
3274		break;
3275	case TCP_FASTOPEN_CONNECT:
3276		if (val > 1 || val < 0) {
3277			err = -EINVAL;
3278		} else if (net->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) {
3279			if (sk->sk_state == TCP_CLOSE)
3280				tp->fastopen_connect = val;
3281			else
3282				err = -EINVAL;
3283		} else {
3284			err = -EOPNOTSUPP;
3285		}
3286		break;
3287	case TCP_FASTOPEN_NO_COOKIE:
3288		if (val > 1 || val < 0)
3289			err = -EINVAL;
3290		else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
3291			err = -EINVAL;
3292		else
3293			tp->fastopen_no_cookie = val;
3294		break;
3295	case TCP_TIMESTAMP:
3296		if (!tp->repair)
3297			err = -EPERM;
3298		else
3299			tp->tsoffset = val - tcp_time_stamp_raw();
3300		break;
3301	case TCP_REPAIR_WINDOW:
3302		err = tcp_repair_set_window(tp, optval, optlen);
3303		break;
3304	case TCP_NOTSENT_LOWAT:
3305		tp->notsent_lowat = val;
3306		sk->sk_write_space(sk);
3307		break;
3308	case TCP_INQ:
3309		if (val > 1 || val < 0)
3310			err = -EINVAL;
3311		else
3312			tp->recvmsg_inq = val;
3313		break;
3314	case TCP_TX_DELAY:
3315		if (val)
3316			tcp_enable_tx_delay();
3317		tp->tcp_tx_delay = val;
3318		break;
3319	default:
3320		err = -ENOPROTOOPT;
3321		break;
3322	}
3323
3324	release_sock(sk);
3325	return err;
3326}
3327
3328int tcp_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval,
3329		   unsigned int optlen)
3330{
3331	const struct inet_connection_sock *icsk = inet_csk(sk);
3332
3333	if (level != SOL_TCP)
3334		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
3335						     optval, optlen);
3336	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
3337}
3338EXPORT_SYMBOL(tcp_setsockopt);
3339
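/* Usage sketch (userspace, not part of this file): two of the paths handled
 * by do_tcp_setsockopt() above. TCP_CONGESTION takes the algorithm name as
 * a string; TCP_USER_TIMEOUT caps unacknowledged-data time in milliseconds.
 * "cubic" must name a congestion module known to the kernel; error handling
 * omitted.
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

static void tune_tcp_sock(int fd)
{
	const char *cc = "cubic";
	unsigned int tmo_ms = 30000;	/* abort after 30s without progress */

	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, cc, strlen(cc));
	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &tmo_ms, sizeof(tmo_ms));
}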
3340static void tcp_get_info_chrono_stats(const struct tcp_sock *tp,
3341				      struct tcp_info *info)
3342{
3343	u64 stats[__TCP_CHRONO_MAX], total = 0;
3344	enum tcp_chrono i;
3345
3346	for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) {
3347		stats[i] = tp->chrono_stat[i - 1];
3348		if (i == tp->chrono_type)
3349			stats[i] += tcp_jiffies32 - tp->chrono_start;
3350		stats[i] *= USEC_PER_SEC / HZ;
3351		total += stats[i];
3352	}
3353
3354	info->tcpi_busy_time = total;
3355	info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED];
3356	info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED];
3357}
3358
3359/* Return information about state of tcp endpoint in API format. */
3360void tcp_get_info(struct sock *sk, struct tcp_info *info)
3361{
3362	const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
3363	const struct inet_connection_sock *icsk = inet_csk(sk);
3364	unsigned long rate;
3365	u32 now;
3366	u64 rate64;
3367	bool slow;
3368
3369	memset(info, 0, sizeof(*info));
3370	if (sk->sk_type != SOCK_STREAM)
3371		return;
3372
3373	info->tcpi_state = inet_sk_state_load(sk);
3374
3375	/* Report meaningful fields for all TCP states, including listeners */
3376	rate = READ_ONCE(sk->sk_pacing_rate);
3377	rate64 = (rate != ~0UL) ? rate : ~0ULL;
3378	info->tcpi_pacing_rate = rate64;
3379
3380	rate = READ_ONCE(sk->sk_max_pacing_rate);
3381	rate64 = (rate != ~0UL) ? rate : ~0ULL;
3382	info->tcpi_max_pacing_rate = rate64;
3383
3384	info->tcpi_reordering = tp->reordering;
3385	info->tcpi_snd_cwnd = tp->snd_cwnd;
3386
3387	if (info->tcpi_state == TCP_LISTEN) {
3388		/* listener's aliased fields:
3389		 * tcpi_unacked -> Number of children ready for accept()
3390		 * tcpi_sacked  -> max backlog
3391		 */
3392		info->tcpi_unacked = READ_ONCE(sk->sk_ack_backlog);
3393		info->tcpi_sacked = READ_ONCE(sk->sk_max_ack_backlog);
3394		return;
3395	}
3396
3397	slow = lock_sock_fast(sk);
3398
3399	info->tcpi_ca_state = icsk->icsk_ca_state;
3400	info->tcpi_retransmits = icsk->icsk_retransmits;
3401	info->tcpi_probes = icsk->icsk_probes_out;
3402	info->tcpi_backoff = icsk->icsk_backoff;
3403
3404	if (tp->rx_opt.tstamp_ok)
3405		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
3406	if (tcp_is_sack(tp))
3407		info->tcpi_options |= TCPI_OPT_SACK;
3408	if (tp->rx_opt.wscale_ok) {
3409		info->tcpi_options |= TCPI_OPT_WSCALE;
3410		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
3411		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
3412	}
3413
3414	if (tp->ecn_flags & TCP_ECN_OK)
3415		info->tcpi_options |= TCPI_OPT_ECN;
3416	if (tp->ecn_flags & TCP_ECN_SEEN)
3417		info->tcpi_options |= TCPI_OPT_ECN_SEEN;
3418	if (tp->syn_data_acked)
3419		info->tcpi_options |= TCPI_OPT_SYN_DATA;
3420
3421	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
3422	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
3423	info->tcpi_snd_mss = tp->mss_cache;
3424	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
3425
3426	info->tcpi_unacked = tp->packets_out;
3427	info->tcpi_sacked = tp->sacked_out;
3428
3429	info->tcpi_lost = tp->lost_out;
3430	info->tcpi_retrans = tp->retrans_out;
3431
3432	now = tcp_jiffies32;
3433	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
3434	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
3435	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
3436
3437	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
3438	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
3439	info->tcpi_rtt = tp->srtt_us >> 3;
3440	info->tcpi_rttvar = tp->mdev_us >> 2;
3441	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
3442	info->tcpi_advmss = tp->advmss;
3443
3444	info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3;
3445	info->tcpi_rcv_space = tp->rcvq_space.space;
3446
3447	info->tcpi_total_retrans = tp->total_retrans;
3448
3449	info->tcpi_bytes_acked = tp->bytes_acked;
3450	info->tcpi_bytes_received = tp->bytes_received;
3451	info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt);
3452	tcp_get_info_chrono_stats(tp, info);
3453
3454	info->tcpi_segs_out = tp->segs_out;
3455	info->tcpi_segs_in = tp->segs_in;
3456
3457	info->tcpi_min_rtt = tcp_min_rtt(tp);
3458	info->tcpi_data_segs_in = tp->data_segs_in;
3459	info->tcpi_data_segs_out = tp->data_segs_out;
3460
3461	info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0;
3462	rate64 = tcp_compute_delivery_rate(tp);
3463	if (rate64)
3464		info->tcpi_delivery_rate = rate64;
3465	info->tcpi_delivered = tp->delivered;
3466	info->tcpi_delivered_ce = tp->delivered_ce;
3467	info->tcpi_bytes_sent = tp->bytes_sent;
3468	info->tcpi_bytes_retrans = tp->bytes_retrans;
3469	info->tcpi_dsack_dups = tp->dsack_dups;
3470	info->tcpi_reord_seen = tp->reord_seen;
3471	info->tcpi_rcv_ooopack = tp->rcv_ooopack;
3472	info->tcpi_snd_wnd = tp->snd_wnd;
3473	info->tcpi_fastopen_client_fail = tp->fastopen_client_fail;
3474	unlock_sock_fast(sk, slow);
3475}
3476EXPORT_SYMBOL_GPL(tcp_get_info);
3477
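/* Usage sketch (userspace, not part of this file): fetching the structure
 * tcp_get_info() fills in. The kernel copies at most the length the caller
 * passes and reports back how many bytes it wrote, so older binaries keep
 * working as the struct grows. Error handling minimal.
 */
#include <stdio.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static void print_tcp_info(int fd)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
		printf("srtt %u us, cwnd %u segs, retrans %u\n",
		       ti.tcpi_rtt, ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
}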
3478static size_t tcp_opt_stats_get_size(void)
3479{
3480	return
3481		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BUSY */
3482		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_RWND_LIMITED */
3483		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_SNDBUF_LIMITED */
3484		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DATA_SEGS_OUT */
3485		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_TOTAL_RETRANS */
3486		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_PACING_RATE */
3487		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DELIVERY_RATE */
3488		nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_CWND */
3489		nla_total_size(sizeof(u32)) + /* TCP_NLA_REORDERING */
3490		nla_total_size(sizeof(u32)) + /* TCP_NLA_MIN_RTT */
3491		nla_total_size(sizeof(u8)) + /* TCP_NLA_RECUR_RETRANS */
3492		nla_total_size(sizeof(u8)) + /* TCP_NLA_DELIVERY_RATE_APP_LMT */
3493		nla_total_size(sizeof(u32)) + /* TCP_NLA_SNDQ_SIZE */
3494		nla_total_size(sizeof(u8)) + /* TCP_NLA_CA_STATE */
3495		nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */
3496		nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */
3497		nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */
3498		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */
3499		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */
3500		nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */
3501		nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */
3502		nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */
3503		nla_total_size(sizeof(u16)) + /* TCP_NLA_TIMEOUT_REHASH */
3504		nla_total_size(sizeof(u32)) + /* TCP_NLA_BYTES_NOTSENT */
3505		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_EDT */
3506		0;
3507}
3508
3509struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
3510					       const struct sk_buff *orig_skb)
3511{
3512	const struct tcp_sock *tp = tcp_sk(sk);
3513	struct sk_buff *stats;
3514	struct tcp_info info;
3515	unsigned long rate;
3516	u64 rate64;
3517
3518	stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC);
3519	if (!stats)
3520		return NULL;
3521
3522	tcp_get_info_chrono_stats(tp, &info);
3523	nla_put_u64_64bit(stats, TCP_NLA_BUSY,
3524			  info.tcpi_busy_time, TCP_NLA_PAD);
3525	nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED,
3526			  info.tcpi_rwnd_limited, TCP_NLA_PAD);
3527	nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED,
3528			  info.tcpi_sndbuf_limited, TCP_NLA_PAD);
3529	nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT,
3530			  tp->data_segs_out, TCP_NLA_PAD);
3531	nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS,
3532			  tp->total_retrans, TCP_NLA_PAD);
3533
3534	rate = READ_ONCE(sk->sk_pacing_rate);
3535	rate64 = (rate != ~0UL) ? rate : ~0ULL;
3536	nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD);
3537
3538	rate64 = tcp_compute_delivery_rate(tp);
3539	nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD);
3540
3541	nla_put_u32(stats, TCP_NLA_SND_CWND, tp->snd_cwnd);
3542	nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering);
3543	nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp));
3544
3545	nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits);
3546	nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited);
3547	nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh);
3548	nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered);
3549	nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce);
3550
3551	nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una);
3552	nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state);
3553
3554	nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent,
3555			  TCP_NLA_PAD);
3556	nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans,
3557			  TCP_NLA_PAD);
3558	nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups);
3559	nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen);
3560	nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3);
3561	nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash);
3562	nla_put_u32(stats, TCP_NLA_BYTES_NOTSENT,
3563		    max_t(int, 0, tp->write_seq - tp->snd_nxt));
3564	nla_put_u64_64bit(stats, TCP_NLA_EDT, orig_skb->skb_mstamp_ns,
3565			  TCP_NLA_PAD);
3566
3567	return stats;
3568}
3569
3570static int do_tcp_getsockopt(struct sock *sk, int level,
3571		int optname, char __user *optval, int __user *optlen)
3572{
3573	struct inet_connection_sock *icsk = inet_csk(sk);
3574	struct tcp_sock *tp = tcp_sk(sk);
3575	struct net *net = sock_net(sk);
3576	int val, len;
3577
3578	if (get_user(len, optlen))
3579		return -EFAULT;
3580
3581	len = min_t(unsigned int, len, sizeof(int));
3582
3583	if (len < 0)
3584		return -EINVAL;
3585
3586	switch (optname) {
3587	case TCP_MAXSEG:
3588		val = tp->mss_cache;
3589		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
3590			val = tp->rx_opt.user_mss;
3591		if (tp->repair)
3592			val = tp->rx_opt.mss_clamp;
3593		break;
3594	case TCP_NODELAY:
3595		val = !!(tp->nonagle&TCP_NAGLE_OFF);
3596		break;
3597	case TCP_CORK:
3598		val = !!(tp->nonagle&TCP_NAGLE_CORK);
3599		break;
3600	case TCP_KEEPIDLE:
3601		val = keepalive_time_when(tp) / HZ;
3602		break;
3603	case TCP_KEEPINTVL:
3604		val = keepalive_intvl_when(tp) / HZ;
3605		break;
3606	case TCP_KEEPCNT:
3607		val = keepalive_probes(tp);
3608		break;
3609	case TCP_SYNCNT:
3610		val = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
3611		break;
3612	case TCP_LINGER2:
3613		val = tp->linger2;
3614		if (val >= 0)
3615			val = (val ? : net->ipv4.sysctl_tcp_fin_timeout) / HZ;
3616		break;
3617	case TCP_DEFER_ACCEPT:
3618		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
3619				      TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
3620		break;
3621	case TCP_WINDOW_CLAMP:
3622		val = tp->window_clamp;
3623		break;
3624	case TCP_INFO: {
3625		struct tcp_info info;
3626
3627		if (get_user(len, optlen))
3628			return -EFAULT;
3629
3630		tcp_get_info(sk, &info);
3631
3632		len = min_t(unsigned int, len, sizeof(info));
3633		if (put_user(len, optlen))
3634			return -EFAULT;
3635		if (copy_to_user(optval, &info, len))
3636			return -EFAULT;
3637		return 0;
3638	}
3639	case TCP_CC_INFO: {
3640		const struct tcp_congestion_ops *ca_ops;
3641		union tcp_cc_info info;
3642		size_t sz = 0;
3643		int attr;
3644
3645		if (get_user(len, optlen))
3646			return -EFAULT;
3647
3648		ca_ops = icsk->icsk_ca_ops;
3649		if (ca_ops && ca_ops->get_info)
3650			sz = ca_ops->get_info(sk, ~0U, &attr, &info);
3651
3652		len = min_t(unsigned int, len, sz);
3653		if (put_user(len, optlen))
3654			return -EFAULT;
3655		if (copy_to_user(optval, &info, len))
3656			return -EFAULT;
3657		return 0;
3658	}
3659	case TCP_QUICKACK:
3660		val = !inet_csk_in_pingpong_mode(sk);
3661		break;
3662
3663	case TCP_CONGESTION:
3664		if (get_user(len, optlen))
3665			return -EFAULT;
3666		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
3667		if (put_user(len, optlen))
3668			return -EFAULT;
3669		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
3670			return -EFAULT;
3671		return 0;
3672
3673	case TCP_ULP:
3674		if (get_user(len, optlen))
3675			return -EFAULT;
3676		len = min_t(unsigned int, len, TCP_ULP_NAME_MAX);
3677		if (!icsk->icsk_ulp_ops) {
3678			if (put_user(0, optlen))
3679				return -EFAULT;
3680			return 0;
3681		}
3682		if (put_user(len, optlen))
3683			return -EFAULT;
3684		if (copy_to_user(optval, icsk->icsk_ulp_ops->name, len))
3685			return -EFAULT;
3686		return 0;
3687
3688	case TCP_FASTOPEN_KEY: {
3689		u64 key[TCP_FASTOPEN_KEY_BUF_LENGTH / sizeof(u64)];
3690		unsigned int key_len;
3691
3692		if (get_user(len, optlen))
3693			return -EFAULT;
3694
3695		key_len = tcp_fastopen_get_cipher(net, icsk, key) *
3696				TCP_FASTOPEN_KEY_LENGTH;
3697		len = min_t(unsigned int, len, key_len);
3698		if (put_user(len, optlen))
3699			return -EFAULT;
3700		if (copy_to_user(optval, key, len))
3701			return -EFAULT;
3702		return 0;
3703	}
3704	case TCP_THIN_LINEAR_TIMEOUTS:
3705		val = tp->thin_lto;
3706		break;
3707
3708	case TCP_THIN_DUPACK:
3709		val = 0;
3710		break;
3711
3712	case TCP_REPAIR:
3713		val = tp->repair;
3714		break;
3715
3716	case TCP_REPAIR_QUEUE:
3717		if (tp->repair)
3718			val = tp->repair_queue;
3719		else
3720			return -EINVAL;
3721		break;
3722
3723	case TCP_REPAIR_WINDOW: {
3724		struct tcp_repair_window opt;
3725
3726		if (get_user(len, optlen))
3727			return -EFAULT;
3728
3729		if (len != sizeof(opt))
3730			return -EINVAL;
3731
3732		if (!tp->repair)
3733			return -EPERM;
3734
3735		opt.snd_wl1	= tp->snd_wl1;
3736		opt.snd_wnd	= tp->snd_wnd;
3737		opt.max_window	= tp->max_window;
3738		opt.rcv_wnd	= tp->rcv_wnd;
3739		opt.rcv_wup	= tp->rcv_wup;
3740
3741		if (copy_to_user(optval, &opt, len))
3742			return -EFAULT;
3743		return 0;
3744	}
3745	case TCP_QUEUE_SEQ:
3746		if (tp->repair_queue == TCP_SEND_QUEUE)
3747			val = tp->write_seq;
3748		else if (tp->repair_queue == TCP_RECV_QUEUE)
3749			val = tp->rcv_nxt;
3750		else
3751			return -EINVAL;
3752		break;
3753
3754	case TCP_USER_TIMEOUT:
3755		val = icsk->icsk_user_timeout;
3756		break;
3757
3758	case TCP_FASTOPEN:
3759		val = icsk->icsk_accept_queue.fastopenq.max_qlen;
3760		break;
3761
3762	case TCP_FASTOPEN_CONNECT:
3763		val = tp->fastopen_connect;
3764		break;
3765
3766	case TCP_FASTOPEN_NO_COOKIE:
3767		val = tp->fastopen_no_cookie;
3768		break;
3769
3770	case TCP_TX_DELAY:
3771		val = tp->tcp_tx_delay;
3772		break;
3773
3774	case TCP_TIMESTAMP:
3775		val = tcp_time_stamp_raw() + tp->tsoffset;
3776		break;
3777	case TCP_NOTSENT_LOWAT:
3778		val = tp->notsent_lowat;
3779		break;
3780	case TCP_INQ:
3781		val = tp->recvmsg_inq;
3782		break;
3783	case TCP_SAVE_SYN:
3784		val = tp->save_syn;
3785		break;
3786	case TCP_SAVED_SYN: {
3787		if (get_user(len, optlen))
3788			return -EFAULT;
3789
3790		lock_sock(sk);
3791		if (tp->saved_syn) {
3792			if (len < tp->saved_syn[0]) {
3793				if (put_user(tp->saved_syn[0], optlen)) {
3794					release_sock(sk);
3795					return -EFAULT;
3796				}
3797				release_sock(sk);
3798				return -EINVAL;
3799			}
3800			len = tp->saved_syn[0];
3801			if (put_user(len, optlen)) {
3802				release_sock(sk);
3803				return -EFAULT;
3804			}
3805			if (copy_to_user(optval, tp->saved_syn + 1, len)) {
3806				release_sock(sk);
3807				return -EFAULT;
3808			}
3809			tcp_saved_syn_free(tp);
3810			release_sock(sk);
3811		} else {
3812			release_sock(sk);
3813			len = 0;
3814			if (put_user(len, optlen))
3815				return -EFAULT;
3816		}
3817		return 0;
3818	}
3819#ifdef CONFIG_MMU
3820	case TCP_ZEROCOPY_RECEIVE: {
3821		struct tcp_zerocopy_receive zc;
3822		int err;
3823
3824		if (get_user(len, optlen))
3825			return -EFAULT;
3826		if (len < offsetofend(struct tcp_zerocopy_receive, length))
3827			return -EINVAL;
3828		if (len > sizeof(zc)) {
3829			len = sizeof(zc);
3830			if (put_user(len, optlen))
3831				return -EFAULT;
3832		}
3833		if (copy_from_user(&zc, optval, len))
3834			return -EFAULT;
3835		lock_sock(sk);
3836		err = tcp_zerocopy_receive(sk, &zc);
3837		release_sock(sk);
3838		if (len == sizeof(zc))
3839			goto zerocopy_rcv_sk_err;
3840		switch (len) {
3841		case offsetofend(struct tcp_zerocopy_receive, err):
3842			goto zerocopy_rcv_sk_err;
3843		case offsetofend(struct tcp_zerocopy_receive, inq):
3844			goto zerocopy_rcv_inq;
3845		case offsetofend(struct tcp_zerocopy_receive, length):
3846		default:
3847			goto zerocopy_rcv_out;
3848		}
3849zerocopy_rcv_sk_err:
3850		if (!err)
3851			zc.err = sock_error(sk);
3852zerocopy_rcv_inq:
3853		zc.inq = tcp_inq_hint(sk);
3854zerocopy_rcv_out:
3855		if (!err && copy_to_user(optval, &zc, len))
3856			err = -EFAULT;
3857		return err;
3858	}
3859#endif
3860	default:
3861		return -ENOPROTOOPT;
3862	}
3863
3864	if (put_user(len, optlen))
3865		return -EFAULT;
3866	if (copy_to_user(optval, &val, len))
3867		return -EFAULT;
3868	return 0;
3869}
3870
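/* Usage sketch (userspace, heavily simplified, not part of this file) of
 * the TCP_ZEROCOPY_RECEIVE getsockopt handled in do_tcp_getsockopt() above.
 * Real users (see tools/testing/selftests/net/tcp_mmap.c) must mmap() the
 * socket, keep lengths page-aligned and fall back to recv() for any
 * unmapped tail indicated by zc.recv_skip_hint. Error handling omitted.
 */
#include <linux/tcp.h>		/* struct tcp_zerocopy_receive */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static ssize_t zc_receive(int fd, void *addr, size_t chunk)
{
	/* addr came from mmap(NULL, chunk, PROT_READ, MAP_SHARED, fd, 0) */
	struct tcp_zerocopy_receive zc;
	socklen_t zc_len = sizeof(zc);

	memset(&zc, 0, sizeof(zc));
	zc.address = (__u64)(unsigned long)addr;
	zc.length = chunk;	/* multiple of the page size */
	if (getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &zc_len))
		return -1;
	return zc.length;	/* bytes now readable at addr */
}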
3871int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
3872		   int __user *optlen)
3873{
3874	struct inet_connection_sock *icsk = inet_csk(sk);
3875
3876	if (level != SOL_TCP)
3877		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
3878						     optval, optlen);
3879	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
3880}
3881EXPORT_SYMBOL(tcp_getsockopt);
3882
3883#ifdef CONFIG_TCP_MD5SIG
3884static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
3885static DEFINE_MUTEX(tcp_md5sig_mutex);
3886static bool tcp_md5sig_pool_populated = false;
3887
3888static void __tcp_alloc_md5sig_pool(void)
3889{
3890	struct crypto_ahash *hash;
3891	int cpu;
3892
3893	hash = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
3894	if (IS_ERR(hash))
3895		return;
3896
3897	for_each_possible_cpu(cpu) {
3898		void *scratch = per_cpu(tcp_md5sig_pool, cpu).scratch;
3899		struct ahash_request *req;
3900
3901		if (!scratch) {
3902			scratch = kmalloc_node(sizeof(union tcp_md5sum_block) +
3903					       sizeof(struct tcphdr),
3904					       GFP_KERNEL,
3905					       cpu_to_node(cpu));
3906			if (!scratch)
3907				return;
3908			per_cpu(tcp_md5sig_pool, cpu).scratch = scratch;
3909		}
3910		if (per_cpu(tcp_md5sig_pool, cpu).md5_req)
3911			continue;
3912
3913		req = ahash_request_alloc(hash, GFP_KERNEL);
3914		if (!req)
3915			return;
3916
3917		ahash_request_set_callback(req, 0, NULL, NULL);
3918
3919		per_cpu(tcp_md5sig_pool, cpu).md5_req = req;
3920	}
3921	/* before setting tcp_md5sig_pool_populated, we must commit all writes
3922	 * to memory. See smp_rmb() in tcp_get_md5sig_pool()
3923	 */
3924	smp_wmb();
3925	tcp_md5sig_pool_populated = true;
3926}
3927
3928bool tcp_alloc_md5sig_pool(void)
3929{
3930	if (unlikely(!tcp_md5sig_pool_populated)) {
3931		mutex_lock(&tcp_md5sig_mutex);
3932
3933		if (!tcp_md5sig_pool_populated) {
3934			__tcp_alloc_md5sig_pool();
3935			if (tcp_md5sig_pool_populated)
3936				static_branch_inc(&tcp_md5_needed);
3937		}
3938
3939		mutex_unlock(&tcp_md5sig_mutex);
3940	}
3941	return tcp_md5sig_pool_populated;
3942}
3943EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
3944
3945
3946/**
3947 *	tcp_get_md5sig_pool - get md5sig_pool for this user
3948 *
3949 *	We use a percpu structure, so if we succeed, we exit with preemption
3950 *	and BH disabled, to make sure another thread or softirq handler
3951 *	won't try to get the same context.
3952 */
3953struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
3954{
3955	local_bh_disable();
3956
3957	if (tcp_md5sig_pool_populated) {
3958		/* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
3959		smp_rmb();
3960		return this_cpu_ptr(&tcp_md5sig_pool);
3961	}
3962	local_bh_enable();
3963	return NULL;
3964}
3965EXPORT_SYMBOL(tcp_get_md5sig_pool);
3966
int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
			  const struct sk_buff *skb, unsigned int header_len)
{
	struct scatterlist sg;
	const struct tcphdr *tp = tcp_hdr(skb);
	struct ahash_request *req = hp->md5_req;
	unsigned int i;
	const unsigned int head_data_len = skb_headlen(skb) > header_len ?
					   skb_headlen(skb) - header_len : 0;
	const struct skb_shared_info *shi = skb_shinfo(skb);
	struct sk_buff *frag_iter;

	sg_init_table(&sg, 1);

	sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
	ahash_request_set_crypt(req, &sg, NULL, head_data_len);
	if (crypto_ahash_update(req))
		return 1;

	for (i = 0; i < shi->nr_frags; ++i) {
		const skb_frag_t *f = &shi->frags[i];
		unsigned int offset = skb_frag_off(f);
		struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);

		sg_set_page(&sg, page, skb_frag_size(f),
			    offset_in_page(offset));
		ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f));
		if (crypto_ahash_update(req))
			return 1;
	}

	skb_walk_frags(skb, frag_iter)
		if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
			return 1;

	return 0;
}
EXPORT_SYMBOL(tcp_md5_hash_skb_data);

int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
{
	u8 keylen = READ_ONCE(key->keylen); /* paired with WRITE_ONCE() in tcp_md5_do_add */
	struct scatterlist sg;

	sg_init_one(&sg, key->key, keylen);
	ahash_request_set_crypt(hp->md5_req, &sg, NULL, keylen);

	/* We use data_race() because tcp_md5_do_add() might change key->key under us */
	return data_race(crypto_ahash_update(hp->md5_req));
}
EXPORT_SYMBOL(tcp_md5_hash_key);
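
/* The two helpers above are building blocks; a full signature
 * computation (compare tcp_v4_md5_hash_skb() in tcp_ipv4.c) roughly
 * chains them like this sketch, which is not a verbatim copy:
 *
 *	hp = tcp_get_md5sig_pool();
 *	if (!hp)
 *		goto clear_hash_noput;
 *	if (crypto_ahash_init(hp->md5_req))
 *		goto clear_hash;
 *	// ... hash the pseudo-header and TCP header ...
 *	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
 *		goto clear_hash;
 *	if (tcp_md5_hash_key(hp, key))
 *		goto clear_hash;
 *	ahash_request_set_crypt(hp->md5_req, NULL, md5_hash, 0);
 *	if (crypto_ahash_final(hp->md5_req))
 *		goto clear_hash;
 *	tcp_put_md5sig_pool();
 *
 * See the per-family callers for the exact header hashing.
 */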

#endif

void tcp_done(struct sock *sk)
{
	struct request_sock *req;

	/* We might be called with a new socket, after
	 * inet_csk_prepare_forced_close() has been called, so we cannot
	 * use lockdep_sock_is_held(sk) here.
	 */
	req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1);

	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);

	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);
	if (req)
		reqsk_fastopen_remove(sk, req, false);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_state_change(sk);
	else
		inet_csk_destroy_sock(sk);
}
EXPORT_SYMBOL_GPL(tcp_done);

int tcp_abort(struct sock *sk, int err)
{
	if (!sk_fullsock(sk)) {
		if (sk->sk_state == TCP_NEW_SYN_RECV) {
			struct request_sock *req = inet_reqsk(sk);

			local_bh_disable();
			inet_csk_reqsk_queue_drop(req->rsk_listener, req);
			local_bh_enable();
			return 0;
		}
		return -EOPNOTSUPP;
	}

	/* Don't race with userspace socket closes such as tcp_close. */
	lock_sock(sk);

	if (sk->sk_state == TCP_LISTEN) {
		tcp_set_state(sk, TCP_CLOSE);
		inet_csk_listen_stop(sk);
	}

	/* Don't race with BH socket closes such as inet_csk_listen_stop. */
	local_bh_disable();
	bh_lock_sock(sk);

	if (!sock_flag(sk, SOCK_DEAD)) {
		sk->sk_err = err;
		/* This barrier is coupled with smp_rmb() in tcp_poll() */
		smp_wmb();
		sk->sk_error_report(sk);
		if (tcp_need_reset(sk->sk_state))
			tcp_send_active_reset(sk, GFP_ATOMIC);
		tcp_done(sk);
	}

	bh_unlock_sock(sk);
	local_bh_enable();
	tcp_write_queue_purge(sk);
	release_sock(sk);
	return 0;
}
EXPORT_SYMBOL_GPL(tcp_abort);
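
/* tcp_abort() is the backend for the SOCK_DESTROY netlink operation in
 * inet_diag (CONFIG_INET_DIAG_DESTROY, CAP_NET_ADMIN required), which
 * is what "ss -K" uses to kill live sockets from userspace, e.g.:
 *
 *	# kill established connections to port 443 (sketch; exact
 *	# filter syntax depends on the ss version)
 *	ss -K dst :443
 */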

extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	ssize_t ret;

	if (!str)
		return 0;

	ret = kstrtoul(str, 0, &thash_entries);
	if (ret)
		return 0;

	return 1;
}
__setup("thash_entries=", set_thash_entries);
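
/* thash_entries= is a boot-time parameter: passing e.g.
 * "thash_entries=131072" on the kernel command line overrides the
 * auto-sized established-hash bucket count used in tcp_init() below.
 * Returning 1 from the handler tells the __setup() machinery that the
 * argument was consumed.
 */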

static void __init tcp_init_mem(void)
{
	unsigned long limit = nr_free_buffer_pages() / 16;

	limit = max(limit, 128UL);
	sysctl_tcp_mem[0] = limit / 4 * 3;		/* 4.68 % */
	sysctl_tcp_mem[1] = limit;			/* 6.25 % */
	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;	/* 9.37 % */
}
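
/* Worked example (assuming 4 GiB of buffer pages, i.e. 1048576 pages of
 * 4 KiB): limit = 1048576 / 16 = 65536 pages, so
 *
 *	tcp_mem[0] (low)      = 65536 / 4 * 3 = 49152 pages (~192 MiB)
 *	tcp_mem[1] (pressure) = 65536 pages          (~256 MiB)
 *	tcp_mem[2] (high)     = 49152 * 2     = 98304 pages (~384 MiB)
 *
 * matching the 4.68 % / 6.25 % / 9.37 % of memory noted above.  These
 * are page counts, as reported by the net.ipv4.tcp_mem sysctl.
 */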

void __init tcp_init(void)
{
	int max_rshare, max_wshare, cnt;
	unsigned long limit;
	unsigned int i;

	BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
		     sizeof_field(struct sk_buff, cb));

	percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
	percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
	inet_hashinfo_init(&tcp_hashinfo);
	inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
			    thash_entries, 21,  /* one slot per 2 MB */
			    0, 64 * 1024);
	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					17, /* one slot per 128 KB of memory */
					0,
					NULL,
					&tcp_hashinfo.ehash_mask,
					0,
					thash_entries ? 0 : 512 * 1024);
	for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);

	if (inet_ehash_locks_alloc(&tcp_hashinfo))
		panic("TCP: failed to alloc ehash_locks");
	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_mask + 1,
					17, /* one slot per 128 KB of memory */
					0,
					&tcp_hashinfo.bhash_size,
					NULL,
					0,
					64 * 1024);
	tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
	}

	cnt = tcp_hashinfo.ehash_mask + 1;
	sysctl_tcp_max_orphans = cnt / 2;

	tcp_init_mem();
	/* Set per-socket limits to no more than 1/128 the pressure threshold */
	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
	max_wshare = min(4UL*1024*1024, limit);
	max_rshare = min(6UL*1024*1024, limit);

	init_net.ipv4.sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
	init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024;
	init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare);

	init_net.ipv4.sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
	init_net.ipv4.sysctl_tcp_rmem[1] = 131072;
	init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare);
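	/* On a machine with enough memory these give the familiar defaults
	 * (assuming SK_MEM_QUANTUM == PAGE_SIZE == 4096):
	 *
	 *	net.ipv4.tcp_wmem = 4096  16384   4194304
	 *	net.ipv4.tcp_rmem = 4096  131072  6291456
	 *
	 * i.e. min/default/max bytes per socket for the send and receive
	 * buffers, as seen via sysctl or /proc/sys/net/ipv4/.
	 */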

	pr_info("Hash tables configured (established %u bind %u)\n",
		tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);

	tcp_v4_init();
	tcp_metrics_init();
	BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
	tcp_tasklet_init();
	mptcp_init();
}