   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Implementation of the Transmission Control Protocol(TCP).
   7 *
   8 * Authors:	Ross Biro
   9 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  10 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
  11 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
  12 *		Florian La Roche, <flla@stud.uni-sb.de>
  13 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
  14 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
  15 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  16 *		Matthew Dillon, <dillon@apollo.west.oic.com>
  17 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  18 *		Jorge Cwik, <jorge@laser.satlink.net>
  19 *
  20 * Fixes:
  21 *		Alan Cox	:	Numerous verify_area() calls
  22 *		Alan Cox	:	Set the ACK bit on a reset
  23 *		Alan Cox	:	Stopped it crashing if it closed while
  24 *					sk->inuse=1 and was trying to connect
  25 *					(tcp_err()).
  26 *		Alan Cox	:	All icmp error handling was broken
  27 *					pointers passed where wrong and the
  28 *					socket was looked up backwards. Nobody
  29 *					tested any icmp error code obviously.
  30 *		Alan Cox	:	tcp_err() now handled properly. It
  31 *					wakes people on errors. poll
  32 *					behaves and the icmp error race
  33 *					has gone by moving it into sock.c
  34 *		Alan Cox	:	tcp_send_reset() fixed to work for
  35 *					everything not just packets for
  36 *					unknown sockets.
  37 *		Alan Cox	:	tcp option processing.
  38 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
  39 *					syn rule wrong]
  40 *		Herp Rosmanith  :	More reset fixes
  41 *		Alan Cox	:	No longer acks invalid rst frames.
  42 *					Acking any kind of RST is right out.
  43 *		Alan Cox	:	Sets an ignore me flag on an rst
  44 *					receive otherwise odd bits of prattle
  45 *					escape still
  46 *		Alan Cox	:	Fixed another acking RST frame bug.
  47 *					Should stop LAN workplace lockups.
  48 *		Alan Cox	: 	Some tidyups using the new skb list
  49 *					facilities
  50 *		Alan Cox	:	sk->keepopen now seems to work
  51 *		Alan Cox	:	Pulls options out correctly on accepts
  52 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
  53 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
  54 *					bit to skb ops.
  55 *		Alan Cox	:	Tidied tcp_data to avoid a potential
  56 *					nasty.
  57 *		Alan Cox	:	Added some better commenting, as the
  58 *					tcp is hard to follow
  59 *		Alan Cox	:	Removed incorrect check for 20 * psh
  60 *	Michael O'Reilly	:	ack < copied bug fix.
  61 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
  62 *		Alan Cox	:	FIN with no memory -> CRASH
  63 *		Alan Cox	:	Added socket option proto entries.
  64 *					Also added awareness of them to accept.
  65 *		Alan Cox	:	Added TCP options (SOL_TCP)
  66 *		Alan Cox	:	Switched wakeup calls to callbacks,
  67 *					so the kernel can layer network
  68 *					sockets.
  69 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
  70 *		Alan Cox	:	Handle FIN (more) properly (we hope).
  71 *		Alan Cox	:	RST frames sent on unsynchronised
  72 *					state ack error.
  73 *		Alan Cox	:	Put in missing check for SYN bit.
  74 *		Alan Cox	:	Added tcp_select_window() aka NET2E
  75 *					window non shrink trick.
  76 *		Alan Cox	:	Added a couple of small NET2E timer
  77 *					fixes
  78 *		Charles Hedrick :	TCP fixes
  79 *		Toomas Tamm	:	TCP window fixes
  80 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
  81 *		Charles Hedrick	:	Rewrote most of it to actually work
  82 *		Linus		:	Rewrote tcp_read() and URG handling
  83 *					completely
  84 *		Gerhard Koerting:	Fixed some missing timer handling
  85 *		Matthew Dillon  :	Reworked TCP machine states as per RFC
  86 *		Gerhard Koerting:	PC/TCP workarounds
  87 *		Adam Caldwell	:	Assorted timer/timing errors
  88 *		Matthew Dillon	:	Fixed another RST bug
  89 *		Alan Cox	:	Move to kernel side addressing changes.
  90 *		Alan Cox	:	Beginning work on TCP fastpathing
  91 *					(not yet usable)
  92 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
  93 *		Alan Cox	:	TCP fast path debugging
  94 *		Alan Cox	:	Window clamping
  95 *		Michael Riepe	:	Bug in tcp_check()
  96 *		Matt Dillon	:	More TCP improvements and RST bug fixes
   97 *		Matt Dillon	:	Yet more small nasties removed from the
  98 *					TCP code (Be very nice to this man if
  99 *					tcp finally works 100%) 8)
 100 *		Alan Cox	:	BSD accept semantics.
 101 *		Alan Cox	:	Reset on closedown bug.
 102 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 103 *		Michael Pall	:	Handle poll() after URG properly in
 104 *					all cases.
 105 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 106 *					(multi URG PUSH broke rlogin).
 107 *		Michael Pall	:	Fix the multi URG PUSH problem in
 108 *					tcp_readable(), poll() after URG
 109 *					works now.
 110 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 111 *					BSD api.
 112 *		Alan Cox	:	Changed the semantics of sk->socket to
 113 *					fix a race and a signal problem with
 114 *					accept() and async I/O.
 115 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 116 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 117 *		Craig I. Hagan  :	Allow for BSD compatible TIME_WAIT for
 118 *					clients/servers which listen in on
 119 *					fixed ports.
 120 *		Alan Cox	:	Cleaned the above up and shrank it to
 121 *					a sensible code size.
 122 *		Alan Cox	:	Self connect lockup fix.
 123 *		Alan Cox	:	No connect to multicast.
 124 *		Ross Biro	:	Close unaccepted children on master
 125 *					socket close.
 126 *		Alan Cox	:	Reset tracing code.
 127 *		Alan Cox	:	Spurious resets on shutdown.
 128 *		Alan Cox	:	Giant 15 minute/60 second timer error
 129 *		Alan Cox	:	Small whoops in polling before an
 130 *					accept.
 131 *		Alan Cox	:	Kept the state trace facility since
 132 *					it's handy for debugging.
 133 *		Alan Cox	:	More reset handler fixes.
 134 *		Alan Cox	:	Started rewriting the code based on
 135 *					the RFC's for other useful protocol
 136 *					references see: Comer, KA9Q NOS, and
 137 *					for a reference on the difference
 138 *					between specifications and how BSD
 139 *					works see the 4.4lite source.
 140 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 141 *					close.
 142 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 143 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 144 *		Alan Cox	:	Reimplemented timers as per the RFC
 145 *					and using multiple timers for sanity.
 146 *		Alan Cox	:	Small bug fixes, and a lot of new
 147 *					comments.
 148 *		Alan Cox	:	Fixed dual reader crash by locking
 149 *					the buffers (much like datagram.c)
 150 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 151 *					now gets fed up of retrying without
 152 *					(even a no space) answer.
 153 *		Alan Cox	:	Extracted closing code better
 154 *		Alan Cox	:	Fixed the closing state machine to
 155 *					resemble the RFC.
 156 *		Alan Cox	:	More 'per spec' fixes.
 157 *		Jorge Cwik	:	Even faster checksumming.
 158 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 159 *					only frames. At least one pc tcp stack
 160 *					generates them.
 161 *		Alan Cox	:	Cache last socket.
 162 *		Alan Cox	:	Per route irtt.
 163 *		Matt Day	:	poll()->select() match BSD precisely on error
 164 *		Alan Cox	:	New buffers
 165 *		Marc Tamsky	:	Various sk->prot->retransmits and
 166 *					sk->retransmits misupdating fixed.
 167 *					Fixed tcp_write_timeout: stuck close,
 168 *					and TCP syn retries gets used now.
 169 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 170 *					ack if state is TCP_CLOSED.
 171 *		Alan Cox	:	Look up device on a retransmit - routes may
 172 *					change. Doesn't yet cope with MSS shrink right
 173 *					but it's a start!
 174 *		Marc Tamsky	:	Closing in closing fixes.
 175 *		Mike Shaver	:	RFC1122 verifications.
 176 *		Alan Cox	:	rcv_saddr errors.
 177 *		Alan Cox	:	Block double connect().
 178 *		Alan Cox	:	Small hooks for enSKIP.
 179 *		Alexey Kuznetsov:	Path MTU discovery.
 180 *		Alan Cox	:	Support soft errors.
 181 *		Alan Cox	:	Fix MTU discovery pathological case
 182 *					when the remote claims no mtu!
 183 *		Marc Tamsky	:	TCP_CLOSE fix.
 184 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 185 *					window but wrong (fixes NT lpd problems)
 186 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 187 *		Joerg Reuter	:	No modification of locked buffers in
 188 *					tcp_do_retransmit()
 189 *		Eric Schenk	:	Changed receiver side silly window
 190 *					avoidance algorithm to BSD style
 191 *					algorithm. This doubles throughput
 192 *					against machines running Solaris,
 193 *					and seems to result in general
 194 *					improvement.
 195 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 196 *	Willy Konynenberg	:	Transparent proxying support.
 197 *	Mike McLagan		:	Routing by source
 198 *		Keith Owens	:	Do proper merging with partial SKB's in
 199 *					tcp_do_sendmsg to avoid burstiness.
 200 *		Eric Schenk	:	Fix fast close down bug with
 201 *					shutdown() followed by close().
 202 *		Andi Kleen 	:	Make poll agree with SIGIO
 203 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 204 *					lingertime == 0 (RFC 793 ABORT Call)
 205 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 206 *					csum_and_copy_from_user() if possible.
 207 *
 208 *		This program is free software; you can redistribute it and/or
 209 *		modify it under the terms of the GNU General Public License
 210 *		as published by the Free Software Foundation; either version
 211 *		2 of the License, or(at your option) any later version.
 212 *
 213 * Description of States:
 214 *
 215 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 216 *
 217 *	TCP_SYN_RECV		received a connection request, sent ack,
 218 *				waiting for final ack in three-way handshake.
 219 *
 220 *	TCP_ESTABLISHED		connection established
 221 *
 222 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 223 *				transmission of remaining buffered data
 224 *
 225 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 226 *				to shutdown
 227 *
 228 *	TCP_CLOSING		both sides have shutdown but we still have
 229 *				data we have to finish sending
 230 *
 231 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 232 *				closed, can only be entered from FIN_WAIT2
 233 *				or CLOSING.  Required because the other end
 234 *				may not have gotten our last ACK causing it
 235 *				to retransmit the data packet (which we ignore)
 236 *
 237 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 238 *				us to finish writing our data and to shutdown
 239 *				(we have to close() to move on to LAST_ACK)
 240 *
  241 *	TCP_LAST_ACK		our side has shutdown after remote has
 242 *				shutdown.  There may still be data in our
 243 *				buffer that we have to finish sending
 244 *
 245 *	TCP_CLOSE		socket is finished
 246 */
 247
 248#include <linux/kernel.h>
 249#include <linux/module.h>
 250#include <linux/types.h>
 251#include <linux/fcntl.h>
 252#include <linux/poll.h>
 253#include <linux/init.h>
 254#include <linux/fs.h>
 255#include <linux/skbuff.h>
 256#include <linux/scatterlist.h>
 257#include <linux/splice.h>
 258#include <linux/net.h>
 259#include <linux/socket.h>
 260#include <linux/random.h>
 261#include <linux/bootmem.h>
 262#include <linux/highmem.h>
 263#include <linux/swap.h>
 264#include <linux/cache.h>
 265#include <linux/err.h>
 266#include <linux/crypto.h>
 267#include <linux/time.h>
 268#include <linux/slab.h>
 269
 270#include <net/icmp.h>
 271#include <net/tcp.h>
 272#include <net/xfrm.h>
 273#include <net/ip.h>
 274#include <net/netdma.h>
 275#include <net/sock.h>
 276
 277#include <asm/uaccess.h>
 278#include <asm/ioctls.h>
 279
 280int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
 281
 282struct percpu_counter tcp_orphan_count;
 283EXPORT_SYMBOL_GPL(tcp_orphan_count);
 284
 285long sysctl_tcp_mem[3] __read_mostly;
 286int sysctl_tcp_wmem[3] __read_mostly;
 287int sysctl_tcp_rmem[3] __read_mostly;
 288
 289EXPORT_SYMBOL(sysctl_tcp_mem);
 290EXPORT_SYMBOL(sysctl_tcp_rmem);
 291EXPORT_SYMBOL(sysctl_tcp_wmem);
 292
 293atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
 294EXPORT_SYMBOL(tcp_memory_allocated);
 295
 296/*
 297 * Current number of TCP sockets.
 298 */
 299struct percpu_counter tcp_sockets_allocated;
 300EXPORT_SYMBOL(tcp_sockets_allocated);
 301
 302/*
 303 * TCP splice context
 304 */
 305struct tcp_splice_state {
 306	struct pipe_inode_info *pipe;
 307	size_t len;
 308	unsigned int flags;
 309};
 310
 311/*
 312 * Pressure flag: try to collapse.
 313 * Technical note: it is used by multiple contexts non atomically.
 314 * All the __sk_mem_schedule() is of this nature: accounting
 315 * is strict, actions are advisory and have some latency.
 316 */
 317int tcp_memory_pressure __read_mostly;
 318EXPORT_SYMBOL(tcp_memory_pressure);
 319
 320void tcp_enter_memory_pressure(struct sock *sk)
 321{
 322	if (!tcp_memory_pressure) {
 323		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
 324		tcp_memory_pressure = 1;
 325	}
 326}
 327EXPORT_SYMBOL(tcp_enter_memory_pressure);
 328
 329/* Convert seconds to retransmits based on initial and max timeout */
 330static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
 331{
 332	u8 res = 0;
 333
 334	if (seconds > 0) {
 335		int period = timeout;
 336
 337		res = 1;
 338		while (seconds > period && res < 255) {
 339			res++;
 340			timeout <<= 1;
 341			if (timeout > rto_max)
 342				timeout = rto_max;
 343			period += timeout;
 344		}
 345	}
 346	return res;
 347}
 348
 349/* Convert retransmits to seconds based on initial and max timeout */
 350static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
 351{
 352	int period = 0;
 353
 354	if (retrans > 0) {
 355		period = timeout;
 356		while (--retrans) {
 357			timeout <<= 1;
 358			if (timeout > rto_max)
 359				timeout = rto_max;
 360			period += timeout;
 361		}
 362	}
 363	return period;
 364}
 365
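/*
 * Editorial illustration (not part of tcp.c): the two helpers above walk an
 * exponentially backed-off timeout that is capped at rto_max, so the two
 * conversions are (approximate) inverses of each other.  A standalone
 * userspace sketch of the same arithmetic, assuming a 1 second initial
 * timeout and an 8 second cap:
 */
#if 0	/* userspace sketch, never compiled as part of the kernel */
#include <stdio.h>

static int example_retrans_to_secs(unsigned char retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}

int main(void)
{
	/* 4 retransmits: 1 + 2 + 4 + 8 = 15 seconds in total */
	printf("%d\n", example_retrans_to_secs(4, 1, 8));	/* prints 15 */
	return 0;
}
#endif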
 366/*
 367 *	Wait for a TCP event.
 368 *
 369 *	Note that we don't need to lock the socket, as the upper poll layers
 370 *	take care of normal races (between the test and the event) and we don't
 371 *	go look at any of the socket buffers directly.
 372 */
 373unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 374{
 375	unsigned int mask;
 376	struct sock *sk = sock->sk;
 377	struct tcp_sock *tp = tcp_sk(sk);
 378
 379	sock_poll_wait(file, sk_sleep(sk), wait);
 380	if (sk->sk_state == TCP_LISTEN)
 381		return inet_csk_listen_poll(sk);
 382
 383	/* Socket is not locked. We are protected from async events
 384	 * by poll logic and correct handling of state changes
 385	 * made by other threads is impossible in any case.
 386	 */
 387
 388	mask = 0;
 389
 390	/*
 391	 * POLLHUP is certainly not done right. But poll() doesn't
 392	 * have a notion of HUP in just one direction, and for a
 393	 * socket the read side is more interesting.
 394	 *
 395	 * Some poll() documentation says that POLLHUP is incompatible
 396	 * with the POLLOUT/POLLWR flags, so somebody should check this
 397	 * all. But careful, it tends to be safer to return too many
 398	 * bits than too few, and you can easily break real applications
 399	 * if you don't tell them that something has hung up!
 400	 *
 401	 * Check-me.
 402	 *
 403	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
 404	 * our fs/select.c). It means that after we received EOF,
 405	 * poll always returns immediately, making impossible poll() on write()
 406	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
 407	 * if and only if shutdown has been made in both directions.
 408	 * Actually, it is interesting to look how Solaris and DUX
 409	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
 410	 * then we could set it on SND_SHUTDOWN. BTW examples given
 411	 * in Stevens' books assume exactly this behaviour, it explains
 412	 * why POLLHUP is incompatible with POLLOUT.	--ANK
 413	 *
 414	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
 415	 * blocking on fresh not-connected or disconnected socket. --ANK
 416	 */
 417	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
 418		mask |= POLLHUP;
 419	if (sk->sk_shutdown & RCV_SHUTDOWN)
 420		mask |= POLLIN | POLLRDNORM | POLLRDHUP;
 421
 422	/* Connected? */
 423	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 424		int target = sock_rcvlowat(sk, 0, INT_MAX);
 425
 426		if (tp->urg_seq == tp->copied_seq &&
 427		    !sock_flag(sk, SOCK_URGINLINE) &&
 428		    tp->urg_data)
 429			target++;
 430
 431		/* Potential race condition. If read of tp below will
 432		 * escape above sk->sk_state, we can be illegally awaken
 433		 * in SYN_* states. */
 434		if (tp->rcv_nxt - tp->copied_seq >= target)
 435			mask |= POLLIN | POLLRDNORM;
 436
 437		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
 438			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
 439				mask |= POLLOUT | POLLWRNORM;
 440			} else {  /* send SIGIO later */
 441				set_bit(SOCK_ASYNC_NOSPACE,
 442					&sk->sk_socket->flags);
 443				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 444
 445				/* Race breaker. If space is freed after
 446				 * wspace test but before the flags are set,
 447				 * IO signal will be lost.
 448				 */
 449				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
 450					mask |= POLLOUT | POLLWRNORM;
 451			}
 452		} else
 453			mask |= POLLOUT | POLLWRNORM;
 454
 455		if (tp->urg_data & TCP_URG_VALID)
 456			mask |= POLLPRI;
 457	}
 458	/* This barrier is coupled with smp_wmb() in tcp_reset() */
 459	smp_rmb();
 460	if (sk->sk_err)
 461		mask |= POLLERR;
 462
 463	return mask;
 464}
 465EXPORT_SYMBOL(tcp_poll);
 466
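/*
 * Editorial illustration (userspace, not part of tcp.c): the mask assembled
 * by tcp_poll() above is exactly what poll(2) reports to applications.  A
 * minimal sketch, assuming "fd" is an already connected TCP socket:
 */
#if 0	/* userspace sketch, never compiled as part of the kernel */
#define _GNU_SOURCE	/* for POLLRDHUP */
#include <poll.h>
#include <stdio.h>

static void example_wait_for_tcp_event(int fd)
{
	struct pollfd pfd = {
		.fd     = fd,
		.events = POLLIN | POLLOUT | POLLRDHUP,
	};

	if (poll(&pfd, 1, 5000) > 0) {
		if (pfd.revents & POLLIN)
			printf("data (or a FIN) can be read\n");
		if (pfd.revents & POLLOUT)
			printf("send buffer space is available\n");
		if (pfd.revents & POLLRDHUP)
			printf("peer shut down its write side\n");
		if (pfd.revents & (POLLERR | POLLHUP))
			printf("error or full hangup\n");
	}
}
#endif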
 467int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 468{
 469	struct tcp_sock *tp = tcp_sk(sk);
 470	int answ;
 471
 472	switch (cmd) {
 473	case SIOCINQ:
 474		if (sk->sk_state == TCP_LISTEN)
 475			return -EINVAL;
 476
 477		lock_sock(sk);
 478		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
 479			answ = 0;
 480		else if (sock_flag(sk, SOCK_URGINLINE) ||
 481			 !tp->urg_data ||
 482			 before(tp->urg_seq, tp->copied_seq) ||
 483			 !before(tp->urg_seq, tp->rcv_nxt)) {
 484			struct sk_buff *skb;
 485
 486			answ = tp->rcv_nxt - tp->copied_seq;
 487
 488			/* Subtract 1, if FIN is in queue. */
 489			skb = skb_peek_tail(&sk->sk_receive_queue);
 490			if (answ && skb)
 491				answ -= tcp_hdr(skb)->fin;
 492		} else
 493			answ = tp->urg_seq - tp->copied_seq;
 494		release_sock(sk);
 495		break;
 496	case SIOCATMARK:
 497		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
 498		break;
 499	case SIOCOUTQ:
 500		if (sk->sk_state == TCP_LISTEN)
 501			return -EINVAL;
 502
 503		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
 504			answ = 0;
 505		else
 506			answ = tp->write_seq - tp->snd_una;
 507		break;
 508	case SIOCOUTQNSD:
 509		if (sk->sk_state == TCP_LISTEN)
 510			return -EINVAL;
 511
 512		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
 513			answ = 0;
 514		else
 515			answ = tp->write_seq - tp->snd_nxt;
 516		break;
 517	default:
 518		return -ENOIOCTLCMD;
 519	}
 520
 521	return put_user(answ, (int __user *)arg);
 522}
 523EXPORT_SYMBOL(tcp_ioctl);
 524
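/*
 * Editorial illustration (userspace, not part of tcp.c): the SIOCINQ and
 * SIOCOUTQ cases handled by tcp_ioctl() above are reachable from
 * applications via ioctl(2).  A minimal sketch, assuming "fd" is a
 * connected TCP socket:
 */
#if 0	/* userspace sketch, never compiled as part of the kernel */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>

static void example_queue_sizes(int fd)
{
	int inq = 0, outq = 0;

	ioctl(fd, SIOCINQ, &inq);	/* unread bytes sitting in the receive queue */
	ioctl(fd, SIOCOUTQ, &outq);	/* sent bytes not yet acknowledged by the peer */
	printf("inq=%d outq=%d\n", inq, outq);
}
#endif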
 525static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
 526{
 527	TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
 528	tp->pushed_seq = tp->write_seq;
 529}
 530
 531static inline int forced_push(struct tcp_sock *tp)
 532{
 533	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
 534}
 535
 536static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
 537{
 538	struct tcp_sock *tp = tcp_sk(sk);
 539	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 540
 541	skb->csum    = 0;
 542	tcb->seq     = tcb->end_seq = tp->write_seq;
 543	tcb->flags   = TCPHDR_ACK;
 544	tcb->sacked  = 0;
 545	skb_header_release(skb);
 546	tcp_add_write_queue_tail(sk, skb);
 547	sk->sk_wmem_queued += skb->truesize;
 548	sk_mem_charge(sk, skb->truesize);
 549	if (tp->nonagle & TCP_NAGLE_PUSH)
 550		tp->nonagle &= ~TCP_NAGLE_PUSH;
 551}
 552
 553static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
 554{
 555	if (flags & MSG_OOB)
 556		tp->snd_up = tp->write_seq;
 557}
 558
 559static inline void tcp_push(struct sock *sk, int flags, int mss_now,
 560			    int nonagle)
 561{
 562	if (tcp_send_head(sk)) {
 563		struct tcp_sock *tp = tcp_sk(sk);
 564
 565		if (!(flags & MSG_MORE) || forced_push(tp))
 566			tcp_mark_push(tp, tcp_write_queue_tail(sk));
 567
 568		tcp_mark_urg(tp, flags);
 569		__tcp_push_pending_frames(sk, mss_now,
 570					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
 571	}
 572}
 573
 574static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
 575				unsigned int offset, size_t len)
 576{
 577	struct tcp_splice_state *tss = rd_desc->arg.data;
 578	int ret;
 579
 580	ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
 581			      tss->flags);
 582	if (ret > 0)
 583		rd_desc->count -= ret;
 584	return ret;
 585}
 586
 587static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
 588{
 589	/* Store TCP splice context information in read_descriptor_t. */
 590	read_descriptor_t rd_desc = {
 591		.arg.data = tss,
 592		.count	  = tss->len,
 593	};
 594
 595	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
 596}
 597
 598/**
 599 *  tcp_splice_read - splice data from TCP socket to a pipe
 600 * @sock:	socket to splice from
 601 * @ppos:	position (not valid)
 602 * @pipe:	pipe to splice to
 603 * @len:	number of bytes to splice
 604 * @flags:	splice modifier flags
 605 *
 606 * Description:
 607 *    Will read pages from given socket and fill them into a pipe.
 608 *
 609 **/
 610ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 611			struct pipe_inode_info *pipe, size_t len,
 612			unsigned int flags)
 613{
 614	struct sock *sk = sock->sk;
 615	struct tcp_splice_state tss = {
 616		.pipe = pipe,
 617		.len = len,
 618		.flags = flags,
 619	};
 620	long timeo;
 621	ssize_t spliced;
 622	int ret;
 623
 624	sock_rps_record_flow(sk);
 625	/*
 626	 * We can't seek on a socket input
 627	 */
 628	if (unlikely(*ppos))
 629		return -ESPIPE;
 630
 631	ret = spliced = 0;
 632
 633	lock_sock(sk);
 634
 635	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
 636	while (tss.len) {
 637		ret = __tcp_splice_read(sk, &tss);
 638		if (ret < 0)
 639			break;
 640		else if (!ret) {
 641			if (spliced)
 642				break;
 643			if (sock_flag(sk, SOCK_DONE))
 644				break;
 645			if (sk->sk_err) {
 646				ret = sock_error(sk);
 647				break;
 648			}
 649			if (sk->sk_shutdown & RCV_SHUTDOWN)
 650				break;
 651			if (sk->sk_state == TCP_CLOSE) {
 652				/*
 653				 * This occurs when user tries to read
 654				 * from never connected socket.
 655				 */
 656				if (!sock_flag(sk, SOCK_DONE))
 657					ret = -ENOTCONN;
 658				break;
 659			}
 660			if (!timeo) {
 661				ret = -EAGAIN;
 662				break;
 663			}
 664			sk_wait_data(sk, &timeo);
 665			if (signal_pending(current)) {
 666				ret = sock_intr_errno(timeo);
 667				break;
 668			}
 669			continue;
 670		}
 671		tss.len -= ret;
 672		spliced += ret;
 673
 674		if (!timeo)
 675			break;
 676		release_sock(sk);
 677		lock_sock(sk);
 678
 679		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
 680		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
 681		    signal_pending(current))
 682			break;
 683	}
 684
 685	release_sock(sk);
 686
 687	if (spliced)
 688		return spliced;
 689
 690	return ret;
 691}
 692EXPORT_SYMBOL(tcp_splice_read);
 693
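/*
 * Editorial illustration (userspace, not part of tcp.c): tcp_splice_read()
 * above is the path taken by splice(2) when the source is a TCP socket.  A
 * minimal sketch moving up to 64KiB from a socket into a pipe without a
 * round trip through userspace buffers; "sock_fd" is assumed to be a
 * connected TCP socket:
 */
#if 0	/* userspace sketch, never compiled as part of the kernel */
#define _GNU_SOURCE	/* for splice() and SPLICE_F_MOVE */
#include <fcntl.h>
#include <unistd.h>

static ssize_t example_splice_from_socket(int sock_fd)
{
	int pipefd[2];
	ssize_t n = -1;

	if (pipe(pipefd) == 0) {
		n = splice(sock_fd, NULL, pipefd[1], NULL, 65536, SPLICE_F_MOVE);
		close(pipefd[0]);
		close(pipefd[1]);
	}
	return n;
}
#endif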
 694struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
 695{
 696	struct sk_buff *skb;
 697
 698	/* The TCP header must be at least 32-bit aligned.  */
 699	size = ALIGN(size, 4);
 700
 701	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
 702	if (skb) {
 703		if (sk_wmem_schedule(sk, skb->truesize)) {
 704			/*
 705			 * Make sure that we have exactly size bytes
 706			 * available to the caller, no more, no less.
 707			 */
 708			skb_reserve(skb, skb_tailroom(skb) - size);
 709			return skb;
 710		}
 711		__kfree_skb(skb);
 712	} else {
 713		sk->sk_prot->enter_memory_pressure(sk);
 714		sk_stream_moderate_sndbuf(sk);
 715	}
 716	return NULL;
 717}
 718
 719static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 720				       int large_allowed)
 721{
 722	struct tcp_sock *tp = tcp_sk(sk);
 723	u32 xmit_size_goal, old_size_goal;
 724
 725	xmit_size_goal = mss_now;
 726
 727	if (large_allowed && sk_can_gso(sk)) {
 728		xmit_size_goal = ((sk->sk_gso_max_size - 1) -
 729				  inet_csk(sk)->icsk_af_ops->net_header_len -
 730				  inet_csk(sk)->icsk_ext_hdr_len -
 731				  tp->tcp_header_len);
 732
 733		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
 734
 735		/* We try hard to avoid divides here */
 736		old_size_goal = tp->xmit_size_goal_segs * mss_now;
 737
 738		if (likely(old_size_goal <= xmit_size_goal &&
 739			   old_size_goal + mss_now > xmit_size_goal)) {
 740			xmit_size_goal = old_size_goal;
 741		} else {
 742			tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
 743			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
 744		}
 745	}
 746
 747	return max(xmit_size_goal, mss_now);
 748}
 749
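/*
 * Editorial note (illustration, not part of tcp.c): tcp_xmit_size_goal()
 * above keeps the goal a whole multiple of mss_now so later paths never
 * need a divide.  Assuming mss_now = 1448 and roughly 64KB of GSO budget
 * left after the header lengths are subtracted, the goal is rounded down to
 * 45 * 1448 = 65160 bytes and the segment count (45) is cached in
 * tp->xmit_size_goal_segs; the cached value is reused for as long as the
 * freshly computed goal still falls in [old_goal, old_goal + mss_now).
 */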
 750static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
 751{
 752	int mss_now;
 753
 754	mss_now = tcp_current_mss(sk);
 755	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
 756
 757	return mss_now;
 758}
 759
 760static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
 761			 size_t psize, int flags)
 762{
 763	struct tcp_sock *tp = tcp_sk(sk);
 764	int mss_now, size_goal;
 765	int err;
 766	ssize_t copied;
 767	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
 768
 769	/* Wait for a connection to finish. */
 770	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
 771		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
 772			goto out_err;
 773
 774	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
 775
 776	mss_now = tcp_send_mss(sk, &size_goal, flags);
 777	copied = 0;
 778
 779	err = -EPIPE;
 780	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
 781		goto out_err;
 782
 783	while (psize > 0) {
 784		struct sk_buff *skb = tcp_write_queue_tail(sk);
 785		struct page *page = pages[poffset / PAGE_SIZE];
 786		int copy, i, can_coalesce;
 787		int offset = poffset % PAGE_SIZE;
 788		int size = min_t(size_t, psize, PAGE_SIZE - offset);
 789
 790		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
 791new_segment:
 792			if (!sk_stream_memory_free(sk))
 793				goto wait_for_sndbuf;
 794
 795			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
 796			if (!skb)
 797				goto wait_for_memory;
 798
 799			skb_entail(sk, skb);
 800			copy = size_goal;
 801		}
 802
 803		if (copy > size)
 804			copy = size;
 805
 806		i = skb_shinfo(skb)->nr_frags;
 807		can_coalesce = skb_can_coalesce(skb, i, page, offset);
 808		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
 809			tcp_mark_push(tp, skb);
 810			goto new_segment;
 811		}
 812		if (!sk_wmem_schedule(sk, copy))
 813			goto wait_for_memory;
 814
 815		if (can_coalesce) {
 816			skb_shinfo(skb)->frags[i - 1].size += copy;
 817		} else {
 818			get_page(page);
 819			skb_fill_page_desc(skb, i, page, offset, copy);
 820		}
 821
 822		skb->len += copy;
 823		skb->data_len += copy;
 824		skb->truesize += copy;
 825		sk->sk_wmem_queued += copy;
 826		sk_mem_charge(sk, copy);
 827		skb->ip_summed = CHECKSUM_PARTIAL;
 828		tp->write_seq += copy;
 829		TCP_SKB_CB(skb)->end_seq += copy;
 830		skb_shinfo(skb)->gso_segs = 0;
 831
 832		if (!copied)
 833			TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
 834
 835		copied += copy;
 836		poffset += copy;
 837		if (!(psize -= copy))
 838			goto out;
 839
 840		if (skb->len < size_goal || (flags & MSG_OOB))
 841			continue;
 842
 843		if (forced_push(tp)) {
 844			tcp_mark_push(tp, skb);
 845			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
 846		} else if (skb == tcp_send_head(sk))
 847			tcp_push_one(sk, mss_now);
 848		continue;
 849
 850wait_for_sndbuf:
 851		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 852wait_for_memory:
 853		if (copied)
 854			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
 855
 856		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
 857			goto do_error;
 858
 859		mss_now = tcp_send_mss(sk, &size_goal, flags);
 860	}
 861
 862out:
 863	if (copied)
 864		tcp_push(sk, flags, mss_now, tp->nonagle);
 865	return copied;
 866
 867do_error:
 868	if (copied)
 869		goto out;
 870out_err:
 871	return sk_stream_error(sk, flags, err);
 872}
 873
 874int tcp_sendpage(struct sock *sk, struct page *page, int offset,
 875		 size_t size, int flags)
 876{
 877	ssize_t res;
 878
 879	if (!(sk->sk_route_caps & NETIF_F_SG) ||
 880	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
 881		return sock_no_sendpage(sk->sk_socket, page, offset, size,
 882					flags);
 883
 884	lock_sock(sk);
 885	res = do_tcp_sendpages(sk, &page, offset, size, flags);
 886	release_sock(sk);
 887	return res;
 888}
 889EXPORT_SYMBOL(tcp_sendpage);
 890
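/*
 * Editorial illustration (userspace, not part of tcp.c): do_tcp_sendpages()
 * and tcp_sendpage() above form the zero-copy path behind sendfile(2) on a
 * TCP socket.  A minimal sketch, assuming "sock_fd" is a connected TCP
 * socket and "file_fd" is a regular file:
 */
#if 0	/* userspace sketch, never compiled as part of the kernel */
#include <sys/types.h>
#include <sys/sendfile.h>

static ssize_t example_sendfile_over_tcp(int sock_fd, int file_fd, size_t count)
{
	off_t off = 0;

	/* The kernel attaches the file's page cache pages to the socket queue. */
	return sendfile(sock_fd, file_fd, &off, count);
}
#endif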
 891#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
 892#define TCP_OFF(sk)	(sk->sk_sndmsg_off)
 893
 894static inline int select_size(struct sock *sk, int sg)
 895{
 896	struct tcp_sock *tp = tcp_sk(sk);
 897	int tmp = tp->mss_cache;
 898
 899	if (sg) {
 900		if (sk_can_gso(sk))
 901			tmp = 0;
 902		else {
 903			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
 904
 905			if (tmp >= pgbreak &&
 906			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
 907				tmp = pgbreak;
 908		}
 909	}
 910
 911	return tmp;
 912}
 913
 914int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 915		size_t size)
 916{
 917	struct iovec *iov;
 918	struct tcp_sock *tp = tcp_sk(sk);
 919	struct sk_buff *skb;
 920	int iovlen, flags;
 921	int mss_now, size_goal;
 922	int sg, err, copied;
 923	long timeo;
 924
 925	lock_sock(sk);
 926
 927	flags = msg->msg_flags;
 928	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
 929
 930	/* Wait for a connection to finish. */
 931	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
 932		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
 933			goto out_err;
 934
 935	/* This should be in poll */
 936	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
 937
 938	mss_now = tcp_send_mss(sk, &size_goal, flags);
 939
 940	/* Ok commence sending. */
 941	iovlen = msg->msg_iovlen;
 942	iov = msg->msg_iov;
 943	copied = 0;
 944
 945	err = -EPIPE;
 946	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
 947		goto out_err;
 948
 949	sg = sk->sk_route_caps & NETIF_F_SG;
 950
 951	while (--iovlen >= 0) {
 952		size_t seglen = iov->iov_len;
 953		unsigned char __user *from = iov->iov_base;
 954
 955		iov++;
 956
 957		while (seglen > 0) {
 958			int copy = 0;
 959			int max = size_goal;
 960
 961			skb = tcp_write_queue_tail(sk);
 962			if (tcp_send_head(sk)) {
 963				if (skb->ip_summed == CHECKSUM_NONE)
 964					max = mss_now;
 965				copy = max - skb->len;
 966			}
 967
 968			if (copy <= 0) {
 969new_segment:
 970				/* Allocate new segment. If the interface is SG,
 971				 * allocate skb fitting to single page.
 972				 */
 973				if (!sk_stream_memory_free(sk))
 974					goto wait_for_sndbuf;
 975
 976				skb = sk_stream_alloc_skb(sk,
 977							  select_size(sk, sg),
 978							  sk->sk_allocation);
 979				if (!skb)
 980					goto wait_for_memory;
 981
 982				/*
 983				 * Check whether we can use HW checksum.
 984				 */
 985				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
 986					skb->ip_summed = CHECKSUM_PARTIAL;
 987
 988				skb_entail(sk, skb);
 989				copy = size_goal;
 990				max = size_goal;
 991			}
 992
 993			/* Try to append data to the end of skb. */
 994			if (copy > seglen)
 995				copy = seglen;
 996
 997			/* Where to copy to? */
 998			if (skb_tailroom(skb) > 0) {
 999				/* We have some space in skb head. Superb! */
1000				if (copy > skb_tailroom(skb))
1001					copy = skb_tailroom(skb);
1002				err = skb_add_data_nocache(sk, skb, from, copy);
1003				if (err)
1004					goto do_fault;
1005			} else {
1006				int merge = 0;
1007				int i = skb_shinfo(skb)->nr_frags;
1008				struct page *page = TCP_PAGE(sk);
1009				int off = TCP_OFF(sk);
1010
1011				if (skb_can_coalesce(skb, i, page, off) &&
1012				    off != PAGE_SIZE) {
1013					/* We can extend the last page
1014					 * fragment. */
1015					merge = 1;
1016				} else if (i == MAX_SKB_FRAGS || !sg) {
1017					/* Need to add new fragment and cannot
1018					 * do this because interface is non-SG,
1019					 * or because all the page slots are
1020					 * busy. */
1021					tcp_mark_push(tp, skb);
1022					goto new_segment;
1023				} else if (page) {
1024					if (off == PAGE_SIZE) {
1025						put_page(page);
1026						TCP_PAGE(sk) = page = NULL;
1027						off = 0;
1028					}
1029				} else
1030					off = 0;
1031
1032				if (copy > PAGE_SIZE - off)
1033					copy = PAGE_SIZE - off;
1034
1035				if (!sk_wmem_schedule(sk, copy))
1036					goto wait_for_memory;
1037
1038				if (!page) {
1039					/* Allocate new cache page. */
1040					if (!(page = sk_stream_alloc_page(sk)))
1041						goto wait_for_memory;
1042				}
1043
1044				/* Time to copy data. We are close to
1045				 * the end! */
1046				err = skb_copy_to_page_nocache(sk, from, skb,
1047							       page, off, copy);
1048				if (err) {
1049					/* If this page was new, give it to the
1050					 * socket so it does not get leaked.
1051					 */
1052					if (!TCP_PAGE(sk)) {
1053						TCP_PAGE(sk) = page;
1054						TCP_OFF(sk) = 0;
1055					}
1056					goto do_error;
1057				}
1058
1059				/* Update the skb. */
1060				if (merge) {
1061					skb_shinfo(skb)->frags[i - 1].size +=
1062									copy;
1063				} else {
1064					skb_fill_page_desc(skb, i, page, off, copy);
1065					if (TCP_PAGE(sk)) {
1066						get_page(page);
1067					} else if (off + copy < PAGE_SIZE) {
1068						get_page(page);
1069						TCP_PAGE(sk) = page;
1070					}
1071				}
1072
1073				TCP_OFF(sk) = off + copy;
1074			}
1075
1076			if (!copied)
1077				TCP_SKB_CB(skb)->flags &= ~TCPHDR_PSH;
1078
1079			tp->write_seq += copy;
1080			TCP_SKB_CB(skb)->end_seq += copy;
1081			skb_shinfo(skb)->gso_segs = 0;
1082
1083			from += copy;
1084			copied += copy;
1085			if ((seglen -= copy) == 0 && iovlen == 0)
1086				goto out;
1087
1088			if (skb->len < max || (flags & MSG_OOB))
1089				continue;
1090
1091			if (forced_push(tp)) {
1092				tcp_mark_push(tp, skb);
1093				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
1094			} else if (skb == tcp_send_head(sk))
1095				tcp_push_one(sk, mss_now);
1096			continue;
1097
1098wait_for_sndbuf:
1099			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1100wait_for_memory:
1101			if (copied)
1102				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
1103
1104			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
1105				goto do_error;
1106
1107			mss_now = tcp_send_mss(sk, &size_goal, flags);
1108		}
1109	}
1110
1111out:
1112	if (copied)
1113		tcp_push(sk, flags, mss_now, tp->nonagle);
1114	release_sock(sk);
1115	return copied;
1116
1117do_fault:
1118	if (!skb->len) {
1119		tcp_unlink_write_queue(skb, sk);
1120		/* It is the one place in all of TCP, except connection
1121		 * reset, where we can be unlinking the send_head.
1122		 */
1123		tcp_check_send_head(sk, skb);
1124		sk_wmem_free_skb(sk, skb);
1125	}
1126
1127do_error:
1128	if (copied)
1129		goto out;
1130out_err:
1131	err = sk_stream_error(sk, flags, err);
1132	release_sock(sk);
1133	return err;
1134}
1135EXPORT_SYMBOL(tcp_sendmsg);
1136
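/*
 * Editorial illustration (userspace, not part of tcp.c): the iovec walk in
 * tcp_sendmsg() above is driven by sendmsg(2).  A minimal sketch that hands
 * the kernel two separate buffers in one call, assuming "fd" is a connected
 * TCP socket:
 */
#if 0	/* userspace sketch, never compiled as part of the kernel */
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t example_sendmsg_two_iovecs(int fd)
{
	struct iovec iov[2] = {
		{ .iov_base = "hello ",  .iov_len = 6 },
		{ .iov_base = "world\n", .iov_len = 6 },
	};
	struct msghdr msg = {
		.msg_iov    = iov,
		.msg_iovlen = 2,
	};

	/* tcp_sendmsg() copies both iovecs and coalesces them into segments. */
	return sendmsg(fd, &msg, 0);
}
#endif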
1137/*
1138 *	Handle reading urgent data. BSD has very simple semantics for
1139 *	this, no blocking and very strange errors 8)
1140 */
1141
1142static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
1143{
1144	struct tcp_sock *tp = tcp_sk(sk);
1145
1146	/* No URG data to read. */
1147	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
1148	    tp->urg_data == TCP_URG_READ)
1149		return -EINVAL;	/* Yes this is right ! */
1150
1151	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
1152		return -ENOTCONN;
1153
1154	if (tp->urg_data & TCP_URG_VALID) {
1155		int err = 0;
1156		char c = tp->urg_data;
1157
1158		if (!(flags & MSG_PEEK))
1159			tp->urg_data = TCP_URG_READ;
1160
1161		/* Read urgent data. */
1162		msg->msg_flags |= MSG_OOB;
1163
1164		if (len > 0) {
1165			if (!(flags & MSG_TRUNC))
1166				err = memcpy_toiovec(msg->msg_iov, &c, 1);
1167			len = 1;
1168		} else
1169			msg->msg_flags |= MSG_TRUNC;
1170
1171		return err ? -EFAULT : len;
1172	}
1173
1174	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1175		return 0;
1176
1177	/* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
1178	 * the available implementations agree in this case:
1179	 * this call should never block, independent of the
1180	 * blocking state of the socket.
1181	 * Mike <pall@rz.uni-karlsruhe.de>
1182	 */
1183	return -EAGAIN;
1184}
1185
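/*
 * Editorial illustration (userspace, not part of tcp.c): the BSD-style
 * urgent data semantics implemented by tcp_recv_urg() above are what
 * recv(2) with MSG_OOB exposes.  A minimal sketch, assuming "fd" is a
 * connected TCP socket and SO_OOBINLINE is not set:
 */
#if 0	/* userspace sketch, never compiled as part of the kernel */
#include <stdio.h>
#include <sys/socket.h>

static void example_read_urgent_byte(int fd)
{
	char c;

	if (recv(fd, &c, 1, MSG_OOB) == 1)
		printf("urgent byte: 0x%02x\n", (unsigned char)c);
	else
		/* EINVAL: no urgent data (or already read), EAGAIN: not arrived yet */
		perror("recv MSG_OOB");
}
#endif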
1186/* Clean up the receive buffer for full frames taken by the user,
1187 * then send an ACK if necessary.  COPIED is the number of bytes
1188 * tcp_recvmsg has given to the user so far, it speeds up the
1189 * calculation of whether or not we must ACK for the sake of
1190 * a window update.
1191 */
1192void tcp_cleanup_rbuf(struct sock *sk, int copied)
1193{
1194	struct tcp_sock *tp = tcp_sk(sk);
1195	int time_to_ack = 0;
1196
1197#if TCP_DEBUG
1198	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1199
1200	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1201	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1202	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1203#endif
1204
1205	if (inet_csk_ack_scheduled(sk)) {
1206		const struct inet_connection_sock *icsk = inet_csk(sk);
1207		   /* Delayed ACKs frequently hit locked sockets during bulk
1208		    * receive. */
1209		if (icsk->icsk_ack.blocked ||
1210		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
1211		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
1212		    /*
1213		     * If this read emptied read buffer, we send ACK, if
1214		     * connection is not bidirectional, user drained
1215		     * receive buffer and there was a small segment
1216		     * in queue.
1217		     */
1218		    (copied > 0 &&
1219		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
1220		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
1221		       !icsk->icsk_ack.pingpong)) &&
1222		      !atomic_read(&sk->sk_rmem_alloc)))
1223			time_to_ack = 1;
1224	}
1225
1226	/* We send an ACK if we can now advertise a non-zero window
1227	 * which has been raised "significantly".
1228	 *
1229	 * Even if window raised up to infinity, do not send window open ACK
1230	 * in states, where we will not receive more. It is useless.
1231	 */
1232	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1233		__u32 rcv_window_now = tcp_receive_window(tp);
1234
1235		/* Optimize, __tcp_select_window() is not cheap. */
1236		if (2*rcv_window_now <= tp->window_clamp) {
1237			__u32 new_window = __tcp_select_window(sk);
1238
1239			/* Send ACK now, if this read freed lots of space
1240			 * in our buffer. Certainly, new_window is new window.
1241			 * We can advertise it now, if it is not less than current one.
1242			 * "Lots" means "at least twice" here.
1243			 */
1244			if (new_window && new_window >= 2 * rcv_window_now)
1245				time_to_ack = 1;
1246		}
1247	}
1248	if (time_to_ack)
1249		tcp_send_ack(sk);
1250}
1251
1252static void tcp_prequeue_process(struct sock *sk)
1253{
1254	struct sk_buff *skb;
1255	struct tcp_sock *tp = tcp_sk(sk);
1256
1257	NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
1258
1259	/* RX process wants to run with disabled BHs, though it is not
1260	 * necessary */
1261	local_bh_disable();
1262	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1263		sk_backlog_rcv(sk, skb);
1264	local_bh_enable();
1265
1266	/* Clear memory counter. */
1267	tp->ucopy.memory = 0;
1268}
1269
1270#ifdef CONFIG_NET_DMA
1271static void tcp_service_net_dma(struct sock *sk, bool wait)
1272{
1273	dma_cookie_t done, used;
1274	dma_cookie_t last_issued;
1275	struct tcp_sock *tp = tcp_sk(sk);
1276
1277	if (!tp->ucopy.dma_chan)
1278		return;
1279
1280	last_issued = tp->ucopy.dma_cookie;
1281	dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1282
1283	do {
1284		if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1285					      last_issued, &done,
1286					      &used) == DMA_SUCCESS) {
1287			/* Safe to free early-copied skbs now */
1288			__skb_queue_purge(&sk->sk_async_wait_queue);
1289			break;
1290		} else {
1291			struct sk_buff *skb;
1292			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1293			       (dma_async_is_complete(skb->dma_cookie, done,
1294						      used) == DMA_SUCCESS)) {
1295				__skb_dequeue(&sk->sk_async_wait_queue);
1296				kfree_skb(skb);
1297			}
1298		}
1299	} while (wait);
1300}
1301#endif
1302
1303static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1304{
1305	struct sk_buff *skb;
1306	u32 offset;
1307
1308	skb_queue_walk(&sk->sk_receive_queue, skb) {
1309		offset = seq - TCP_SKB_CB(skb)->seq;
1310		if (tcp_hdr(skb)->syn)
1311			offset--;
1312		if (offset < skb->len || tcp_hdr(skb)->fin) {
1313			*off = offset;
1314			return skb;
1315		}
1316	}
1317	return NULL;
1318}
1319
1320/*
1321 * This routine provides an alternative to tcp_recvmsg() for routines
1322 * that would like to handle copying from skbuffs directly in 'sendfile'
1323 * fashion.
1324 * Note:
1325 *	- It is assumed that the socket was locked by the caller.
1326 *	- The routine does not block.
1327 *	- At present, there is no support for reading OOB data
1328 *	  or for 'peeking' the socket using this routine
1329 *	  (although both would be easy to implement).
1330 */
1331int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1332		  sk_read_actor_t recv_actor)
1333{
1334	struct sk_buff *skb;
1335	struct tcp_sock *tp = tcp_sk(sk);
1336	u32 seq = tp->copied_seq;
1337	u32 offset;
1338	int copied = 0;
1339
1340	if (sk->sk_state == TCP_LISTEN)
1341		return -ENOTCONN;
1342	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1343		if (offset < skb->len) {
1344			int used;
1345			size_t len;
1346
1347			len = skb->len - offset;
1348			/* Stop reading if we hit a patch of urgent data */
1349			if (tp->urg_data) {
1350				u32 urg_offset = tp->urg_seq - seq;
1351				if (urg_offset < len)
1352					len = urg_offset;
1353				if (!len)
1354					break;
1355			}
1356			used = recv_actor(desc, skb, offset, len);
1357			if (used < 0) {
1358				if (!copied)
1359					copied = used;
1360				break;
1361			} else if (used <= len) {
1362				seq += used;
1363				copied += used;
1364				offset += used;
1365			}
1366			/*
1367			 * If recv_actor drops the lock (e.g. TCP splice
1368			 * receive) the skb pointer might be invalid when
1369			 * getting here: tcp_collapse might have deleted it
1370			 * while aggregating skbs from the socket queue.
1371			 */
1372			skb = tcp_recv_skb(sk, seq-1, &offset);
1373			if (!skb || (offset+1 != skb->len))
1374				break;
1375		}
1376		if (tcp_hdr(skb)->fin) {
1377			sk_eat_skb(sk, skb, 0);
1378			++seq;
1379			break;
1380		}
1381		sk_eat_skb(sk, skb, 0);
1382		if (!desc->count)
1383			break;
1384		tp->copied_seq = seq;
1385	}
1386	tp->copied_seq = seq;
1387
1388	tcp_rcv_space_adjust(sk);
1389
1390	/* Clean up data we have read: This will do ACK frames. */
1391	if (copied > 0)
1392		tcp_cleanup_rbuf(sk, copied);
1393	return copied;
1394}
1395EXPORT_SYMBOL(tcp_read_sock);
1396
1397/*
1398 *	This routine copies from a sock struct into the user buffer.
1399 *
1400 *	Technical note: in 2.3 we work on _locked_ socket, so that
1401 *	tricks with *seq access order and skb->users are not required.
1402 *	Probably, code can be easily improved even more.
1403 */
1404
1405int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1406		size_t len, int nonblock, int flags, int *addr_len)
1407{
1408	struct tcp_sock *tp = tcp_sk(sk);
1409	int copied = 0;
1410	u32 peek_seq;
1411	u32 *seq;
1412	unsigned long used;
1413	int err;
1414	int target;		/* Read at least this many bytes */
1415	long timeo;
1416	struct task_struct *user_recv = NULL;
1417	int copied_early = 0;
1418	struct sk_buff *skb;
1419	u32 urg_hole = 0;
1420
1421	lock_sock(sk);
1422
1423	err = -ENOTCONN;
1424	if (sk->sk_state == TCP_LISTEN)
1425		goto out;
1426
1427	timeo = sock_rcvtimeo(sk, nonblock);
1428
1429	/* Urgent data needs to be handled specially. */
1430	if (flags & MSG_OOB)
1431		goto recv_urg;
1432
1433	seq = &tp->copied_seq;
1434	if (flags & MSG_PEEK) {
1435		peek_seq = tp->copied_seq;
1436		seq = &peek_seq;
1437	}
1438
1439	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1440
1441#ifdef CONFIG_NET_DMA
1442	tp->ucopy.dma_chan = NULL;
1443	preempt_disable();
1444	skb = skb_peek_tail(&sk->sk_receive_queue);
1445	{
1446		int available = 0;
1447
1448		if (skb)
1449			available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
1450		if ((available < target) &&
1451		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1452		    !sysctl_tcp_low_latency &&
1453		    dma_find_channel(DMA_MEMCPY)) {
1454			preempt_enable_no_resched();
1455			tp->ucopy.pinned_list =
1456					dma_pin_iovec_pages(msg->msg_iov, len);
1457		} else {
1458			preempt_enable_no_resched();
1459		}
1460	}
1461#endif
1462
1463	do {
1464		u32 offset;
1465
1466		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1467		if (tp->urg_data && tp->urg_seq == *seq) {
1468			if (copied)
1469				break;
1470			if (signal_pending(current)) {
1471				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1472				break;
1473			}
1474		}
1475
1476		/* Next get a buffer. */
1477
1478		skb_queue_walk(&sk->sk_receive_queue, skb) {
1479			/* Now that we have two receive queues this
1480			 * shouldn't happen.
1481			 */
1482			if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
1483				 "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
1484				 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
1485				 flags))
1486				break;
1487
1488			offset = *seq - TCP_SKB_CB(skb)->seq;
1489			if (tcp_hdr(skb)->syn)
1490				offset--;
1491			if (offset < skb->len)
1492				goto found_ok_skb;
1493			if (tcp_hdr(skb)->fin)
1494				goto found_fin_ok;
1495			WARN(!(flags & MSG_PEEK),
1496			     "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
1497			     *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
1498		}
1499
 1500		/* Well, if we have backlog, try to process it now. */
1501
1502		if (copied >= target && !sk->sk_backlog.tail)
1503			break;
1504
1505		if (copied) {
1506			if (sk->sk_err ||
1507			    sk->sk_state == TCP_CLOSE ||
1508			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
1509			    !timeo ||
1510			    signal_pending(current))
1511				break;
1512		} else {
1513			if (sock_flag(sk, SOCK_DONE))
1514				break;
1515
1516			if (sk->sk_err) {
1517				copied = sock_error(sk);
1518				break;
1519			}
1520
1521			if (sk->sk_shutdown & RCV_SHUTDOWN)
1522				break;
1523
1524			if (sk->sk_state == TCP_CLOSE) {
1525				if (!sock_flag(sk, SOCK_DONE)) {
1526					/* This occurs when user tries to read
1527					 * from never connected socket.
1528					 */
1529					copied = -ENOTCONN;
1530					break;
1531				}
1532				break;
1533			}
1534
1535			if (!timeo) {
1536				copied = -EAGAIN;
1537				break;
1538			}
1539
1540			if (signal_pending(current)) {
1541				copied = sock_intr_errno(timeo);
1542				break;
1543			}
1544		}
1545
1546		tcp_cleanup_rbuf(sk, copied);
1547
1548		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1549			/* Install new reader */
1550			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1551				user_recv = current;
1552				tp->ucopy.task = user_recv;
1553				tp->ucopy.iov = msg->msg_iov;
1554			}
1555
1556			tp->ucopy.len = len;
1557
1558			WARN_ON(tp->copied_seq != tp->rcv_nxt &&
1559				!(flags & (MSG_PEEK | MSG_TRUNC)));
1560
1561			/* Ugly... If prequeue is not empty, we have to
1562			 * process it before releasing socket, otherwise
1563			 * order will be broken at second iteration.
1564			 * More elegant solution is required!!!
1565			 *
1566			 * Look: we have the following (pseudo)queues:
1567			 *
1568			 * 1. packets in flight
1569			 * 2. backlog
1570			 * 3. prequeue
1571			 * 4. receive_queue
1572			 *
1573			 * Each queue can be processed only if the next ones
1574			 * are empty. At this point we have empty receive_queue.
1575			 * But prequeue _can_ be not empty after 2nd iteration,
1576			 * when we jumped to start of loop because backlog
1577			 * processing added something to receive_queue.
1578			 * We cannot release_sock(), because backlog contains
1579			 * packets arrived _after_ prequeued ones.
1580			 *
1581			 * Shortly, algorithm is clear --- to process all
1582			 * the queues in order. We could make it more directly,
1583			 * requeueing packets from backlog to prequeue, if
1584			 * is not empty. It is more elegant, but eats cycles,
1585			 * unfortunately.
1586			 */
1587			if (!skb_queue_empty(&tp->ucopy.prequeue))
1588				goto do_prequeue;
1589
1590			/* __ Set realtime policy in scheduler __ */
1591		}
1592
1593#ifdef CONFIG_NET_DMA
1594		if (tp->ucopy.dma_chan)
1595			dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1596#endif
1597		if (copied >= target) {
1598			/* Do not sleep, just process backlog. */
1599			release_sock(sk);
1600			lock_sock(sk);
1601		} else
1602			sk_wait_data(sk, &timeo);
1603
1604#ifdef CONFIG_NET_DMA
1605		tcp_service_net_dma(sk, false);  /* Don't block */
1606		tp->ucopy.wakeup = 0;
1607#endif
1608
1609		if (user_recv) {
1610			int chunk;
1611
1612			/* __ Restore normal policy in scheduler __ */
1613
1614			if ((chunk = len - tp->ucopy.len) != 0) {
1615				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1616				len -= chunk;
1617				copied += chunk;
1618			}
1619
1620			if (tp->rcv_nxt == tp->copied_seq &&
1621			    !skb_queue_empty(&tp->ucopy.prequeue)) {
1622do_prequeue:
1623				tcp_prequeue_process(sk);
1624
1625				if ((chunk = len - tp->ucopy.len) != 0) {
1626					NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1627					len -= chunk;
1628					copied += chunk;
1629				}
1630			}
1631		}
1632		if ((flags & MSG_PEEK) &&
1633		    (peek_seq - copied - urg_hole != tp->copied_seq)) {
1634			if (net_ratelimit())
1635				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1636				       current->comm, task_pid_nr(current));
1637			peek_seq = tp->copied_seq;
1638		}
1639		continue;
1640
1641	found_ok_skb:
1642		/* Ok so how much can we use? */
1643		used = skb->len - offset;
1644		if (len < used)
1645			used = len;
1646
1647		/* Do we have urgent data here? */
1648		if (tp->urg_data) {
1649			u32 urg_offset = tp->urg_seq - *seq;
1650			if (urg_offset < used) {
1651				if (!urg_offset) {
1652					if (!sock_flag(sk, SOCK_URGINLINE)) {
1653						++*seq;
1654						urg_hole++;
1655						offset++;
1656						used--;
1657						if (!used)
1658							goto skip_copy;
1659					}
1660				} else
1661					used = urg_offset;
1662			}
1663		}
1664
1665		if (!(flags & MSG_TRUNC)) {
1666#ifdef CONFIG_NET_DMA
1667			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1668				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
1669
1670			if (tp->ucopy.dma_chan) {
1671				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
1672					tp->ucopy.dma_chan, skb, offset,
1673					msg->msg_iov, used,
1674					tp->ucopy.pinned_list);
1675
1676				if (tp->ucopy.dma_cookie < 0) {
1677
1678					printk(KERN_ALERT "dma_cookie < 0\n");
1679
1680					/* Exception. Bailout! */
1681					if (!copied)
1682						copied = -EFAULT;
1683					break;
1684				}
1685
1686				dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1687
1688				if ((offset + used) == skb->len)
1689					copied_early = 1;
1690
1691			} else
1692#endif
1693			{
1694				err = skb_copy_datagram_iovec(skb, offset,
1695						msg->msg_iov, used);
1696				if (err) {
1697					/* Exception. Bailout! */
1698					if (!copied)
1699						copied = -EFAULT;
1700					break;
1701				}
1702			}
1703		}
1704
1705		*seq += used;
1706		copied += used;
1707		len -= used;
1708
1709		tcp_rcv_space_adjust(sk);
1710
1711skip_copy:
1712		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1713			tp->urg_data = 0;
1714			tcp_fast_path_check(sk);
1715		}
1716		if (used + offset < skb->len)
1717			continue;
1718
1719		if (tcp_hdr(skb)->fin)
1720			goto found_fin_ok;
1721		if (!(flags & MSG_PEEK)) {
1722			sk_eat_skb(sk, skb, copied_early);
1723			copied_early = 0;
1724		}
1725		continue;
1726
1727	found_fin_ok:
1728		/* Process the FIN. */
1729		++*seq;
1730		if (!(flags & MSG_PEEK)) {
1731			sk_eat_skb(sk, skb, copied_early);
1732			copied_early = 0;
1733		}
1734		break;
1735	} while (len > 0);
1736
1737	if (user_recv) {
1738		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1739			int chunk;
1740
1741			tp->ucopy.len = copied > 0 ? len : 0;
1742
1743			tcp_prequeue_process(sk);
1744
1745			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1746				NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1747				len -= chunk;
1748				copied += chunk;
1749			}
1750		}
1751
1752		tp->ucopy.task = NULL;
1753		tp->ucopy.len = 0;
1754	}
1755
1756#ifdef CONFIG_NET_DMA
1757	tcp_service_net_dma(sk, true);  /* Wait for queue to drain */
1758	tp->ucopy.dma_chan = NULL;
1759
1760	if (tp->ucopy.pinned_list) {
1761		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1762		tp->ucopy.pinned_list = NULL;
1763	}
1764#endif
1765
1766	/* According to UNIX98, msg_name/msg_namelen are ignored
1767	 * on connected socket. I was just happy when found this 8) --ANK
1768	 */
1769
1770	/* Clean up data we have read: This will do ACK frames. */
1771	tcp_cleanup_rbuf(sk, copied);
1772
1773	release_sock(sk);
1774	return copied;
1775
1776out:
1777	release_sock(sk);
1778	return err;
1779
1780recv_urg:
1781	err = tcp_recv_urg(sk, msg, len, flags);
1782	goto out;
1783}
1784EXPORT_SYMBOL(tcp_recvmsg);
1785
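/*
 * Editorial illustration (userspace, not part of tcp.c): two of the flag
 * paths handled by tcp_recvmsg() above, as seen through recv(2).  MSG_PEEK
 * copies data without advancing copied_seq; MSG_WAITALL raises the "target"
 * so the call keeps blocking until the full length (or EOF/error) arrives.
 * "fd" is assumed to be a connected TCP socket:
 */
#if 0	/* userspace sketch, never compiled as part of the kernel */
#include <sys/socket.h>

static void example_peek_then_read(int fd, char *buf, size_t len)
{
	recv(fd, buf, len, MSG_PEEK);		/* data stays queued in the kernel */
	recv(fd, buf, len, MSG_WAITALL);	/* now actually consume up to len bytes */
}
#endif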
1786void tcp_set_state(struct sock *sk, int state)
1787{
1788	int oldstate = sk->sk_state;
1789
1790	switch (state) {
1791	case TCP_ESTABLISHED:
1792		if (oldstate != TCP_ESTABLISHED)
1793			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
1794		break;
1795
1796	case TCP_CLOSE:
1797		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
1798			TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
1799
1800		sk->sk_prot->unhash(sk);
1801		if (inet_csk(sk)->icsk_bind_hash &&
1802		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
1803			inet_put_port(sk);
1804		/* fall through */
1805	default:
1806		if (oldstate == TCP_ESTABLISHED)
1807			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
1808	}
1809
1810	/* Change state AFTER socket is unhashed to avoid closed
1811	 * socket sitting in hash tables.
1812	 */
1813	sk->sk_state = state;
1814
1815#ifdef STATE_TRACE
1816	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
1817#endif
1818}
1819EXPORT_SYMBOL_GPL(tcp_set_state);
1820
1821/*
1822 *	State processing on a close. This implements the state shift for
1823 *	sending our FIN frame. Note that we only send a FIN for some
1824 *	states. A shutdown() may have already sent the FIN, or we may be
1825 *	closed.
1826 */
1827
1828static const unsigned char new_state[16] = {
1829  /* current state:        new state:      action:	*/
1830  /* (Invalid)		*/ TCP_CLOSE,
1831  /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1832  /* TCP_SYN_SENT	*/ TCP_CLOSE,
1833  /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1834  /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
1835  /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
1836  /* TCP_TIME_WAIT	*/ TCP_CLOSE,
1837  /* TCP_CLOSE		*/ TCP_CLOSE,
1838  /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK  | TCP_ACTION_FIN,
1839  /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
1840  /* TCP_LISTEN		*/ TCP_CLOSE,
1841  /* TCP_CLOSING	*/ TCP_CLOSING,
1842};
1843
1844static int tcp_close_state(struct sock *sk)
1845{
1846	int next = (int)new_state[sk->sk_state];
1847	int ns = next & TCP_STATE_MASK;
1848
1849	tcp_set_state(sk, ns);
1850
1851	return next & TCP_ACTION_FIN;
1852}
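/* Illustrative note (not part of the original source): each new_state[]
 * entry above packs the next state in the low TCP_STATE_MASK bits and may
 * OR in TCP_ACTION_FIN to request that a FIN be sent.  A minimal sketch of
 * the decode step that tcp_close_state() performs:
 *
 *	int next     = new_state[TCP_ESTABLISHED];  // TCP_FIN_WAIT1 | TCP_ACTION_FIN
 *	int ns       = next & TCP_STATE_MASK;       // TCP_FIN_WAIT1
 *	int send_fin = next & TCP_ACTION_FIN;       // non-zero: caller must emit a FIN
 *
 * So closing an ESTABLISHED socket moves it to FIN_WAIT1 and tells
 * tcp_close()/tcp_shutdown() to transmit a FIN.
 */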
1853
1854/*
1855 *	Shutdown the sending side of a connection. Much like close except
1856 *	that we don't shut down the receive side or sock_set_flag(sk, SOCK_DEAD).
1857 */
1858
1859void tcp_shutdown(struct sock *sk, int how)
1860{
1861	/*	We need to grab some memory, and put together a FIN,
1862	 *	and then put it into the queue to be sent.
1863	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1864	 */
1865	if (!(how & SEND_SHUTDOWN))
1866		return;
1867
1868	/* If we've already sent a FIN, or it's a closed state, skip this. */
1869	if ((1 << sk->sk_state) &
1870	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1871	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1872		/* Clear out any half completed packets.  FIN if needed. */
1873		if (tcp_close_state(sk))
1874			tcp_send_fin(sk);
1875	}
1876}
1877EXPORT_SYMBOL(tcp_shutdown);
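/* Illustrative example (not part of the original source): the SEND_SHUTDOWN
 * path above is normally reached from user space with shutdown() and
 * SHUT_WR, which half-closes the connection and queues a FIN while the
 * receive side stays usable:
 *
 *	#include <sys/socket.h>
 *
 *	shutdown(fd, SHUT_WR);		// our side sends a FIN; reads may still return data
 */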
1878
1879void tcp_close(struct sock *sk, long timeout)
1880{
1881	struct sk_buff *skb;
1882	int data_was_unread = 0;
1883	int state;
1884
1885	lock_sock(sk);
1886	sk->sk_shutdown = SHUTDOWN_MASK;
1887
1888	if (sk->sk_state == TCP_LISTEN) {
1889		tcp_set_state(sk, TCP_CLOSE);
1890
1891		/* Special case. */
1892		inet_csk_listen_stop(sk);
1893
1894		goto adjudge_to_death;
1895	}
1896
1897	/*  We need to flush the recv. buffs.  We do this only on the
1898	 *  descriptor close, not protocol-sourced closes, because the
1899	 *  reader process may not have drained the data yet!
1900	 */
1901	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1902		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
1903			  tcp_hdr(skb)->fin;
1904		data_was_unread += len;
1905		__kfree_skb(skb);
1906	}
1907
1908	sk_mem_reclaim(sk);
1909
1910	/* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
1911	if (sk->sk_state == TCP_CLOSE)
1912		goto adjudge_to_death;
1913
1914	/* As outlined in RFC 2525, section 2.17, we send a RST here because
1915	 * data was lost. To witness the awful effects of the old behavior of
1916	 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
1917	 * GET in an FTP client, suspend the process, wait for the client to
1918	 * advertise a zero window, then kill -9 the FTP client, wheee...
1919	 * Note: timeout is always zero in such a case.
1920	 */
1921	if (data_was_unread) {
1922		/* Unread data was tossed, zap the connection. */
1923		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
1924		tcp_set_state(sk, TCP_CLOSE);
1925		tcp_send_active_reset(sk, sk->sk_allocation);
1926	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1927		/* Check zero linger _after_ checking for unread data. */
1928		sk->sk_prot->disconnect(sk, 0);
1929		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
1930	} else if (tcp_close_state(sk)) {
1931		/* We FIN if the application ate all the data before
1932		 * zapping the connection.
1933		 */
1934
1935		/* RED-PEN. Formally speaking, we have broken TCP state
1936		 * machine. State transitions:
1937		 *
1938		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1939		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
1940		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1941		 *
1942		 * are legal only when FIN has been sent (i.e. in window),
1943		 * rather than queued out of window. Purists blame.
1944		 *
1945		 * F.e. "RFC state" is ESTABLISHED,
1946		 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
1947		 *
1948		 * The visible deviations are that sometimes
1949		 * we enter the time-wait state when it is not really required
1950		 * (harmless), and do not send active resets when they are
1951		 * required by the specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
1952		 * they look like CLOSING or LAST_ACK to Linux).
1953		 * Probably, I missed some more holelets.
1954		 * 						--ANK
1955		 */
1956		tcp_send_fin(sk);
1957	}
1958
1959	sk_stream_wait_close(sk, timeout);
1960
1961adjudge_to_death:
1962	state = sk->sk_state;
1963	sock_hold(sk);
1964	sock_orphan(sk);
1965
1966	/* It is the last release_sock in its life. It will remove backlog. */
1967	release_sock(sk);
1968
1969
1970	/* Now socket is owned by kernel and we acquire BH lock
1971	   to finish close. No need to check for user refs.
1972	 */
1973	local_bh_disable();
1974	bh_lock_sock(sk);
1975	WARN_ON(sock_owned_by_user(sk));
1976
1977	percpu_counter_inc(sk->sk_prot->orphan_count);
1978
1979	/* Have we already been destroyed by a softirq or backlog? */
1980	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
1981		goto out;
1982
1983	/*	This is a (useful) BSD violation of the RFC. There is a
1984	 *	problem with TCP as specified in that the other end could
1985	 *	keep a socket open forever with no application left at this end.
1986	 *	We use a 3 minute timeout (about the same as BSD) then kill
1987	 *	our end. If they send after that then tough - BUT: long enough
1988	 *	that we won't make the old 4*rto = almost no time - whoops
1989	 *	reset mistake.
1990	 *
1991	 *	Nope, it was not mistake. It is really desired behaviour
1992	 *	f.e. on http servers, when such sockets are useless, but
1993	 *	consume significant resources. Let's do it with special
1994	 *	linger2	option.					--ANK
1995	 */
1996
1997	if (sk->sk_state == TCP_FIN_WAIT2) {
1998		struct tcp_sock *tp = tcp_sk(sk);
1999		if (tp->linger2 < 0) {
2000			tcp_set_state(sk, TCP_CLOSE);
2001			tcp_send_active_reset(sk, GFP_ATOMIC);
2002			NET_INC_STATS_BH(sock_net(sk),
2003					LINUX_MIB_TCPABORTONLINGER);
2004		} else {
2005			const int tmo = tcp_fin_time(sk);
2006
2007			if (tmo > TCP_TIMEWAIT_LEN) {
2008				inet_csk_reset_keepalive_timer(sk,
2009						tmo - TCP_TIMEWAIT_LEN);
2010			} else {
2011				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
2012				goto out;
2013			}
2014		}
2015	}
2016	if (sk->sk_state != TCP_CLOSE) {
2017		sk_mem_reclaim(sk);
2018		if (tcp_too_many_orphans(sk, 0)) {
2019			if (net_ratelimit())
2020				printk(KERN_INFO "TCP: too many orphaned "
2021				       "sockets\n");
2022			tcp_set_state(sk, TCP_CLOSE);
2023			tcp_send_active_reset(sk, GFP_ATOMIC);
2024			NET_INC_STATS_BH(sock_net(sk),
2025					LINUX_MIB_TCPABORTONMEMORY);
2026		}
2027	}
2028
2029	if (sk->sk_state == TCP_CLOSE)
2030		inet_csk_destroy_sock(sk);
2031	/* Otherwise, socket is reprieved until protocol close. */
2032
2033out:
2034	bh_unlock_sock(sk);
2035	local_bh_enable();
2036	sock_put(sk);
2037}
2038EXPORT_SYMBOL(tcp_close);
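/* Illustrative example (not part of the original source): the zero-linger
 * branch above (SOCK_LINGER set with sk_lingertime == 0) is what an
 * application selects with SO_LINGER and l_linger = 0, making close()
 * abort the connection with a RST instead of the normal FIN handshake:
 *
 *	#include <sys/socket.h>
 *
 *	struct linger lg = { .l_onoff = 1, .l_linger = 0 };
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
 *	close(fd);			// connection is reset, not half-closed
 */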
2039
2040/* These states need RST on ABORT according to RFC793 */
2041
2042static inline int tcp_need_reset(int state)
2043{
2044	return (1 << state) &
2045	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
2046		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
2047}
2048
2049int tcp_disconnect(struct sock *sk, int flags)
2050{
2051	struct inet_sock *inet = inet_sk(sk);
2052	struct inet_connection_sock *icsk = inet_csk(sk);
2053	struct tcp_sock *tp = tcp_sk(sk);
2054	int err = 0;
2055	int old_state = sk->sk_state;
2056
2057	if (old_state != TCP_CLOSE)
2058		tcp_set_state(sk, TCP_CLOSE);
2059
2060	/* ABORT function of RFC793 */
2061	if (old_state == TCP_LISTEN) {
2062		inet_csk_listen_stop(sk);
2063	} else if (tcp_need_reset(old_state) ||
2064		   (tp->snd_nxt != tp->write_seq &&
2065		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
2066		/* The last check adjusts for discrepancy of Linux wrt. RFC
2067		 * states
2068		 */
2069		tcp_send_active_reset(sk, gfp_any());
2070		sk->sk_err = ECONNRESET;
2071	} else if (old_state == TCP_SYN_SENT)
2072		sk->sk_err = ECONNRESET;
2073
2074	tcp_clear_xmit_timers(sk);
2075	__skb_queue_purge(&sk->sk_receive_queue);
2076	tcp_write_queue_purge(sk);
2077	__skb_queue_purge(&tp->out_of_order_queue);
2078#ifdef CONFIG_NET_DMA
2079	__skb_queue_purge(&sk->sk_async_wait_queue);
2080#endif
2081
2082	inet->inet_dport = 0;
2083
2084	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
2085		inet_reset_saddr(sk);
2086
2087	sk->sk_shutdown = 0;
2088	sock_reset_flag(sk, SOCK_DONE);
2089	tp->srtt = 0;
2090	if ((tp->write_seq += tp->max_window + 2) == 0)
2091		tp->write_seq = 1;
2092	icsk->icsk_backoff = 0;
2093	tp->snd_cwnd = 2;
2094	icsk->icsk_probes_out = 0;
2095	tp->packets_out = 0;
2096	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
2097	tp->snd_cwnd_cnt = 0;
2098	tp->bytes_acked = 0;
2099	tp->window_clamp = 0;
2100	tcp_set_ca_state(sk, TCP_CA_Open);
2101	tcp_clear_retrans(tp);
2102	inet_csk_delack_init(sk);
2103	tcp_init_send_head(sk);
2104	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2105	__sk_dst_reset(sk);
2106
2107	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
2108
2109	sk->sk_error_report(sk);
2110	return err;
2111}
2112EXPORT_SYMBOL(tcp_disconnect);
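/* Illustrative example (not part of the original source): tcp_disconnect()
 * is typically reached from user space by calling connect() with an address
 * family of AF_UNSPEC, which dissolves the current association and returns
 * the socket to a clean, reusable state:
 *
 *	#include <sys/socket.h>
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *	connect(fd, &sa, sizeof(sa));	// resets the TCP connection state
 */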
2113
2114/*
2115 *	Socket option code for TCP.
2116 */
2117static int do_tcp_setsockopt(struct sock *sk, int level,
2118		int optname, char __user *optval, unsigned int optlen)
2119{
2120	struct tcp_sock *tp = tcp_sk(sk);
2121	struct inet_connection_sock *icsk = inet_csk(sk);
2122	int val;
2123	int err = 0;
2124
2125	/* These are data/string values, all the others are ints */
2126	switch (optname) {
2127	case TCP_CONGESTION: {
2128		char name[TCP_CA_NAME_MAX];
2129
2130		if (optlen < 1)
2131			return -EINVAL;
2132
2133		val = strncpy_from_user(name, optval,
2134					min_t(long, TCP_CA_NAME_MAX-1, optlen));
2135		if (val < 0)
2136			return -EFAULT;
2137		name[val] = 0;
2138
2139		lock_sock(sk);
2140		err = tcp_set_congestion_control(sk, name);
2141		release_sock(sk);
2142		return err;
2143	}
2144	case TCP_COOKIE_TRANSACTIONS: {
2145		struct tcp_cookie_transactions ctd;
2146		struct tcp_cookie_values *cvp = NULL;
2147
2148		if (sizeof(ctd) > optlen)
2149			return -EINVAL;
2150		if (copy_from_user(&ctd, optval, sizeof(ctd)))
2151			return -EFAULT;
2152
2153		if (ctd.tcpct_used > sizeof(ctd.tcpct_value) ||
2154		    ctd.tcpct_s_data_desired > TCP_MSS_DESIRED)
2155			return -EINVAL;
2156
2157		if (ctd.tcpct_cookie_desired == 0) {
2158			/* default to global value */
2159		} else if ((0x1 & ctd.tcpct_cookie_desired) ||
2160			   ctd.tcpct_cookie_desired > TCP_COOKIE_MAX ||
2161			   ctd.tcpct_cookie_desired < TCP_COOKIE_MIN) {
2162			return -EINVAL;
2163		}
2164
2165		if (TCP_COOKIE_OUT_NEVER & ctd.tcpct_flags) {
2166			/* Supersedes all other values */
2167			lock_sock(sk);
2168			if (tp->cookie_values != NULL) {
2169				kref_put(&tp->cookie_values->kref,
2170					 tcp_cookie_values_release);
2171				tp->cookie_values = NULL;
2172			}
2173			tp->rx_opt.cookie_in_always = 0; /* false */
2174			tp->rx_opt.cookie_out_never = 1; /* true */
2175			release_sock(sk);
2176			return err;
2177		}
2178
2179		/* Allocate ancillary memory before locking.
2180		 */
2181		if (ctd.tcpct_used > 0 ||
2182		    (tp->cookie_values == NULL &&
2183		     (sysctl_tcp_cookie_size > 0 ||
2184		      ctd.tcpct_cookie_desired > 0 ||
2185		      ctd.tcpct_s_data_desired > 0))) {
2186			cvp = kzalloc(sizeof(*cvp) + ctd.tcpct_used,
2187				      GFP_KERNEL);
2188			if (cvp == NULL)
2189				return -ENOMEM;
2190
2191			kref_init(&cvp->kref);
2192		}
2193		lock_sock(sk);
2194		tp->rx_opt.cookie_in_always =
2195			(TCP_COOKIE_IN_ALWAYS & ctd.tcpct_flags);
2196		tp->rx_opt.cookie_out_never = 0; /* false */
2197
2198		if (tp->cookie_values != NULL) {
2199			if (cvp != NULL) {
2200				/* Changed values are recorded by a changed
2201				 * pointer, ensuring the cookie will differ,
2202				 * without separately hashing each value later.
2203				 */
2204				kref_put(&tp->cookie_values->kref,
2205					 tcp_cookie_values_release);
2206			} else {
2207				cvp = tp->cookie_values;
2208			}
2209		}
2210
2211		if (cvp != NULL) {
2212			cvp->cookie_desired = ctd.tcpct_cookie_desired;
2213
2214			if (ctd.tcpct_used > 0) {
2215				memcpy(cvp->s_data_payload, ctd.tcpct_value,
2216				       ctd.tcpct_used);
2217				cvp->s_data_desired = ctd.tcpct_used;
2218				cvp->s_data_constant = 1; /* true */
2219			} else {
2220				/* No constant payload data. */
2221				cvp->s_data_desired = ctd.tcpct_s_data_desired;
2222				cvp->s_data_constant = 0; /* false */
2223			}
2224
2225			tp->cookie_values = cvp;
2226		}
2227		release_sock(sk);
2228		return err;
2229	}
2230	default:
2231		/* fallthru */
2232		break;
2233	}
2234
2235	if (optlen < sizeof(int))
2236		return -EINVAL;
2237
2238	if (get_user(val, (int __user *)optval))
2239		return -EFAULT;
2240
2241	lock_sock(sk);
2242
2243	switch (optname) {
2244	case TCP_MAXSEG:
2245		/* Values greater than interface MTU won't take effect. However
2246		 * at the point when this call is done we typically don't yet
2247		 * know which interface is going to be used */
2248		if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
2249			err = -EINVAL;
2250			break;
2251		}
2252		tp->rx_opt.user_mss = val;
2253		break;
2254
2255	case TCP_NODELAY:
2256		if (val) {
2257			/* TCP_NODELAY is weaker than TCP_CORK, so that
2258			 * this option on corked socket is remembered, but
2259			 * it is not activated until cork is cleared.
2260			 *
2261			 * However, when TCP_NODELAY is set we make
2262			 * an explicit push, which overrides even TCP_CORK
2263			 * for currently queued segments.
2264			 */
2265			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
2266			tcp_push_pending_frames(sk);
2267		} else {
2268			tp->nonagle &= ~TCP_NAGLE_OFF;
2269		}
2270		break;
2271
2272	case TCP_THIN_LINEAR_TIMEOUTS:
2273		if (val < 0 || val > 1)
2274			err = -EINVAL;
2275		else
2276			tp->thin_lto = val;
2277		break;
2278
2279	case TCP_THIN_DUPACK:
2280		if (val < 0 || val > 1)
2281			err = -EINVAL;
2282		else
2283			tp->thin_dupack = val;
2284		break;
2285
2286	case TCP_CORK:
2287		/* When set indicates to always queue non-full frames.
2288		 * Later the user clears this option and we transmit
2289		 * any pending partial frames in the queue.  This is
2290		 * meant to be used alongside sendfile() to get properly
2291		 * filled frames when the user (for example) must write
2292		 * out headers with a write() call first and then use
2293		 * sendfile to send out the data parts.
2294		 *
2295		 * TCP_CORK can be set together with TCP_NODELAY and it is
2296		 * stronger than TCP_NODELAY.
2297		 */
2298		if (val) {
2299			tp->nonagle |= TCP_NAGLE_CORK;
2300		} else {
2301			tp->nonagle &= ~TCP_NAGLE_CORK;
2302			if (tp->nonagle&TCP_NAGLE_OFF)
2303				tp->nonagle |= TCP_NAGLE_PUSH;
2304			tcp_push_pending_frames(sk);
2305		}
2306		break;
2307
2308	case TCP_KEEPIDLE:
2309		if (val < 1 || val > MAX_TCP_KEEPIDLE)
2310			err = -EINVAL;
2311		else {
2312			tp->keepalive_time = val * HZ;
2313			if (sock_flag(sk, SOCK_KEEPOPEN) &&
2314			    !((1 << sk->sk_state) &
2315			      (TCPF_CLOSE | TCPF_LISTEN))) {
2316				u32 elapsed = keepalive_time_elapsed(tp);
2317				if (tp->keepalive_time > elapsed)
2318					elapsed = tp->keepalive_time - elapsed;
2319				else
2320					elapsed = 0;
2321				inet_csk_reset_keepalive_timer(sk, elapsed);
2322			}
2323		}
2324		break;
2325	case TCP_KEEPINTVL:
2326		if (val < 1 || val > MAX_TCP_KEEPINTVL)
2327			err = -EINVAL;
2328		else
2329			tp->keepalive_intvl = val * HZ;
2330		break;
2331	case TCP_KEEPCNT:
2332		if (val < 1 || val > MAX_TCP_KEEPCNT)
2333			err = -EINVAL;
2334		else
2335			tp->keepalive_probes = val;
2336		break;
2337	case TCP_SYNCNT:
2338		if (val < 1 || val > MAX_TCP_SYNCNT)
2339			err = -EINVAL;
2340		else
2341			icsk->icsk_syn_retries = val;
2342		break;
2343
2344	case TCP_LINGER2:
2345		if (val < 0)
2346			tp->linger2 = -1;
2347		else if (val > sysctl_tcp_fin_timeout / HZ)
2348			tp->linger2 = 0;
2349		else
2350			tp->linger2 = val * HZ;
2351		break;
2352
2353	case TCP_DEFER_ACCEPT:
2354		/* Translate value in seconds to number of retransmits */
2355		icsk->icsk_accept_queue.rskq_defer_accept =
2356			secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
2357					TCP_RTO_MAX / HZ);
2358		break;
2359
2360	case TCP_WINDOW_CLAMP:
2361		if (!val) {
2362			if (sk->sk_state != TCP_CLOSE) {
2363				err = -EINVAL;
2364				break;
2365			}
2366			tp->window_clamp = 0;
2367		} else
2368			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
2369						SOCK_MIN_RCVBUF / 2 : val;
2370		break;
2371
2372	case TCP_QUICKACK:
2373		if (!val) {
2374			icsk->icsk_ack.pingpong = 1;
2375		} else {
2376			icsk->icsk_ack.pingpong = 0;
2377			if ((1 << sk->sk_state) &
2378			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
2379			    inet_csk_ack_scheduled(sk)) {
2380				icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
2381				tcp_cleanup_rbuf(sk, 1);
2382				if (!(val & 1))
2383					icsk->icsk_ack.pingpong = 1;
2384			}
2385		}
2386		break;
2387
2388#ifdef CONFIG_TCP_MD5SIG
2389	case TCP_MD5SIG:
2390		/* Read the IP->Key mappings from userspace */
2391		err = tp->af_specific->md5_parse(sk, optval, optlen);
2392		break;
2393#endif
2394	case TCP_USER_TIMEOUT:
2395		/* Cap the maximum time in ms that TCP will retry/retransmit
2396		 * before giving up and aborting (ETIMEDOUT) a connection.
2397		 */
2398		icsk->icsk_user_timeout = msecs_to_jiffies(val);
2399		break;
2400	default:
2401		err = -ENOPROTOOPT;
2402		break;
2403	}
2404
2405	release_sock(sk);
2406	return err;
2407}
2408
2409int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2410		   unsigned int optlen)
2411{
2412	struct inet_connection_sock *icsk = inet_csk(sk);
2413
2414	if (level != SOL_TCP)
2415		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
2416						     optval, optlen);
2417	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2418}
2419EXPORT_SYMBOL(tcp_setsockopt);
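/* Illustrative example (not part of the original source): the handlers in
 * do_tcp_setsockopt() above are driven by ordinary setsockopt() calls at the
 * IPPROTO_TCP level, e.g. disabling Nagle or selecting a congestion control
 * module (assuming "cubic" is built in or loaded):
 *
 *	#include <string.h>
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
 *	setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "cubic", strlen("cubic"));
 */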
2420
2421#ifdef CONFIG_COMPAT
2422int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
2423			  char __user *optval, unsigned int optlen)
2424{
2425	if (level != SOL_TCP)
2426		return inet_csk_compat_setsockopt(sk, level, optname,
2427						  optval, optlen);
2428	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2429}
2430EXPORT_SYMBOL(compat_tcp_setsockopt);
2431#endif
2432
2433/* Return information about state of tcp endpoint in API format. */
2434void tcp_get_info(struct sock *sk, struct tcp_info *info)
2435{
2436	struct tcp_sock *tp = tcp_sk(sk);
2437	const struct inet_connection_sock *icsk = inet_csk(sk);
2438	u32 now = tcp_time_stamp;
2439
2440	memset(info, 0, sizeof(*info));
2441
2442	info->tcpi_state = sk->sk_state;
2443	info->tcpi_ca_state = icsk->icsk_ca_state;
2444	info->tcpi_retransmits = icsk->icsk_retransmits;
2445	info->tcpi_probes = icsk->icsk_probes_out;
2446	info->tcpi_backoff = icsk->icsk_backoff;
2447
2448	if (tp->rx_opt.tstamp_ok)
2449		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2450	if (tcp_is_sack(tp))
2451		info->tcpi_options |= TCPI_OPT_SACK;
2452	if (tp->rx_opt.wscale_ok) {
2453		info->tcpi_options |= TCPI_OPT_WSCALE;
2454		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2455		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2456	}
2457
2458	if (tp->ecn_flags&TCP_ECN_OK)
2459		info->tcpi_options |= TCPI_OPT_ECN;
2460
2461	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2462	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
2463	info->tcpi_snd_mss = tp->mss_cache;
2464	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2465
2466	if (sk->sk_state == TCP_LISTEN) {
2467		info->tcpi_unacked = sk->sk_ack_backlog;
2468		info->tcpi_sacked = sk->sk_max_ack_backlog;
2469	} else {
2470		info->tcpi_unacked = tp->packets_out;
2471		info->tcpi_sacked = tp->sacked_out;
2472	}
2473	info->tcpi_lost = tp->lost_out;
2474	info->tcpi_retrans = tp->retrans_out;
2475	info->tcpi_fackets = tp->fackets_out;
2476
2477	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2478	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
2479	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2480
2481	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
2482	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2483	info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
2484	info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
2485	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2486	info->tcpi_snd_cwnd = tp->snd_cwnd;
2487	info->tcpi_advmss = tp->advmss;
2488	info->tcpi_reordering = tp->reordering;
2489
2490	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2491	info->tcpi_rcv_space = tp->rcvq_space.space;
2492
2493	info->tcpi_total_retrans = tp->total_retrans;
2494}
2495EXPORT_SYMBOL_GPL(tcp_get_info);
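/* Illustrative example (not part of the original source): user space reads
 * this structure with getsockopt(TCP_INFO).  do_tcp_getsockopt() below
 * copies at most the caller-supplied length, so older binaries keep working
 * as struct tcp_info grows:
 *
 *	#include <netinet/tcp.h>
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *	getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len);
 *	// ti.tcpi_rtt and ti.tcpi_rttvar are reported in microseconds
 */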
2496
2497static int do_tcp_getsockopt(struct sock *sk, int level,
2498		int optname, char __user *optval, int __user *optlen)
2499{
2500	struct inet_connection_sock *icsk = inet_csk(sk);
2501	struct tcp_sock *tp = tcp_sk(sk);
2502	int val, len;
2503
2504	if (get_user(len, optlen))
2505		return -EFAULT;
2506
2507	len = min_t(unsigned int, len, sizeof(int));
2508
2509	if (len < 0)
2510		return -EINVAL;
2511
2512	switch (optname) {
2513	case TCP_MAXSEG:
2514		val = tp->mss_cache;
2515		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2516			val = tp->rx_opt.user_mss;
2517		break;
2518	case TCP_NODELAY:
2519		val = !!(tp->nonagle&TCP_NAGLE_OFF);
2520		break;
2521	case TCP_CORK:
2522		val = !!(tp->nonagle&TCP_NAGLE_CORK);
2523		break;
2524	case TCP_KEEPIDLE:
2525		val = keepalive_time_when(tp) / HZ;
2526		break;
2527	case TCP_KEEPINTVL:
2528		val = keepalive_intvl_when(tp) / HZ;
2529		break;
2530	case TCP_KEEPCNT:
2531		val = keepalive_probes(tp);
2532		break;
2533	case TCP_SYNCNT:
2534		val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
2535		break;
2536	case TCP_LINGER2:
2537		val = tp->linger2;
2538		if (val >= 0)
2539			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2540		break;
2541	case TCP_DEFER_ACCEPT:
2542		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
2543				      TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
2544		break;
2545	case TCP_WINDOW_CLAMP:
2546		val = tp->window_clamp;
2547		break;
2548	case TCP_INFO: {
2549		struct tcp_info info;
2550
2551		if (get_user(len, optlen))
2552			return -EFAULT;
2553
2554		tcp_get_info(sk, &info);
2555
2556		len = min_t(unsigned int, len, sizeof(info));
2557		if (put_user(len, optlen))
2558			return -EFAULT;
2559		if (copy_to_user(optval, &info, len))
2560			return -EFAULT;
2561		return 0;
2562	}
2563	case TCP_QUICKACK:
2564		val = !icsk->icsk_ack.pingpong;
2565		break;
2566
2567	case TCP_CONGESTION:
2568		if (get_user(len, optlen))
2569			return -EFAULT;
2570		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2571		if (put_user(len, optlen))
2572			return -EFAULT;
2573		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
2574			return -EFAULT;
2575		return 0;
2576
2577	case TCP_COOKIE_TRANSACTIONS: {
2578		struct tcp_cookie_transactions ctd;
2579		struct tcp_cookie_values *cvp = tp->cookie_values;
2580
2581		if (get_user(len, optlen))
2582			return -EFAULT;
2583		if (len < sizeof(ctd))
2584			return -EINVAL;
2585
2586		memset(&ctd, 0, sizeof(ctd));
2587		ctd.tcpct_flags = (tp->rx_opt.cookie_in_always ?
2588				   TCP_COOKIE_IN_ALWAYS : 0)
2589				| (tp->rx_opt.cookie_out_never ?
2590				   TCP_COOKIE_OUT_NEVER : 0);
2591
2592		if (cvp != NULL) {
2593			ctd.tcpct_flags |= (cvp->s_data_in ?
2594					    TCP_S_DATA_IN : 0)
2595					 | (cvp->s_data_out ?
2596					    TCP_S_DATA_OUT : 0);
2597
2598			ctd.tcpct_cookie_desired = cvp->cookie_desired;
2599			ctd.tcpct_s_data_desired = cvp->s_data_desired;
2600
2601			memcpy(&ctd.tcpct_value[0], &cvp->cookie_pair[0],
2602			       cvp->cookie_pair_size);
2603			ctd.tcpct_used = cvp->cookie_pair_size;
2604		}
2605
2606		if (put_user(sizeof(ctd), optlen))
2607			return -EFAULT;
2608		if (copy_to_user(optval, &ctd, sizeof(ctd)))
2609			return -EFAULT;
2610		return 0;
2611	}
2612	case TCP_THIN_LINEAR_TIMEOUTS:
2613		val = tp->thin_lto;
2614		break;
2615	case TCP_THIN_DUPACK:
2616		val = tp->thin_dupack;
2617		break;
2618
2619	case TCP_USER_TIMEOUT:
2620		val = jiffies_to_msecs(icsk->icsk_user_timeout);
2621		break;
2622	default:
2623		return -ENOPROTOOPT;
2624	}
2625
2626	if (put_user(len, optlen))
2627		return -EFAULT;
2628	if (copy_to_user(optval, &val, len))
2629		return -EFAULT;
2630	return 0;
2631}
2632
2633int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2634		   int __user *optlen)
2635{
2636	struct inet_connection_sock *icsk = inet_csk(sk);
2637
2638	if (level != SOL_TCP)
2639		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2640						     optval, optlen);
2641	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2642}
2643EXPORT_SYMBOL(tcp_getsockopt);
2644
2645#ifdef CONFIG_COMPAT
2646int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2647			  char __user *optval, int __user *optlen)
2648{
2649	if (level != SOL_TCP)
2650		return inet_csk_compat_getsockopt(sk, level, optname,
2651						  optval, optlen);
2652	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2653}
2654EXPORT_SYMBOL(compat_tcp_getsockopt);
2655#endif
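/* Illustrative example (not part of the original source): TCP_USER_TIMEOUT,
 * handled in the set/get paths above, bounds how long transmitted data may
 * remain unacknowledged before the connection is aborted with ETIMEDOUT:
 *
 *	#include <netinet/tcp.h>
 *
 *	unsigned int ms = 30000;	// give up after 30 seconds without an ACK
 *	setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &ms, sizeof(ms));
 */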
2656
2657struct sk_buff *tcp_tso_segment(struct sk_buff *skb, u32 features)
2658{
2659	struct sk_buff *segs = ERR_PTR(-EINVAL);
2660	struct tcphdr *th;
2661	unsigned thlen;
2662	unsigned int seq;
2663	__be32 delta;
2664	unsigned int oldlen;
2665	unsigned int mss;
2666
2667	if (!pskb_may_pull(skb, sizeof(*th)))
2668		goto out;
2669
2670	th = tcp_hdr(skb);
2671	thlen = th->doff * 4;
2672	if (thlen < sizeof(*th))
2673		goto out;
2674
2675	if (!pskb_may_pull(skb, thlen))
2676		goto out;
2677
2678	oldlen = (u16)~skb->len;
2679	__skb_pull(skb, thlen);
2680
2681	mss = skb_shinfo(skb)->gso_size;
2682	if (unlikely(skb->len <= mss))
2683		goto out;
2684
2685	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
2686		/* Packet is from an untrusted source, reset gso_segs. */
2687		int type = skb_shinfo(skb)->gso_type;
2688
2689		if (unlikely(type &
2690			     ~(SKB_GSO_TCPV4 |
2691			       SKB_GSO_DODGY |
2692			       SKB_GSO_TCP_ECN |
2693			       SKB_GSO_TCPV6 |
2694			       0) ||
2695			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
2696			goto out;
2697
2698		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
2699
2700		segs = NULL;
2701		goto out;
2702	}
2703
2704	segs = skb_segment(skb, features);
2705	if (IS_ERR(segs))
2706		goto out;
2707
2708	delta = htonl(oldlen + (thlen + mss));
2709
2710	skb = segs;
2711	th = tcp_hdr(skb);
2712	seq = ntohl(th->seq);
2713
2714	do {
2715		th->fin = th->psh = 0;
2716
2717		th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2718				       (__force u32)delta));
2719		if (skb->ip_summed != CHECKSUM_PARTIAL)
2720			th->check =
2721			     csum_fold(csum_partial(skb_transport_header(skb),
2722						    thlen, skb->csum));
2723
2724		seq += mss;
2725		skb = skb->next;
2726		th = tcp_hdr(skb);
2727
2728		th->seq = htonl(seq);
2729		th->cwr = 0;
2730	} while (skb->next);
2731
2732	delta = htonl(oldlen + (skb->tail - skb->transport_header) +
2733		      skb->data_len);
2734	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2735				(__force u32)delta));
2736	if (skb->ip_summed != CHECKSUM_PARTIAL)
2737		th->check = csum_fold(csum_partial(skb_transport_header(skb),
2738						   thlen, skb->csum));
2739
2740out:
2741	return segs;
2742}
2743EXPORT_SYMBOL(tcp_tso_segment);
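/* Illustrative note (not part of the original source): the checksum fixup in
 * tcp_tso_segment() works in ones-complement arithmetic.  When oldlen is
 * computed, skb->len still covers the TCP header plus the whole payload, and
 * that length also appears in the pseudo-header sum.  Since
 * oldlen = (u16)~skb->len, adding (oldlen + new_len) to the running sum
 * cancels the old length and substitutes the per-segment one, roughly:
 *
 *	sum + old_len + ~old_len + new_len  ==  sum + 0xffff + new_len
 *	                                    ==  sum + new_len	(mod ones-complement)
 *
 * where new_len is thlen + mss for full segments and the residual
 * (skb->tail - skb->transport_header + skb->data_len) for the last one.
 */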
2744
2745struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2746{
2747	struct sk_buff **pp = NULL;
2748	struct sk_buff *p;
2749	struct tcphdr *th;
2750	struct tcphdr *th2;
2751	unsigned int len;
2752	unsigned int thlen;
2753	__be32 flags;
2754	unsigned int mss = 1;
2755	unsigned int hlen;
2756	unsigned int off;
2757	int flush = 1;
2758	int i;
2759
2760	off = skb_gro_offset(skb);
2761	hlen = off + sizeof(*th);
2762	th = skb_gro_header_fast(skb, off);
2763	if (skb_gro_header_hard(skb, hlen)) {
2764		th = skb_gro_header_slow(skb, hlen, off);
2765		if (unlikely(!th))
2766			goto out;
2767	}
2768
2769	thlen = th->doff * 4;
2770	if (thlen < sizeof(*th))
2771		goto out;
2772
2773	hlen = off + thlen;
2774	if (skb_gro_header_hard(skb, hlen)) {
2775		th = skb_gro_header_slow(skb, hlen, off);
2776		if (unlikely(!th))
2777			goto out;
2778	}
2779
2780	skb_gro_pull(skb, thlen);
2781
2782	len = skb_gro_len(skb);
2783	flags = tcp_flag_word(th);
2784
2785	for (; (p = *head); head = &p->next) {
2786		if (!NAPI_GRO_CB(p)->same_flow)
2787			continue;
2788
2789		th2 = tcp_hdr(p);
2790
2791		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
2792			NAPI_GRO_CB(p)->same_flow = 0;
2793			continue;
2794		}
2795
2796		goto found;
2797	}
2798
2799	goto out_check_final;
2800
2801found:
2802	flush = NAPI_GRO_CB(p)->flush;
2803	flush |= (__force int)(flags & TCP_FLAG_CWR);
2804	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
2805		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
2806	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
2807	for (i = sizeof(*th); i < thlen; i += 4)
2808		flush |= *(u32 *)((u8 *)th + i) ^
2809			 *(u32 *)((u8 *)th2 + i);
2810
2811	mss = skb_shinfo(p)->gso_size;
2812
2813	flush |= (len - 1) >= mss;
2814	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
2815
2816	if (flush || skb_gro_receive(head, skb)) {
2817		mss = 1;
2818		goto out_check_final;
2819	}
2820
2821	p = *head;
2822	th2 = tcp_hdr(p);
2823	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
2824
2825out_check_final:
2826	flush = len < mss;
2827	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
2828					TCP_FLAG_RST | TCP_FLAG_SYN |
2829					TCP_FLAG_FIN));
2830
2831	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
2832		pp = head;
2833
2834out:
2835	NAPI_GRO_CB(skb)->flush |= flush;
2836
2837	return pp;
2838}
2839EXPORT_SYMBOL(tcp_gro_receive);
2840
2841int tcp_gro_complete(struct sk_buff *skb)
2842{
2843	struct tcphdr *th = tcp_hdr(skb);
2844
2845	skb->csum_start = skb_transport_header(skb) - skb->head;
2846	skb->csum_offset = offsetof(struct tcphdr, check);
2847	skb->ip_summed = CHECKSUM_PARTIAL;
2848
2849	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
2850
2851	if (th->cwr)
2852		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
2853
2854	return 0;
2855}
2856EXPORT_SYMBOL(tcp_gro_complete);
2857
2858#ifdef CONFIG_TCP_MD5SIG
2859static unsigned long tcp_md5sig_users;
2860static struct tcp_md5sig_pool * __percpu *tcp_md5sig_pool;
2861static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2862
2863static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool * __percpu *pool)
2864{
2865	int cpu;
2866	for_each_possible_cpu(cpu) {
2867		struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
2868		if (p) {
2869			if (p->md5_desc.tfm)
2870				crypto_free_hash(p->md5_desc.tfm);
2871			kfree(p);
2872		}
2873	}
2874	free_percpu(pool);
2875}
2876
2877void tcp_free_md5sig_pool(void)
2878{
2879	struct tcp_md5sig_pool * __percpu *pool = NULL;
2880
2881	spin_lock_bh(&tcp_md5sig_pool_lock);
2882	if (--tcp_md5sig_users == 0) {
2883		pool = tcp_md5sig_pool;
2884		tcp_md5sig_pool = NULL;
2885	}
2886	spin_unlock_bh(&tcp_md5sig_pool_lock);
2887	if (pool)
2888		__tcp_free_md5sig_pool(pool);
2889}
2890EXPORT_SYMBOL(tcp_free_md5sig_pool);
2891
2892static struct tcp_md5sig_pool * __percpu *
2893__tcp_alloc_md5sig_pool(struct sock *sk)
2894{
2895	int cpu;
2896	struct tcp_md5sig_pool * __percpu *pool;
2897
2898	pool = alloc_percpu(struct tcp_md5sig_pool *);
2899	if (!pool)
2900		return NULL;
2901
2902	for_each_possible_cpu(cpu) {
2903		struct tcp_md5sig_pool *p;
2904		struct crypto_hash *hash;
2905
2906		p = kzalloc(sizeof(*p), sk->sk_allocation);
2907		if (!p)
2908			goto out_free;
2909		*per_cpu_ptr(pool, cpu) = p;
2910
2911		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2912		if (!hash || IS_ERR(hash))
2913			goto out_free;
2914
2915		p->md5_desc.tfm = hash;
2916	}
2917	return pool;
2918out_free:
2919	__tcp_free_md5sig_pool(pool);
2920	return NULL;
2921}
2922
2923struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
2924{
2925	struct tcp_md5sig_pool * __percpu *pool;
2926	int alloc = 0;
2927
2928retry:
2929	spin_lock_bh(&tcp_md5sig_pool_lock);
2930	pool = tcp_md5sig_pool;
2931	if (tcp_md5sig_users++ == 0) {
2932		alloc = 1;
2933		spin_unlock_bh(&tcp_md5sig_pool_lock);
2934	} else if (!pool) {
2935		tcp_md5sig_users--;
2936		spin_unlock_bh(&tcp_md5sig_pool_lock);
2937		cpu_relax();
2938		goto retry;
2939	} else
2940		spin_unlock_bh(&tcp_md5sig_pool_lock);
2941
2942	if (alloc) {
2943		/* we cannot hold spinlock here because this may sleep. */
2944		struct tcp_md5sig_pool * __percpu *p;
2945
2946		p = __tcp_alloc_md5sig_pool(sk);
2947		spin_lock_bh(&tcp_md5sig_pool_lock);
2948		if (!p) {
2949			tcp_md5sig_users--;
2950			spin_unlock_bh(&tcp_md5sig_pool_lock);
2951			return NULL;
2952		}
2953		pool = tcp_md5sig_pool;
2954		if (pool) {
2955			/* oops, it has already been assigned. */
2956			spin_unlock_bh(&tcp_md5sig_pool_lock);
2957			__tcp_free_md5sig_pool(p);
2958		} else {
2959			tcp_md5sig_pool = pool = p;
2960			spin_unlock_bh(&tcp_md5sig_pool_lock);
2961		}
2962	}
2963	return pool;
2964}
2965EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2966
2967
2968/**
2969 *	tcp_get_md5sig_pool - get md5sig_pool for this user
2970 *
2971 *	We use a percpu structure, so if we succeed, we exit with preemption
2972 *	and BH disabled, to make sure another thread or softirq handler
2973 *	won't try to get the same context.
2974 */
2975struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
2976{
2977	struct tcp_md5sig_pool * __percpu *p;
2978
2979	local_bh_disable();
2980
2981	spin_lock(&tcp_md5sig_pool_lock);
2982	p = tcp_md5sig_pool;
2983	if (p)
2984		tcp_md5sig_users++;
2985	spin_unlock(&tcp_md5sig_pool_lock);
2986
2987	if (p)
2988		return *this_cpu_ptr(p);
2989
2990	local_bh_enable();
2991	return NULL;
2992}
2993EXPORT_SYMBOL(tcp_get_md5sig_pool);
2994
2995void tcp_put_md5sig_pool(void)
2996{
2997	local_bh_enable();
2998	tcp_free_md5sig_pool();
2999}
3000EXPORT_SYMBOL(tcp_put_md5sig_pool);
3001
3002int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
3003			struct tcphdr *th)
3004{
3005	struct scatterlist sg;
3006	int err;
3007
3008	__sum16 old_checksum = th->check;
3009	th->check = 0;
3010	/* options aren't included in the hash */
3011	sg_init_one(&sg, th, sizeof(struct tcphdr));
3012	err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(struct tcphdr));
3013	th->check = old_checksum;
3014	return err;
3015}
3016EXPORT_SYMBOL(tcp_md5_hash_header);
3017
3018int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
3019			  struct sk_buff *skb, unsigned header_len)
3020{
3021	struct scatterlist sg;
3022	const struct tcphdr *tp = tcp_hdr(skb);
3023	struct hash_desc *desc = &hp->md5_desc;
3024	unsigned i;
3025	const unsigned head_data_len = skb_headlen(skb) > header_len ?
3026				       skb_headlen(skb) - header_len : 0;
3027	const struct skb_shared_info *shi = skb_shinfo(skb);
3028	struct sk_buff *frag_iter;
3029
3030	sg_init_table(&sg, 1);
3031
3032	sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
3033	if (crypto_hash_update(desc, &sg, head_data_len))
3034		return 1;
3035
3036	for (i = 0; i < shi->nr_frags; ++i) {
3037		const struct skb_frag_struct *f = &shi->frags[i];
3038		sg_set_page(&sg, f->page, f->size, f->page_offset);
3039		if (crypto_hash_update(desc, &sg, f->size))
3040			return 1;
3041	}
3042
3043	skb_walk_frags(skb, frag_iter)
3044		if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
3045			return 1;
3046
3047	return 0;
3048}
3049EXPORT_SYMBOL(tcp_md5_hash_skb_data);
3050
3051int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, struct tcp_md5sig_key *key)
3052{
3053	struct scatterlist sg;
3054
3055	sg_init_one(&sg, key->key, key->keylen);
3056	return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
3057}
3058EXPORT_SYMBOL(tcp_md5_hash_key);
3059
3060#endif
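/* Illustrative example (not part of the original source): the MD5 helpers
 * above implement the RFC 2385 TCP-MD5 option used mainly by BGP daemons.
 * A per-peer key is installed with TCP_MD5SIG, which lands in the md5_parse
 * handler referenced from do_tcp_setsockopt(); peer_addr below is a
 * placeholder for the peer's sockaddr and the field names follow
 * struct tcp_md5sig from linux/tcp.h:
 *
 *	#include <string.h>
 *	#include <linux/tcp.h>
 *
 *	struct tcp_md5sig md5 = { 0 };
 *	memcpy(&md5.tcpm_addr, &peer_addr, sizeof(peer_addr));
 *	md5.tcpm_keylen = strlen("secret");
 *	memcpy(md5.tcpm_key, "secret", md5.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */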
3061
3062/**
3063 * Each Responder maintains up to two secret values concurrently for
3064 * efficient secret rollover.  Each secret value has 4 states:
3065 *
3066 * Generating.  (tcp_secret_generating != tcp_secret_primary)
3067 *    Generates new Responder-Cookies, but not yet used for primary
3068 *    verification.  This is a short-term state, typically lasting only
3069 *    one round trip time (RTT).
3070 *
3071 * Primary.  (tcp_secret_generating == tcp_secret_primary)
3072 *    Used both for generation and primary verification.
3073 *
3074 * Retiring.  (tcp_secret_retiring != tcp_secret_secondary)
3075 *    Used for verification, until the first failure that can be
3076 *    verified by the newer Generating secret.  At that time, this
3077 *    cookie's state is changed to Secondary, and the Generating
3078 *    cookie's state is changed to Primary.  This is a short-term state,
3079 *    typically lasting only one round trip time (RTT).
3080 *
3081 * Secondary.  (tcp_secret_retiring == tcp_secret_secondary)
3082 *    Used for secondary verification, after primary verification
3083 *    failures.  This state lasts no more than twice the Maximum Segment
3084 *    Lifetime (2MSL).  Then, the secret is discarded.
3085 */
3086struct tcp_cookie_secret {
3087	/* The secret is divided into two parts.  The digest part is the
3088	 * equivalent of previously hashing a secret and saving the state,
3089	 * and serves as an initialization vector (IV).  The message part
3090	 * serves as the trailing secret.
3091	 */
3092	u32				secrets[COOKIE_WORKSPACE_WORDS];
3093	unsigned long			expires;
3094};
3095
3096#define TCP_SECRET_1MSL (HZ * TCP_PAWS_MSL)
3097#define TCP_SECRET_2MSL (HZ * TCP_PAWS_MSL * 2)
3098#define TCP_SECRET_LIFE (HZ * 600)
3099
3100static struct tcp_cookie_secret tcp_secret_one;
3101static struct tcp_cookie_secret tcp_secret_two;
3102
3103/* Essentially a circular list, without dynamic allocation. */
3104static struct tcp_cookie_secret *tcp_secret_generating;
3105static struct tcp_cookie_secret *tcp_secret_primary;
3106static struct tcp_cookie_secret *tcp_secret_retiring;
3107static struct tcp_cookie_secret *tcp_secret_secondary;
3108
3109static DEFINE_SPINLOCK(tcp_secret_locker);
3110
3111/* Select a pseudo-random word in the cookie workspace.
3112 */
3113static inline u32 tcp_cookie_work(const u32 *ws, const int n)
3114{
3115	return ws[COOKIE_DIGEST_WORDS + ((COOKIE_MESSAGE_WORDS-1) & ws[n])];
3116}
3117
3118/* Fill bakery[COOKIE_WORKSPACE_WORDS] with generator, updating as needed.
3119 * Called in softirq context.
3120 * Returns: 0 for success.
3121 */
3122int tcp_cookie_generator(u32 *bakery)
3123{
3124	unsigned long jiffy = jiffies;
3125
3126	if (unlikely(time_after_eq(jiffy, tcp_secret_generating->expires))) {
3127		spin_lock_bh(&tcp_secret_locker);
3128		if (!time_after_eq(jiffy, tcp_secret_generating->expires)) {
3129			/* refreshed by another */
3130			memcpy(bakery,
3131			       &tcp_secret_generating->secrets[0],
3132			       COOKIE_WORKSPACE_WORDS);
3133		} else {
3134			/* still needs refreshing */
3135			get_random_bytes(bakery, COOKIE_WORKSPACE_WORDS);
3136
3137			/* The first time, paranoia assumes that the
3138			 * randomization function isn't as strong.  But,
3139			 * this secret initialization is delayed until
3140			 * the last possible moment (packet arrival).
3141			 * Although that time is observable, it is
3142			 * unpredictably variable.  Mash in the most
3143			 * volatile clock bits available, and expire the
3144			 * secret extra quickly.
3145			 */
3146			if (unlikely(tcp_secret_primary->expires ==
3147				     tcp_secret_secondary->expires)) {
3148				struct timespec tv;
3149
3150				getnstimeofday(&tv);
3151				bakery[COOKIE_DIGEST_WORDS+0] ^=
3152					(u32)tv.tv_nsec;
3153
3154				tcp_secret_secondary->expires = jiffy
3155					+ TCP_SECRET_1MSL
3156					+ (0x0f & tcp_cookie_work(bakery, 0));
3157			} else {
3158				tcp_secret_secondary->expires = jiffy
3159					+ TCP_SECRET_LIFE
3160					+ (0xff & tcp_cookie_work(bakery, 1));
3161				tcp_secret_primary->expires = jiffy
3162					+ TCP_SECRET_2MSL
3163					+ (0x1f & tcp_cookie_work(bakery, 2));
3164			}
3165			memcpy(&tcp_secret_secondary->secrets[0],
3166			       bakery, COOKIE_WORKSPACE_WORDS);
3167
3168			rcu_assign_pointer(tcp_secret_generating,
3169					   tcp_secret_secondary);
3170			rcu_assign_pointer(tcp_secret_retiring,
3171					   tcp_secret_primary);
3172			/*
3173			 * Neither call_rcu() nor synchronize_rcu() needed.
3174			 * Retiring data is not freed.  It is replaced after
3175			 * further (locked) pointer updates, and a quiet time
3176			 * (minimum 1MSL, maximum LIFE - 2MSL).
3177			 */
3178		}
3179		spin_unlock_bh(&tcp_secret_locker);
3180	} else {
3181		rcu_read_lock_bh();
3182		memcpy(bakery,
3183		       &rcu_dereference(tcp_secret_generating)->secrets[0],
3184		       COOKIE_WORKSPACE_WORDS);
3185		rcu_read_unlock_bh();
3186	}
3187	return 0;
3188}
3189EXPORT_SYMBOL(tcp_cookie_generator);
3190
3191void tcp_done(struct sock *sk)
3192{
3193	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
3194		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
3195
3196	tcp_set_state(sk, TCP_CLOSE);
3197	tcp_clear_xmit_timers(sk);
3198
3199	sk->sk_shutdown = SHUTDOWN_MASK;
3200
3201	if (!sock_flag(sk, SOCK_DEAD))
3202		sk->sk_state_change(sk);
3203	else
3204		inet_csk_destroy_sock(sk);
3205}
3206EXPORT_SYMBOL_GPL(tcp_done);
3207
3208extern struct tcp_congestion_ops tcp_reno;
3209
3210static __initdata unsigned long thash_entries;
3211static int __init set_thash_entries(char *str)
3212{
3213	if (!str)
3214		return 0;
3215	thash_entries = simple_strtoul(str, &str, 0);
3216	return 1;
3217}
3218__setup("thash_entries=", set_thash_entries);
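/* Illustrative example (not part of the original source): thash_entries is a
 * boot-time parameter, so the size of the established hash chosen in
 * tcp_init() below can be pinned from the kernel command line, e.g.:
 *
 *	thash_entries=131072
 *
 * When it is left at 0 (the default), alloc_large_system_hash() sizes the
 * table from available memory, capped here at 512k entries.
 */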
3219
3220void __init tcp_init(void)
3221{
3222	struct sk_buff *skb = NULL;
3223	unsigned long limit;
3224	int i, max_share, cnt;
3225	unsigned long jiffy = jiffies;
3226
3227	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
3228
3229	percpu_counter_init(&tcp_sockets_allocated, 0);
3230	percpu_counter_init(&tcp_orphan_count, 0);
3231	tcp_hashinfo.bind_bucket_cachep =
3232		kmem_cache_create("tcp_bind_bucket",
3233				  sizeof(struct inet_bind_bucket), 0,
3234				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3235
3236	/* Size and allocate the main established and bind bucket
3237	 * hash tables.
3238	 *
3239	 * The methodology is similar to that of the buffer cache.
3240	 */
3241	tcp_hashinfo.ehash =
3242		alloc_large_system_hash("TCP established",
3243					sizeof(struct inet_ehash_bucket),
3244					thash_entries,
3245					(totalram_pages >= 128 * 1024) ?
3246					13 : 15,
3247					0,
3248					NULL,
3249					&tcp_hashinfo.ehash_mask,
3250					thash_entries ? 0 : 512 * 1024);
3251	for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) {
3252		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
3253		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i);
3254	}
3255	if (inet_ehash_locks_alloc(&tcp_hashinfo))
3256		panic("TCP: failed to alloc ehash_locks");
3257	tcp_hashinfo.bhash =
3258		alloc_large_system_hash("TCP bind",
3259					sizeof(struct inet_bind_hashbucket),
3260					tcp_hashinfo.ehash_mask + 1,
3261					(totalram_pages >= 128 * 1024) ?
3262					13 : 15,
3263					0,
3264					&tcp_hashinfo.bhash_size,
3265					NULL,
3266					64 * 1024);
3267	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
3268	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
3269		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
3270		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
3271	}
3272
3273
3274	cnt = tcp_hashinfo.ehash_mask + 1;
3275
3276	tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
3277	sysctl_tcp_max_orphans = cnt / 2;
3278	sysctl_max_syn_backlog = max(128, cnt / 256);
3279
3280	limit = nr_free_buffer_pages() / 8;
3281	limit = max(limit, 128UL);
3282	sysctl_tcp_mem[0] = limit / 4 * 3;
3283	sysctl_tcp_mem[1] = limit;
3284	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
3285
3286	/* Set per-socket limits to no more than 1/128 the pressure threshold */
3287	limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
3288	max_share = min(4UL*1024*1024, limit);
3289
3290	sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
3291	sysctl_tcp_wmem[1] = 16*1024;
3292	sysctl_tcp_wmem[2] = max(64*1024, max_share);
3293
3294	sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
3295	sysctl_tcp_rmem[1] = 87380;
3296	sysctl_tcp_rmem[2] = max(87380, max_share);
3297
3298	printk(KERN_INFO "TCP: Hash tables configured "
3299	       "(established %u bind %u)\n",
3300	       tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
3301
3302	tcp_register_congestion_control(&tcp_reno);
3303
3304	memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets));
3305	memset(&tcp_secret_two.secrets[0], 0, sizeof(tcp_secret_two.secrets));
3306	tcp_secret_one.expires = jiffy; /* past due */
3307	tcp_secret_two.expires = jiffy; /* past due */
3308	tcp_secret_generating = &tcp_secret_one;
3309	tcp_secret_primary = &tcp_secret_one;
3310	tcp_secret_retiring = &tcp_secret_two;
3311	tcp_secret_secondary = &tcp_secret_two;
3312}
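/* Illustrative note (not part of the original source): the three-element
 * sysctl_tcp_rmem/sysctl_tcp_wmem/sysctl_tcp_mem arrays initialised above
 * are exported as the familiar runtime tunables under
 * /proc/sys/net/ipv4/tcp_rmem, tcp_wmem and tcp_mem (min, default/pressure,
 * max), so the boot-time sizing here is only a starting point that
 * administrators may override.
 */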
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		Implementation of the Transmission Control Protocol(TCP).
   8 *
   9 * Authors:	Ross Biro
  10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  11 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
  12 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
  13 *		Florian La Roche, <flla@stud.uni-sb.de>
  14 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
  15 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
  16 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  17 *		Matthew Dillon, <dillon@apollo.west.oic.com>
  18 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  19 *		Jorge Cwik, <jorge@laser.satlink.net>
  20 *
  21 * Fixes:
  22 *		Alan Cox	:	Numerous verify_area() calls
  23 *		Alan Cox	:	Set the ACK bit on a reset
  24 *		Alan Cox	:	Stopped it crashing if it closed while
  25 *					sk->inuse=1 and was trying to connect
  26 *					(tcp_err()).
  27 *		Alan Cox	:	All icmp error handling was broken
  28 *					pointers passed where wrong and the
  29 *					socket was looked up backwards. Nobody
  30 *					tested any icmp error code obviously.
  31 *		Alan Cox	:	tcp_err() now handled properly. It
  32 *					wakes people on errors. poll
  33 *					behaves and the icmp error race
  34 *					has gone by moving it into sock.c
  35 *		Alan Cox	:	tcp_send_reset() fixed to work for
  36 *					everything not just packets for
  37 *					unknown sockets.
  38 *		Alan Cox	:	tcp option processing.
  39 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
  40 *					syn rule wrong]
  41 *		Herp Rosmanith  :	More reset fixes
  42 *		Alan Cox	:	No longer acks invalid rst frames.
  43 *					Acking any kind of RST is right out.
  44 *		Alan Cox	:	Sets an ignore me flag on an rst
  45 *					receive otherwise odd bits of prattle
  46 *					escape still
  47 *		Alan Cox	:	Fixed another acking RST frame bug.
  48 *					Should stop LAN workplace lockups.
  49 *		Alan Cox	: 	Some tidyups using the new skb list
  50 *					facilities
  51 *		Alan Cox	:	sk->keepopen now seems to work
  52 *		Alan Cox	:	Pulls options out correctly on accepts
  53 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
  54 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
  55 *					bit to skb ops.
  56 *		Alan Cox	:	Tidied tcp_data to avoid a potential
  57 *					nasty.
  58 *		Alan Cox	:	Added some better commenting, as the
  59 *					tcp is hard to follow
  60 *		Alan Cox	:	Removed incorrect check for 20 * psh
  61 *	Michael O'Reilly	:	ack < copied bug fix.
  62 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
  63 *		Alan Cox	:	FIN with no memory -> CRASH
  64 *		Alan Cox	:	Added socket option proto entries.
  65 *					Also added awareness of them to accept.
  66 *		Alan Cox	:	Added TCP options (SOL_TCP)
  67 *		Alan Cox	:	Switched wakeup calls to callbacks,
  68 *					so the kernel can layer network
  69 *					sockets.
  70 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
  71 *		Alan Cox	:	Handle FIN (more) properly (we hope).
  72 *		Alan Cox	:	RST frames sent on unsynchronised
  73 *					state ack error.
  74 *		Alan Cox	:	Put in missing check for SYN bit.
  75 *		Alan Cox	:	Added tcp_select_window() aka NET2E
  76 *					window non shrink trick.
  77 *		Alan Cox	:	Added a couple of small NET2E timer
  78 *					fixes
  79 *		Charles Hedrick :	TCP fixes
  80 *		Toomas Tamm	:	TCP window fixes
  81 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
  82 *		Charles Hedrick	:	Rewrote most of it to actually work
  83 *		Linus		:	Rewrote tcp_read() and URG handling
  84 *					completely
  85 *		Gerhard Koerting:	Fixed some missing timer handling
  86 *		Matthew Dillon  :	Reworked TCP machine states as per RFC
  87 *		Gerhard Koerting:	PC/TCP workarounds
  88 *		Adam Caldwell	:	Assorted timer/timing errors
  89 *		Matthew Dillon	:	Fixed another RST bug
  90 *		Alan Cox	:	Move to kernel side addressing changes.
  91 *		Alan Cox	:	Beginning work on TCP fastpathing
  92 *					(not yet usable)
  93 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
  94 *		Alan Cox	:	TCP fast path debugging
  95 *		Alan Cox	:	Window clamping
  96 *		Michael Riepe	:	Bug in tcp_check()
  97 *		Matt Dillon	:	More TCP improvements and RST bug fixes
  98 *		Matt Dillon	:	Yet more small nasties remove from the
  99 *					TCP code (Be very nice to this man if
 100 *					tcp finally works 100%) 8)
 101 *		Alan Cox	:	BSD accept semantics.
 102 *		Alan Cox	:	Reset on closedown bug.
 103 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 104 *		Michael Pall	:	Handle poll() after URG properly in
 105 *					all cases.
 106 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 107 *					(multi URG PUSH broke rlogin).
 108 *		Michael Pall	:	Fix the multi URG PUSH problem in
 109 *					tcp_readable(), poll() after URG
 110 *					works now.
 111 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 112 *					BSD api.
 113 *		Alan Cox	:	Changed the semantics of sk->socket to
 114 *					fix a race and a signal problem with
 115 *					accept() and async I/O.
 116 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 117 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 118 *		Craig I. Hagan  :	Allow for BSD compatible TIME_WAIT for
 119 *					clients/servers which listen in on
 120 *					fixed ports.
 121 *		Alan Cox	:	Cleaned the above up and shrank it to
 122 *					a sensible code size.
 123 *		Alan Cox	:	Self connect lockup fix.
 124 *		Alan Cox	:	No connect to multicast.
 125 *		Ross Biro	:	Close unaccepted children on master
 126 *					socket close.
 127 *		Alan Cox	:	Reset tracing code.
 128 *		Alan Cox	:	Spurious resets on shutdown.
 129 *		Alan Cox	:	Giant 15 minute/60 second timer error
 130 *		Alan Cox	:	Small whoops in polling before an
 131 *					accept.
 132 *		Alan Cox	:	Kept the state trace facility since
 133 *					it's handy for debugging.
 134 *		Alan Cox	:	More reset handler fixes.
 135 *		Alan Cox	:	Started rewriting the code based on
 136 *					the RFC's for other useful protocol
 137 *					references see: Comer, KA9Q NOS, and
 138 *					for a reference on the difference
 139 *					between specifications and how BSD
 140 *					works see the 4.4lite source.
 141 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 142 *					close.
 143 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 144 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 145 *		Alan Cox	:	Reimplemented timers as per the RFC
 146 *					and using multiple timers for sanity.
 147 *		Alan Cox	:	Small bug fixes, and a lot of new
 148 *					comments.
 149 *		Alan Cox	:	Fixed dual reader crash by locking
 150 *					the buffers (much like datagram.c)
 151 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 152 *					now gets fed up of retrying without
 153 *					(even a no space) answer.
 154 *		Alan Cox	:	Extracted closing code better
 155 *		Alan Cox	:	Fixed the closing state machine to
 156 *					resemble the RFC.
 157 *		Alan Cox	:	More 'per spec' fixes.
 158 *		Jorge Cwik	:	Even faster checksumming.
 159 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 160 *					only frames. At least one pc tcp stack
 161 *					generates them.
 162 *		Alan Cox	:	Cache last socket.
 163 *		Alan Cox	:	Per route irtt.
 164 *		Matt Day	:	poll()->select() match BSD precisely on error
 165 *		Alan Cox	:	New buffers
 166 *		Marc Tamsky	:	Various sk->prot->retransmits and
 167 *					sk->retransmits misupdating fixed.
 168 *					Fixed tcp_write_timeout: stuck close,
 169 *					and TCP syn retries gets used now.
 170 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 171 *					ack if state is TCP_CLOSED.
 172 *		Alan Cox	:	Look up device on a retransmit - routes may
 173 *					change. Doesn't yet cope with MSS shrink right
 174 *					but it's a start!
 175 *		Marc Tamsky	:	Closing in closing fixes.
 176 *		Mike Shaver	:	RFC1122 verifications.
 177 *		Alan Cox	:	rcv_saddr errors.
 178 *		Alan Cox	:	Block double connect().
 179 *		Alan Cox	:	Small hooks for enSKIP.
 180 *		Alexey Kuznetsov:	Path MTU discovery.
 181 *		Alan Cox	:	Support soft errors.
 182 *		Alan Cox	:	Fix MTU discovery pathological case
 183 *					when the remote claims no mtu!
 184 *		Marc Tamsky	:	TCP_CLOSE fix.
 185 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 186 *					window but wrong (fixes NT lpd problems)
 187 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 188 *		Joerg Reuter	:	No modification of locked buffers in
 189 *					tcp_do_retransmit()
 190 *		Eric Schenk	:	Changed receiver side silly window
 191 *					avoidance algorithm to BSD style
 192 *					algorithm. This doubles throughput
 193 *					against machines running Solaris,
 194 *					and seems to result in general
 195 *					improvement.
 196 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 197 *	Willy Konynenberg	:	Transparent proxying support.
 198 *	Mike McLagan		:	Routing by source
 199 *		Keith Owens	:	Do proper merging with partial SKB's in
 200 *					tcp_do_sendmsg to avoid burstiness.
 201 *		Eric Schenk	:	Fix fast close down bug with
 202 *					shutdown() followed by close().
 203 *		Andi Kleen 	:	Make poll agree with SIGIO
 204 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 205 *					lingertime == 0 (RFC 793 ABORT Call)
 206 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 207 *					csum_and_copy_from_user() if possible.
 208 *
 209 * Description of States:
 210 *
 211 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 212 *
 213 *	TCP_SYN_RECV		received a connection request, sent ack,
 214 *				waiting for final ack in three-way handshake.
 215 *
 216 *	TCP_ESTABLISHED		connection established
 217 *
 218 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 219 *				transmission of remaining buffered data
 220 *
 221 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 222 *				to shutdown
 223 *
 224 *	TCP_CLOSING		both sides have shutdown but we still have
 225 *				data we have to finish sending
 226 *
 227 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 228 *				closed, can only be entered from FIN_WAIT2
 229 *				or CLOSING.  Required because the other end
 230 *				may not have gotten our last ACK causing it
 231 *				to retransmit the data packet (which we ignore)
 232 *
 233 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 234 *				us to finish writing our data and to shutdown
 235 *				(we have to close() to move on to LAST_ACK)
 236 *
 237 *	TCP_LAST_ACK		our side has shutdown after remote has
 238 *				shutdown.  There may still be data in our
 239 *				buffer that we have to finish sending
 240 *
 241 *	TCP_CLOSE		socket is finished
 242 */
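/* A minimal user-space sketch (not part of this file): the states listed
 * above can be observed from an application through TCP_INFO, whose
 * tcpi_state field carries one of the TCP_* values described in the
 * comment. The function and variable names below are assumptions.
 */
#include <netinet/in.h>		/* IPPROTO_TCP */
#include <netinet/tcp.h>	/* TCP_INFO, struct tcp_info */
#include <stdio.h>
#include <sys/socket.h>

static void print_tcp_state(int fd)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
		printf("tcp state: %u\n", info.tcpi_state);
}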
 243
 244#define pr_fmt(fmt) "TCP: " fmt
 245
 246#include <crypto/hash.h>
 247#include <linux/kernel.h>
 248#include <linux/module.h>
 249#include <linux/types.h>
 250#include <linux/fcntl.h>
 251#include <linux/poll.h>
 252#include <linux/inet_diag.h>
 253#include <linux/init.h>
 254#include <linux/fs.h>
 255#include <linux/skbuff.h>
 256#include <linux/scatterlist.h>
 257#include <linux/splice.h>
 258#include <linux/net.h>
 259#include <linux/socket.h>
 260#include <linux/random.h>
 261#include <linux/memblock.h>
 262#include <linux/highmem.h>
 263#include <linux/swap.h>
 264#include <linux/cache.h>
 265#include <linux/err.h>
 266#include <linux/time.h>
 267#include <linux/slab.h>
 268#include <linux/errqueue.h>
 269#include <linux/static_key.h>
 270
 271#include <net/icmp.h>
 272#include <net/inet_common.h>
 273#include <net/tcp.h>
 274#include <net/xfrm.h>
 275#include <net/ip.h>
 276#include <net/sock.h>
 277
 278#include <linux/uaccess.h>
 279#include <asm/ioctls.h>
 280#include <net/busy_poll.h>
 281
 282struct percpu_counter tcp_orphan_count;
 283EXPORT_SYMBOL_GPL(tcp_orphan_count);
 284
 285long sysctl_tcp_mem[3] __read_mostly;
 286EXPORT_SYMBOL(sysctl_tcp_mem);
 287
 288atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
 289EXPORT_SYMBOL(tcp_memory_allocated);
 290
 291#if IS_ENABLED(CONFIG_SMC)
 292DEFINE_STATIC_KEY_FALSE(tcp_have_smc);
 293EXPORT_SYMBOL(tcp_have_smc);
 294#endif
 295
 296/*
 297 * Current number of TCP sockets.
 298 */
 299struct percpu_counter tcp_sockets_allocated;
 300EXPORT_SYMBOL(tcp_sockets_allocated);
 301
 302/*
 303 * TCP splice context
 304 */
 305struct tcp_splice_state {
 306	struct pipe_inode_info *pipe;
 307	size_t len;
 308	unsigned int flags;
 309};
 310
 311/*
 312 * Pressure flag: try to collapse.
 313 * Technical note: it is used by multiple contexts non atomically.
 314 * All the __sk_mem_schedule() is of this nature: accounting
 315 * is strict, actions are advisory and have some latency.
 316 */
 317unsigned long tcp_memory_pressure __read_mostly;
 318EXPORT_SYMBOL_GPL(tcp_memory_pressure);
 319
 320DEFINE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
 321EXPORT_SYMBOL(tcp_rx_skb_cache_key);
 322
 323DEFINE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
 324
 325void tcp_enter_memory_pressure(struct sock *sk)
 326{
 327	unsigned long val;
 328
 329	if (READ_ONCE(tcp_memory_pressure))
 330		return;
 331	val = jiffies;
 332
 333	if (!val)
 334		val--;
 335	if (!cmpxchg(&tcp_memory_pressure, 0, val))
 336		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
 337}
 338EXPORT_SYMBOL_GPL(tcp_enter_memory_pressure);
 339
 340void tcp_leave_memory_pressure(struct sock *sk)
 341{
 342	unsigned long val;
 343
 344	if (!READ_ONCE(tcp_memory_pressure))
 345		return;
 346	val = xchg(&tcp_memory_pressure, 0);
 347	if (val)
 348		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURESCHRONO,
 349			      jiffies_to_msecs(jiffies - val));
 350}
 351EXPORT_SYMBOL_GPL(tcp_leave_memory_pressure);
 352
 353/* Convert seconds to retransmits based on initial and max timeout */
 354static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
 355{
 356	u8 res = 0;
 357
 358	if (seconds > 0) {
 359		int period = timeout;
 360
 361		res = 1;
 362		while (seconds > period && res < 255) {
 363			res++;
 364			timeout <<= 1;
 365			if (timeout > rto_max)
 366				timeout = rto_max;
 367			period += timeout;
 368		}
 369	}
 370	return res;
 371}
 372
 373/* Convert retransmits to seconds based on initial and max timeout */
 374static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
 375{
 376	int period = 0;
 377
 378	if (retrans > 0) {
 379		period = timeout;
 380		while (--retrans) {
 381			timeout <<= 1;
 382			if (timeout > rto_max)
 383				timeout = rto_max;
 384			period += timeout;
 385		}
 386	}
 387	return period;
 388}
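/* Worked example (illustrative numbers): with an initial timeout of 1 unit
 * and an rto_max of 120 units, retrans_to_secs(4, ...) accumulates
 * 1 + 2 + 4 + 8 = 15, and secs_to_retrans(15, ...) maps back to 4, so the
 * two helpers invert each other as long as the doubling never hits rto_max.
 */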
 389
 390static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
 391{
 392	u32 rate = READ_ONCE(tp->rate_delivered);
 393	u32 intv = READ_ONCE(tp->rate_interval_us);
 394	u64 rate64 = 0;
 395
 396	if (rate && intv) {
 397		rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
 398		do_div(rate64, intv);
 399	}
 400	return rate64;
 401}
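/* Worked example (illustrative numbers): rate_delivered = 10 segments over
 * rate_interval_us = 5000 with mss_cache = 1448 gives
 * 10 * 1448 * 1000000 / 5000 = 2896000 bytes/sec (~2.9 MB/s).
 */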
 402
 403/* Address-family independent initialization for a tcp_sock.
 404 *
 405 * NOTE: A lot of things are set to zero explicitly by the call to
 406 *       sk_alloc(), so they need not be done here.
 407 */
 408void tcp_init_sock(struct sock *sk)
 409{
 410	struct inet_connection_sock *icsk = inet_csk(sk);
 411	struct tcp_sock *tp = tcp_sk(sk);
 412
 413	tp->out_of_order_queue = RB_ROOT;
 414	sk->tcp_rtx_queue = RB_ROOT;
 415	tcp_init_xmit_timers(sk);
 416	INIT_LIST_HEAD(&tp->tsq_node);
 417	INIT_LIST_HEAD(&tp->tsorted_sent_queue);
 418
 419	icsk->icsk_rto = TCP_TIMEOUT_INIT;
 420	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
 421	minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);
 422
 423	/* So many TCP implementations out there (incorrectly) count the
 424	 * initial SYN frame in their delayed-ACK and congestion control
 425	 * algorithms that we must have the following bandaid to talk
 426	 * efficiently to them.  -DaveM
 427	 */
 428	tp->snd_cwnd = TCP_INIT_CWND;
 429
 430	/* There's a bubble in the pipe until at least the first ACK. */
 431	tp->app_limited = ~0U;
 432
 433	/* See draft-stevens-tcpca-spec-01 for discussion of the
 434	 * initialization of these values.
 435	 */
 436	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 437	tp->snd_cwnd_clamp = ~0;
 438	tp->mss_cache = TCP_MSS_DEFAULT;
 439
 440	tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
 441	tcp_assign_congestion_control(sk);
 442
 443	tp->tsoffset = 0;
 444	tp->rack.reo_wnd_steps = 1;
 445
 446	sk->sk_state = TCP_CLOSE;
 447
 448	sk->sk_write_space = sk_stream_write_space;
 449	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
 450
 451	icsk->icsk_sync_mss = tcp_sync_mss;
 452
 453	WRITE_ONCE(sk->sk_sndbuf, sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
 454	WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
 455
 456	sk_sockets_allocated_inc(sk);
 457	sk->sk_route_forced_caps = NETIF_F_GSO;
 458}
 459EXPORT_SYMBOL(tcp_init_sock);
 460
 461static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
 462{
 463	struct sk_buff *skb = tcp_write_queue_tail(sk);
 464
 465	if (tsflags && skb) {
 466		struct skb_shared_info *shinfo = skb_shinfo(skb);
 467		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 468
 469		sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
 470		if (tsflags & SOF_TIMESTAMPING_TX_ACK)
 471			tcb->txstamp_ack = 1;
 472		if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
 473			shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
 474	}
 475}
 476
 477static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
 478					  int target, struct sock *sk)
 479{
 480	return (READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq) >= target) ||
 481		(sk->sk_prot->stream_memory_read ?
 482		sk->sk_prot->stream_memory_read(sk) : false);
 483}
 484
 485/*
 486 *	Wait for a TCP event.
 487 *
 488 *	Note that we don't need to lock the socket, as the upper poll layers
 489 *	take care of normal races (between the test and the event) and we don't
 490 *	go look at any of the socket buffers directly.
 491 */
 492__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 493{
 494	__poll_t mask;
 495	struct sock *sk = sock->sk;
 496	const struct tcp_sock *tp = tcp_sk(sk);
 497	int state;
 498
 499	sock_poll_wait(file, sock, wait);
 500
 501	state = inet_sk_state_load(sk);
 502	if (state == TCP_LISTEN)
 503		return inet_csk_listen_poll(sk);
 504
 505	/* Socket is not locked. We are protected from async events
 506	 * by poll logic and correct handling of state changes
 507	 * made by other threads is impossible in any case.
 508	 */
 509
 510	mask = 0;
 511
 512	/*
 513	 * EPOLLHUP is certainly not done right. But poll() doesn't
 514	 * have a notion of HUP in just one direction, and for a
 515	 * socket the read side is more interesting.
 516	 *
 517	 * Some poll() documentation says that EPOLLHUP is incompatible
 518	 * with the EPOLLOUT/POLLWR flags, so somebody should check this
 519	 * all. But careful, it tends to be safer to return too many
 520	 * bits than too few, and you can easily break real applications
 521	 * if you don't tell them that something has hung up!
 522	 *
 523	 * Check-me.
 524	 *
 525	 * Check number 1. EPOLLHUP is an _UNMASKABLE_ event (see UNIX98 and
 526	 * our fs/select.c). It means that after we received EOF,
 527	 * poll always returns immediately, making it impossible to poll() for write()
 528	 * in state CLOSE_WAIT. One solution is evident --- to set EPOLLHUP
 529	 * if and only if shutdown has been made in both directions.
 530	 * Actually, it is interesting to look at how Solaris and DUX
 531	 * solve this dilemma. I would prefer, if EPOLLHUP were maskable,
 532	 * then we could set it on SND_SHUTDOWN. BTW examples given
 533	 * in Stevens' books assume exactly this behaviour, it explains
 534	 * why EPOLLHUP is incompatible with EPOLLOUT.	--ANK
 535	 *
 536	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
 537	 * blocking on fresh not-connected or disconnected socket. --ANK
 538	 */
 539	if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
 540		mask |= EPOLLHUP;
 541	if (sk->sk_shutdown & RCV_SHUTDOWN)
 542		mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
 543
 544	/* Connected or passive Fast Open socket? */
 545	if (state != TCP_SYN_SENT &&
 546	    (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
 547		int target = sock_rcvlowat(sk, 0, INT_MAX);
 548
 549		if (READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
 550		    !sock_flag(sk, SOCK_URGINLINE) &&
 551		    tp->urg_data)
 552			target++;
 553
 554		if (tcp_stream_is_readable(tp, target, sk))
 555			mask |= EPOLLIN | EPOLLRDNORM;
 556
 557		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
 558			if (sk_stream_is_writeable(sk)) {
 559				mask |= EPOLLOUT | EPOLLWRNORM;
 560			} else {  /* send SIGIO later */
 561				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 562				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 563
 564				/* Race breaker. If space is freed after
 565				 * wspace test but before the flags are set,
 566				 * IO signal will be lost. Memory barrier
 567				 * pairs with the input side.
 568				 */
 569				smp_mb__after_atomic();
 570				if (sk_stream_is_writeable(sk))
 571					mask |= EPOLLOUT | EPOLLWRNORM;
 572			}
 573		} else
 574			mask |= EPOLLOUT | EPOLLWRNORM;
 575
 576		if (tp->urg_data & TCP_URG_VALID)
 577			mask |= EPOLLPRI;
 578	} else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
 579		/* Active TCP fastopen socket with defer_connect
 580		 * Return EPOLLOUT so application can call write()
 581		 * in order for kernel to generate SYN+data
 582		 */
 583		mask |= EPOLLOUT | EPOLLWRNORM;
 584	}
 585	/* This barrier is coupled with smp_wmb() in tcp_reset() */
 586	smp_rmb();
 587	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
 588		mask |= EPOLLERR;
 589
 590	return mask;
 591}
 592EXPORT_SYMBOL(tcp_poll);
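/* A minimal user-space sketch (not part of this file) of how the mask built
 * by tcp_poll() is typically consumed; the fd and the 1000 ms timeout are
 * assumptions.
 */
#include <poll.h>
#include <stdio.h>

static void wait_for_tcp_events(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };

	if (poll(&pfd, 1, 1000 /* ms */) > 0) {
		if (pfd.revents & POLLIN)
			printf("readable\n");
		if (pfd.revents & POLLOUT)
			printf("writable\n");
		if (pfd.revents & (POLLERR | POLLHUP))
			printf("error or hangup\n");
	}
}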
 593
 594int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 595{
 596	struct tcp_sock *tp = tcp_sk(sk);
 597	int answ;
 598	bool slow;
 599
 600	switch (cmd) {
 601	case SIOCINQ:
 602		if (sk->sk_state == TCP_LISTEN)
 603			return -EINVAL;
 604
 605		slow = lock_sock_fast(sk);
 606		answ = tcp_inq(sk);
 607		unlock_sock_fast(sk, slow);
 608		break;
 609	case SIOCATMARK:
 610		answ = tp->urg_data &&
 611		       READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
 612		break;
 613	case SIOCOUTQ:
 614		if (sk->sk_state == TCP_LISTEN)
 615			return -EINVAL;
 616
 617		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
 618			answ = 0;
 619		else
 620			answ = READ_ONCE(tp->write_seq) - tp->snd_una;
 621		break;
 622	case SIOCOUTQNSD:
 623		if (sk->sk_state == TCP_LISTEN)
 624			return -EINVAL;
 625
 626		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
 627			answ = 0;
 628		else
 629			answ = READ_ONCE(tp->write_seq) -
 630			       READ_ONCE(tp->snd_nxt);
 631		break;
 632	default:
 633		return -ENOIOCTLCMD;
 634	}
 635
 636	return put_user(answ, (int __user *)arg);
 637}
 638EXPORT_SYMBOL(tcp_ioctl);
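/* A minimal user-space sketch (not part of this file) of the ioctls handled
 * above; SIOCINQ/SIOCOUTQ come from <linux/sockios.h>, the fd is an
 * assumption.
 */
#include <linux/sockios.h>	/* SIOCINQ, SIOCOUTQ */
#include <stdio.h>
#include <sys/ioctl.h>

static void print_queue_depths(int fd)
{
	int inq = 0, outq = 0;

	if (ioctl(fd, SIOCINQ, &inq) == 0)	/* bytes ready to read */
		printf("receive queue: %d bytes\n", inq);
	if (ioctl(fd, SIOCOUTQ, &outq) == 0)	/* bytes sent but not yet acked */
		printf("send queue: %d bytes\n", outq);
}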
 639
 640static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
 641{
 642	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
 643	tp->pushed_seq = tp->write_seq;
 644}
 645
 646static inline bool forced_push(const struct tcp_sock *tp)
 647{
 648	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
 649}
 650
 651static void skb_entail(struct sock *sk, struct sk_buff *skb)
 652{
 653	struct tcp_sock *tp = tcp_sk(sk);
 654	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 655
 656	skb->csum    = 0;
 657	tcb->seq     = tcb->end_seq = tp->write_seq;
 658	tcb->tcp_flags = TCPHDR_ACK;
 659	tcb->sacked  = 0;
 660	__skb_header_release(skb);
 661	tcp_add_write_queue_tail(sk, skb);
 662	sk_wmem_queued_add(sk, skb->truesize);
 663	sk_mem_charge(sk, skb->truesize);
 664	if (tp->nonagle & TCP_NAGLE_PUSH)
 665		tp->nonagle &= ~TCP_NAGLE_PUSH;
 666
 667	tcp_slow_start_after_idle_check(sk);
 668}
 669
 670static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
 671{
 672	if (flags & MSG_OOB)
 673		tp->snd_up = tp->write_seq;
 674}
 675
 676/* If a not yet filled skb is pushed, do not send it if
 677 * we have data packets in Qdisc or NIC queues :
 678 * Because TX completion will happen shortly, it gives a chance
 679 * to coalesce future sendmsg() payload into this skb, without
 680 * need for a timer, and with no latency trade off.
 681 * As packets containing data payload have a bigger truesize
 682 * than pure acks (dataless) packets, the last checks prevent
 683 * autocorking if we only have an ACK in Qdisc/NIC queues,
 684 * or if TX completion was delayed after we processed ACK packet.
 685 */
 686static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
 687				int size_goal)
 688{
 689	return skb->len < size_goal &&
 690	       sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
 691	       !tcp_rtx_queue_empty(sk) &&
 692	       refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
 693}
 694
 695static void tcp_push(struct sock *sk, int flags, int mss_now,
 696		     int nonagle, int size_goal)
 697{
 698	struct tcp_sock *tp = tcp_sk(sk);
 699	struct sk_buff *skb;
 700
 701	skb = tcp_write_queue_tail(sk);
 702	if (!skb)
 703		return;
 704	if (!(flags & MSG_MORE) || forced_push(tp))
 705		tcp_mark_push(tp, skb);
 706
 707	tcp_mark_urg(tp, flags);
 708
 709	if (tcp_should_autocork(sk, skb, size_goal)) {
 710
 711		/* avoid atomic op if TSQ_THROTTLED bit is already set */
 712		if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
 713			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
 714			set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
 715		}
 716		/* It is possible TX completion already happened
 717		 * before we set TSQ_THROTTLED.
 718		 */
 719		if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
 720			return;
 721	}
 722
 723	if (flags & MSG_MORE)
 724		nonagle = TCP_NAGLE_CORK;
 725
 726	__tcp_push_pending_frames(sk, mss_now, nonagle);
 727}
 728
 729static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
 730				unsigned int offset, size_t len)
 731{
 732	struct tcp_splice_state *tss = rd_desc->arg.data;
 733	int ret;
 734
 735	ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
 736			      min(rd_desc->count, len), tss->flags);
 737	if (ret > 0)
 738		rd_desc->count -= ret;
 739	return ret;
 740}
 741
 742static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
 743{
 744	/* Store TCP splice context information in read_descriptor_t. */
 745	read_descriptor_t rd_desc = {
 746		.arg.data = tss,
 747		.count	  = tss->len,
 748	};
 749
 750	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
 751}
 752
 753/**
 754 *  tcp_splice_read - splice data from TCP socket to a pipe
 755 * @sock:	socket to splice from
 756 * @ppos:	position (not valid)
 757 * @pipe:	pipe to splice to
 758 * @len:	number of bytes to splice
 759 * @flags:	splice modifier flags
 760 *
 761 * Description:
 762 *    Will read pages from given socket and fill them into a pipe.
 763 *
 764 **/
 765ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 766			struct pipe_inode_info *pipe, size_t len,
 767			unsigned int flags)
 768{
 769	struct sock *sk = sock->sk;
 770	struct tcp_splice_state tss = {
 771		.pipe = pipe,
 772		.len = len,
 773		.flags = flags,
 774	};
 775	long timeo;
 776	ssize_t spliced;
 777	int ret;
 778
 779	sock_rps_record_flow(sk);
 780	/*
 781	 * We can't seek on a socket input
 782	 */
 783	if (unlikely(*ppos))
 784		return -ESPIPE;
 785
 786	ret = spliced = 0;
 787
 788	lock_sock(sk);
 789
 790	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
 791	while (tss.len) {
 792		ret = __tcp_splice_read(sk, &tss);
 793		if (ret < 0)
 794			break;
 795		else if (!ret) {
 796			if (spliced)
 797				break;
 798			if (sock_flag(sk, SOCK_DONE))
 799				break;
 800			if (sk->sk_err) {
 801				ret = sock_error(sk);
 802				break;
 803			}
 804			if (sk->sk_shutdown & RCV_SHUTDOWN)
 805				break;
 806			if (sk->sk_state == TCP_CLOSE) {
 807				/*
 808				 * This occurs when the user tries to read
 809				 * from a never-connected socket.
 810				 */
 811				ret = -ENOTCONN;
 812				break;
 813			}
 814			if (!timeo) {
 815				ret = -EAGAIN;
 816				break;
 817			}
 818			/* if __tcp_splice_read() got nothing while we have
 819			 * an skb in receive queue, we do not want to loop.
 820			 * This might happen with URG data.
 821			 */
 822			if (!skb_queue_empty(&sk->sk_receive_queue))
 823				break;
 824			sk_wait_data(sk, &timeo, NULL);
 825			if (signal_pending(current)) {
 826				ret = sock_intr_errno(timeo);
 827				break;
 828			}
 829			continue;
 830		}
 831		tss.len -= ret;
 832		spliced += ret;
 833
 834		if (!timeo)
 835			break;
 836		release_sock(sk);
 837		lock_sock(sk);
 838
 839		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
 840		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
 841		    signal_pending(current))
 842			break;
 843	}
 844
 845	release_sock(sk);
 846
 847	if (spliced)
 848		return spliced;
 849
 850	return ret;
 851}
 852EXPORT_SYMBOL(tcp_splice_read);
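/* A minimal user-space sketch (not part of this file): tcp_splice_read() is
 * the kernel side of splice(2) from a TCP socket into a pipe, e.g. when
 * relaying data without copying it through user space. Descriptor names and
 * the 4096-byte chunk are assumptions.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static ssize_t relay_chunk(int tcp_fd, int out_fd)
{
	int pipefd[2];
	ssize_t n = -1;

	if (pipe(pipefd) == 0) {
		/* socket -> pipe (this path), then pipe -> destination */
		n = splice(tcp_fd, NULL, pipefd[1], NULL, 4096, SPLICE_F_MOVE);
		if (n > 0)
			n = splice(pipefd[0], NULL, out_fd, NULL, n, SPLICE_F_MOVE);
		close(pipefd[0]);
		close(pipefd[1]);
	}
	return n;
}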
 853
 854struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
 855				    bool force_schedule)
 856{
 857	struct sk_buff *skb;
 858
 859	if (likely(!size)) {
 860		skb = sk->sk_tx_skb_cache;
 861		if (skb) {
 862			skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
 863			sk->sk_tx_skb_cache = NULL;
 864			pskb_trim(skb, 0);
 865			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
 866			skb_shinfo(skb)->tx_flags = 0;
 867			memset(TCP_SKB_CB(skb), 0, sizeof(struct tcp_skb_cb));
 868			return skb;
 869		}
 870	}
 871	/* The TCP header must be at least 32-bit aligned.  */
 872	size = ALIGN(size, 4);
 873
 874	if (unlikely(tcp_under_memory_pressure(sk)))
 875		sk_mem_reclaim_partial(sk);
 876
 877	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
 878	if (likely(skb)) {
 879		bool mem_scheduled;
 880
 881		if (force_schedule) {
 882			mem_scheduled = true;
 883			sk_forced_mem_schedule(sk, skb->truesize);
 884		} else {
 885			mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
 886		}
 887		if (likely(mem_scheduled)) {
 888			skb_reserve(skb, sk->sk_prot->max_header);
 889			/*
 890			 * Make sure that we have exactly size bytes
 891			 * available to the caller, no more, no less.
 892			 */
 893			skb->reserved_tailroom = skb->end - skb->tail - size;
 894			INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
 895			return skb;
 896		}
 897		__kfree_skb(skb);
 898	} else {
 899		sk->sk_prot->enter_memory_pressure(sk);
 900		sk_stream_moderate_sndbuf(sk);
 901	}
 902	return NULL;
 903}
 904
 905static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 906				       int large_allowed)
 907{
 908	struct tcp_sock *tp = tcp_sk(sk);
 909	u32 new_size_goal, size_goal;
 910
 911	if (!large_allowed)
 912		return mss_now;
 913
 914	/* Note : tcp_tso_autosize() will eventually split this later */
 915	new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER;
 916	new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal);
 917
 918	/* We try hard to avoid divides here */
 919	size_goal = tp->gso_segs * mss_now;
 920	if (unlikely(new_size_goal < size_goal ||
 921		     new_size_goal >= size_goal + mss_now)) {
 922		tp->gso_segs = min_t(u16, new_size_goal / mss_now,
 923				     sk->sk_gso_max_segs);
 924		size_goal = tp->gso_segs * mss_now;
 925	}
 926
 927	return max(size_goal, mss_now);
 928}
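/* Rough example (illustrative; exact numbers depend on MAX_TCP_HEADER and
 * device limits): with mss_now = 1448 and a ~64KB sk_gso_max_size, gso_segs
 * lands around 45, so size_goal is roughly 65KB and the send path tries to
 * build skbs of about that size before pushing.
 */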
 929
 930static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
 931{
 932	int mss_now;
 933
 934	mss_now = tcp_current_mss(sk);
 935	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
 936
 937	return mss_now;
 938}
 939
 940/* In some cases, both sendpage() and sendmsg() could have added
 941 * an skb to the write queue, but failed adding payload on it.
 942 * We need to remove it to consume less memory, but more
 943 * importantly be able to generate EPOLLOUT for Edge Trigger epoll()
 944 * users.
 945 */
 946static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb)
 947{
 948	if (skb && !skb->len) {
 949		tcp_unlink_write_queue(skb, sk);
 950		if (tcp_write_queue_empty(sk))
 951			tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
 952		sk_wmem_free_skb(sk, skb);
 953	}
 954}
 955
 956ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
 957			 size_t size, int flags)
 958{
 959	struct tcp_sock *tp = tcp_sk(sk);
 960	int mss_now, size_goal;
 961	int err;
 962	ssize_t copied;
 963	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
 964
 965	if (IS_ENABLED(CONFIG_DEBUG_VM) &&
 966	    WARN_ONCE(PageSlab(page), "page must not be a Slab one"))
 967		return -EINVAL;
 968
 969	/* Wait for a connection to finish. One exception is TCP Fast Open
 970	 * (passive side) where data is allowed to be sent before a connection
 971	 * is fully established.
 972	 */
 973	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
 974	    !tcp_passive_fastopen(sk)) {
 975		err = sk_stream_wait_connect(sk, &timeo);
 976		if (err != 0)
 977			goto out_err;
 978	}
 979
 980	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 981
 982	mss_now = tcp_send_mss(sk, &size_goal, flags);
 983	copied = 0;
 984
 985	err = -EPIPE;
 986	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
 987		goto out_err;
 988
 989	while (size > 0) {
 990		struct sk_buff *skb = tcp_write_queue_tail(sk);
 991		int copy, i;
 992		bool can_coalesce;
 993
 994		if (!skb || (copy = size_goal - skb->len) <= 0 ||
 995		    !tcp_skb_can_collapse_to(skb)) {
 996new_segment:
 997			if (!sk_stream_memory_free(sk))
 998				goto wait_for_sndbuf;
 999
1000			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
1001					tcp_rtx_and_write_queues_empty(sk));
1002			if (!skb)
1003				goto wait_for_memory;
1004
1005#ifdef CONFIG_TLS_DEVICE
1006			skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
1007#endif
1008			skb_entail(sk, skb);
1009			copy = size_goal;
1010		}
1011
1012		if (copy > size)
1013			copy = size;
1014
1015		i = skb_shinfo(skb)->nr_frags;
1016		can_coalesce = skb_can_coalesce(skb, i, page, offset);
1017		if (!can_coalesce && i >= sysctl_max_skb_frags) {
1018			tcp_mark_push(tp, skb);
1019			goto new_segment;
1020		}
1021		if (!sk_wmem_schedule(sk, copy))
1022			goto wait_for_memory;
1023
1024		if (can_coalesce) {
1025			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1026		} else {
1027			get_page(page);
1028			skb_fill_page_desc(skb, i, page, offset, copy);
1029		}
1030
1031		if (!(flags & MSG_NO_SHARED_FRAGS))
1032			skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1033
1034		skb->len += copy;
1035		skb->data_len += copy;
1036		skb->truesize += copy;
1037		sk_wmem_queued_add(sk, copy);
1038		sk_mem_charge(sk, copy);
1039		skb->ip_summed = CHECKSUM_PARTIAL;
1040		WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
1041		TCP_SKB_CB(skb)->end_seq += copy;
1042		tcp_skb_pcount_set(skb, 0);
1043
1044		if (!copied)
1045			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
1046
1047		copied += copy;
1048		offset += copy;
1049		size -= copy;
1050		if (!size)
1051			goto out;
1052
1053		if (skb->len < size_goal || (flags & MSG_OOB))
1054			continue;
1055
1056		if (forced_push(tp)) {
1057			tcp_mark_push(tp, skb);
1058			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
1059		} else if (skb == tcp_send_head(sk))
1060			tcp_push_one(sk, mss_now);
1061		continue;
1062
1063wait_for_sndbuf:
1064		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1065wait_for_memory:
1066		tcp_push(sk, flags & ~MSG_MORE, mss_now,
1067			 TCP_NAGLE_PUSH, size_goal);
1068
1069		err = sk_stream_wait_memory(sk, &timeo);
1070		if (err != 0)
1071			goto do_error;
1072
1073		mss_now = tcp_send_mss(sk, &size_goal, flags);
1074	}
1075
1076out:
1077	if (copied) {
1078		tcp_tx_timestamp(sk, sk->sk_tsflags);
1079		if (!(flags & MSG_SENDPAGE_NOTLAST))
1080			tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
1081	}
1082	return copied;
1083
1084do_error:
1085	tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk));
1086	if (copied)
1087		goto out;
1088out_err:
1089	/* make sure we wake any epoll edge trigger waiter */
1090	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 &&
1091		     err == -EAGAIN)) {
1092		sk->sk_write_space(sk);
1093		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
1094	}
1095	return sk_stream_error(sk, flags, err);
1096}
1097EXPORT_SYMBOL_GPL(do_tcp_sendpages);
1098
1099int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
1100			size_t size, int flags)
1101{
1102	if (!(sk->sk_route_caps & NETIF_F_SG))
1103		return sock_no_sendpage_locked(sk, page, offset, size, flags);
1104
1105	tcp_rate_check_app_limited(sk);  /* is sending application-limited? */
1106
1107	return do_tcp_sendpages(sk, page, offset, size, flags);
1108}
1109EXPORT_SYMBOL_GPL(tcp_sendpage_locked);
1110
1111int tcp_sendpage(struct sock *sk, struct page *page, int offset,
1112		 size_t size, int flags)
1113{
1114	int ret;
1115
1116	lock_sock(sk);
1117	ret = tcp_sendpage_locked(sk, page, offset, size, flags);
1118	release_sock(sk);
1119
1120	return ret;
1121}
1122EXPORT_SYMBOL(tcp_sendpage);
1123
1124void tcp_free_fastopen_req(struct tcp_sock *tp)
1125{
1126	if (tp->fastopen_req) {
1127		kfree(tp->fastopen_req);
1128		tp->fastopen_req = NULL;
1129	}
1130}
1131
1132static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
1133				int *copied, size_t size,
1134				struct ubuf_info *uarg)
1135{
1136	struct tcp_sock *tp = tcp_sk(sk);
1137	struct inet_sock *inet = inet_sk(sk);
1138	struct sockaddr *uaddr = msg->msg_name;
1139	int err, flags;
1140
1141	if (!(sock_net(sk)->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
1142	    (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
1143	     uaddr->sa_family == AF_UNSPEC))
1144		return -EOPNOTSUPP;
1145	if (tp->fastopen_req)
1146		return -EALREADY; /* Another Fast Open is in progress */
1147
1148	tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
1149				   sk->sk_allocation);
1150	if (unlikely(!tp->fastopen_req))
1151		return -ENOBUFS;
1152	tp->fastopen_req->data = msg;
1153	tp->fastopen_req->size = size;
1154	tp->fastopen_req->uarg = uarg;
1155
1156	if (inet->defer_connect) {
1157		err = tcp_connect(sk);
1158		/* Same failure procedure as in tcp_v4/6_connect */
1159		if (err) {
1160			tcp_set_state(sk, TCP_CLOSE);
1161			inet->inet_dport = 0;
1162			sk->sk_route_caps = 0;
1163		}
1164	}
1165	flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
1166	err = __inet_stream_connect(sk->sk_socket, uaddr,
1167				    msg->msg_namelen, flags, 1);
1168	/* fastopen_req could already be freed in __inet_stream_connect
1169	 * if the connection times out or gets rst
1170	 */
1171	if (tp->fastopen_req) {
1172		*copied = tp->fastopen_req->copied;
1173		tcp_free_fastopen_req(tp);
1174		inet->defer_connect = 0;
1175	}
1176	return err;
1177}
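/* A minimal user-space sketch (not part of this file): the client side of
 * the Fast Open path above is reached via sendto(..., MSG_FASTOPEN, ...)
 * (or TCP_FASTOPEN_CONNECT plus connect()/send()); MSG_FASTOPEN needs a
 * reasonably recent libc, and the names below are assumptions.
 */
#include <netinet/in.h>
#include <sys/socket.h>

static ssize_t fastopen_send(int fd, const void *buf, size_t len,
			     const struct sockaddr_in *dst)
{
	/* the SYN carries (as much as fits of) the payload */
	return sendto(fd, buf, len, MSG_FASTOPEN,
		      (const struct sockaddr *)dst, sizeof(*dst));
}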
1178
1179int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
1180{
1181	struct tcp_sock *tp = tcp_sk(sk);
1182	struct ubuf_info *uarg = NULL;
1183	struct sk_buff *skb;
1184	struct sockcm_cookie sockc;
1185	int flags, err, copied = 0;
1186	int mss_now = 0, size_goal, copied_syn = 0;
1187	int process_backlog = 0;
1188	bool zc = false;
1189	long timeo;
1190
1191	flags = msg->msg_flags;
1192
1193	if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
1194		skb = tcp_write_queue_tail(sk);
1195		uarg = sock_zerocopy_realloc(sk, size, skb_zcopy(skb));
1196		if (!uarg) {
1197			err = -ENOBUFS;
1198			goto out_err;
1199		}
1200
1201		zc = sk->sk_route_caps & NETIF_F_SG;
1202		if (!zc)
1203			uarg->zerocopy = 0;
1204	}
1205
1206	if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
1207	    !tp->repair) {
1208		err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size, uarg);
1209		if (err == -EINPROGRESS && copied_syn > 0)
1210			goto out;
1211		else if (err)
1212			goto out_err;
1213	}
1214
1215	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1216
1217	tcp_rate_check_app_limited(sk);  /* is sending application-limited? */
1218
1219	/* Wait for a connection to finish. One exception is TCP Fast Open
1220	 * (passive side) where data is allowed to be sent before a connection
1221	 * is fully established.
1222	 */
1223	if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
1224	    !tcp_passive_fastopen(sk)) {
1225		err = sk_stream_wait_connect(sk, &timeo);
1226		if (err != 0)
1227			goto do_error;
1228	}
1229
1230	if (unlikely(tp->repair)) {
1231		if (tp->repair_queue == TCP_RECV_QUEUE) {
1232			copied = tcp_send_rcvq(sk, msg, size);
1233			goto out_nopush;
1234		}
1235
1236		err = -EINVAL;
1237		if (tp->repair_queue == TCP_NO_QUEUE)
1238			goto out_err;
1239
1240		/* 'common' sending to sendq */
1241	}
1242
1243	sockcm_init(&sockc, sk);
1244	if (msg->msg_controllen) {
1245		err = sock_cmsg_send(sk, msg, &sockc);
1246		if (unlikely(err)) {
1247			err = -EINVAL;
1248			goto out_err;
1249		}
1250	}
1251
1252	/* This should be in poll */
1253	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1254
1255	/* Ok commence sending. */
1256	copied = 0;
1257
1258restart:
1259	mss_now = tcp_send_mss(sk, &size_goal, flags);
1260
1261	err = -EPIPE;
1262	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
1263		goto do_error;
1264
1265	while (msg_data_left(msg)) {
1266		int copy = 0;
1267
1268		skb = tcp_write_queue_tail(sk);
1269		if (skb)
1270			copy = size_goal - skb->len;
1271
1272		if (copy <= 0 || !tcp_skb_can_collapse_to(skb)) {
1273			bool first_skb;
1274
1275new_segment:
1276			if (!sk_stream_memory_free(sk))
1277				goto wait_for_sndbuf;
1278
1279			if (unlikely(process_backlog >= 16)) {
1280				process_backlog = 0;
1281				if (sk_flush_backlog(sk))
1282					goto restart;
1283			}
1284			first_skb = tcp_rtx_and_write_queues_empty(sk);
1285			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
1286						  first_skb);
1287			if (!skb)
1288				goto wait_for_memory;
1289
1290			process_backlog++;
1291			skb->ip_summed = CHECKSUM_PARTIAL;
1292
1293			skb_entail(sk, skb);
1294			copy = size_goal;
1295
1296			/* All packets are restored as if they have
1297			 * already been sent. skb_mstamp_ns isn't set to
1298			 * avoid wrong rtt estimation.
1299			 */
1300			if (tp->repair)
1301				TCP_SKB_CB(skb)->sacked |= TCPCB_REPAIRED;
1302		}
1303
1304		/* Try to append data to the end of skb. */
1305		if (copy > msg_data_left(msg))
1306			copy = msg_data_left(msg);
1307
1308		/* Where to copy to? */
1309		if (skb_availroom(skb) > 0 && !zc) {
1310			/* We have some space in skb head. Superb! */
1311			copy = min_t(int, copy, skb_availroom(skb));
1312			err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy);
1313			if (err)
1314				goto do_fault;
1315		} else if (!zc) {
1316			bool merge = true;
1317			int i = skb_shinfo(skb)->nr_frags;
1318			struct page_frag *pfrag = sk_page_frag(sk);
1319
1320			if (!sk_page_frag_refill(sk, pfrag))
1321				goto wait_for_memory;
1322
1323			if (!skb_can_coalesce(skb, i, pfrag->page,
1324					      pfrag->offset)) {
1325				if (i >= sysctl_max_skb_frags) {
1326					tcp_mark_push(tp, skb);
1327					goto new_segment;
1328				}
1329				merge = false;
1330			}
1331
1332			copy = min_t(int, copy, pfrag->size - pfrag->offset);
1333
1334			if (!sk_wmem_schedule(sk, copy))
1335				goto wait_for_memory;
1336
1337			err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
1338						       pfrag->page,
1339						       pfrag->offset,
1340						       copy);
1341			if (err)
1342				goto do_error;
1343
1344			/* Update the skb. */
1345			if (merge) {
1346				skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1347			} else {
1348				skb_fill_page_desc(skb, i, pfrag->page,
1349						   pfrag->offset, copy);
1350				page_ref_inc(pfrag->page);
1351			}
1352			pfrag->offset += copy;
1353		} else {
1354			err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);
1355			if (err == -EMSGSIZE || err == -EEXIST) {
1356				tcp_mark_push(tp, skb);
1357				goto new_segment;
1358			}
1359			if (err < 0)
1360				goto do_error;
1361			copy = err;
1362		}
1363
1364		if (!copied)
1365			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
1366
1367		WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
1368		TCP_SKB_CB(skb)->end_seq += copy;
1369		tcp_skb_pcount_set(skb, 0);
1370
1371		copied += copy;
1372		if (!msg_data_left(msg)) {
1373			if (unlikely(flags & MSG_EOR))
1374				TCP_SKB_CB(skb)->eor = 1;
1375			goto out;
1376		}
1377
1378		if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair))
1379			continue;
1380
1381		if (forced_push(tp)) {
1382			tcp_mark_push(tp, skb);
1383			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
1384		} else if (skb == tcp_send_head(sk))
1385			tcp_push_one(sk, mss_now);
1386		continue;
1387
1388wait_for_sndbuf:
1389		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1390wait_for_memory:
1391		if (copied)
1392			tcp_push(sk, flags & ~MSG_MORE, mss_now,
1393				 TCP_NAGLE_PUSH, size_goal);
1394
1395		err = sk_stream_wait_memory(sk, &timeo);
1396		if (err != 0)
1397			goto do_error;
1398
1399		mss_now = tcp_send_mss(sk, &size_goal, flags);
1400	}
1401
1402out:
1403	if (copied) {
1404		tcp_tx_timestamp(sk, sockc.tsflags);
1405		tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
1406	}
1407out_nopush:
1408	sock_zerocopy_put(uarg);
1409	return copied + copied_syn;
1410
1411do_error:
1412	skb = tcp_write_queue_tail(sk);
1413do_fault:
1414	tcp_remove_empty_skb(sk, skb);
1415
1416	if (copied + copied_syn)
1417		goto out;
1418out_err:
1419	sock_zerocopy_put_abort(uarg, true);
1420	err = sk_stream_error(sk, flags, err);
1421	/* make sure we wake any epoll edge trigger waiter */
1422	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 &&
1423		     err == -EAGAIN)) {
1424		sk->sk_write_space(sk);
1425		tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
1426	}
1427	return err;
1428}
1429EXPORT_SYMBOL_GPL(tcp_sendmsg_locked);
1430
1431int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
1432{
1433	int ret;
1434
1435	lock_sock(sk);
1436	ret = tcp_sendmsg_locked(sk, msg, size);
1437	release_sock(sk);
1438
1439	return ret;
1440}
1441EXPORT_SYMBOL(tcp_sendmsg);
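/* A minimal user-space sketch (not part of this file, assumes headers that
 * define SO_ZEROCOPY and MSG_ZEROCOPY) of the zerocopy path handled in
 * tcp_sendmsg_locked(): the socket has to opt in with SO_ZEROCOPY first,
 * otherwise the flag is ignored and the data is copied; completion
 * notifications later arrive on the error queue (not shown).
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t send_zerocopy(int fd, void *buf, size_t len)
{
	int one = 1;
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
	return sendmsg(fd, &msg, MSG_ZEROCOPY);
}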
1442
1443/*
1444 *	Handle reading urgent data. BSD has very simple semantics for
1445 *	this, no blocking and very strange errors 8)
1446 */
1447
1448static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
1449{
1450	struct tcp_sock *tp = tcp_sk(sk);
1451
1452	/* No URG data to read. */
1453	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
1454	    tp->urg_data == TCP_URG_READ)
1455		return -EINVAL;	/* Yes this is right ! */
1456
1457	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
1458		return -ENOTCONN;
1459
1460	if (tp->urg_data & TCP_URG_VALID) {
1461		int err = 0;
1462		char c = tp->urg_data;
1463
1464		if (!(flags & MSG_PEEK))
1465			tp->urg_data = TCP_URG_READ;
1466
1467		/* Read urgent data. */
1468		msg->msg_flags |= MSG_OOB;
1469
1470		if (len > 0) {
1471			if (!(flags & MSG_TRUNC))
1472				err = memcpy_to_msg(msg, &c, 1);
1473			len = 1;
1474		} else
1475			msg->msg_flags |= MSG_TRUNC;
1476
1477		return err ? -EFAULT : len;
1478	}
1479
1480	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1481		return 0;
1482
1483	/* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
1484	 * the available implementations agree in this case:
1485	 * this call should never block, independent of the
1486	 * blocking state of the socket.
1487	 * Mike <pall@rz.uni-karlsruhe.de>
1488	 */
1489	return -EAGAIN;
1490}
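/* A minimal user-space sketch (not part of this file): with SO_OOBINLINE
 * left off, the single byte of urgent data handled above is read with
 * recv(..., MSG_OOB); this never blocks and fails with EAGAIN if no urgent
 * byte is pending. The fd and buffer are assumptions.
 */
#include <sys/socket.h>

static ssize_t read_oob_byte(int fd, char *c)
{
	return recv(fd, c, 1, MSG_OOB);
}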
1491
1492static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
1493{
1494	struct sk_buff *skb;
1495	int copied = 0, err = 0;
1496
1497	/* XXX -- need to support SO_PEEK_OFF */
1498
1499	skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
1500		err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
1501		if (err)
1502			return err;
1503		copied += skb->len;
1504	}
1505
1506	skb_queue_walk(&sk->sk_write_queue, skb) {
1507		err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
1508		if (err)
1509			break;
1510
1511		copied += skb->len;
1512	}
1513
1514	return err ?: copied;
1515}
1516
1517/* Clean up the receive buffer for full frames taken by the user,
1518 * then send an ACK if necessary.  COPIED is the number of bytes
1519 * tcp_recvmsg has given to the user so far, it speeds up the
1520 * calculation of whether or not we must ACK for the sake of
1521 * a window update.
1522 */
1523static void tcp_cleanup_rbuf(struct sock *sk, int copied)
1524{
1525	struct tcp_sock *tp = tcp_sk(sk);
1526	bool time_to_ack = false;
1527
1528	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1529
1530	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1531	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1532	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1533
1534	if (inet_csk_ack_scheduled(sk)) {
1535		const struct inet_connection_sock *icsk = inet_csk(sk);
1536		   /* Delayed ACKs frequently hit locked sockets during bulk
1537		    * receive. */
1538		if (icsk->icsk_ack.blocked ||
1539		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
1540		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
1541		    /*
1542		     * If this read emptied read buffer, we send ACK, if
1543		     * connection is not bidirectional, user drained
1544		     * receive buffer and there was a small segment
1545		     * in queue.
1546		     */
1547		    (copied > 0 &&
1548		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
1549		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
1550		       !inet_csk_in_pingpong_mode(sk))) &&
1551		      !atomic_read(&sk->sk_rmem_alloc)))
1552			time_to_ack = true;
1553	}
1554
1555	/* We send an ACK if we can now advertise a non-zero window
1556	 * which has been raised "significantly".
1557	 *
1558	 * Even if window raised up to infinity, do not send window open ACK
1559	 * in states, where we will not receive more. It is useless.
1560	 */
1561	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1562		__u32 rcv_window_now = tcp_receive_window(tp);
1563
1564		/* Optimize, __tcp_select_window() is not cheap. */
1565		if (2*rcv_window_now <= tp->window_clamp) {
1566			__u32 new_window = __tcp_select_window(sk);
1567
1568			/* Send ACK now, if this read freed lots of space
1569			 * in our buffer. Certainly, new_window is the new window.
1570			 * We can advertise it now, if it is not less than the current one.
1571			 * "Lots" means "at least twice" here.
1572			 */
1573			if (new_window && new_window >= 2 * rcv_window_now)
1574				time_to_ack = true;
1575		}
1576	}
1577	if (time_to_ack)
1578		tcp_send_ack(sk);
1579}
1580
1581static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1582{
1583	struct sk_buff *skb;
1584	u32 offset;
1585
1586	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
1587		offset = seq - TCP_SKB_CB(skb)->seq;
1588		if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
1589			pr_err_once("%s: found a SYN, please report !\n", __func__);
1590			offset--;
1591		}
1592		if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) {
1593			*off = offset;
1594			return skb;
1595		}
1596		/* This looks weird, but this can happen if TCP collapsing
1597		 * split a fat GRO packet, while we released the socket lock
1598		 * in skb_splice_bits()
1599		 */
1600		sk_eat_skb(sk, skb);
1601	}
1602	return NULL;
1603}
1604
1605/*
1606 * This routine provides an alternative to tcp_recvmsg() for routines
1607 * that would like to handle copying from skbuffs directly in 'sendfile'
1608 * fashion.
1609 * Note:
1610 *	- It is assumed that the socket was locked by the caller.
1611 *	- The routine does not block.
1612 *	- At present, there is no support for reading OOB data
1613 *	  or for 'peeking' the socket using this routine
1614 *	  (although both would be easy to implement).
1615 */
1616int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1617		  sk_read_actor_t recv_actor)
1618{
1619	struct sk_buff *skb;
1620	struct tcp_sock *tp = tcp_sk(sk);
1621	u32 seq = tp->copied_seq;
1622	u32 offset;
1623	int copied = 0;
1624
1625	if (sk->sk_state == TCP_LISTEN)
1626		return -ENOTCONN;
1627	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1628		if (offset < skb->len) {
1629			int used;
1630			size_t len;
1631
1632			len = skb->len - offset;
1633			/* Stop reading if we hit a patch of urgent data */
1634			if (tp->urg_data) {
1635				u32 urg_offset = tp->urg_seq - seq;
1636				if (urg_offset < len)
1637					len = urg_offset;
1638				if (!len)
1639					break;
1640			}
1641			used = recv_actor(desc, skb, offset, len);
1642			if (used <= 0) {
1643				if (!copied)
1644					copied = used;
1645				break;
1646			} else if (used <= len) {
1647				seq += used;
1648				copied += used;
1649				offset += used;
1650			}
1651			/* If recv_actor drops the lock (e.g. TCP splice
1652			 * receive) the skb pointer might be invalid when
1653			 * getting here: tcp_collapse might have deleted it
1654			 * while aggregating skbs from the socket queue.
1655			 */
1656			skb = tcp_recv_skb(sk, seq - 1, &offset);
1657			if (!skb)
1658				break;
1659			/* TCP coalescing might have appended data to the skb.
1660			 * Try to splice more frags
1661			 */
1662			if (offset + 1 != skb->len)
1663				continue;
1664		}
1665		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
1666			sk_eat_skb(sk, skb);
1667			++seq;
1668			break;
1669		}
1670		sk_eat_skb(sk, skb);
1671		if (!desc->count)
1672			break;
1673		WRITE_ONCE(tp->copied_seq, seq);
1674	}
1675	WRITE_ONCE(tp->copied_seq, seq);
1676
1677	tcp_rcv_space_adjust(sk);
1678
1679	/* Clean up data we have read: This will do ACK frames. */
1680	if (copied > 0) {
1681		tcp_recv_skb(sk, seq, &offset);
1682		tcp_cleanup_rbuf(sk, copied);
1683	}
1684	return copied;
1685}
1686EXPORT_SYMBOL(tcp_read_sock);
1687
1688int tcp_peek_len(struct socket *sock)
1689{
1690	return tcp_inq(sock->sk);
1691}
1692EXPORT_SYMBOL(tcp_peek_len);
1693
1694/* Make sure sk_rcvbuf is big enough to satisfy SO_RCVLOWAT hint */
1695int tcp_set_rcvlowat(struct sock *sk, int val)
1696{
1697	int cap;
1698
1699	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
1700		cap = sk->sk_rcvbuf >> 1;
1701	else
1702		cap = sock_net(sk)->ipv4.sysctl_tcp_rmem[2] >> 1;
1703	val = min(val, cap);
1704	WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
1705
1706	/* Check if we need to signal EPOLLIN right now */
1707	tcp_data_ready(sk);
1708
1709	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
1710		return 0;
1711
1712	val <<= 1;
1713	if (val > sk->sk_rcvbuf) {
1714		WRITE_ONCE(sk->sk_rcvbuf, val);
1715		tcp_sk(sk)->window_clamp = tcp_win_from_space(sk, val);
1716	}
1717	return 0;
1718}
1719EXPORT_SYMBOL(tcp_set_rcvlowat);
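/* A minimal user-space sketch (not part of this file): tcp_set_rcvlowat() is
 * reached via setsockopt(SO_RCVLOWAT); poll()/recv() then wait until at
 * least this many bytes are queued (or the stream ends).
 */
#include <sys/socket.h>

static int set_low_watermark(int fd, int bytes)
{
	return setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &bytes, sizeof(bytes));
}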
1720
1721#ifdef CONFIG_MMU
1722static const struct vm_operations_struct tcp_vm_ops = {
1723};
1724
1725int tcp_mmap(struct file *file, struct socket *sock,
1726	     struct vm_area_struct *vma)
1727{
1728	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
1729		return -EPERM;
1730	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
1731
1732	/* Instruct vm_insert_page() to not down_read(mmap_sem) */
1733	vma->vm_flags |= VM_MIXEDMAP;
1734
1735	vma->vm_ops = &tcp_vm_ops;
1736	return 0;
1737}
1738EXPORT_SYMBOL(tcp_mmap);
1739
1740static int tcp_zerocopy_receive(struct sock *sk,
1741				struct tcp_zerocopy_receive *zc)
1742{
1743	unsigned long address = (unsigned long)zc->address;
1744	const skb_frag_t *frags = NULL;
1745	u32 length = 0, seq, offset;
1746	struct vm_area_struct *vma;
1747	struct sk_buff *skb = NULL;
1748	struct tcp_sock *tp;
1749	int inq;
1750	int ret;
1751
1752	if (address & (PAGE_SIZE - 1) || address != zc->address)
1753		return -EINVAL;
1754
1755	if (sk->sk_state == TCP_LISTEN)
1756		return -ENOTCONN;
1757
1758	sock_rps_record_flow(sk);
1759
1760	down_read(&current->mm->mmap_sem);
1761
1762	ret = -EINVAL;
1763	vma = find_vma(current->mm, address);
1764	if (!vma || vma->vm_start > address || vma->vm_ops != &tcp_vm_ops)
1765		goto out;
1766	zc->length = min_t(unsigned long, zc->length, vma->vm_end - address);
1767
1768	tp = tcp_sk(sk);
1769	seq = tp->copied_seq;
1770	inq = tcp_inq(sk);
1771	zc->length = min_t(u32, zc->length, inq);
1772	zc->length &= ~(PAGE_SIZE - 1);
1773	if (zc->length) {
1774		zap_page_range(vma, address, zc->length);
1775		zc->recv_skip_hint = 0;
1776	} else {
1777		zc->recv_skip_hint = inq;
1778	}
1779	ret = 0;
1780	while (length + PAGE_SIZE <= zc->length) {
1781		if (zc->recv_skip_hint < PAGE_SIZE) {
1782			if (skb) {
1783				skb = skb->next;
1784				offset = seq - TCP_SKB_CB(skb)->seq;
1785			} else {
1786				skb = tcp_recv_skb(sk, seq, &offset);
1787			}
1788
1789			zc->recv_skip_hint = skb->len - offset;
1790			offset -= skb_headlen(skb);
1791			if ((int)offset < 0 || skb_has_frag_list(skb))
1792				break;
1793			frags = skb_shinfo(skb)->frags;
1794			while (offset) {
1795				if (skb_frag_size(frags) > offset)
1796					goto out;
1797				offset -= skb_frag_size(frags);
1798				frags++;
1799			}
1800		}
1801		if (skb_frag_size(frags) != PAGE_SIZE || skb_frag_off(frags)) {
1802			int remaining = zc->recv_skip_hint;
1803
1804			while (remaining && (skb_frag_size(frags) != PAGE_SIZE ||
1805					     skb_frag_off(frags))) {
1806				remaining -= skb_frag_size(frags);
1807				frags++;
1808			}
1809			zc->recv_skip_hint -= remaining;
1810			break;
1811		}
1812		ret = vm_insert_page(vma, address + length,
1813				     skb_frag_page(frags));
1814		if (ret)
1815			break;
1816		length += PAGE_SIZE;
1817		seq += PAGE_SIZE;
1818		zc->recv_skip_hint -= PAGE_SIZE;
1819		frags++;
1820	}
1821out:
1822	up_read(&current->mm->mmap_sem);
1823	if (length) {
1824		WRITE_ONCE(tp->copied_seq, seq);
1825		tcp_rcv_space_adjust(sk);
1826
1827		/* Clean up data we have read: This will do ACK frames. */
1828		tcp_recv_skb(sk, seq, &offset);
1829		tcp_cleanup_rbuf(sk, length);
1830		ret = 0;
1831		if (length == zc->length)
1832			zc->recv_skip_hint = 0;
1833	} else {
1834		if (!zc->recv_skip_hint && sock_flag(sk, SOCK_DONE))
1835			ret = -EIO;
1836	}
1837	zc->length = length;
1838	return ret;
1839}
1840#endif
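/* A minimal user-space sketch (not part of this file, assumes uapi headers
 * new enough to provide TCP_ZEROCOPY_RECEIVE): the region must have been
 * mmap()ed on the socket so tcp_mmap() above installed tcp_vm_ops, and any
 * bytes reported back in recv_skip_hint still have to be read() normally.
 */
#include <linux/tcp.h>		/* struct tcp_zerocopy_receive, TCP_ZEROCOPY_RECEIVE */
#include <netinet/in.h>		/* IPPROTO_TCP */
#include <sys/socket.h>

static int zerocopy_chunk(int fd, void *map, unsigned int chunk)
{
	struct tcp_zerocopy_receive zc = {
		.address = (__u64)(unsigned long)map,
		.length  = chunk,
	};
	socklen_t zc_len = sizeof(zc);

	/* on success, zc.length bytes of payload are mapped at 'map' */
	return getsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, &zc, &zc_len);
}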
1841
1842static void tcp_update_recv_tstamps(struct sk_buff *skb,
1843				    struct scm_timestamping_internal *tss)
1844{
1845	if (skb->tstamp)
1846		tss->ts[0] = ktime_to_timespec64(skb->tstamp);
1847	else
1848		tss->ts[0] = (struct timespec64) {0};
1849
1850	if (skb_hwtstamps(skb)->hwtstamp)
1851		tss->ts[2] = ktime_to_timespec64(skb_hwtstamps(skb)->hwtstamp);
1852	else
1853		tss->ts[2] = (struct timespec64) {0};
1854}
1855
1856/* Similar to __sock_recv_timestamp, but does not require an skb */
1857static void tcp_recv_timestamp(struct msghdr *msg, const struct sock *sk,
1858			       struct scm_timestamping_internal *tss)
1859{
1860	int new_tstamp = sock_flag(sk, SOCK_TSTAMP_NEW);
1861	bool has_timestamping = false;
1862
1863	if (tss->ts[0].tv_sec || tss->ts[0].tv_nsec) {
1864		if (sock_flag(sk, SOCK_RCVTSTAMP)) {
1865			if (sock_flag(sk, SOCK_RCVTSTAMPNS)) {
1866				if (new_tstamp) {
1867					struct __kernel_timespec kts = {tss->ts[0].tv_sec, tss->ts[0].tv_nsec};
1868
1869					put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_NEW,
1870						 sizeof(kts), &kts);
1871				} else {
1872					struct timespec ts_old = timespec64_to_timespec(tss->ts[0]);
1873
1874					put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMPNS_OLD,
1875						 sizeof(ts_old), &ts_old);
1876				}
1877			} else {
1878				if (new_tstamp) {
1879					struct __kernel_sock_timeval stv;
1880
1881					stv.tv_sec = tss->ts[0].tv_sec;
1882					stv.tv_usec = tss->ts[0].tv_nsec / 1000;
1883					put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW,
1884						 sizeof(stv), &stv);
1885				} else {
1886					struct __kernel_old_timeval tv;
1887
1888					tv.tv_sec = tss->ts[0].tv_sec;
1889					tv.tv_usec = tss->ts[0].tv_nsec / 1000;
1890					put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD,
1891						 sizeof(tv), &tv);
1892				}
1893			}
1894		}
1895
1896		if (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE)
1897			has_timestamping = true;
1898		else
1899			tss->ts[0] = (struct timespec64) {0};
1900	}
1901
1902	if (tss->ts[2].tv_sec || tss->ts[2].tv_nsec) {
1903		if (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)
1904			has_timestamping = true;
1905		else
1906			tss->ts[2] = (struct timespec64) {0};
1907	}
1908
1909	if (has_timestamping) {
1910		tss->ts[1] = (struct timespec64) {0};
1911		if (sock_flag(sk, SOCK_TSTAMP_NEW))
1912			put_cmsg_scm_timestamping64(msg, tss);
1913		else
1914			put_cmsg_scm_timestamping(msg, tss);
1915	}
1916}
1917
1918static int tcp_inq_hint(struct sock *sk)
1919{
1920	const struct tcp_sock *tp = tcp_sk(sk);
1921	u32 copied_seq = READ_ONCE(tp->copied_seq);
1922	u32 rcv_nxt = READ_ONCE(tp->rcv_nxt);
1923	int inq;
1924
1925	inq = rcv_nxt - copied_seq;
1926	if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) {
1927		lock_sock(sk);
1928		inq = tp->rcv_nxt - tp->copied_seq;
1929		release_sock(sk);
1930	}
1931	/* After receiving a FIN, tell user space to continue reading
1932	 * by returning a non-zero inq.
1933	 */
1934	if (inq == 0 && sock_flag(sk, SOCK_DONE))
1935		inq = 1;
1936	return inq;
1937}
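/* A minimal user-space sketch (not part of this file, assumes uapi headers
 * that define TCP_INQ/TCP_CM_INQ): once TCP_INQ has been enabled with
 * setsockopt(IPPROTO_TCP, TCP_INQ, 1), each recvmsg() carries the hint
 * computed above as a control message. Names below are assumptions; the
 * cmsg_level is SOL_TCP, which equals IPPROTO_TCP.
 */
#include <linux/tcp.h>		/* TCP_INQ, TCP_CM_INQ */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t recv_with_inq(int fd, void *buf, size_t len, int *inq)
{
	char control[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg;
	struct cmsghdr *cm;
	ssize_t n;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = control;
	msg.msg_controllen = sizeof(control);

	n = recvmsg(fd, &msg, 0);
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
		if (cm->cmsg_level == IPPROTO_TCP && cm->cmsg_type == TCP_CM_INQ)
			memcpy(inq, CMSG_DATA(cm), sizeof(*inq));
	return n;
}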
1938
1939/*
1940 *	This routine copies from a sock struct into the user buffer.
1941 *
1942 *	Technical note: in 2.3 we work on _locked_ socket, so that
1943 *	tricks with *seq access order and skb->users are not required.
1944 *	Probably, code can be easily improved even more.
1945 */
1946
1947int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
1948		int flags, int *addr_len)
1949{
1950	struct tcp_sock *tp = tcp_sk(sk);
1951	int copied = 0;
1952	u32 peek_seq;
1953	u32 *seq;
1954	unsigned long used;
1955	int err, inq;
1956	int target;		/* Read at least this many bytes */
1957	long timeo;
1958	struct sk_buff *skb, *last;
1959	u32 urg_hole = 0;
1960	struct scm_timestamping_internal tss;
1961	bool has_tss = false;
1962	bool has_cmsg;
1963
1964	if (unlikely(flags & MSG_ERRQUEUE))
1965		return inet_recv_error(sk, msg, len, addr_len);
1966
1967	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue) &&
1968	    (sk->sk_state == TCP_ESTABLISHED))
1969		sk_busy_loop(sk, nonblock);
1970
1971	lock_sock(sk);
1972
1973	err = -ENOTCONN;
1974	if (sk->sk_state == TCP_LISTEN)
1975		goto out;
1976
1977	has_cmsg = tp->recvmsg_inq;
1978	timeo = sock_rcvtimeo(sk, nonblock);
1979
1980	/* Urgent data needs to be handled specially. */
1981	if (flags & MSG_OOB)
1982		goto recv_urg;
1983
1984	if (unlikely(tp->repair)) {
1985		err = -EPERM;
1986		if (!(flags & MSG_PEEK))
1987			goto out;
1988
1989		if (tp->repair_queue == TCP_SEND_QUEUE)
1990			goto recv_sndq;
1991
1992		err = -EINVAL;
1993		if (tp->repair_queue == TCP_NO_QUEUE)
1994			goto out;
1995
1996		/* 'common' recv queue MSG_PEEK-ing */
1997	}
1998
1999	seq = &tp->copied_seq;
2000	if (flags & MSG_PEEK) {
2001		peek_seq = tp->copied_seq;
2002		seq = &peek_seq;
2003	}
2004
2005	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
2006
2007	do {
2008		u32 offset;
2009
2010		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
2011		if (tp->urg_data && tp->urg_seq == *seq) {
2012			if (copied)
2013				break;
2014			if (signal_pending(current)) {
2015				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
2016				break;
2017			}
2018		}
2019
2020		/* Next get a buffer. */
2021
2022		last = skb_peek_tail(&sk->sk_receive_queue);
2023		skb_queue_walk(&sk->sk_receive_queue, skb) {
2024			last = skb;
2025			/* Now that we have two receive queues this
2026			 * shouldn't happen.
2027			 */
2028			if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
2029				 "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n",
2030				 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
2031				 flags))
2032				break;
2033
2034			offset = *seq - TCP_SKB_CB(skb)->seq;
2035			if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
2036				pr_err_once("%s: found a SYN, please report !\n", __func__);
2037				offset--;
2038			}
2039			if (offset < skb->len)
2040				goto found_ok_skb;
2041			if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
2042				goto found_fin_ok;
2043			WARN(!(flags & MSG_PEEK),
2044			     "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n",
2045			     *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
2046		}
2047
2048		/* Well, if we have backlog, try to process it now. */
2049
2050		if (copied >= target && !sk->sk_backlog.tail)
2051			break;
2052
2053		if (copied) {
2054			if (sk->sk_err ||
2055			    sk->sk_state == TCP_CLOSE ||
2056			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
2057			    !timeo ||
2058			    signal_pending(current))
2059				break;
2060		} else {
2061			if (sock_flag(sk, SOCK_DONE))
2062				break;
2063
2064			if (sk->sk_err) {
2065				copied = sock_error(sk);
2066				break;
2067			}
2068
2069			if (sk->sk_shutdown & RCV_SHUTDOWN)
2070				break;
2071
2072			if (sk->sk_state == TCP_CLOSE) {
2073				/* This occurs when the user tries to read
2074				 * from a never-connected socket.
2075				 */
2076				copied = -ENOTCONN;
2077				break;
2078			}
2079
2080			if (!timeo) {
2081				copied = -EAGAIN;
2082				break;
2083			}
2084
2085			if (signal_pending(current)) {
2086				copied = sock_intr_errno(timeo);
2087				break;
2088			}
2089		}
2090
2091		tcp_cleanup_rbuf(sk, copied);
2092
2093		if (copied >= target) {
2094			/* Do not sleep, just process backlog. */
2095			release_sock(sk);
2096			lock_sock(sk);
2097		} else {
2098			sk_wait_data(sk, &timeo, last);
2099		}
2100
2101		if ((flags & MSG_PEEK) &&
2102		    (peek_seq - copied - urg_hole != tp->copied_seq)) {
2103			net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
2104					    current->comm,
2105					    task_pid_nr(current));
2106			peek_seq = tp->copied_seq;
2107		}
2108		continue;
2109
2110found_ok_skb:
2111		/* Ok so how much can we use? */
2112		used = skb->len - offset;
2113		if (len < used)
2114			used = len;
2115
2116		/* Do we have urgent data here? */
2117		if (tp->urg_data) {
2118			u32 urg_offset = tp->urg_seq - *seq;
2119			if (urg_offset < used) {
2120				if (!urg_offset) {
2121					if (!sock_flag(sk, SOCK_URGINLINE)) {
2122						WRITE_ONCE(*seq, *seq + 1);
2123						urg_hole++;
2124						offset++;
2125						used--;
2126						if (!used)
2127							goto skip_copy;
2128					}
2129				} else
2130					used = urg_offset;
2131			}
2132		}
2133
2134		if (!(flags & MSG_TRUNC)) {
2135			err = skb_copy_datagram_msg(skb, offset, msg, used);
2136			if (err) {
2137				/* Exception. Bailout! */
2138				if (!copied)
2139					copied = -EFAULT;
2140				break;
2141			}
2142		}
2143
2144		WRITE_ONCE(*seq, *seq + used);
2145		copied += used;
2146		len -= used;
2147
2148		tcp_rcv_space_adjust(sk);
2149
2150skip_copy:
2151		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
2152			tp->urg_data = 0;
2153			tcp_fast_path_check(sk);
2154		}
2155		if (used + offset < skb->len)
2156			continue;
2157
2158		if (TCP_SKB_CB(skb)->has_rxtstamp) {
2159			tcp_update_recv_tstamps(skb, &tss);
2160			has_tss = true;
2161			has_cmsg = true;
2162		}
2163		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
2164			goto found_fin_ok;
2165		if (!(flags & MSG_PEEK))
2166			sk_eat_skb(sk, skb);
2167		continue;
2168
2169found_fin_ok:
2170		/* Process the FIN. */
2171		WRITE_ONCE(*seq, *seq + 1);
2172		if (!(flags & MSG_PEEK))
2173			sk_eat_skb(sk, skb);
2174		break;
2175	} while (len > 0);
2176
2177	/* According to UNIX98, msg_name/msg_namelen are ignored
2178	 * on a connected socket. I was just happy when I found this 8) --ANK
2179	 */
2180
2181	/* Clean up data we have read: This will do ACK frames. */
2182	tcp_cleanup_rbuf(sk, copied);
2183
2184	release_sock(sk);
2185
2186	if (has_cmsg) {
2187		if (has_tss)
2188			tcp_recv_timestamp(msg, sk, &tss);
2189		if (tp->recvmsg_inq) {
2190			inq = tcp_inq_hint(sk);
2191			put_cmsg(msg, SOL_TCP, TCP_CM_INQ, sizeof(inq), &inq);
2192		}
2193	}
2194
2195	return copied;
2196
2197out:
2198	release_sock(sk);
2199	return err;
2200
2201recv_urg:
2202	err = tcp_recv_urg(sk, msg, len, flags);
2203	goto out;
2204
2205recv_sndq:
2206	err = tcp_peek_sndq(sk, msg, len);
2207	goto out;
2208}
2209EXPORT_SYMBOL(tcp_recvmsg);
2210
2211void tcp_set_state(struct sock *sk, int state)
2212{
2213	int oldstate = sk->sk_state;
2214
2215	/* We defined a new enum for TCP states that are exported in BPF
2216	 * so as not to force the internal TCP states to be frozen. The
2217	 * following checks will detect if an internal state value ever
2218	 * differs from the BPF value. If this ever happens, then we will
2219	 * need to remap the internal value to the BPF value before calling
2220	 * tcp_call_bpf_2arg.
2221	 */
2222	BUILD_BUG_ON((int)BPF_TCP_ESTABLISHED != (int)TCP_ESTABLISHED);
2223	BUILD_BUG_ON((int)BPF_TCP_SYN_SENT != (int)TCP_SYN_SENT);
2224	BUILD_BUG_ON((int)BPF_TCP_SYN_RECV != (int)TCP_SYN_RECV);
2225	BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT1 != (int)TCP_FIN_WAIT1);
2226	BUILD_BUG_ON((int)BPF_TCP_FIN_WAIT2 != (int)TCP_FIN_WAIT2);
2227	BUILD_BUG_ON((int)BPF_TCP_TIME_WAIT != (int)TCP_TIME_WAIT);
2228	BUILD_BUG_ON((int)BPF_TCP_CLOSE != (int)TCP_CLOSE);
2229	BUILD_BUG_ON((int)BPF_TCP_CLOSE_WAIT != (int)TCP_CLOSE_WAIT);
2230	BUILD_BUG_ON((int)BPF_TCP_LAST_ACK != (int)TCP_LAST_ACK);
2231	BUILD_BUG_ON((int)BPF_TCP_LISTEN != (int)TCP_LISTEN);
2232	BUILD_BUG_ON((int)BPF_TCP_CLOSING != (int)TCP_CLOSING);
2233	BUILD_BUG_ON((int)BPF_TCP_NEW_SYN_RECV != (int)TCP_NEW_SYN_RECV);
2234	BUILD_BUG_ON((int)BPF_TCP_MAX_STATES != (int)TCP_MAX_STATES);
2235
2236	if (BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk), BPF_SOCK_OPS_STATE_CB_FLAG))
2237		tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_STATE_CB, oldstate, state);
2238
2239	switch (state) {
2240	case TCP_ESTABLISHED:
2241		if (oldstate != TCP_ESTABLISHED)
2242			TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2243		break;
2244
2245	case TCP_CLOSE:
2246		if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
2247			TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
2248
2249		sk->sk_prot->unhash(sk);
2250		if (inet_csk(sk)->icsk_bind_hash &&
2251		    !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
2252			inet_put_port(sk);
2253		/* fall through */
2254	default:
2255		if (oldstate == TCP_ESTABLISHED)
2256			TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2257	}
2258
2259	/* Change state AFTER socket is unhashed to avoid closed
2260	 * socket sitting in hash tables.
2261	 */
2262	inet_sk_state_store(sk, state);
2263}
2264EXPORT_SYMBOL_GPL(tcp_set_state);
2265
2266/*
2267 *	State processing on a close. This implements the state shift for
2268 *	sending our FIN frame. Note that we only send a FIN for some
2269 *	states. A shutdown() may have already sent the FIN, or we may be
2270 *	closed.
2271 */
2272
2273static const unsigned char new_state[16] = {
2274  /* current state:        new state:      action:	*/
2275  [0 /* (Invalid) */]	= TCP_CLOSE,
2276  [TCP_ESTABLISHED]	= TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2277  [TCP_SYN_SENT]	= TCP_CLOSE,
2278  [TCP_SYN_RECV]	= TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2279  [TCP_FIN_WAIT1]	= TCP_FIN_WAIT1,
2280  [TCP_FIN_WAIT2]	= TCP_FIN_WAIT2,
2281  [TCP_TIME_WAIT]	= TCP_CLOSE,
2282  [TCP_CLOSE]		= TCP_CLOSE,
2283  [TCP_CLOSE_WAIT]	= TCP_LAST_ACK  | TCP_ACTION_FIN,
2284  [TCP_LAST_ACK]	= TCP_LAST_ACK,
2285  [TCP_LISTEN]		= TCP_CLOSE,
2286  [TCP_CLOSING]		= TCP_CLOSING,
2287  [TCP_NEW_SYN_RECV]	= TCP_CLOSE,	/* should not happen ! */
2288};
2289
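/* Each new_state[] entry encodes the next socket state in its low bits
 * (TCP_STATE_MASK), plus an optional TCP_ACTION_FIN flag telling
 * tcp_close_state() below that a FIN must be sent for this transition.
 */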
2290static int tcp_close_state(struct sock *sk)
2291{
2292	int next = (int)new_state[sk->sk_state];
2293	int ns = next & TCP_STATE_MASK;
2294
2295	tcp_set_state(sk, ns);
2296
2297	return next & TCP_ACTION_FIN;
2298}
2299
2300/*
2301 *	Shutdown the sending side of a connection. Much like close except
2302 *	that we don't shut down the receive side or sock_set_flag(sk, SOCK_DEAD).
2303 */
2304
2305void tcp_shutdown(struct sock *sk, int how)
2306{
2307	/*	We need to grab some memory, and put together a FIN,
2308	 *	and then put it into the queue to be sent.
2309	 *		Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
2310	 */
2311	if (!(how & SEND_SHUTDOWN))
2312		return;
2313
2314	/* If we've already sent a FIN, or it's a closed state, skip this. */
2315	if ((1 << sk->sk_state) &
2316	    (TCPF_ESTABLISHED | TCPF_SYN_SENT |
2317	     TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
2318		/* Clear out any half completed packets.  FIN if needed. */
2319		if (tcp_close_state(sk))
2320			tcp_send_fin(sk);
2321	}
2322}
2323EXPORT_SYMBOL(tcp_shutdown);
2324
2325bool tcp_check_oom(struct sock *sk, int shift)
2326{
2327	bool too_many_orphans, out_of_socket_memory;
2328
2329	too_many_orphans = tcp_too_many_orphans(sk, shift);
2330	out_of_socket_memory = tcp_out_of_memory(sk);
2331
2332	if (too_many_orphans)
2333		net_info_ratelimited("too many orphaned sockets\n");
2334	if (out_of_socket_memory)
2335		net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
2336	return too_many_orphans || out_of_socket_memory;
2337}
2338
2339void tcp_close(struct sock *sk, long timeout)
2340{
2341	struct sk_buff *skb;
2342	int data_was_unread = 0;
2343	int state;
2344
2345	lock_sock(sk);
2346	sk->sk_shutdown = SHUTDOWN_MASK;
2347
2348	if (sk->sk_state == TCP_LISTEN) {
2349		tcp_set_state(sk, TCP_CLOSE);
2350
2351		/* Special case. */
2352		inet_csk_listen_stop(sk);
2353
2354		goto adjudge_to_death;
2355	}
2356
2357	/*  We need to flush the recv. buffs.  We do this only on the
2358	 *  descriptor close, not protocol-sourced closes, because the
2359	 *  reader process may not have drained the data yet!
2360	 */
2361	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
2362		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
2363
2364		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
2365			len--;
2366		data_was_unread += len;
2367		__kfree_skb(skb);
2368	}
2369
2370	sk_mem_reclaim(sk);
2371
2372	/* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
2373	if (sk->sk_state == TCP_CLOSE)
2374		goto adjudge_to_death;
2375
2376	/* As outlined in RFC 2525, section 2.17, we send a RST here because
2377	 * data was lost. To witness the awful effects of the old behavior of
2378	 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
2379	 * GET in an FTP client, suspend the process, wait for the client to
2380	 * advertise a zero window, then kill -9 the FTP client, wheee...
2381	 * Note: timeout is always zero in such a case.
2382	 */
2383	if (unlikely(tcp_sk(sk)->repair)) {
2384		sk->sk_prot->disconnect(sk, 0);
2385	} else if (data_was_unread) {
2386		/* Unread data was tossed, zap the connection. */
2387		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
2388		tcp_set_state(sk, TCP_CLOSE);
2389		tcp_send_active_reset(sk, sk->sk_allocation);
2390	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
2391		/* Check zero linger _after_ checking for unread data. */
2392		sk->sk_prot->disconnect(sk, 0);
2393		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
2394	} else if (tcp_close_state(sk)) {
2395		/* We FIN if the application ate all the data before
2396		 * zapping the connection.
2397		 */
2398
2399		/* RED-PEN. Formally speaking, we have broken TCP state
2400		 * machine. State transitions:
2401		 *
2402		 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
2403		 * TCP_SYN_RECV	-> TCP_FIN_WAIT1 (forget it, it's impossible)
2404		 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
2405		 *
2406		 * are legal only when a FIN has been sent (i.e. in window),
2407		 * rather than queued out of window. Purists may object.
2408		 *
2409		 * E.g. the "RFC state" is ESTABLISHED
2410		 * if the Linux state is FIN-WAIT-1 but the FIN has not been sent yet.
2411		 *
2412		 * The visible deviations are that we sometimes
2413		 * enter the time-wait state when it is not really required
2414		 * (harmless), and do not send active resets when they are
2415		 * required by the specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
2416		 * they look like CLOSING or LAST_ACK to Linux).
2417		 * Probably I have missed some more corner cases.
2418		 * 						--ANK
2419		 * XXX (TFO) - To start off we don't support SYN+ACK+FIN
2420		 * in a single packet! (May consider it later but will
2421		 * probably need API support or TCP_CORK SYN-ACK until
2422		 * data is written and socket is closed.)
2423		 */
2424		tcp_send_fin(sk);
2425	}
2426
2427	sk_stream_wait_close(sk, timeout);
2428
2429adjudge_to_death:
2430	state = sk->sk_state;
2431	sock_hold(sk);
2432	sock_orphan(sk);
2433
2434	local_bh_disable();
2435	bh_lock_sock(sk);
2436	/* remove backlog if any, without releasing ownership. */
2437	__release_sock(sk);
2438
2439	percpu_counter_inc(sk->sk_prot->orphan_count);
2440
2441	/* Have we already been destroyed by a softirq or backlog? */
2442	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
2443		goto out;
2444
2445	/*	This is a (useful) BSD-violating deviation from the RFC. There is a
2446	 *	problem with TCP as specified, in that the other end could
2447	 *	keep a socket open forever with no application left at this end.
2448	 *	We use a 1 minute timeout (about the same as BSD) and then kill
2449	 *	our end. If they send after that then tough - BUT: long enough
2450	 *	that we won't repeat the old "4*rto = almost no time - whoops,
2451	 *	reset" mistake.
2452	 *
2453	 *	Nope, it was not a mistake. It is really desired behaviour,
2454	 *	e.g. on HTTP servers, where such sockets are useless but
2455	 *	consume significant resources. Let's do it with the special
2456	 *	linger2	option.					--ANK
2457	 */
2458
2459	if (sk->sk_state == TCP_FIN_WAIT2) {
2460		struct tcp_sock *tp = tcp_sk(sk);
2461		if (tp->linger2 < 0) {
2462			tcp_set_state(sk, TCP_CLOSE);
2463			tcp_send_active_reset(sk, GFP_ATOMIC);
2464			__NET_INC_STATS(sock_net(sk),
2465					LINUX_MIB_TCPABORTONLINGER);
2466		} else {
2467			const int tmo = tcp_fin_time(sk);
2468
2469			if (tmo > TCP_TIMEWAIT_LEN) {
2470				inet_csk_reset_keepalive_timer(sk,
2471						tmo - TCP_TIMEWAIT_LEN);
2472			} else {
2473				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
2474				goto out;
2475			}
2476		}
2477	}
2478	if (sk->sk_state != TCP_CLOSE) {
2479		sk_mem_reclaim(sk);
2480		if (tcp_check_oom(sk, 0)) {
2481			tcp_set_state(sk, TCP_CLOSE);
2482			tcp_send_active_reset(sk, GFP_ATOMIC);
2483			__NET_INC_STATS(sock_net(sk),
2484					LINUX_MIB_TCPABORTONMEMORY);
2485		} else if (!check_net(sock_net(sk))) {
2486			/* Not possible to send reset; just close */
2487			tcp_set_state(sk, TCP_CLOSE);
2488		}
2489	}
2490
2491	if (sk->sk_state == TCP_CLOSE) {
2492		struct request_sock *req;
2493
2494		req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
2495						lockdep_sock_is_held(sk));
2496		/* We could get here with a non-NULL req if the socket is
2497		 * aborted (e.g., closed with unread data) before 3WHS
2498		 * finishes.
2499		 */
2500		if (req)
2501			reqsk_fastopen_remove(sk, req, false);
2502		inet_csk_destroy_sock(sk);
2503	}
2504	/* Otherwise, socket is reprieved until protocol close. */
2505
2506out:
2507	bh_unlock_sock(sk);
2508	local_bh_enable();
2509	release_sock(sk);
2510	sock_put(sk);
2511}
2512EXPORT_SYMBOL(tcp_close);
2513
2514/* These states need RST on ABORT according to RFC793 */
2515
2516static inline bool tcp_need_reset(int state)
2517{
2518	return (1 << state) &
2519	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
2520		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
2521}
2522
2523static void tcp_rtx_queue_purge(struct sock *sk)
2524{
2525	struct rb_node *p = rb_first(&sk->tcp_rtx_queue);
2526
2527	while (p) {
2528		struct sk_buff *skb = rb_to_skb(p);
2529
2530		p = rb_next(p);
2531		/* Since we are deleting the whole queue, there is no need to
2532		 * list_del(&skb->tcp_tsorted_anchor)
2533		 */
2534		tcp_rtx_queue_unlink(skb, sk);
2535		sk_wmem_free_skb(sk, skb);
2536	}
2537}
2538
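/* Free every skb queued for transmission (both the write queue and the
 * rtx rb-tree), drop any cached tx skb, and clear retransmission state.
 */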
2539void tcp_write_queue_purge(struct sock *sk)
2540{
2541	struct sk_buff *skb;
2542
2543	tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
2544	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
2545		tcp_skb_tsorted_anchor_cleanup(skb);
2546		sk_wmem_free_skb(sk, skb);
2547	}
2548	tcp_rtx_queue_purge(sk);
2549	skb = sk->sk_tx_skb_cache;
2550	if (skb) {
2551		__kfree_skb(skb);
2552		sk->sk_tx_skb_cache = NULL;
2553	}
2554	INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue);
2555	sk_mem_reclaim(sk);
2556	tcp_clear_all_retrans_hints(tcp_sk(sk));
2557	tcp_sk(sk)->packets_out = 0;
2558	inet_csk(sk)->icsk_backoff = 0;
2559}
2560
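/* Abort the connection and reset per-connection state so the socket can
 * be reused, e.g. after a connect() with AF_UNSPEC. Called with the
 * socket locked.
 */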
2561int tcp_disconnect(struct sock *sk, int flags)
2562{
2563	struct inet_sock *inet = inet_sk(sk);
2564	struct inet_connection_sock *icsk = inet_csk(sk);
2565	struct tcp_sock *tp = tcp_sk(sk);
2566	int old_state = sk->sk_state;
2567	u32 seq;
2568
2569	if (old_state != TCP_CLOSE)
2570		tcp_set_state(sk, TCP_CLOSE);
2571
2572	/* ABORT function of RFC793 */
2573	if (old_state == TCP_LISTEN) {
2574		inet_csk_listen_stop(sk);
2575	} else if (unlikely(tp->repair)) {
2576		sk->sk_err = ECONNABORTED;
2577	} else if (tcp_need_reset(old_state) ||
2578		   (tp->snd_nxt != tp->write_seq &&
2579		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
2580		/* The last check adjusts for discrepancy of Linux wrt. RFC
2581		 * states
2582		 */
2583		tcp_send_active_reset(sk, gfp_any());
2584		sk->sk_err = ECONNRESET;
2585	} else if (old_state == TCP_SYN_SENT)
2586		sk->sk_err = ECONNRESET;
2587
2588	tcp_clear_xmit_timers(sk);
2589	__skb_queue_purge(&sk->sk_receive_queue);
2590	if (sk->sk_rx_skb_cache) {
2591		__kfree_skb(sk->sk_rx_skb_cache);
2592		sk->sk_rx_skb_cache = NULL;
2593	}
2594	WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
2595	tp->urg_data = 0;
2596	tcp_write_queue_purge(sk);
2597	tcp_fastopen_active_disable_ofo_check(sk);
2598	skb_rbtree_purge(&tp->out_of_order_queue);
2599
2600	inet->inet_dport = 0;
2601
2602	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
2603		inet_reset_saddr(sk);
2604
2605	sk->sk_shutdown = 0;
2606	sock_reset_flag(sk, SOCK_DONE);
2607	tp->srtt_us = 0;
2608	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
2609	tp->rcv_rtt_last_tsecr = 0;
2610
2611	seq = tp->write_seq + tp->max_window + 2;
2612	if (!seq)
2613		seq = 1;
2614	WRITE_ONCE(tp->write_seq, seq);
2615
2616	icsk->icsk_backoff = 0;
2617	tp->snd_cwnd = 2;
2618	icsk->icsk_probes_out = 0;
2619	icsk->icsk_rto = TCP_TIMEOUT_INIT;
2620	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
2621	tp->snd_cwnd = TCP_INIT_CWND;
2622	tp->snd_cwnd_cnt = 0;
2623	tp->window_clamp = 0;
2624	tp->delivered_ce = 0;
2625	tcp_set_ca_state(sk, TCP_CA_Open);
2626	tp->is_sack_reneg = 0;
2627	tcp_clear_retrans(tp);
2628	inet_csk_delack_init(sk);
2629	/* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
2630	 * issue in __tcp_select_window()
2631	 */
2632	icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
2633	memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2634	__sk_dst_reset(sk);
2635	dst_release(sk->sk_rx_dst);
2636	sk->sk_rx_dst = NULL;
2637	tcp_saved_syn_free(tp);
2638	tp->compressed_ack = 0;
2639	tp->bytes_sent = 0;
2640	tp->bytes_acked = 0;
2641	tp->bytes_received = 0;
2642	tp->bytes_retrans = 0;
2643	tp->duplicate_sack[0].start_seq = 0;
2644	tp->duplicate_sack[0].end_seq = 0;
2645	tp->dsack_dups = 0;
2646	tp->reord_seen = 0;
2647	tp->retrans_out = 0;
2648	tp->sacked_out = 0;
2649	tp->tlp_high_seq = 0;
2650	tp->last_oow_ack_time = 0;
2651	/* There's a bubble in the pipe until at least the first ACK. */
2652	tp->app_limited = ~0U;
2653	tp->rack.mstamp = 0;
2654	tp->rack.advanced = 0;
2655	tp->rack.reo_wnd_steps = 1;
2656	tp->rack.last_delivered = 0;
2657	tp->rack.reo_wnd_persist = 0;
2658	tp->rack.dsack_seen = 0;
2659	tp->syn_data_acked = 0;
2660	tp->rx_opt.saw_tstamp = 0;
2661	tp->rx_opt.dsack = 0;
2662	tp->rx_opt.num_sacks = 0;
2663	tp->rcv_ooopack = 0;
2664
2665
2666	/* Clean up fastopen related fields */
2667	tcp_free_fastopen_req(tp);
2668	inet->defer_connect = 0;
2669
2670	WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
2671
2672	if (sk->sk_frag.page) {
2673		put_page(sk->sk_frag.page);
2674		sk->sk_frag.page = NULL;
2675		sk->sk_frag.offset = 0;
2676	}
2677
2678	sk->sk_error_report(sk);
2679	return 0;
2680}
2681EXPORT_SYMBOL(tcp_disconnect);
2682
2683static inline bool tcp_can_repair_sock(const struct sock *sk)
2684{
2685	return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
2686		(sk->sk_state != TCP_LISTEN);
2687}
2688
2689static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int len)
2690{
2691	struct tcp_repair_window opt;
2692
2693	if (!tp->repair)
2694		return -EPERM;
2695
2696	if (len != sizeof(opt))
2697		return -EINVAL;
2698
2699	if (copy_from_user(&opt, optbuf, sizeof(opt)))
2700		return -EFAULT;
2701
2702	if (opt.max_window < opt.snd_wnd)
2703		return -EINVAL;
2704
2705	if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd))
2706		return -EINVAL;
2707
2708	if (after(opt.rcv_wup, tp->rcv_nxt))
2709		return -EINVAL;
2710
2711	tp->snd_wl1	= opt.snd_wl1;
2712	tp->snd_wnd	= opt.snd_wnd;
2713	tp->max_window	= opt.max_window;
2714
2715	tp->rcv_wnd	= opt.rcv_wnd;
2716	tp->rcv_wup	= opt.rcv_wup;
2717
2718	return 0;
2719}
2720
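/* Restore previously negotiated TCP options (MSS clamp, window scale,
 * SACK, timestamps) from a user-supplied array of struct tcp_repair_opt
 * while the socket is in repair mode.
 */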
2721static int tcp_repair_options_est(struct sock *sk,
2722		struct tcp_repair_opt __user *optbuf, unsigned int len)
2723{
2724	struct tcp_sock *tp = tcp_sk(sk);
2725	struct tcp_repair_opt opt;
2726
2727	while (len >= sizeof(opt)) {
2728		if (copy_from_user(&opt, optbuf, sizeof(opt)))
2729			return -EFAULT;
2730
2731		optbuf++;
2732		len -= sizeof(opt);
2733
2734		switch (opt.opt_code) {
2735		case TCPOPT_MSS:
2736			tp->rx_opt.mss_clamp = opt.opt_val;
2737			tcp_mtup_init(sk);
2738			break;
2739		case TCPOPT_WINDOW:
2740			{
2741				u16 snd_wscale = opt.opt_val & 0xFFFF;
2742				u16 rcv_wscale = opt.opt_val >> 16;
2743
2744				if (snd_wscale > TCP_MAX_WSCALE || rcv_wscale > TCP_MAX_WSCALE)
2745					return -EFBIG;
2746
2747				tp->rx_opt.snd_wscale = snd_wscale;
2748				tp->rx_opt.rcv_wscale = rcv_wscale;
2749				tp->rx_opt.wscale_ok = 1;
2750			}
2751			break;
2752		case TCPOPT_SACK_PERM:
2753			if (opt.opt_val != 0)
2754				return -EINVAL;
2755
2756			tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
2757			break;
2758		case TCPOPT_TIMESTAMP:
2759			if (opt.opt_val != 0)
2760				return -EINVAL;
2761
2762			tp->rx_opt.tstamp_ok = 1;
2763			break;
2764		}
2765	}
2766
2767	return 0;
2768}
2769
2770DEFINE_STATIC_KEY_FALSE(tcp_tx_delay_enabled);
2771EXPORT_SYMBOL(tcp_tx_delay_enabled);
2772
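/* Flip the tcp_tx_delay static key the first time any socket enables
 * TCP_TX_DELAY; the cmpxchg() guarantees the branch is enabled (and the
 * message printed) only once.
 */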
2773static void tcp_enable_tx_delay(void)
2774{
2775	if (!static_branch_unlikely(&tcp_tx_delay_enabled)) {
2776		static int __tcp_tx_delay_enabled = 0;
2777
2778		if (cmpxchg(&__tcp_tx_delay_enabled, 0, 1) == 0) {
2779			static_branch_enable(&tcp_tx_delay_enabled);
2780			pr_info("TCP_TX_DELAY enabled\n");
2781		}
2782	}
2783}
2784
2785/*
2786 *	Socket option code for TCP.
2787 */
2788static int do_tcp_setsockopt(struct sock *sk, int level,
2789		int optname, char __user *optval, unsigned int optlen)
2790{
2791	struct tcp_sock *tp = tcp_sk(sk);
2792	struct inet_connection_sock *icsk = inet_csk(sk);
2793	struct net *net = sock_net(sk);
2794	int val;
2795	int err = 0;
2796
2797	/* These are data/string values, all the others are ints */
2798	switch (optname) {
2799	case TCP_CONGESTION: {
2800		char name[TCP_CA_NAME_MAX];
2801
2802		if (optlen < 1)
2803			return -EINVAL;
2804
2805		val = strncpy_from_user(name, optval,
2806					min_t(long, TCP_CA_NAME_MAX-1, optlen));
2807		if (val < 0)
2808			return -EFAULT;
2809		name[val] = 0;
2810
2811		lock_sock(sk);
2812		err = tcp_set_congestion_control(sk, name, true, true,
2813						 ns_capable(sock_net(sk)->user_ns,
2814							    CAP_NET_ADMIN));
2815		release_sock(sk);
2816		return err;
2817	}
2818	case TCP_ULP: {
2819		char name[TCP_ULP_NAME_MAX];
2820
2821		if (optlen < 1)
2822			return -EINVAL;
2823
2824		val = strncpy_from_user(name, optval,
2825					min_t(long, TCP_ULP_NAME_MAX - 1,
2826					      optlen));
2827		if (val < 0)
2828			return -EFAULT;
2829		name[val] = 0;
2830
2831		lock_sock(sk);
2832		err = tcp_set_ulp(sk, name);
2833		release_sock(sk);
2834		return err;
2835	}
2836	case TCP_FASTOPEN_KEY: {
2837		__u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH];
2838		__u8 *backup_key = NULL;
2839
2840		/* Allow a backup key as well to facilitate key rotation.
2841		 * The first key is the active one.
2842		 */
2843		if (optlen != TCP_FASTOPEN_KEY_LENGTH &&
2844		    optlen != TCP_FASTOPEN_KEY_BUF_LENGTH)
2845			return -EINVAL;
2846
2847		if (copy_from_user(key, optval, optlen))
2848			return -EFAULT;
2849
2850		if (optlen == TCP_FASTOPEN_KEY_BUF_LENGTH)
2851			backup_key = key + TCP_FASTOPEN_KEY_LENGTH;
2852
2853		return tcp_fastopen_reset_cipher(net, sk, key, backup_key);
2854	}
2855	default:
2856		/* fallthru */
2857		break;
2858	}
2859
2860	if (optlen < sizeof(int))
2861		return -EINVAL;
2862
2863	if (get_user(val, (int __user *)optval))
2864		return -EFAULT;
2865
2866	lock_sock(sk);
2867
2868	switch (optname) {
2869	case TCP_MAXSEG:
2870		/* Values greater than the interface MTU won't take effect. However,
2871		 * at the point when this call is made we typically don't yet
2872		 * know which interface is going to be used.
2873		 */
2874		if (val && (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW)) {
2875			err = -EINVAL;
2876			break;
2877		}
2878		tp->rx_opt.user_mss = val;
2879		break;
2880
2881	case TCP_NODELAY:
2882		if (val) {
2883			/* TCP_NODELAY is weaker than TCP_CORK, so that
2884			 * this option on corked socket is remembered, but
2885			 * it is not activated until cork is cleared.
2886			 *
2887			 * However, when TCP_NODELAY is set we make
2888			 * an explicit push, which overrides even TCP_CORK
2889			 * for currently queued segments.
2890			 */
2891			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
2892			tcp_push_pending_frames(sk);
2893		} else {
2894			tp->nonagle &= ~TCP_NAGLE_OFF;
2895		}
2896		break;
2897
2898	case TCP_THIN_LINEAR_TIMEOUTS:
2899		if (val < 0 || val > 1)
2900			err = -EINVAL;
2901		else
2902			tp->thin_lto = val;
2903		break;
2904
2905	case TCP_THIN_DUPACK:
2906		if (val < 0 || val > 1)
2907			err = -EINVAL;
2908		break;
2909
2910	case TCP_REPAIR:
2911		if (!tcp_can_repair_sock(sk))
2912			err = -EPERM;
2913		else if (val == TCP_REPAIR_ON) {
2914			tp->repair = 1;
2915			sk->sk_reuse = SK_FORCE_REUSE;
2916			tp->repair_queue = TCP_NO_QUEUE;
2917		} else if (val == TCP_REPAIR_OFF) {
2918			tp->repair = 0;
2919			sk->sk_reuse = SK_NO_REUSE;
2920			tcp_send_window_probe(sk);
2921		} else if (val == TCP_REPAIR_OFF_NO_WP) {
2922			tp->repair = 0;
2923			sk->sk_reuse = SK_NO_REUSE;
2924		} else
2925			err = -EINVAL;
2926
2927		break;
2928
2929	case TCP_REPAIR_QUEUE:
2930		if (!tp->repair)
2931			err = -EPERM;
2932		else if ((unsigned int)val < TCP_QUEUES_NR)
2933			tp->repair_queue = val;
2934		else
2935			err = -EINVAL;
2936		break;
2937
2938	case TCP_QUEUE_SEQ:
2939		if (sk->sk_state != TCP_CLOSE)
2940			err = -EPERM;
2941		else if (tp->repair_queue == TCP_SEND_QUEUE)
2942			WRITE_ONCE(tp->write_seq, val);
2943		else if (tp->repair_queue == TCP_RECV_QUEUE)
2944			WRITE_ONCE(tp->rcv_nxt, val);
2945		else
2946			err = -EINVAL;
2947		break;
2948
2949	case TCP_REPAIR_OPTIONS:
2950		if (!tp->repair)
2951			err = -EINVAL;
2952		else if (sk->sk_state == TCP_ESTABLISHED)
2953			err = tcp_repair_options_est(sk,
2954					(struct tcp_repair_opt __user *)optval,
2955					optlen);
2956		else
2957			err = -EPERM;
2958		break;
2959
2960	case TCP_CORK:
2961		/* When set indicates to always queue non-full frames.
2962		 * Later the user clears this option and we transmit
2963		 * any pending partial frames in the queue.  This is
2964		 * meant to be used alongside sendfile() to get properly
2965		 * filled frames when the user (for example) must write
2966		 * out headers with a write() call first and then use
2967		 * sendfile to send out the data parts.
2968		 *
2969		 * TCP_CORK can be set together with TCP_NODELAY and it is
2970		 * stronger than TCP_NODELAY.
2971		 */
2972		if (val) {
2973			tp->nonagle |= TCP_NAGLE_CORK;
2974		} else {
2975			tp->nonagle &= ~TCP_NAGLE_CORK;
2976			if (tp->nonagle&TCP_NAGLE_OFF)
2977				tp->nonagle |= TCP_NAGLE_PUSH;
2978			tcp_push_pending_frames(sk);
2979		}
2980		break;
2981
2982	case TCP_KEEPIDLE:
2983		if (val < 1 || val > MAX_TCP_KEEPIDLE)
2984			err = -EINVAL;
2985		else {
2986			tp->keepalive_time = val * HZ;
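			/* If keepalive is already armed, re-arm the timer with
			 * whatever time remains under the new idle value (or
			 * expire at once if it has already elapsed).
			 */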
2987			if (sock_flag(sk, SOCK_KEEPOPEN) &&
2988			    !((1 << sk->sk_state) &
2989			      (TCPF_CLOSE | TCPF_LISTEN))) {
2990				u32 elapsed = keepalive_time_elapsed(tp);
2991				if (tp->keepalive_time > elapsed)
2992					elapsed = tp->keepalive_time - elapsed;
2993				else
2994					elapsed = 0;
2995				inet_csk_reset_keepalive_timer(sk, elapsed);
2996			}
2997		}
2998		break;
2999	case TCP_KEEPINTVL:
3000		if (val < 1 || val > MAX_TCP_KEEPINTVL)
3001			err = -EINVAL;
3002		else
3003			tp->keepalive_intvl = val * HZ;
3004		break;
3005	case TCP_KEEPCNT:
3006		if (val < 1 || val > MAX_TCP_KEEPCNT)
3007			err = -EINVAL;
3008		else
3009			tp->keepalive_probes = val;
3010		break;
3011	case TCP_SYNCNT:
3012		if (val < 1 || val > MAX_TCP_SYNCNT)
3013			err = -EINVAL;
3014		else
3015			icsk->icsk_syn_retries = val;
3016		break;
3017
3018	case TCP_SAVE_SYN:
3019		if (val < 0 || val > 1)
3020			err = -EINVAL;
3021		else
3022			tp->save_syn = val;
3023		break;
3024
3025	case TCP_LINGER2:
3026		if (val < 0)
3027			tp->linger2 = -1;
3028		else if (val > net->ipv4.sysctl_tcp_fin_timeout / HZ)
3029			tp->linger2 = 0;
3030		else
3031			tp->linger2 = val * HZ;
3032		break;
3033
3034	case TCP_DEFER_ACCEPT:
3035		/* Translate value in seconds to number of retransmits */
3036		icsk->icsk_accept_queue.rskq_defer_accept =
3037			secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
3038					TCP_RTO_MAX / HZ);
3039		break;
3040
3041	case TCP_WINDOW_CLAMP:
3042		if (!val) {
3043			if (sk->sk_state != TCP_CLOSE) {
3044				err = -EINVAL;
3045				break;
3046			}
3047			tp->window_clamp = 0;
3048		} else
3049			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
3050						SOCK_MIN_RCVBUF / 2 : val;
3051		break;
3052
3053	case TCP_QUICKACK:
3054		if (!val) {
3055			inet_csk_enter_pingpong_mode(sk);
3056		} else {
3057			inet_csk_exit_pingpong_mode(sk);
3058			if ((1 << sk->sk_state) &
3059			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
3060			    inet_csk_ack_scheduled(sk)) {
3061				icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
3062				tcp_cleanup_rbuf(sk, 1);
3063				if (!(val & 1))
3064					inet_csk_enter_pingpong_mode(sk);
3065			}
3066		}
3067		break;
3068
3069#ifdef CONFIG_TCP_MD5SIG
3070	case TCP_MD5SIG:
3071	case TCP_MD5SIG_EXT:
3072		if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
3073			err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
3074		else
3075			err = -EINVAL;
3076		break;
3077#endif
3078	case TCP_USER_TIMEOUT:
3079		/* Cap the max time in ms TCP will retry or probe the window
3080		 * before giving up and aborting (ETIMEDOUT) a connection.
3081		 */
3082		if (val < 0)
3083			err = -EINVAL;
3084		else
3085			icsk->icsk_user_timeout = val;
3086		break;
3087
3088	case TCP_FASTOPEN:
3089		if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
3090		    TCPF_LISTEN))) {
3091			tcp_fastopen_init_key_once(net);
3092
3093			fastopen_queue_tune(sk, val);
3094		} else {
3095			err = -EINVAL;
3096		}
3097		break;
3098	case TCP_FASTOPEN_CONNECT:
3099		if (val > 1 || val < 0) {
3100			err = -EINVAL;
3101		} else if (net->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) {
3102			if (sk->sk_state == TCP_CLOSE)
3103				tp->fastopen_connect = val;
3104			else
3105				err = -EINVAL;
3106		} else {
3107			err = -EOPNOTSUPP;
3108		}
3109		break;
3110	case TCP_FASTOPEN_NO_COOKIE:
3111		if (val > 1 || val < 0)
3112			err = -EINVAL;
3113		else if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
3114			err = -EINVAL;
3115		else
3116			tp->fastopen_no_cookie = val;
3117		break;
3118	case TCP_TIMESTAMP:
3119		if (!tp->repair)
3120			err = -EPERM;
3121		else
3122			tp->tsoffset = val - tcp_time_stamp_raw();
3123		break;
3124	case TCP_REPAIR_WINDOW:
3125		err = tcp_repair_set_window(tp, optval, optlen);
3126		break;
3127	case TCP_NOTSENT_LOWAT:
3128		tp->notsent_lowat = val;
3129		sk->sk_write_space(sk);
3130		break;
3131	case TCP_INQ:
3132		if (val > 1 || val < 0)
3133			err = -EINVAL;
3134		else
3135			tp->recvmsg_inq = val;
3136		break;
3137	case TCP_TX_DELAY:
3138		if (val)
3139			tcp_enable_tx_delay();
3140		tp->tcp_tx_delay = val;
3141		break;
3142	default:
3143		err = -ENOPROTOOPT;
3144		break;
3145	}
3146
3147	release_sock(sk);
3148	return err;
3149}
3150
3151int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
3152		   unsigned int optlen)
3153{
3154	const struct inet_connection_sock *icsk = inet_csk(sk);
3155
3156	if (level != SOL_TCP)
3157		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
3158						     optval, optlen);
3159	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
3160}
3161EXPORT_SYMBOL(tcp_setsockopt);
3162
3163#ifdef CONFIG_COMPAT
3164int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
3165			  char __user *optval, unsigned int optlen)
3166{
3167	if (level != SOL_TCP)
3168		return inet_csk_compat_setsockopt(sk, level, optname,
3169						  optval, optlen);
3170	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
3171}
3172EXPORT_SYMBOL(compat_tcp_setsockopt);
3173#endif
3174
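/* Report how long the flow has been busy, receive-window limited and
 * send-buffer limited, converting the per-state chrono counters from
 * jiffies to microseconds.
 */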
3175static void tcp_get_info_chrono_stats(const struct tcp_sock *tp,
3176				      struct tcp_info *info)
3177{
3178	u64 stats[__TCP_CHRONO_MAX], total = 0;
3179	enum tcp_chrono i;
3180
3181	for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) {
3182		stats[i] = tp->chrono_stat[i - 1];
3183		if (i == tp->chrono_type)
3184			stats[i] += tcp_jiffies32 - tp->chrono_start;
3185		stats[i] *= USEC_PER_SEC / HZ;
3186		total += stats[i];
3187	}
3188
3189	info->tcpi_busy_time = total;
3190	info->tcpi_rwnd_limited = stats[TCP_CHRONO_RWND_LIMITED];
3191	info->tcpi_sndbuf_limited = stats[TCP_CHRONO_SNDBUF_LIMITED];
3192}
3193
3194/* Return information about state of tcp endpoint in API format. */
3195void tcp_get_info(struct sock *sk, struct tcp_info *info)
3196{
3197	const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
3198	const struct inet_connection_sock *icsk = inet_csk(sk);
3199	unsigned long rate;
3200	u32 now;
3201	u64 rate64;
3202	bool slow;
3203
3204	memset(info, 0, sizeof(*info));
3205	if (sk->sk_type != SOCK_STREAM)
3206		return;
3207
3208	info->tcpi_state = inet_sk_state_load(sk);
3209
3210	/* Report meaningful fields for all TCP states, including listeners */
3211	rate = READ_ONCE(sk->sk_pacing_rate);
3212	rate64 = (rate != ~0UL) ? rate : ~0ULL;
3213	info->tcpi_pacing_rate = rate64;
3214
3215	rate = READ_ONCE(sk->sk_max_pacing_rate);
3216	rate64 = (rate != ~0UL) ? rate : ~0ULL;
3217	info->tcpi_max_pacing_rate = rate64;
3218
3219	info->tcpi_reordering = tp->reordering;
3220	info->tcpi_snd_cwnd = tp->snd_cwnd;
3221
3222	if (info->tcpi_state == TCP_LISTEN) {
3223		/* Listeners' aliased fields:
3224		 * tcpi_unacked -> Number of children ready for accept()
3225		 * tcpi_sacked  -> max backlog
3226		 */
3227		info->tcpi_unacked = sk->sk_ack_backlog;
3228		info->tcpi_sacked = sk->sk_max_ack_backlog;
3229		return;
3230	}
3231
3232	slow = lock_sock_fast(sk);
3233
3234	info->tcpi_ca_state = icsk->icsk_ca_state;
3235	info->tcpi_retransmits = icsk->icsk_retransmits;
3236	info->tcpi_probes = icsk->icsk_probes_out;
3237	info->tcpi_backoff = icsk->icsk_backoff;
3238
3239	if (tp->rx_opt.tstamp_ok)
3240		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
3241	if (tcp_is_sack(tp))
3242		info->tcpi_options |= TCPI_OPT_SACK;
3243	if (tp->rx_opt.wscale_ok) {
3244		info->tcpi_options |= TCPI_OPT_WSCALE;
3245		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
3246		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
3247	}
3248
3249	if (tp->ecn_flags & TCP_ECN_OK)
3250		info->tcpi_options |= TCPI_OPT_ECN;
3251	if (tp->ecn_flags & TCP_ECN_SEEN)
3252		info->tcpi_options |= TCPI_OPT_ECN_SEEN;
3253	if (tp->syn_data_acked)
3254		info->tcpi_options |= TCPI_OPT_SYN_DATA;
3255
3256	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
3257	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
3258	info->tcpi_snd_mss = tp->mss_cache;
3259	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
3260
3261	info->tcpi_unacked = tp->packets_out;
3262	info->tcpi_sacked = tp->sacked_out;
3263
3264	info->tcpi_lost = tp->lost_out;
3265	info->tcpi_retrans = tp->retrans_out;
3266
3267	now = tcp_jiffies32;
3268	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
3269	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
3270	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
3271
3272	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
3273	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
3274	info->tcpi_rtt = tp->srtt_us >> 3;
3275	info->tcpi_rttvar = tp->mdev_us >> 2;
3276	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
3277	info->tcpi_advmss = tp->advmss;
3278
3279	info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3;
3280	info->tcpi_rcv_space = tp->rcvq_space.space;
3281
3282	info->tcpi_total_retrans = tp->total_retrans;
3283
3284	info->tcpi_bytes_acked = tp->bytes_acked;
3285	info->tcpi_bytes_received = tp->bytes_received;
3286	info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt);
3287	tcp_get_info_chrono_stats(tp, info);
3288
3289	info->tcpi_segs_out = tp->segs_out;
3290	info->tcpi_segs_in = tp->segs_in;
3291
3292	info->tcpi_min_rtt = tcp_min_rtt(tp);
3293	info->tcpi_data_segs_in = tp->data_segs_in;
3294	info->tcpi_data_segs_out = tp->data_segs_out;
3295
3296	info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0;
3297	rate64 = tcp_compute_delivery_rate(tp);
3298	if (rate64)
3299		info->tcpi_delivery_rate = rate64;
3300	info->tcpi_delivered = tp->delivered;
3301	info->tcpi_delivered_ce = tp->delivered_ce;
3302	info->tcpi_bytes_sent = tp->bytes_sent;
3303	info->tcpi_bytes_retrans = tp->bytes_retrans;
3304	info->tcpi_dsack_dups = tp->dsack_dups;
3305	info->tcpi_reord_seen = tp->reord_seen;
3306	info->tcpi_rcv_ooopack = tp->rcv_ooopack;
3307	info->tcpi_snd_wnd = tp->snd_wnd;
3308	unlock_sock_fast(sk, slow);
3309}
3310EXPORT_SYMBOL_GPL(tcp_get_info);
3311
3312static size_t tcp_opt_stats_get_size(void)
3313{
3314	return
3315		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BUSY */
3316		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_RWND_LIMITED */
3317		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_SNDBUF_LIMITED */
3318		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DATA_SEGS_OUT */
3319		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_TOTAL_RETRANS */
3320		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_PACING_RATE */
3321		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_DELIVERY_RATE */
3322		nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_CWND */
3323		nla_total_size(sizeof(u32)) + /* TCP_NLA_REORDERING */
3324		nla_total_size(sizeof(u32)) + /* TCP_NLA_MIN_RTT */
3325		nla_total_size(sizeof(u8)) + /* TCP_NLA_RECUR_RETRANS */
3326		nla_total_size(sizeof(u8)) + /* TCP_NLA_DELIVERY_RATE_APP_LMT */
3327		nla_total_size(sizeof(u32)) + /* TCP_NLA_SNDQ_SIZE */
3328		nla_total_size(sizeof(u8)) + /* TCP_NLA_CA_STATE */
3329		nla_total_size(sizeof(u32)) + /* TCP_NLA_SND_SSTHRESH */
3330		nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED */
3331		nla_total_size(sizeof(u32)) + /* TCP_NLA_DELIVERED_CE */
3332		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_SENT */
3333		nla_total_size_64bit(sizeof(u64)) + /* TCP_NLA_BYTES_RETRANS */
3334		nla_total_size(sizeof(u32)) + /* TCP_NLA_DSACK_DUPS */
3335		nla_total_size(sizeof(u32)) + /* TCP_NLA_REORD_SEEN */
3336		nla_total_size(sizeof(u32)) + /* TCP_NLA_SRTT */
3337		0;
3338}
3339
3340struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
3341{
3342	const struct tcp_sock *tp = tcp_sk(sk);
3343	struct sk_buff *stats;
3344	struct tcp_info info;
3345	unsigned long rate;
3346	u64 rate64;
3347
3348	stats = alloc_skb(tcp_opt_stats_get_size(), GFP_ATOMIC);
3349	if (!stats)
3350		return NULL;
3351
3352	tcp_get_info_chrono_stats(tp, &info);
3353	nla_put_u64_64bit(stats, TCP_NLA_BUSY,
3354			  info.tcpi_busy_time, TCP_NLA_PAD);
3355	nla_put_u64_64bit(stats, TCP_NLA_RWND_LIMITED,
3356			  info.tcpi_rwnd_limited, TCP_NLA_PAD);
3357	nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED,
3358			  info.tcpi_sndbuf_limited, TCP_NLA_PAD);
3359	nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT,
3360			  tp->data_segs_out, TCP_NLA_PAD);
3361	nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS,
3362			  tp->total_retrans, TCP_NLA_PAD);
3363
3364	rate = READ_ONCE(sk->sk_pacing_rate);
3365	rate64 = (rate != ~0UL) ? rate : ~0ULL;
3366	nla_put_u64_64bit(stats, TCP_NLA_PACING_RATE, rate64, TCP_NLA_PAD);
3367
3368	rate64 = tcp_compute_delivery_rate(tp);
3369	nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD);
3370
3371	nla_put_u32(stats, TCP_NLA_SND_CWND, tp->snd_cwnd);
3372	nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering);
3373	nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp));
3374
3375	nla_put_u8(stats, TCP_NLA_RECUR_RETRANS, inet_csk(sk)->icsk_retransmits);
3376	nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited);
3377	nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh);
3378	nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered);
3379	nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce);
3380
3381	nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una);
3382	nla_put_u8(stats, TCP_NLA_CA_STATE, inet_csk(sk)->icsk_ca_state);
3383
3384	nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent,
3385			  TCP_NLA_PAD);
3386	nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans,
3387			  TCP_NLA_PAD);
3388	nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups);
3389	nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen);
3390	nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3);
3391
3392	return stats;
3393}
3394
3395static int do_tcp_getsockopt(struct sock *sk, int level,
3396		int optname, char __user *optval, int __user *optlen)
3397{
3398	struct inet_connection_sock *icsk = inet_csk(sk);
3399	struct tcp_sock *tp = tcp_sk(sk);
3400	struct net *net = sock_net(sk);
3401	int val, len;
3402
3403	if (get_user(len, optlen))
3404		return -EFAULT;
3405
3406	len = min_t(unsigned int, len, sizeof(int));
3407
3408	if (len < 0)
3409		return -EINVAL;
3410
3411	switch (optname) {
3412	case TCP_MAXSEG:
3413		val = tp->mss_cache;
3414		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
3415			val = tp->rx_opt.user_mss;
3416		if (tp->repair)
3417			val = tp->rx_opt.mss_clamp;
3418		break;
3419	case TCP_NODELAY:
3420		val = !!(tp->nonagle&TCP_NAGLE_OFF);
3421		break;
3422	case TCP_CORK:
3423		val = !!(tp->nonagle&TCP_NAGLE_CORK);
3424		break;
3425	case TCP_KEEPIDLE:
3426		val = keepalive_time_when(tp) / HZ;
3427		break;
3428	case TCP_KEEPINTVL:
3429		val = keepalive_intvl_when(tp) / HZ;
3430		break;
3431	case TCP_KEEPCNT:
3432		val = keepalive_probes(tp);
3433		break;
3434	case TCP_SYNCNT:
3435		val = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
3436		break;
3437	case TCP_LINGER2:
3438		val = tp->linger2;
3439		if (val >= 0)
3440			val = (val ? : net->ipv4.sysctl_tcp_fin_timeout) / HZ;
3441		break;
3442	case TCP_DEFER_ACCEPT:
3443		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
3444				      TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
3445		break;
3446	case TCP_WINDOW_CLAMP:
3447		val = tp->window_clamp;
3448		break;
3449	case TCP_INFO: {
3450		struct tcp_info info;
3451
3452		if (get_user(len, optlen))
3453			return -EFAULT;
3454
3455		tcp_get_info(sk, &info);
3456
3457		len = min_t(unsigned int, len, sizeof(info));
3458		if (put_user(len, optlen))
3459			return -EFAULT;
3460		if (copy_to_user(optval, &info, len))
3461			return -EFAULT;
3462		return 0;
3463	}
3464	case TCP_CC_INFO: {
3465		const struct tcp_congestion_ops *ca_ops;
3466		union tcp_cc_info info;
3467		size_t sz = 0;
3468		int attr;
3469
3470		if (get_user(len, optlen))
3471			return -EFAULT;
3472
3473		ca_ops = icsk->icsk_ca_ops;
3474		if (ca_ops && ca_ops->get_info)
3475			sz = ca_ops->get_info(sk, ~0U, &attr, &info);
3476
3477		len = min_t(unsigned int, len, sz);
3478		if (put_user(len, optlen))
3479			return -EFAULT;
3480		if (copy_to_user(optval, &info, len))
3481			return -EFAULT;
3482		return 0;
3483	}
3484	case TCP_QUICKACK:
3485		val = !inet_csk_in_pingpong_mode(sk);
3486		break;
3487
3488	case TCP_CONGESTION:
3489		if (get_user(len, optlen))
3490			return -EFAULT;
3491		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
3492		if (put_user(len, optlen))
3493			return -EFAULT;
3494		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
3495			return -EFAULT;
3496		return 0;
3497
3498	case TCP_ULP:
3499		if (get_user(len, optlen))
3500			return -EFAULT;
3501		len = min_t(unsigned int, len, TCP_ULP_NAME_MAX);
3502		if (!icsk->icsk_ulp_ops) {
3503			if (put_user(0, optlen))
3504				return -EFAULT;
3505			return 0;
3506		}
3507		if (put_user(len, optlen))
3508			return -EFAULT;
3509		if (copy_to_user(optval, icsk->icsk_ulp_ops->name, len))
3510			return -EFAULT;
3511		return 0;
3512
3513	case TCP_FASTOPEN_KEY: {
3514		__u8 key[TCP_FASTOPEN_KEY_BUF_LENGTH];
3515		struct tcp_fastopen_context *ctx;
3516		unsigned int key_len = 0;
3517
3518		if (get_user(len, optlen))
3519			return -EFAULT;
3520
3521		rcu_read_lock();
3522		ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
3523		if (ctx) {
3524			key_len = tcp_fastopen_context_len(ctx) *
3525					TCP_FASTOPEN_KEY_LENGTH;
3526			memcpy(&key[0], &ctx->key[0], key_len);
3527		}
3528		rcu_read_unlock();
3529
3530		len = min_t(unsigned int, len, key_len);
3531		if (put_user(len, optlen))
3532			return -EFAULT;
3533		if (copy_to_user(optval, key, len))
3534			return -EFAULT;
3535		return 0;
3536	}
3537	case TCP_THIN_LINEAR_TIMEOUTS:
3538		val = tp->thin_lto;
3539		break;
3540
3541	case TCP_THIN_DUPACK:
3542		val = 0;
3543		break;
3544
3545	case TCP_REPAIR:
3546		val = tp->repair;
3547		break;
3548
3549	case TCP_REPAIR_QUEUE:
3550		if (tp->repair)
3551			val = tp->repair_queue;
3552		else
3553			return -EINVAL;
3554		break;
3555
3556	case TCP_REPAIR_WINDOW: {
3557		struct tcp_repair_window opt;
3558
3559		if (get_user(len, optlen))
3560			return -EFAULT;
3561
3562		if (len != sizeof(opt))
3563			return -EINVAL;
3564
3565		if (!tp->repair)
3566			return -EPERM;
3567
3568		opt.snd_wl1	= tp->snd_wl1;
3569		opt.snd_wnd	= tp->snd_wnd;
3570		opt.max_window	= tp->max_window;
3571		opt.rcv_wnd	= tp->rcv_wnd;
3572		opt.rcv_wup	= tp->rcv_wup;
3573
3574		if (copy_to_user(optval, &opt, len))
3575			return -EFAULT;
3576		return 0;
3577	}
3578	case TCP_QUEUE_SEQ:
3579		if (tp->repair_queue == TCP_SEND_QUEUE)
3580			val = tp->write_seq;
3581		else if (tp->repair_queue == TCP_RECV_QUEUE)
3582			val = tp->rcv_nxt;
3583		else
3584			return -EINVAL;
3585		break;
3586
3587	case TCP_USER_TIMEOUT:
3588		val = icsk->icsk_user_timeout;
3589		break;
3590
3591	case TCP_FASTOPEN:
3592		val = icsk->icsk_accept_queue.fastopenq.max_qlen;
3593		break;
3594
3595	case TCP_FASTOPEN_CONNECT:
3596		val = tp->fastopen_connect;
3597		break;
3598
3599	case TCP_FASTOPEN_NO_COOKIE:
3600		val = tp->fastopen_no_cookie;
3601		break;
3602
3603	case TCP_TX_DELAY:
3604		val = tp->tcp_tx_delay;
3605		break;
3606
3607	case TCP_TIMESTAMP:
3608		val = tcp_time_stamp_raw() + tp->tsoffset;
3609		break;
3610	case TCP_NOTSENT_LOWAT:
3611		val = tp->notsent_lowat;
3612		break;
3613	case TCP_INQ:
3614		val = tp->recvmsg_inq;
3615		break;
3616	case TCP_SAVE_SYN:
3617		val = tp->save_syn;
3618		break;
3619	case TCP_SAVED_SYN: {
3620		if (get_user(len, optlen))
3621			return -EFAULT;
3622
3623		lock_sock(sk);
3624		if (tp->saved_syn) {
3625			if (len < tp->saved_syn[0]) {
3626				if (put_user(tp->saved_syn[0], optlen)) {
3627					release_sock(sk);
3628					return -EFAULT;
3629				}
3630				release_sock(sk);
3631				return -EINVAL;
3632			}
3633			len = tp->saved_syn[0];
3634			if (put_user(len, optlen)) {
3635				release_sock(sk);
3636				return -EFAULT;
3637			}
3638			if (copy_to_user(optval, tp->saved_syn + 1, len)) {
3639				release_sock(sk);
3640				return -EFAULT;
3641			}
3642			tcp_saved_syn_free(tp);
3643			release_sock(sk);
3644		} else {
3645			release_sock(sk);
3646			len = 0;
3647			if (put_user(len, optlen))
3648				return -EFAULT;
3649		}
3650		return 0;
3651	}
3652#ifdef CONFIG_MMU
3653	case TCP_ZEROCOPY_RECEIVE: {
3654		struct tcp_zerocopy_receive zc;
3655		int err;
3656
3657		if (get_user(len, optlen))
3658			return -EFAULT;
3659		if (len != sizeof(zc))
3660			return -EINVAL;
3661		if (copy_from_user(&zc, optval, len))
3662			return -EFAULT;
3663		lock_sock(sk);
3664		err = tcp_zerocopy_receive(sk, &zc);
3665		release_sock(sk);
3666		if (!err && copy_to_user(optval, &zc, len))
3667			err = -EFAULT;
3668		return err;
3669	}
3670#endif
3671	default:
3672		return -ENOPROTOOPT;
3673	}
3674
3675	if (put_user(len, optlen))
3676		return -EFAULT;
3677	if (copy_to_user(optval, &val, len))
3678		return -EFAULT;
3679	return 0;
3680}
3681
3682int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
3683		   int __user *optlen)
3684{
3685	struct inet_connection_sock *icsk = inet_csk(sk);
3686
3687	if (level != SOL_TCP)
3688		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
3689						     optval, optlen);
3690	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
3691}
3692EXPORT_SYMBOL(tcp_getsockopt);
3693
3694#ifdef CONFIG_COMPAT
3695int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
3696			  char __user *optval, int __user *optlen)
3697{
3698	if (level != SOL_TCP)
3699		return inet_csk_compat_getsockopt(sk, level, optname,
3700						  optval, optlen);
3701	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
3702}
3703EXPORT_SYMBOL(compat_tcp_getsockopt);
3704#endif
3705
3706#ifdef CONFIG_TCP_MD5SIG
3707static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
3708static DEFINE_MUTEX(tcp_md5sig_mutex);
3709static bool tcp_md5sig_pool_populated = false;
3710
3711static void __tcp_alloc_md5sig_pool(void)
3712{
3713	struct crypto_ahash *hash;
3714	int cpu;
3715
3716	hash = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
3717	if (IS_ERR(hash))
3718		return;
3719
3720	for_each_possible_cpu(cpu) {
3721		void *scratch = per_cpu(tcp_md5sig_pool, cpu).scratch;
3722		struct ahash_request *req;
3723
3724		if (!scratch) {
3725			scratch = kmalloc_node(sizeof(union tcp_md5sum_block) +
3726					       sizeof(struct tcphdr),
3727					       GFP_KERNEL,
3728					       cpu_to_node(cpu));
3729			if (!scratch)
3730				return;
3731			per_cpu(tcp_md5sig_pool, cpu).scratch = scratch;
3732		}
3733		if (per_cpu(tcp_md5sig_pool, cpu).md5_req)
3734			continue;
3735
3736		req = ahash_request_alloc(hash, GFP_KERNEL);
3737		if (!req)
3738			return;
3739
3740		ahash_request_set_callback(req, 0, NULL, NULL);
3741
3742		per_cpu(tcp_md5sig_pool, cpu).md5_req = req;
3743	}
3744	/* before setting tcp_md5sig_pool_populated, we must commit all writes
3745	 * to memory. See smp_rmb() in tcp_get_md5sig_pool()
3746	 */
3747	smp_wmb();
3748	tcp_md5sig_pool_populated = true;
3749}
3750
3751bool tcp_alloc_md5sig_pool(void)
3752{
3753	if (unlikely(!tcp_md5sig_pool_populated)) {
3754		mutex_lock(&tcp_md5sig_mutex);
3755
3756		if (!tcp_md5sig_pool_populated) {
3757			__tcp_alloc_md5sig_pool();
3758			if (tcp_md5sig_pool_populated)
3759				static_branch_inc(&tcp_md5_needed);
3760		}
3761
3762		mutex_unlock(&tcp_md5sig_mutex);
3763	}
3764	return tcp_md5sig_pool_populated;
3765}
3766EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
3767
3768
3769/**
3770 *	tcp_get_md5sig_pool - get md5sig_pool for this user
3771 *
3772 *	We use a percpu structure, so if we succeed, we exit with preemption
3773 *	and BH disabled, to make sure another thread or softirq handler
3774 *	won't try to get the same context.
3775 */
3776struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
3777{
3778	local_bh_disable();
3779
3780	if (tcp_md5sig_pool_populated) {
3781		/* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
3782		smp_rmb();
3783		return this_cpu_ptr(&tcp_md5sig_pool);
3784	}
3785	local_bh_enable();
3786	return NULL;
3787}
3788EXPORT_SYMBOL(tcp_get_md5sig_pool);
3789
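/* Feed the skb's payload (linear head beyond header_len, page frags and
 * any frag_list skbs) into the MD5 ahash request of the given pool.
 */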
3790int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
3791			  const struct sk_buff *skb, unsigned int header_len)
3792{
3793	struct scatterlist sg;
3794	const struct tcphdr *tp = tcp_hdr(skb);
3795	struct ahash_request *req = hp->md5_req;
3796	unsigned int i;
3797	const unsigned int head_data_len = skb_headlen(skb) > header_len ?
3798					   skb_headlen(skb) - header_len : 0;
3799	const struct skb_shared_info *shi = skb_shinfo(skb);
3800	struct sk_buff *frag_iter;
3801
3802	sg_init_table(&sg, 1);
3803
3804	sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
3805	ahash_request_set_crypt(req, &sg, NULL, head_data_len);
3806	if (crypto_ahash_update(req))
3807		return 1;
3808
3809	for (i = 0; i < shi->nr_frags; ++i) {
3810		const skb_frag_t *f = &shi->frags[i];
3811		unsigned int offset = skb_frag_off(f);
3812		struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
3813
3814		sg_set_page(&sg, page, skb_frag_size(f),
3815			    offset_in_page(offset));
3816		ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f));
3817		if (crypto_ahash_update(req))
3818			return 1;
3819	}
3820
3821	skb_walk_frags(skb, frag_iter)
3822		if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
3823			return 1;
3824
3825	return 0;
3826}
3827EXPORT_SYMBOL(tcp_md5_hash_skb_data);
3828
3829int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
3830{
3831	struct scatterlist sg;
3832
3833	sg_init_one(&sg, key->key, key->keylen);
3834	ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen);
3835	return crypto_ahash_update(hp->md5_req);
3836}
3837EXPORT_SYMBOL(tcp_md5_hash_key);
3838
3839#endif
3840
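/* Final bookkeeping when a connection is finished: move to TCP_CLOSE,
 * stop the transmit timers, and either wake the owner or, if the socket
 * is already orphaned, destroy it.
 */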
3841void tcp_done(struct sock *sk)
3842{
3843	struct request_sock *req;
3844
3845	/* We might be called with a new socket, after
3846	 * inet_csk_prepare_forced_close() has been called,
3847	 * so we cannot use lockdep_sock_is_held(sk).
3848	 */
3849	req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1);
3850
3851	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
3852		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
3853
3854	tcp_set_state(sk, TCP_CLOSE);
3855	tcp_clear_xmit_timers(sk);
3856	if (req)
3857		reqsk_fastopen_remove(sk, req, false);
3858
3859	sk->sk_shutdown = SHUTDOWN_MASK;
3860
3861	if (!sock_flag(sk, SOCK_DEAD))
3862		sk->sk_state_change(sk);
3863	else
3864		inet_csk_destroy_sock(sk);
3865}
3866EXPORT_SYMBOL_GPL(tcp_done);
3867
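/* Forcibly abort a connection with the given error, e.g. on behalf of
 * the inet_diag SOCK_DESTROY command ("ss -K").
 */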
3868int tcp_abort(struct sock *sk, int err)
3869{
3870	if (!sk_fullsock(sk)) {
3871		if (sk->sk_state == TCP_NEW_SYN_RECV) {
3872			struct request_sock *req = inet_reqsk(sk);
3873
3874			local_bh_disable();
3875			inet_csk_reqsk_queue_drop(req->rsk_listener, req);
3876			local_bh_enable();
3877			return 0;
3878		}
3879		return -EOPNOTSUPP;
3880	}
3881
3882	/* Don't race with userspace socket closes such as tcp_close. */
3883	lock_sock(sk);
3884
3885	if (sk->sk_state == TCP_LISTEN) {
3886		tcp_set_state(sk, TCP_CLOSE);
3887		inet_csk_listen_stop(sk);
3888	}
3889
3890	/* Don't race with BH socket closes such as inet_csk_listen_stop. */
3891	local_bh_disable();
3892	bh_lock_sock(sk);
3893
3894	if (!sock_flag(sk, SOCK_DEAD)) {
3895		sk->sk_err = err;
3896		/* This barrier is coupled with smp_rmb() in tcp_poll() */
3897		smp_wmb();
3898		sk->sk_error_report(sk);
3899		if (tcp_need_reset(sk->sk_state))
3900			tcp_send_active_reset(sk, GFP_ATOMIC);
3901		tcp_done(sk);
3902	}
3903
3904	bh_unlock_sock(sk);
3905	local_bh_enable();
3906	tcp_write_queue_purge(sk);
3907	release_sock(sk);
3908	return 0;
3909}
3910EXPORT_SYMBOL_GPL(tcp_abort);
3911
3912extern struct tcp_congestion_ops tcp_reno;
3913
3914static __initdata unsigned long thash_entries;
3915static int __init set_thash_entries(char *str)
3916{
3917	ssize_t ret;
3918
3919	if (!str)
3920		return 0;
3921
3922	ret = kstrtoul(str, 0, &thash_entries);
3923	if (ret)
3924		return 0;
3925
3926	return 1;
3927}
3928__setup("thash_entries=", set_thash_entries);
3929
3930static void __init tcp_init_mem(void)
3931{
3932	unsigned long limit = nr_free_buffer_pages() / 16;
3933
3934	limit = max(limit, 128UL);
3935	sysctl_tcp_mem[0] = limit / 4 * 3;		/* 4.68 % */
3936	sysctl_tcp_mem[1] = limit;			/* 6.25 % */
3937	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;	/* 9.37 % */
3938}
3939
3940void __init tcp_init(void)
3941{
3942	int max_rshare, max_wshare, cnt;
3943	unsigned long limit;
3944	unsigned int i;
3945
3946	BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
3947	BUILD_BUG_ON(sizeof(struct tcp_skb_cb) >
3948		     FIELD_SIZEOF(struct sk_buff, cb));
3949
3950	percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
3951	percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
3952	inet_hashinfo_init(&tcp_hashinfo);
3953	inet_hashinfo2_init(&tcp_hashinfo, "tcp_listen_portaddr_hash",
3954			    thash_entries, 21,  /* one slot per 2 MB*/
3955			    0, 64 * 1024);
3956	tcp_hashinfo.bind_bucket_cachep =
3957		kmem_cache_create("tcp_bind_bucket",
3958				  sizeof(struct inet_bind_bucket), 0,
3959				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3960
3961	/* Size and allocate the main established and bind bucket
3962	 * hash tables.
3963	 *
3964	 * The methodology is similar to that of the buffer cache.
3965	 */
3966	tcp_hashinfo.ehash =
3967		alloc_large_system_hash("TCP established",
3968					sizeof(struct inet_ehash_bucket),
3969					thash_entries,
3970					17, /* one slot per 128 KB of memory */
3971					0,
3972					NULL,
3973					&tcp_hashinfo.ehash_mask,
3974					0,
3975					thash_entries ? 0 : 512 * 1024);
3976	for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
3977		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
3978
3979	if (inet_ehash_locks_alloc(&tcp_hashinfo))
3980		panic("TCP: failed to alloc ehash_locks");
3981	tcp_hashinfo.bhash =
3982		alloc_large_system_hash("TCP bind",
3983					sizeof(struct inet_bind_hashbucket),
3984					tcp_hashinfo.ehash_mask + 1,
3985					17, /* one slot per 128 KB of memory */
3986					0,
3987					&tcp_hashinfo.bhash_size,
3988					NULL,
3989					0,
3990					64 * 1024);
3991	tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
3992	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
3993		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
3994		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
3995	}
3996
3997
3998	cnt = tcp_hashinfo.ehash_mask + 1;
3999	sysctl_tcp_max_orphans = cnt / 2;
4000
4001	tcp_init_mem();
4002	/* Set per-socket limits to no more than 1/128 the pressure threshold */
4003	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
4004	max_wshare = min(4UL*1024*1024, limit);
4005	max_rshare = min(6UL*1024*1024, limit);
4006
4007	init_net.ipv4.sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
4008	init_net.ipv4.sysctl_tcp_wmem[1] = 16*1024;
4009	init_net.ipv4.sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
4010
4011	init_net.ipv4.sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
4012	init_net.ipv4.sysctl_tcp_rmem[1] = 131072;
4013	init_net.ipv4.sysctl_tcp_rmem[2] = max(131072, max_rshare);
4014
4015	pr_info("Hash tables configured (established %u bind %u)\n",
4016		tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
4017
4018	tcp_v4_init();
4019	tcp_metrics_init();
4020	BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
4021	tcp_tasklet_init();
4022}