   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Implementation of the Transmission Control Protocol(TCP).
   7 *
   8 * Authors:	Ross Biro
   9 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  10 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
  11 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
  12 *		Florian La Roche, <flla@stud.uni-sb.de>
  13 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
  14 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
  15 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  16 *		Matthew Dillon, <dillon@apollo.west.oic.com>
  17 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  18 *		Jorge Cwik, <jorge@laser.satlink.net>
  19 */
  20
  21/*
  22 * Changes:
  23 *		Pedro Roque	:	Fast Retransmit/Recovery.
  24 *					Two receive queues.
  25 *					Retransmit queue handled by TCP.
  26 *					Better retransmit timer handling.
  27 *					New congestion avoidance.
  28 *					Header prediction.
  29 *					Variable renaming.
  30 *
  31 *		Eric		:	Fast Retransmit.
  32 *		Randy Scott	:	MSS option defines.
  33 *		Eric Schenk	:	Fixes to slow start algorithm.
  34 *		Eric Schenk	:	Yet another double ACK bug.
  35 *		Eric Schenk	:	Delayed ACK bug fixes.
  36 *		Eric Schenk	:	Floyd style fast retrans war avoidance.
  37 *		David S. Miller	:	Don't allow zero congestion window.
  38 *		Eric Schenk	:	Fix retransmitter so that it sends
  39 *					next packet on ack of previous packet.
  40 *		Andi Kleen	:	Moved open_request checking here
  41 *					and process RSTs for open_requests.
  42 *		Andi Kleen	:	Better prune_queue, and other fixes.
  43 *		Andrey Savochkin:	Fix RTT measurements in the presence of
  44 *					timestamps.
  45 *		Andrey Savochkin:	Check sequence numbers correctly when
  46 *					removing SACKs due to in sequence incoming
  47 *					data segments.
  48 *		Andi Kleen:		Make sure we never ack data there is not
  49 *					enough room for. Also make this condition
  50 *					a fatal error if it might still happen.
  51 *		Andi Kleen:		Add tcp_measure_rcv_mss to make
  52 *					connections with MSS<min(MTU,ann. MSS)
  53 *					work without delayed acks.
  54 *		Andi Kleen:		Process packets with PSH set in the
  55 *					fast path.
  56 *		J Hadi Salim:		ECN support
  57 *	 	Andrei Gurtov,
  58 *		Pasi Sarolahti,
  59 *		Panu Kuhlberg:		Experimental audit of TCP (re)transmission
   60 *					engine. Lots of bugs were found.
  61 *		Pasi Sarolahti:		F-RTO for dealing with spurious RTOs
  62 */
  63
  64#include <linux/mm.h>
  65#include <linux/slab.h>
  66#include <linux/module.h>
  67#include <linux/sysctl.h>
  68#include <linux/kernel.h>
  69#include <net/dst.h>
  70#include <net/tcp.h>
  71#include <net/inet_common.h>
  72#include <linux/ipsec.h>
  73#include <asm/unaligned.h>
  74#include <net/netdma.h>
  75
  76int sysctl_tcp_timestamps __read_mostly = 1;
  77int sysctl_tcp_window_scaling __read_mostly = 1;
  78int sysctl_tcp_sack __read_mostly = 1;
  79int sysctl_tcp_fack __read_mostly = 1;
  80int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
  81EXPORT_SYMBOL(sysctl_tcp_reordering);
  82int sysctl_tcp_ecn __read_mostly = 2;
  83EXPORT_SYMBOL(sysctl_tcp_ecn);
  84int sysctl_tcp_dsack __read_mostly = 1;
  85int sysctl_tcp_app_win __read_mostly = 31;
  86int sysctl_tcp_adv_win_scale __read_mostly = 2;
  87EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
  88
  89int sysctl_tcp_stdurg __read_mostly;
  90int sysctl_tcp_rfc1337 __read_mostly;
  91int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
  92int sysctl_tcp_frto __read_mostly = 2;
  93int sysctl_tcp_frto_response __read_mostly;
  94int sysctl_tcp_nometrics_save __read_mostly;
  95
  96int sysctl_tcp_thin_dupack __read_mostly;
  97
  98int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
  99int sysctl_tcp_abc __read_mostly;
 100
 101#define FLAG_DATA		0x01 /* Incoming frame contained data.		*/
 102#define FLAG_WIN_UPDATE		0x02 /* Incoming ACK was a window update.	*/
 103#define FLAG_DATA_ACKED		0x04 /* This ACK acknowledged new data.		*/
 104#define FLAG_RETRANS_DATA_ACKED	0x08 /* "" "" some of which was retransmitted.	*/
 105#define FLAG_SYN_ACKED		0x10 /* This ACK acknowledged SYN.		*/
 106#define FLAG_DATA_SACKED	0x20 /* New SACK.				*/
 107#define FLAG_ECE		0x40 /* ECE in this ACK				*/
 108#define FLAG_DATA_LOST		0x80 /* SACK detected data lossage.		*/
 109#define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/
 110#define FLAG_ONLY_ORIG_SACKED	0x200 /* SACKs only non-rexmit sent before RTO */
 111#define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
 112#define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
 113#define FLAG_NONHEAD_RETRANS_ACKED	0x1000 /* Non-head rexmitted data was ACKed */
 114#define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */
 115
 116#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
 117#define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
 118#define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE)
 119#define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)
 120#define FLAG_ANY_PROGRESS	(FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED)
 121
 122#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
 123#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))
 124
  125/* Adapt the MSS value used to make delayed ACK decisions to the
  126 * real world.
 127 */
 128static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
 129{
 130	struct inet_connection_sock *icsk = inet_csk(sk);
 131	const unsigned int lss = icsk->icsk_ack.last_seg_size;
 132	unsigned int len;
 133
 134	icsk->icsk_ack.last_seg_size = 0;
 135
 136	/* skb->len may jitter because of SACKs, even if peer
 137	 * sends good full-sized frames.
 138	 */
 139	len = skb_shinfo(skb)->gso_size ? : skb->len;
 140	if (len >= icsk->icsk_ack.rcv_mss) {
 141		icsk->icsk_ack.rcv_mss = len;
 142	} else {
  143		/* Otherwise, we make a more careful check, taking into account
  144		 * that the size of the SACK option is variable.
 145		 *
 146		 * "len" is invariant segment length, including TCP header.
 147		 */
 148		len += skb->data - skb_transport_header(skb);
 149		if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) ||
 150		    /* If PSH is not set, packet should be
 151		     * full sized, provided peer TCP is not badly broken.
  152		     * This observation (if it is correct 8)) allows us
  153		     * to handle super-low MTU links fairly.
 154		     */
 155		    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
 156		     !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
 157			/* Subtract also invariant (if peer is RFC compliant),
 158			 * tcp header plus fixed timestamp option length.
 159			 * Resulting "len" is MSS free of SACK jitter.
 160			 */
 161			len -= tcp_sk(sk)->tcp_header_len;
 162			icsk->icsk_ack.last_seg_size = len;
 163			if (len == lss) {
 164				icsk->icsk_ack.rcv_mss = len;
 165				return;
 166			}
 167		}
 168		if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
 169			icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
 170		icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
 171	}
 172}
 173
 174static void tcp_incr_quickack(struct sock *sk)
 175{
 176	struct inet_connection_sock *icsk = inet_csk(sk);
 177	unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
 178
 179	if (quickacks == 0)
 180		quickacks = 2;
 181	if (quickacks > icsk->icsk_ack.quick)
 182		icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
 183}
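/* Illustrative sketch (not part of this file): the quick-ACK budget computed
 * above is roughly half a receive window's worth of segments, clamped between
 * 2 and TCP_MAX_QUICKACKS (16 in this tree).  The helper and the values in
 * main() are made-up userspace examples.
 */
#include <stdio.h>

static unsigned int quickack_budget(unsigned int rcv_wnd, unsigned int rcv_mss)
{
	unsigned int quickacks = rcv_wnd / (2 * rcv_mss);

	if (quickacks == 0)
		quickacks = 2;
	if (quickacks > 16)		/* TCP_MAX_QUICKACKS */
		quickacks = 16;
	return quickacks;
}

int main(void)
{
	/* A 64 KB window with 1460-byte segments allows the full 16 quick ACKs. */
	printf("%u\n", quickack_budget(65535, 1460));
	return 0;
}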
 184
 185static void tcp_enter_quickack_mode(struct sock *sk)
 186{
 187	struct inet_connection_sock *icsk = inet_csk(sk);
 188	tcp_incr_quickack(sk);
 189	icsk->icsk_ack.pingpong = 0;
 190	icsk->icsk_ack.ato = TCP_ATO_MIN;
 191}
 192
 193/* Send ACKs quickly, if "quick" count is not exhausted
 194 * and the session is not interactive.
 195 */
 196
 197static inline int tcp_in_quickack_mode(const struct sock *sk)
 198{
 199	const struct inet_connection_sock *icsk = inet_csk(sk);
 200	return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
 201}
 202
 203static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
 204{
 205	if (tp->ecn_flags & TCP_ECN_OK)
 206		tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
 207}
 208
 209static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, struct sk_buff *skb)
 210{
 211	if (tcp_hdr(skb)->cwr)
 212		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
 213}
 214
 215static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
 216{
 217	tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
 218}
 219
 220static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
 221{
 222	if (tp->ecn_flags & TCP_ECN_OK) {
 223		if (INET_ECN_is_ce(TCP_SKB_CB(skb)->flags))
 224			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
  225		/* Funny extension: if ECT is not set on a segment,
  226		 * it is surely a retransmit. This is not in the ECN RFC,
  227		 * but Linux follows this rule. */
 228		else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags)))
 229			tcp_enter_quickack_mode((struct sock *)tp);
 230	}
 231}
 232
 233static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, struct tcphdr *th)
 234{
 235	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
 236		tp->ecn_flags &= ~TCP_ECN_OK;
 237}
 238
 239static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, struct tcphdr *th)
 240{
 241	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
 242		tp->ecn_flags &= ~TCP_ECN_OK;
 243}
 244
 245static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)
 246{
 247	if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
 248		return 1;
 249	return 0;
 250}
 251
 252/* Buffer size and advertised window tuning.
 253 *
 254 * 1. Tuning sk->sk_sndbuf, when connection enters established state.
 255 */
 256
 257static void tcp_fixup_sndbuf(struct sock *sk)
 258{
 259	int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
 260		     sizeof(struct sk_buff);
 261
 262	if (sk->sk_sndbuf < 3 * sndmem) {
 263		sk->sk_sndbuf = 3 * sndmem;
 264		if (sk->sk_sndbuf > sysctl_tcp_wmem[2])
 265			sk->sk_sndbuf = sysctl_tcp_wmem[2];
 266	}
 267}
 268
 269/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
 270 *
  271 * All of tcp_full_space() is split into two parts: the "network" buffer,
  272 * allocated forward and advertised in the receiver window (tp->rcv_wnd),
  273 * and the "application buffer", required to isolate scheduling/application
  274 * latencies from the network.
  275 * window_clamp is the maximal advertised window. It can be less than
  276 * tcp_full_space(); in that case tcp_full_space() - window_clamp
  277 * is reserved for the "application" buffer. The smaller window_clamp is,
  278 * the smoother our behaviour from the network's viewpoint, but the lower
  279 * the throughput and the higher the connection's sensitivity to losses. 8)
  280 *
  281 * rcv_ssthresh is a stricter window_clamp used during the "slow start"
  282 * phase to predict further behaviour of this connection.
  283 * It is used for two goals:
  284 * - to enforce header prediction at the sender, even when the application
  285 *   requires some significant "application buffer". This is check #1.
  286 * - to prevent pruning of the receive queue because of misprediction
  287 *   of the receiver window. This is check #2.
 288 *
 289 * The scheme does not work when sender sends good segments opening
 290 * window and then starts to feed us spaghetti. But it should work
 291 * in common situations. Otherwise, we have to rely on queue collapsing.
 292 */
 293
 294/* Slow part of check#2. */
 295static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
 296{
 297	struct tcp_sock *tp = tcp_sk(sk);
 298	/* Optimize this! */
 299	int truesize = tcp_win_from_space(skb->truesize) >> 1;
 300	int window = tcp_win_from_space(sysctl_tcp_rmem[2]) >> 1;
 301
 302	while (tp->rcv_ssthresh <= window) {
 303		if (truesize <= skb->len)
 304			return 2 * inet_csk(sk)->icsk_ack.rcv_mss;
 305
 306		truesize >>= 1;
 307		window >>= 1;
 308	}
 309	return 0;
 310}
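/* Illustrative sketch (not kernel code): the "slow part of check #2" above,
 * restated standalone.  The skb's true memory cost and the maximum window are
 * halved together; if at the scale of the current rcv_ssthresh the payload
 * still dominates the per-skb overhead, growing the advertised window is
 * considered safe.  Names and starting values are simplified assumptions.
 */
static int payload_dominates_overhead(int skb_truesize, int skb_len,
				      int max_window, int rcv_ssthresh)
{
	int truesize = skb_truesize >> 1;
	int window = max_window >> 1;

	while (rcv_ssthresh <= window) {
		if (truesize <= skb_len)
			return 1;	/* safe to grow rcv_ssthresh */
		truesize >>= 1;
		window >>= 1;
	}
	return 0;
}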
 311
 312static void tcp_grow_window(struct sock *sk, struct sk_buff *skb)
 313{
 314	struct tcp_sock *tp = tcp_sk(sk);
 315
 316	/* Check #1 */
 317	if (tp->rcv_ssthresh < tp->window_clamp &&
 318	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
 319	    !tcp_memory_pressure) {
 320		int incr;
 321
 322		/* Check #2. Increase window, if skb with such overhead
 323		 * will fit to rcvbuf in future.
 324		 */
 325		if (tcp_win_from_space(skb->truesize) <= skb->len)
 326			incr = 2 * tp->advmss;
 327		else
 328			incr = __tcp_grow_window(sk, skb);
 329
 330		if (incr) {
 331			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
 332					       tp->window_clamp);
 333			inet_csk(sk)->icsk_ack.quick |= 1;
 334		}
 335	}
 336}
 337
 338/* 3. Tuning rcvbuf, when connection enters established state. */
 339
 340static void tcp_fixup_rcvbuf(struct sock *sk)
 341{
 342	struct tcp_sock *tp = tcp_sk(sk);
 343	int rcvmem = tp->advmss + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);
 344
 345	/* Try to select rcvbuf so that 4 mss-sized segments
 346	 * will fit to window and corresponding skbs will fit to our rcvbuf.
 347	 * (was 3; 4 is minimum to allow fast retransmit to work.)
 348	 */
 349	while (tcp_win_from_space(rcvmem) < tp->advmss)
 350		rcvmem += 128;
 351	if (sk->sk_rcvbuf < 4 * rcvmem)
 352		sk->sk_rcvbuf = min(4 * rcvmem, sysctl_tcp_rmem[2]);
 353}
 354
 355/* 4. Try to fixup all. It is made immediately after connection enters
 356 *    established state.
 357 */
 358static void tcp_init_buffer_space(struct sock *sk)
 359{
 360	struct tcp_sock *tp = tcp_sk(sk);
 361	int maxwin;
 362
 363	if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
 364		tcp_fixup_rcvbuf(sk);
 365	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
 366		tcp_fixup_sndbuf(sk);
 367
 368	tp->rcvq_space.space = tp->rcv_wnd;
 369
 370	maxwin = tcp_full_space(sk);
 371
 372	if (tp->window_clamp >= maxwin) {
 373		tp->window_clamp = maxwin;
 374
 375		if (sysctl_tcp_app_win && maxwin > 4 * tp->advmss)
 376			tp->window_clamp = max(maxwin -
 377					       (maxwin >> sysctl_tcp_app_win),
 378					       4 * tp->advmss);
 379	}
 380
 381	/* Force reservation of one segment. */
 382	if (sysctl_tcp_app_win &&
 383	    tp->window_clamp > 2 * tp->advmss &&
 384	    tp->window_clamp + tp->advmss > maxwin)
 385		tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);
 386
 387	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
 388	tp->snd_cwnd_stamp = tcp_time_stamp;
 389}
 390
 391/* 5. Recalculate window clamp after socket hit its memory bounds. */
 392static void tcp_clamp_window(struct sock *sk)
 393{
 394	struct tcp_sock *tp = tcp_sk(sk);
 395	struct inet_connection_sock *icsk = inet_csk(sk);
 396
 397	icsk->icsk_ack.quick = 0;
 398
 399	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
 400	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
 401	    !tcp_memory_pressure &&
 402	    atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
 403		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
 404				    sysctl_tcp_rmem[2]);
 405	}
 406	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
 407		tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
 408}
 409
 410/* Initialize RCV_MSS value.
  411 * RCV_MSS is our guess about the MSS used by the peer.
  412 * We have no direct information about the MSS.
  413 * It's better to underestimate the RCV_MSS rather than overestimate it.
  414 * Overestimations make us ACK less frequently than needed.
  415 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
 416 */
 417void tcp_initialize_rcv_mss(struct sock *sk)
 418{
 419	struct tcp_sock *tp = tcp_sk(sk);
 420	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
 421
 422	hint = min(hint, tp->rcv_wnd / 2);
 423	hint = min(hint, TCP_MSS_DEFAULT);
 424	hint = max(hint, TCP_MIN_MSS);
 425
 426	inet_csk(sk)->icsk_ack.rcv_mss = hint;
 427}
 428EXPORT_SYMBOL(tcp_initialize_rcv_mss);
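/* Worked example (illustrative, made-up values): with advmss = 1460,
 * mss_cache = 1460 and rcv_wnd = 4380, the initial guess is
 * min(1460, 1460, 4380 / 2) = 1460, which is then capped at
 * TCP_MSS_DEFAULT (536 in this tree) and kept above TCP_MIN_MSS (88),
 * giving rcv_mss = 536: a conservative start that tcp_measure_rcv_mss()
 * later corrects upward.
 */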
 429
 430/* Receiver "autotuning" code.
 431 *
 432 * The algorithm for RTT estimation w/o timestamps is based on
 433 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
 434 * <http://public.lanl.gov/radiant/pubs.html#DRS>
 435 *
 436 * More detail on this code can be found at
 437 * <http://staff.psc.edu/jheffner/>,
 438 * though this reference is out of date.  A new paper
 439 * is pending.
 440 */
 441static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
 442{
 443	u32 new_sample = tp->rcv_rtt_est.rtt;
 444	long m = sample;
 445
 446	if (m == 0)
 447		m = 1;
 448
 449	if (new_sample != 0) {
  450		/* If we were to take larger samples in the non-timestamp
  451		 * case, we could grossly overestimate the RTT especially
  452		 * with chatty applications or bulk transfer apps which
  453		 * are stalled on filesystem I/O.
  454		 *
  455		 * Also, since we are only going for a minimum in the
  456		 * non-timestamp case, we do not smooth things out,
  457		 * or else, with timestamps disabled, convergence takes
  458		 * too long.
 459		 */
 460		if (!win_dep) {
 461			m -= (new_sample >> 3);
 462			new_sample += m;
 463		} else if (m < new_sample)
 464			new_sample = m << 3;
 465	} else {
 466		/* No previous measure. */
 467		new_sample = m << 3;
 468	}
 469
 470	if (tp->rcv_rtt_est.rtt != new_sample)
 471		tp->rcv_rtt_est.rtt = new_sample;
 472}
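/* Illustrative sketch (not kernel code): the receiver-side RTT estimate above
 * is kept left-shifted by 3 (1/8 granularity).  Without win_dep it is an
 * ordinary EWMA, new = old + (m - old)/8; with win_dep only a minimum is
 * tracked.  The helper below restates that intent in a simplified, standalone
 * form; it is not a line-for-line copy of the kernel code.
 */
static unsigned long rcv_rtt_sample(unsigned long rtt_x8, long m, int win_dep)
{
	if (m <= 0)
		m = 1;
	if (rtt_x8 == 0)
		return (unsigned long)m << 3;		/* first sample */
	if (!win_dep) {
		m -= (long)(rtt_x8 >> 3);		/* error term */
		return rtt_x8 + m;			/* EWMA, gain 1/8 */
	}
	if (((unsigned long)m << 3) < rtt_x8)		/* minimum filter */
		rtt_x8 = (unsigned long)m << 3;
	return rtt_x8;
}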
 473
 474static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
 475{
 476	if (tp->rcv_rtt_est.time == 0)
 477		goto new_measure;
 478	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
 479		return;
 480	tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1);
 481
 482new_measure:
 483	tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
 484	tp->rcv_rtt_est.time = tcp_time_stamp;
 485}
 486
 487static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
 488					  const struct sk_buff *skb)
 489{
 490	struct tcp_sock *tp = tcp_sk(sk);
 491	if (tp->rx_opt.rcv_tsecr &&
 492	    (TCP_SKB_CB(skb)->end_seq -
 493	     TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
 494		tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0);
 495}
 496
 497/*
 498 * This function should be called every time data is copied to user space.
 499 * It calculates the appropriate TCP receive buffer space.
 500 */
 501void tcp_rcv_space_adjust(struct sock *sk)
 502{
 503	struct tcp_sock *tp = tcp_sk(sk);
 504	int time;
 505	int space;
 506
 507	if (tp->rcvq_space.time == 0)
 508		goto new_measure;
 509
 510	time = tcp_time_stamp - tp->rcvq_space.time;
 511	if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
 512		return;
 513
 514	space = 2 * (tp->copied_seq - tp->rcvq_space.seq);
 515
 516	space = max(tp->rcvq_space.space, space);
 517
 518	if (tp->rcvq_space.space != space) {
 519		int rcvmem;
 520
 521		tp->rcvq_space.space = space;
 522
 523		if (sysctl_tcp_moderate_rcvbuf &&
 524		    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
 525			int new_clamp = space;
 526
 527			/* Receive space grows, normalize in order to
 528			 * take into account packet headers and sk_buff
 529			 * structure overhead.
 530			 */
 531			space /= tp->advmss;
 532			if (!space)
 533				space = 1;
 534			rcvmem = (tp->advmss + MAX_TCP_HEADER +
 535				  16 + sizeof(struct sk_buff));
 536			while (tcp_win_from_space(rcvmem) < tp->advmss)
 537				rcvmem += 128;
 538			space *= rcvmem;
 539			space = min(space, sysctl_tcp_rmem[2]);
 540			if (space > sk->sk_rcvbuf) {
 541				sk->sk_rcvbuf = space;
 542
 543				/* Make the window clamp follow along.  */
 544				tp->window_clamp = new_clamp;
 545			}
 546		}
 547	}
 548
 549new_measure:
 550	tp->rcvq_space.seq = tp->copied_seq;
 551	tp->rcvq_space.time = tcp_time_stamp;
 552}
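/* Illustrative sketch (not kernel code): Dynamic Right-Sizing above grows the
 * receive buffer from what the application consumed in roughly one RTT.
 * Twice that amount, converted from payload bytes to whole-skb memory cost,
 * becomes the new buffer target, capped by the tcp_rmem[] maximum.  Names and
 * the per-skb overhead parameter are simplified assumptions.
 */
static int drs_rcvbuf_target(int copied_per_rtt, int advmss,
			     int per_skb_overhead, int rmem_max)
{
	int space = 2 * copied_per_rtt;
	int segs = space / advmss;
	int target;

	if (segs == 0)
		segs = 1;
	target = segs * (advmss + per_skb_overhead);
	return target < rmem_max ? target : rmem_max;
}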
 553
 554/* There is something which you must keep in mind when you analyze the
 555 * behavior of the tp->ato delayed ack timeout interval.  When a
 556 * connection starts up, we want to ack as quickly as possible.  The
  557 * problem is that "good" TCPs do slow start at the beginning of data
  558 * transmission.  This means that until we send the first few ACKs the
  559 * sender will sit on his end and only queue most of his data, because
 560 * he can only send snd_cwnd unacked packets at any given time.  For
 561 * each ACK we send, he increments snd_cwnd and transmits more of his
 562 * queue.  -DaveM
 563 */
 564static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 565{
 566	struct tcp_sock *tp = tcp_sk(sk);
 567	struct inet_connection_sock *icsk = inet_csk(sk);
 568	u32 now;
 569
 570	inet_csk_schedule_ack(sk);
 571
 572	tcp_measure_rcv_mss(sk, skb);
 573
 574	tcp_rcv_rtt_measure(tp);
 575
 576	now = tcp_time_stamp;
 577
 578	if (!icsk->icsk_ack.ato) {
 579		/* The _first_ data packet received, initialize
 580		 * delayed ACK engine.
 581		 */
 582		tcp_incr_quickack(sk);
 583		icsk->icsk_ack.ato = TCP_ATO_MIN;
 584	} else {
 585		int m = now - icsk->icsk_ack.lrcvtime;
 586
 587		if (m <= TCP_ATO_MIN / 2) {
 588			/* The fastest case is the first. */
 589			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
 590		} else if (m < icsk->icsk_ack.ato) {
 591			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
 592			if (icsk->icsk_ack.ato > icsk->icsk_rto)
 593				icsk->icsk_ack.ato = icsk->icsk_rto;
 594		} else if (m > icsk->icsk_rto) {
 595			/* Too long gap. Apparently sender failed to
 596			 * restart window, so that we send ACKs quickly.
 597			 */
 598			tcp_incr_quickack(sk);
 599			sk_mem_reclaim(sk);
 600		}
 601	}
 602	icsk->icsk_ack.lrcvtime = now;
 603
 604	TCP_ECN_check_ce(tp, skb);
 605
 606	if (skb->len >= 128)
 607		tcp_grow_window(sk, skb);
 608}
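/* Illustrative sketch (not kernel code): the delayed-ACK timeout adaptation
 * performed above.  Back-to-back data quickly pulls the ATO down toward its
 * minimum; larger gaps drag it up toward the observed inter-arrival time, but
 * never beyond the RTO.  (The "gap > rto" case, which re-enters quick-ACK
 * mode instead, is omitted here.)
 */
static unsigned int ato_update(unsigned int ato, unsigned int gap,
			       unsigned int ato_min, unsigned int rto)
{
	if (gap <= ato_min / 2)
		return (ato >> 1) + ato_min / 2;	/* fast arrivals */
	if (gap < ato) {
		ato = (ato >> 1) + gap;			/* track the gap */
		if (ato > rto)
			ato = rto;
	}
	return ato;
}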
 609
 610/* Called to compute a smoothed rtt estimate. The data fed to this
 611 * routine either comes from timestamps, or from segments that were
 612 * known _not_ to have been retransmitted [see Karn/Partridge
 613 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
 614 * piece by Van Jacobson.
 615 * NOTE: the next three routines used to be one big routine.
 616 * To save cycles in the RFC 1323 implementation it was better to break
 617 * it up into three procedures. -- erics
 618 */
 619static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
 620{
 621	struct tcp_sock *tp = tcp_sk(sk);
 622	long m = mrtt; /* RTT */
 623
 624	/*	The following amusing code comes from Jacobson's
 625	 *	article in SIGCOMM '88.  Note that rtt and mdev
 626	 *	are scaled versions of rtt and mean deviation.
 627	 *	This is designed to be as fast as possible
 628	 *	m stands for "measurement".
 629	 *
  630	 *	In a 1990 paper the rto value is changed to:
  631	 *	RTO = rtt + 4 * mdev
  632	 *
  633	 * Funny. This algorithm seems to be very broken.
  634	 * These formulae increase RTO when it should be decreased, increase it
  635	 * too slowly when it should be increased quickly, decrease it too quickly,
  636	 * etc. I guess in BSD RTO effectively takes ONE value, so it absolutely
  637	 * does not matter how to _calculate_ it. Seems it was a trap
  638	 * that VJ failed to avoid. 8)
 639	 */
 640	if (m == 0)
 641		m = 1;
 642	if (tp->srtt != 0) {
 643		m -= (tp->srtt >> 3);	/* m is now error in rtt est */
 644		tp->srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
 645		if (m < 0) {
 646			m = -m;		/* m is now abs(error) */
 647			m -= (tp->mdev >> 2);   /* similar update on mdev */
 648			/* This is similar to one of Eifel findings.
 649			 * Eifel blocks mdev updates when rtt decreases.
 650			 * This solution is a bit different: we use finer gain
 651			 * for mdev in this case (alpha*beta).
 652			 * Like Eifel it also prevents growth of rto,
 653			 * but also it limits too fast rto decreases,
 654			 * happening in pure Eifel.
 655			 */
 656			if (m > 0)
 657				m >>= 3;
 658		} else {
 659			m -= (tp->mdev >> 2);   /* similar update on mdev */
 660		}
 661		tp->mdev += m;	    	/* mdev = 3/4 mdev + 1/4 new */
 662		if (tp->mdev > tp->mdev_max) {
 663			tp->mdev_max = tp->mdev;
 664			if (tp->mdev_max > tp->rttvar)
 665				tp->rttvar = tp->mdev_max;
 666		}
 667		if (after(tp->snd_una, tp->rtt_seq)) {
 668			if (tp->mdev_max < tp->rttvar)
 669				tp->rttvar -= (tp->rttvar - tp->mdev_max) >> 2;
 670			tp->rtt_seq = tp->snd_nxt;
 671			tp->mdev_max = tcp_rto_min(sk);
 672		}
 673	} else {
 674		/* no previous measure. */
 675		tp->srtt = m << 3;	/* take the measured time to be rtt */
 676		tp->mdev = m << 1;	/* make sure rto = 3*rtt */
 677		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
 678		tp->rtt_seq = tp->snd_nxt;
 679	}
 680}
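/* Illustrative sketch (not kernel code): the Jacobson/Karels bookkeeping
 * above, restated standalone.  srtt is kept scaled by 8 and mdev by 4, so
 * rto = srtt/8 + rttvar reproduces the classic SRTT + 4*MDEV.  The
 * Eifel-style damping of mdev on decreasing samples and the mdev_max/rttvar
 * windowing are omitted for brevity.
 */
struct rtt_est {
	long srtt;	/* smoothed RTT, scaled by 8 */
	long mdev;	/* mean deviation, scaled by 4 */
};

static void rtt_sample(struct rtt_est *e, long m)
{
	if (m <= 0)
		m = 1;
	if (e->srtt) {
		long err = m - (e->srtt >> 3);

		e->srtt += err;			/* srtt = 7/8 srtt + 1/8 m */
		if (err < 0)
			err = -err;
		err -= e->mdev >> 2;
		e->mdev += err;			/* mdev = 3/4 mdev + 1/4 |err| */
	} else {
		e->srtt = m << 3;		/* first measurement */
		e->mdev = m << 1;		/* so that rto = 3 * rtt */
	}
}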
 681
 682/* Calculate rto without backoff.  This is the second half of Van Jacobson's
 683 * routine referred to above.
 684 */
 685static inline void tcp_set_rto(struct sock *sk)
 686{
 687	const struct tcp_sock *tp = tcp_sk(sk);
 688	/* Old crap is replaced with new one. 8)
 689	 *
 690	 * More seriously:
  691	 * 1. If rtt variance happened to be less than 50msec, it is a hallucination.
  692	 *    It cannot be less due to utterly erratic ACK generation made
  693	 *    at least by solaris and freebsd. "Erratic ACKs" have _nothing_
 694	 *    to do with delayed acks, because at cwnd>2 true delack timeout
 695	 *    is invisible. Actually, Linux-2.4 also generates erratic
 696	 *    ACKs in some circumstances.
 697	 */
 698	inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);
 699
 700	/* 2. Fixups made earlier cannot be right.
 701	 *    If we do not estimate RTO correctly without them,
 702	 *    all the algo is pure shit and should be replaced
  703	 *    with a correct one. That is exactly what we pretend to do.
 704	 */
 705
 706	/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
 707	 * guarantees that rto is higher.
 708	 */
 709	tcp_bound_rto(sk);
 710}
 711
 712/* Save metrics learned by this TCP session.
  713   This function is called only when TCP finishes successfully,
 714   i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
 715 */
 716void tcp_update_metrics(struct sock *sk)
 717{
 718	struct tcp_sock *tp = tcp_sk(sk);
 719	struct dst_entry *dst = __sk_dst_get(sk);
 720
 721	if (sysctl_tcp_nometrics_save)
 722		return;
 723
 724	dst_confirm(dst);
 725
 726	if (dst && (dst->flags & DST_HOST)) {
 727		const struct inet_connection_sock *icsk = inet_csk(sk);
 728		int m;
 729		unsigned long rtt;
 730
 731		if (icsk->icsk_backoff || !tp->srtt) {
 732			/* This session failed to estimate rtt. Why?
 733			 * Probably, no packets returned in time.
 734			 * Reset our results.
 735			 */
 736			if (!(dst_metric_locked(dst, RTAX_RTT)))
 737				dst_metric_set(dst, RTAX_RTT, 0);
 738			return;
 739		}
 740
 741		rtt = dst_metric_rtt(dst, RTAX_RTT);
 742		m = rtt - tp->srtt;
 743
  744		/* If the newly calculated rtt is larger than the stored one,
  745		 * store the new one. Otherwise, use EWMA. Remember,
 746		 * rtt overestimation is always better than underestimation.
 747		 */
 748		if (!(dst_metric_locked(dst, RTAX_RTT))) {
 749			if (m <= 0)
 750				set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt);
 751			else
 752				set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3));
 753		}
 754
 755		if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
 756			unsigned long var;
 757			if (m < 0)
 758				m = -m;
 759
 760			/* Scale deviation to rttvar fixed point */
 761			m >>= 1;
 762			if (m < tp->mdev)
 763				m = tp->mdev;
 764
 765			var = dst_metric_rtt(dst, RTAX_RTTVAR);
 766			if (m >= var)
 767				var = m;
 768			else
 769				var -= (var - m) >> 2;
 770
 771			set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
 772		}
 773
 774		if (tcp_in_initial_slowstart(tp)) {
 775			/* Slow start still did not finish. */
 776			if (dst_metric(dst, RTAX_SSTHRESH) &&
 777			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
 778			    (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
 779				dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1);
 780			if (!dst_metric_locked(dst, RTAX_CWND) &&
 781			    tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
 782				dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd);
 783		} else if (tp->snd_cwnd > tp->snd_ssthresh &&
 784			   icsk->icsk_ca_state == TCP_CA_Open) {
 785			/* Cong. avoidance phase, cwnd is reliable. */
 786			if (!dst_metric_locked(dst, RTAX_SSTHRESH))
 787				dst_metric_set(dst, RTAX_SSTHRESH,
 788					       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
 789			if (!dst_metric_locked(dst, RTAX_CWND))
 790				dst_metric_set(dst, RTAX_CWND,
 791					       (dst_metric(dst, RTAX_CWND) +
 792						tp->snd_cwnd) >> 1);
 793		} else {
  794			/* Else slow start did not finish, cwnd is nonsense,
  795			   and ssthresh may also be invalid.
 796			 */
 797			if (!dst_metric_locked(dst, RTAX_CWND))
 798				dst_metric_set(dst, RTAX_CWND,
 799					       (dst_metric(dst, RTAX_CWND) +
 800						tp->snd_ssthresh) >> 1);
 801			if (dst_metric(dst, RTAX_SSTHRESH) &&
 802			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
 803			    tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
 804				dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh);
 805		}
 806
 807		if (!dst_metric_locked(dst, RTAX_REORDERING)) {
 808			if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
 809			    tp->reordering != sysctl_tcp_reordering)
 810				dst_metric_set(dst, RTAX_REORDERING, tp->reordering);
 811		}
 812	}
 813}
 814
 815__u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst)
 816{
 817	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
 818
 819	if (!cwnd)
 820		cwnd = TCP_INIT_CWND;
 821	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
 822}
 823
 824/* Set slow start threshold and cwnd not falling to slow start */
 825void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
 826{
 827	struct tcp_sock *tp = tcp_sk(sk);
 828	const struct inet_connection_sock *icsk = inet_csk(sk);
 829
 830	tp->prior_ssthresh = 0;
 831	tp->bytes_acked = 0;
 832	if (icsk->icsk_ca_state < TCP_CA_CWR) {
 833		tp->undo_marker = 0;
 834		if (set_ssthresh)
 835			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
 836		tp->snd_cwnd = min(tp->snd_cwnd,
 837				   tcp_packets_in_flight(tp) + 1U);
 838		tp->snd_cwnd_cnt = 0;
 839		tp->high_seq = tp->snd_nxt;
 840		tp->snd_cwnd_stamp = tcp_time_stamp;
 841		TCP_ECN_queue_cwr(tp);
 842
 843		tcp_set_ca_state(sk, TCP_CA_CWR);
 844	}
 845}
 846
 847/*
 848 * Packet counting of FACK is based on in-order assumptions, therefore TCP
 849 * disables it when reordering is detected
 850 */
 851static void tcp_disable_fack(struct tcp_sock *tp)
 852{
 853	/* RFC3517 uses different metric in lost marker => reset on change */
 854	if (tcp_is_fack(tp))
 855		tp->lost_skb_hint = NULL;
 856	tp->rx_opt.sack_ok &= ~2;
 857}
 858
  859/* Take notice that the peer is sending D-SACKs */
 860static void tcp_dsack_seen(struct tcp_sock *tp)
 861{
 862	tp->rx_opt.sack_ok |= 4;
 863}
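/* Illustrative note (inferred from the masks used above and in
 * tcp_disable_fack(); treat the layout as an assumption): rx_opt.sack_ok is a
 * small bit field.
 *
 *   bit 0 (1) - SACK negotiated on the connection
 *   bit 1 (2) - FACK enabled on top of SACK (cleared when reordering is seen)
 *   bit 2 (4) - the peer has been seen sending D-SACKs
 */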
 864
 865/* Initialize metrics on socket. */
 866
 867static void tcp_init_metrics(struct sock *sk)
 868{
 869	struct tcp_sock *tp = tcp_sk(sk);
 870	struct dst_entry *dst = __sk_dst_get(sk);
 871
 872	if (dst == NULL)
 873		goto reset;
 874
 875	dst_confirm(dst);
 876
 877	if (dst_metric_locked(dst, RTAX_CWND))
 878		tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
 879	if (dst_metric(dst, RTAX_SSTHRESH)) {
 880		tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
 881		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
 882			tp->snd_ssthresh = tp->snd_cwnd_clamp;
 883	} else {
  884		/* ssthresh may have been reduced unnecessarily during
 885		 * 3WHS. Restore it back to its initial default.
 886		 */
 887		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 888	}
 889	if (dst_metric(dst, RTAX_REORDERING) &&
 890	    tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
 891		tcp_disable_fack(tp);
 892		tp->reordering = dst_metric(dst, RTAX_REORDERING);
 893	}
 894
 895	if (dst_metric(dst, RTAX_RTT) == 0 || tp->srtt == 0)
 896		goto reset;
 897
 898	/* Initial rtt is determined from SYN,SYN-ACK.
 899	 * The segment is small and rtt may appear much
 900	 * less than real one. Use per-dst memory
 901	 * to make it more realistic.
 902	 *
  903 * A bit of theory. RTT is the time that passes after a "normal" sized packet
  904 * is sent until it is ACKed. In normal circumstances sending small
  905 * packets forces the peer to delay ACKs, so the calculation is correct there too.
  906 * The algorithm is adaptive and, provided we follow the specs, it
  907 * NEVER underestimates RTT. BUT! If the peer tries some clever trick
  908 * such as "quick acks" for long enough to decrease the RTT
  909 * to a low value, and then abruptly stops doing it and starts to delay
  910 * ACKs, expect trouble.
 911	 */
 912	if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) {
 913		tp->srtt = dst_metric_rtt(dst, RTAX_RTT);
 914		tp->rtt_seq = tp->snd_nxt;
 915	}
 916	if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) {
 917		tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR);
 918		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
 919	}
 920	tcp_set_rto(sk);
 921reset:
 922	if (tp->srtt == 0) {
 923		/* RFC2988bis: We've failed to get a valid RTT sample from
 924		 * 3WHS. This is most likely due to retransmission,
  925		 * including spurious ones. Reset the RTO back to 3secs
  926		 * from the more aggressive 1sec to avoid more spurious
  927		 * retransmissions.
 928		 */
 929		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
 930		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
 931	}
 932	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
 933	 * retransmitted. In light of RFC2988bis' more aggressive 1sec
 934	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
 935	 * retransmission has occurred.
 936	 */
 937	if (tp->total_retrans > 1)
 938		tp->snd_cwnd = 1;
 939	else
 940		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
 941	tp->snd_cwnd_stamp = tcp_time_stamp;
 942}
 943
 944static void tcp_update_reordering(struct sock *sk, const int metric,
 945				  const int ts)
 946{
 947	struct tcp_sock *tp = tcp_sk(sk);
 948	if (metric > tp->reordering) {
 949		int mib_idx;
 950
 951		tp->reordering = min(TCP_MAX_REORDERING, metric);
 952
  953		/* This exciting event is worth remembering. 8) */
 954		if (ts)
 955			mib_idx = LINUX_MIB_TCPTSREORDER;
 956		else if (tcp_is_reno(tp))
 957			mib_idx = LINUX_MIB_TCPRENOREORDER;
 958		else if (tcp_is_fack(tp))
 959			mib_idx = LINUX_MIB_TCPFACKREORDER;
 960		else
 961			mib_idx = LINUX_MIB_TCPSACKREORDER;
 962
 963		NET_INC_STATS_BH(sock_net(sk), mib_idx);
 964#if FASTRETRANS_DEBUG > 1
 965		printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
 966		       tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
 967		       tp->reordering,
 968		       tp->fackets_out,
 969		       tp->sacked_out,
 970		       tp->undo_marker ? tp->undo_retrans : 0);
 971#endif
 972		tcp_disable_fack(tp);
 973	}
 974}
 975
 976/* This must be called before lost_out is incremented */
 977static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
 978{
 979	if ((tp->retransmit_skb_hint == NULL) ||
 980	    before(TCP_SKB_CB(skb)->seq,
 981		   TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
 982		tp->retransmit_skb_hint = skb;
 983
 984	if (!tp->lost_out ||
 985	    after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high))
 986		tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
 987}
 988
 989static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
 990{
 991	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
 992		tcp_verify_retransmit_hint(tp, skb);
 993
 994		tp->lost_out += tcp_skb_pcount(skb);
 995		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 996	}
 997}
 998
 999static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
1000					    struct sk_buff *skb)
1001{
1002	tcp_verify_retransmit_hint(tp, skb);
1003
1004	if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
1005		tp->lost_out += tcp_skb_pcount(skb);
1006		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1007	}
1008}
1009
1010/* This procedure tags the retransmission queue when SACKs arrive.
1011 *
1012 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
1013 * Packets in queue with these bits set are counted in variables
1014 * sacked_out, retrans_out and lost_out, correspondingly.
1015 *
1016 * Valid combinations are:
1017 * Tag  InFlight	Description
1018 * 0	1		- orig segment is in flight.
1019 * S	0		- nothing flies, orig reached receiver.
1020 * L	0		- nothing flies, orig lost by net.
1021 * R	2		- both orig and retransmit are in flight.
1022 * L|R	1		- orig is lost, retransmit is in flight.
1023 * S|R  1		- orig reached receiver, retrans is still in flight.
1024 * (L|S|R is logically valid, it could occur when L|R is sacked,
 1025 *  but it is equivalent to plain S and the code short-circuits it to S.
1026 *  L|S is logically invalid, it would mean -1 packet in flight 8))
1027 *
 1028 * These 6 states form a finite state machine, controlled by the following events:
1029 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
1030 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
1031 * 3. Loss detection event of one of three flavors:
1032 *	A. Scoreboard estimator decided the packet is lost.
1033 *	   A'. Reno "three dupacks" marks head of queue lost.
 1034 *	   A''. Its FACK modification, head until snd.fack is lost.
1035 *	B. SACK arrives sacking data transmitted after never retransmitted
1036 *	   hole was sent out.
1037 *	C. SACK arrives sacking SND.NXT at the moment, when the
1038 *	   segment was retransmitted.
1039 * 4. D-SACK added new rule: D-SACK changes any tag to S.
1040 *
 1041 * It is pleasant to note that the state diagram turns out to be commutative,
 1042 * so that we need not be bothered by the order of our actions
 1043 * when multiple events arrive simultaneously. (see the function below).
1044 *
1045 * Reordering detection.
1046 * --------------------
 1047 * The reordering metric is the maximal distance by which a packet can be
 1048 * displaced in the packet stream. With SACKs we can estimate it:
1049 *
1050 * 1. SACK fills old hole and the corresponding segment was not
1051 *    ever retransmitted -> reordering. Alas, we cannot use it
1052 *    when segment was retransmitted.
1053 * 2. The last flaw is solved with D-SACK. D-SACK arrives
1054 *    for retransmitted and already SACKed segment -> reordering..
1055 * Both of these heuristics are not used in Loss state, when we cannot
1056 * account for retransmits accurately.
1057 *
1058 * SACK block validation.
1059 * ----------------------
1060 *
1061 * SACK block range validation checks that the received SACK block fits to
1062 * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT.
 1063 * Note that SND.UNA is not included in the range even though it is valid,
 1064 * because it means that the receiver is being inconsistent with itself,
 1065 * reporting SACK reneging when it should advance SND.UNA. Such a SACK block
 1066 * is, however, perfectly valid in light of RFC2018, which explicitly states
 1067 * that "SACK block MUST reflect the newest segment.  Even if the newest
 1068 * segment is going to be discarded ...", not that it looks very clever
 1069 * in case of the head skb. Due to potential receiver-driven attacks, we
 1070 * choose to avoid immediately walking the write queue due to
 1071 * reneging and defer the head skb's loss recovery to the standard loss recovery
 1072 * procedure that will eventually trigger (nothing forbids us doing this).
1073 *
 1074 * This also implements blockage against start_seq wrap-around. The problem lies in the
1075 * fact that though start_seq (s) is before end_seq (i.e., not reversed),
1076 * there's no guarantee that it will be before snd_nxt (n). The problem
1077 * happens when start_seq resides between end_seq wrap (e_w) and snd_nxt
1078 * wrap (s_w):
1079 *
1080 *         <- outs wnd ->                          <- wrapzone ->
1081 *         u     e      n                         u_w   e_w  s n_w
1082 *         |     |      |                          |     |   |  |
1083 * |<------------+------+----- TCP seqno space --------------+---------->|
1084 * ...-- <2^31 ->|                                           |<--------...
1085 * ...---- >2^31 ------>|                                    |<--------...
1086 *
1087 * Current code wouldn't be vulnerable but it's better still to discard such
1088 * crazy SACK blocks. Doing this check for start_seq alone closes somewhat
1089 * similar case (end_seq after snd_nxt wrap) as earlier reversed check in
1090 * snd_nxt wrap -> snd_una region will then become "well defined", i.e.,
1091 * equal to the ideal case (infinite seqno space without wrap caused issues).
1092 *
1093 * With D-SACK the lower bound is extended to cover sequence space below
1094 * SND.UNA down to undo_marker, which is the last point of interest. Yet
 1095 * again, the D-SACK block must not go across snd_una (for the same reason as
 1096 * for the normal SACK blocks, explained above). But there all simplicity
 1097 * ends, TCP might receive valid D-SACKs below that. As long as they reside
 1098 * fully below undo_marker they do not affect behavior in any way and can
1099 * therefore be safely ignored. In rare cases (which are more or less
1100 * theoretical ones), the D-SACK will nicely cross that boundary due to skb
1101 * fragmentation and packet reordering past skb's retransmission. To consider
1102 * them correctly, the acceptable range must be extended even more though
1103 * the exact amount is rather hard to quantify. However, tp->max_window can
1104 * be used as an exaggerated estimate.
1105 */
1106static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
1107				  u32 start_seq, u32 end_seq)
1108{
1109	/* Too far in future, or reversed (interpretation is ambiguous) */
1110	if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
1111		return 0;
1112
1113	/* Nasty start_seq wrap-around check (see comments above) */
1114	if (!before(start_seq, tp->snd_nxt))
1115		return 0;
1116
1117	/* In outstanding window? ...This is valid exit for D-SACKs too.
1118	 * start_seq == snd_una is non-sensical (see comments above)
1119	 */
1120	if (after(start_seq, tp->snd_una))
1121		return 1;
1122
1123	if (!is_dsack || !tp->undo_marker)
1124		return 0;
1125
1126	/* ...Then it's D-SACK, and must reside below snd_una completely */
1127	if (after(end_seq, tp->snd_una))
1128		return 0;
1129
1130	if (!before(start_seq, tp->undo_marker))
1131		return 1;
1132
1133	/* Too old */
1134	if (!after(end_seq, tp->undo_marker))
1135		return 0;
1136
1137	/* Undo_marker boundary crossing (overestimates a lot). Known already:
1138	 *   start_seq < undo_marker and end_seq >= undo_marker.
1139	 */
1140	return !before(start_seq, end_seq - tp->max_window);
1141}
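/* Illustrative sketch (not kernel code): the before()/after() helpers used by
 * the validation above compare 32-bit sequence numbers modulo 2^32 via a
 * signed difference, which is what makes the wrap-around reasoning in the big
 * comment work.  Standalone userspace example with made-up values.
 */
#include <stdint.h>
#include <stdio.h>

static int seq_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	uint32_t snd_una = 0xfffffff0u;	/* just before the wrap */
	uint32_t end_seq = 0x00000010u;	/* just after the wrap */

	/* end_seq is "after" snd_una even though it is numerically smaller */
	printf("%d\n", !seq_before(end_seq, snd_una));
	return 0;
}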
1142
1143/* Check for lost retransmit. This superb idea is borrowed from "ratehalving".
1144 * Event "C". Later note: FACK people cheated me again 8), we have to account
1145 * for reordering! Ugly, but should help.
1146 *
1147 * Search retransmitted skbs from write_queue that were sent when snd_nxt was
1148 * less than what is now known to be received by the other end (derived from
1149 * highest SACK block). Also calculate the lowest snd_nxt among the remaining
1150 * retransmitted skbs to avoid some costly processing per ACKs.
1151 */
1152static void tcp_mark_lost_retrans(struct sock *sk)
1153{
1154	const struct inet_connection_sock *icsk = inet_csk(sk);
1155	struct tcp_sock *tp = tcp_sk(sk);
1156	struct sk_buff *skb;
1157	int cnt = 0;
1158	u32 new_low_seq = tp->snd_nxt;
1159	u32 received_upto = tcp_highest_sack_seq(tp);
1160
1161	if (!tcp_is_fack(tp) || !tp->retrans_out ||
1162	    !after(received_upto, tp->lost_retrans_low) ||
1163	    icsk->icsk_ca_state != TCP_CA_Recovery)
1164		return;
1165
1166	tcp_for_write_queue(skb, sk) {
1167		u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;
1168
1169		if (skb == tcp_send_head(sk))
1170			break;
1171		if (cnt == tp->retrans_out)
1172			break;
1173		if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
1174			continue;
1175
1176		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS))
1177			continue;
1178
1179		/* TODO: We would like to get rid of tcp_is_fack(tp) only
1180		 * constraint here (see above) but figuring out that at
1181		 * least tp->reordering SACK blocks reside between ack_seq
 1182		 * and received_upto is not an easy task to do cheaply with
 1183		 * the available data structures.
1184		 *
1185		 * Whether FACK should check here for tp->reordering segs
1186		 * in-between one could argue for either way (it would be
1187		 * rather simple to implement as we could count fack_count
1188		 * during the walk and do tp->fackets_out - fack_count).
1189		 */
1190		if (after(received_upto, ack_seq)) {
1191			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1192			tp->retrans_out -= tcp_skb_pcount(skb);
1193
1194			tcp_skb_mark_lost_uncond_verify(tp, skb);
1195			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
1196		} else {
1197			if (before(ack_seq, new_low_seq))
1198				new_low_seq = ack_seq;
1199			cnt += tcp_skb_pcount(skb);
1200		}
1201	}
1202
1203	if (tp->retrans_out)
1204		tp->lost_retrans_low = new_low_seq;
1205}
1206
1207static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
1208			   struct tcp_sack_block_wire *sp, int num_sacks,
1209			   u32 prior_snd_una)
1210{
1211	struct tcp_sock *tp = tcp_sk(sk);
1212	u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
1213	u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
1214	int dup_sack = 0;
1215
1216	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
1217		dup_sack = 1;
1218		tcp_dsack_seen(tp);
1219		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
1220	} else if (num_sacks > 1) {
1221		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
1222		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
1223
1224		if (!after(end_seq_0, end_seq_1) &&
1225		    !before(start_seq_0, start_seq_1)) {
1226			dup_sack = 1;
1227			tcp_dsack_seen(tp);
1228			NET_INC_STATS_BH(sock_net(sk),
1229					LINUX_MIB_TCPDSACKOFORECV);
1230		}
1231	}
1232
1233	/* D-SACK for already forgotten data... Do dumb counting. */
1234	if (dup_sack && tp->undo_marker && tp->undo_retrans &&
1235	    !after(end_seq_0, prior_snd_una) &&
1236	    after(end_seq_0, tp->undo_marker))
1237		tp->undo_retrans--;
1238
1239	return dup_sack;
1240}
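/* Worked example (made-up sequence numbers): the two D-SACK patterns, per
 * RFC 2883, that the function above recognizes.
 *
 *   ack_seq = 1000, block0 = [500, 700)
 *       first block below the cumulative ACK      -> D-SACK
 *
 *   ack_seq = 1000, block0 = [1200, 1300), block1 = [1100, 1400)
 *       first block contained in the second block -> D-SACK
 */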
1241
1242struct tcp_sacktag_state {
1243	int reord;
1244	int fack_count;
1245	int flag;
1246};
1247
 1248/* Check if the skb is fully within the SACK block. In the presence of GSO skbs,
 1249 * the incoming SACK may not match exactly, but we can find a smaller
 1250 * MSS-aligned portion of it that matches. Therefore we might need to fragment,
 1251 * which may fail and create some hassle (the caller must handle error
 1252 * returns).
1253 *
1254 * FIXME: this could be merged to shift decision code
1255 */
1256static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1257				 u32 start_seq, u32 end_seq)
1258{
1259	int in_sack, err;
1260	unsigned int pkt_len;
1261	unsigned int mss;
1262
1263	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
1264		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);
1265
1266	if (tcp_skb_pcount(skb) > 1 && !in_sack &&
1267	    after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
1268		mss = tcp_skb_mss(skb);
1269		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
1270
1271		if (!in_sack) {
1272			pkt_len = start_seq - TCP_SKB_CB(skb)->seq;
1273			if (pkt_len < mss)
1274				pkt_len = mss;
1275		} else {
1276			pkt_len = end_seq - TCP_SKB_CB(skb)->seq;
1277			if (pkt_len < mss)
1278				return -EINVAL;
1279		}
1280
1281		/* Round if necessary so that SACKs cover only full MSSes
1282		 * and/or the remaining small portion (if present)
1283		 */
1284		if (pkt_len > mss) {
1285			unsigned int new_len = (pkt_len / mss) * mss;
1286			if (!in_sack && new_len < pkt_len) {
1287				new_len += mss;
1288				if (new_len > skb->len)
1289					return 0;
1290			}
1291			pkt_len = new_len;
1292		}
1293		err = tcp_fragment(sk, skb, pkt_len, mss);
1294		if (err < 0)
1295			return err;
1296	}
1297
1298	return in_sack;
1299}
1300
1301static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
1302			  struct tcp_sacktag_state *state,
1303			  int dup_sack, int pcount)
1304{
1305	struct tcp_sock *tp = tcp_sk(sk);
1306	u8 sacked = TCP_SKB_CB(skb)->sacked;
1307	int fack_count = state->fack_count;
1308
1309	/* Account D-SACK for retransmitted packet. */
1310	if (dup_sack && (sacked & TCPCB_RETRANS)) {
1311		if (tp->undo_marker && tp->undo_retrans &&
1312		    after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
1313			tp->undo_retrans--;
1314		if (sacked & TCPCB_SACKED_ACKED)
1315			state->reord = min(fack_count, state->reord);
1316	}
1317
1318	/* Nothing to do; acked frame is about to be dropped (was ACKed). */
1319	if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
1320		return sacked;
1321
1322	if (!(sacked & TCPCB_SACKED_ACKED)) {
1323		if (sacked & TCPCB_SACKED_RETRANS) {
1324			/* If the segment is not tagged as lost,
1325			 * we do not clear RETRANS, believing
1326			 * that retransmission is still in flight.
1327			 */
1328			if (sacked & TCPCB_LOST) {
1329				sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
1330				tp->lost_out -= pcount;
1331				tp->retrans_out -= pcount;
1332			}
1333		} else {
1334			if (!(sacked & TCPCB_RETRANS)) {
1335				/* New sack for not retransmitted frame,
1336				 * which was in hole. It is reordering.
1337				 */
1338				if (before(TCP_SKB_CB(skb)->seq,
1339					   tcp_highest_sack_seq(tp)))
1340					state->reord = min(fack_count,
1341							   state->reord);
1342
1343				/* SACK enhanced F-RTO (RFC4138; Appendix B) */
1344				if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
1345					state->flag |= FLAG_ONLY_ORIG_SACKED;
1346			}
1347
1348			if (sacked & TCPCB_LOST) {
1349				sacked &= ~TCPCB_LOST;
1350				tp->lost_out -= pcount;
1351			}
1352		}
1353
1354		sacked |= TCPCB_SACKED_ACKED;
1355		state->flag |= FLAG_DATA_SACKED;
1356		tp->sacked_out += pcount;
1357
1358		fack_count += pcount;
1359
1360		/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
1361		if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
1362		    before(TCP_SKB_CB(skb)->seq,
1363			   TCP_SKB_CB(tp->lost_skb_hint)->seq))
1364			tp->lost_cnt_hint += pcount;
1365
1366		if (fack_count > tp->fackets_out)
1367			tp->fackets_out = fack_count;
1368	}
1369
1370	/* D-SACK. We can detect redundant retransmission in S|R and plain R
1371	 * frames and clear it. undo_retrans is decreased above, L|R frames
1372	 * are accounted above as well.
1373	 */
1374	if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) {
1375		sacked &= ~TCPCB_SACKED_RETRANS;
1376		tp->retrans_out -= pcount;
1377	}
1378
1379	return sacked;
1380}
1381
1382static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
1383			   struct tcp_sacktag_state *state,
1384			   unsigned int pcount, int shifted, int mss,
1385			   int dup_sack)
1386{
1387	struct tcp_sock *tp = tcp_sk(sk);
1388	struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
1389
1390	BUG_ON(!pcount);
1391
1392	if (skb == tp->lost_skb_hint)
1393		tp->lost_cnt_hint += pcount;
1394
1395	TCP_SKB_CB(prev)->end_seq += shifted;
1396	TCP_SKB_CB(skb)->seq += shifted;
1397
1398	skb_shinfo(prev)->gso_segs += pcount;
1399	BUG_ON(skb_shinfo(skb)->gso_segs < pcount);
1400	skb_shinfo(skb)->gso_segs -= pcount;
1401
 1402	/* When we're adding to gso_segs == 1, gso_size will be zero;
 1403	 * in theory this shouldn't be necessary, but as long as DSACK
 1404	 * code can come after this skb later on, it's better to keep
 1405	 * setting gso_size to something.
1406	 */
1407	if (!skb_shinfo(prev)->gso_size) {
1408		skb_shinfo(prev)->gso_size = mss;
1409		skb_shinfo(prev)->gso_type = sk->sk_gso_type;
1410	}
1411
1412	/* CHECKME: To clear or not to clear? Mimics normal skb currently */
1413	if (skb_shinfo(skb)->gso_segs <= 1) {
1414		skb_shinfo(skb)->gso_size = 0;
1415		skb_shinfo(skb)->gso_type = 0;
1416	}
1417
1418	/* We discard results */
1419	tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
1420
1421	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
1422	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
1423
1424	if (skb->len > 0) {
1425		BUG_ON(!tcp_skb_pcount(skb));
1426		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED);
1427		return 0;
1428	}
1429
1430	/* Whole SKB was eaten :-) */
1431
1432	if (skb == tp->retransmit_skb_hint)
1433		tp->retransmit_skb_hint = prev;
1434	if (skb == tp->scoreboard_skb_hint)
1435		tp->scoreboard_skb_hint = prev;
1436	if (skb == tp->lost_skb_hint) {
1437		tp->lost_skb_hint = prev;
1438		tp->lost_cnt_hint -= tcp_skb_pcount(prev);
1439	}
1440
1441	TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(prev)->flags;
1442	if (skb == tcp_highest_sack(sk))
1443		tcp_advance_highest_sack(sk, skb);
1444
1445	tcp_unlink_write_queue(skb, sk);
1446	sk_wmem_free_skb(sk, skb);
1447
1448	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED);
1449
1450	return 1;
1451}
1452
1453/* I wish gso_size would have a bit more sane initialization than
 1454 * something-or-zero, which complicates things
1455 */
1456static int tcp_skb_seglen(struct sk_buff *skb)
1457{
1458	return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
1459}
1460
1461/* Shifting pages past head area doesn't work */
1462static int skb_can_shift(struct sk_buff *skb)
1463{
1464	return !skb_headlen(skb) && skb_is_nonlinear(skb);
1465}
1466
1467/* Try collapsing SACK blocks spanning across multiple skbs to a single
1468 * skb.
1469 */
1470static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
1471					  struct tcp_sacktag_state *state,
1472					  u32 start_seq, u32 end_seq,
1473					  int dup_sack)
1474{
1475	struct tcp_sock *tp = tcp_sk(sk);
1476	struct sk_buff *prev;
1477	int mss;
1478	int pcount = 0;
1479	int len;
1480	int in_sack;
1481
1482	if (!sk_can_gso(sk))
1483		goto fallback;
1484
1485	/* Normally R but no L won't result in plain S */
1486	if (!dup_sack &&
1487	    (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS)
1488		goto fallback;
1489	if (!skb_can_shift(skb))
1490		goto fallback;
1491	/* This frame is about to be dropped (was ACKed). */
1492	if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
1493		goto fallback;
1494
1495	/* Can only happen with delayed DSACK + discard craziness */
1496	if (unlikely(skb == tcp_write_queue_head(sk)))
1497		goto fallback;
1498	prev = tcp_write_queue_prev(sk, skb);
1499
1500	if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED)
1501		goto fallback;
1502
1503	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
1504		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);
1505
1506	if (in_sack) {
1507		len = skb->len;
1508		pcount = tcp_skb_pcount(skb);
1509		mss = tcp_skb_seglen(skb);
1510
1511		/* TODO: Fix DSACKs to not fragment already SACKed and we can
1512		 * drop this restriction as unnecessary
1513		 */
1514		if (mss != tcp_skb_seglen(prev))
1515			goto fallback;
1516	} else {
1517		if (!after(TCP_SKB_CB(skb)->end_seq, start_seq))
1518			goto noop;
1519		/* CHECKME: This is non-MSS split case only?, this will
1520		 * cause skipped skbs due to advancing loop btw, original
1521		 * has that feature too
1522		 */
1523		if (tcp_skb_pcount(skb) <= 1)
1524			goto noop;
1525
1526		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
1527		if (!in_sack) {
1528			/* TODO: head merge to next could be attempted here
1529			 * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)),
1530			 * though it might not be worth of the additional hassle
1531			 *
1532			 * ...we can probably just fallback to what was done
1533			 * previously. We could try merging non-SACKed ones
1534			 * as well but it probably isn't going to buy off
1535			 * because later SACKs might again split them, and
1536			 * it would make skb timestamp tracking considerably
1537			 * harder problem.
1538			 */
1539			goto fallback;
1540		}
1541
1542		len = end_seq - TCP_SKB_CB(skb)->seq;
1543		BUG_ON(len < 0);
1544		BUG_ON(len > skb->len);
1545
1546		/* MSS boundaries should be honoured or else pcount will
 1547		 * severely break even though it makes things a bit trickier.
1548		 * Optimize common case to avoid most of the divides
1549		 */
1550		mss = tcp_skb_mss(skb);
1551
1552		/* TODO: Fix DSACKs to not fragment already SACKed and we can
1553		 * drop this restriction as unnecessary
1554		 */
1555		if (mss != tcp_skb_seglen(prev))
1556			goto fallback;
1557
1558		if (len == mss) {
1559			pcount = 1;
1560		} else if (len < mss) {
1561			goto noop;
1562		} else {
1563			pcount = len / mss;
1564			len = pcount * mss;
1565		}
1566	}
1567
1568	if (!skb_shift(prev, skb, len))
1569		goto fallback;
1570	if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
1571		goto out;
1572
 1573	/* A filled hole allows collapsing with the next skb as well; this is very
 1574	 * useful when a hole-on-every-nth-skb pattern happens
1575	 */
1576	if (prev == tcp_write_queue_tail(sk))
1577		goto out;
1578	skb = tcp_write_queue_next(sk, prev);
1579
1580	if (!skb_can_shift(skb) ||
1581	    (skb == tcp_send_head(sk)) ||
1582	    ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
1583	    (mss != tcp_skb_seglen(skb)))
1584		goto out;
1585
1586	len = skb->len;
1587	if (skb_shift(prev, skb, len)) {
1588		pcount += tcp_skb_pcount(skb);
1589		tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
1590	}
1591
1592out:
1593	state->fack_count += pcount;
1594	return prev;
1595
1596noop:
1597	return skb;
1598
1599fallback:
1600	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
1601	return NULL;
1602}
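/* For illustration (hypothetical values, not from the original source): if
 * the SACK block covers only part of the skb so that end_seq - seq = 2500
 * with mss = 1000, the code above shifts pcount = 2500 / 1000 = 2 full
 * segments, i.e. len = 2000 bytes, into prev; the remaining 500 bytes stay
 * in skb so that MSS boundaries (and thus pcount accounting) are preserved.
 */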
1603
1604static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
1605					struct tcp_sack_block *next_dup,
1606					struct tcp_sacktag_state *state,
1607					u32 start_seq, u32 end_seq,
1608					int dup_sack_in)
1609{
1610	struct tcp_sock *tp = tcp_sk(sk);
1611	struct sk_buff *tmp;
1612
1613	tcp_for_write_queue_from(skb, sk) {
1614		int in_sack = 0;
1615		int dup_sack = dup_sack_in;
1616
1617		if (skb == tcp_send_head(sk))
1618			break;
1619
1620		/* queue is in-order => we can short-circuit the walk early */
1621		if (!before(TCP_SKB_CB(skb)->seq, end_seq))
1622			break;
1623
1624		if ((next_dup != NULL) &&
1625		    before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
1626			in_sack = tcp_match_skb_to_sack(sk, skb,
1627							next_dup->start_seq,
1628							next_dup->end_seq);
1629			if (in_sack > 0)
1630				dup_sack = 1;
1631		}
1632
1633		/* skb reference here is a bit tricky to get right, since
1634		 * shifting can eat and free both this skb and the next,
1635		 * so not even _safe variant of the loop is enough.
1636		 */
1637		if (in_sack <= 0) {
1638			tmp = tcp_shift_skb_data(sk, skb, state,
1639						 start_seq, end_seq, dup_sack);
1640			if (tmp != NULL) {
1641				if (tmp != skb) {
1642					skb = tmp;
1643					continue;
1644				}
1645
1646				in_sack = 0;
1647			} else {
1648				in_sack = tcp_match_skb_to_sack(sk, skb,
1649								start_seq,
1650								end_seq);
1651			}
1652		}
1653
1654		if (unlikely(in_sack < 0))
1655			break;
1656
1657		if (in_sack) {
1658			TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk,
1659								  state,
1660								  dup_sack,
1661								  tcp_skb_pcount(skb));
1662
1663			if (!before(TCP_SKB_CB(skb)->seq,
1664				    tcp_highest_sack_seq(tp)))
1665				tcp_advance_highest_sack(sk, skb);
1666		}
1667
1668		state->fack_count += tcp_skb_pcount(skb);
1669	}
1670	return skb;
1671}
1672
1673/* Avoid all extra work that is being done by sacktag while walking in
1674 * a normal way
1675 */
1676static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
1677					struct tcp_sacktag_state *state,
1678					u32 skip_to_seq)
1679{
1680	tcp_for_write_queue_from(skb, sk) {
1681		if (skb == tcp_send_head(sk))
1682			break;
1683
1684		if (after(TCP_SKB_CB(skb)->end_seq, skip_to_seq))
1685			break;
1686
1687		state->fack_count += tcp_skb_pcount(skb);
1688	}
1689	return skb;
1690}
1691
1692static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
1693						struct sock *sk,
1694						struct tcp_sack_block *next_dup,
1695						struct tcp_sacktag_state *state,
1696						u32 skip_to_seq)
1697{
1698	if (next_dup == NULL)
1699		return skb;
1700
1701	if (before(next_dup->start_seq, skip_to_seq)) {
1702		skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq);
1703		skb = tcp_sacktag_walk(skb, sk, NULL, state,
1704				       next_dup->start_seq, next_dup->end_seq,
1705				       1);
1706	}
1707
1708	return skb;
1709}
1710
1711static int tcp_sack_cache_ok(struct tcp_sock *tp, struct tcp_sack_block *cache)
1712{
1713	return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
1714}
1715
1716static int
1717tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb,
1718			u32 prior_snd_una)
1719{
1720	const struct inet_connection_sock *icsk = inet_csk(sk);
1721	struct tcp_sock *tp = tcp_sk(sk);
1722	unsigned char *ptr = (skb_transport_header(ack_skb) +
1723			      TCP_SKB_CB(ack_skb)->sacked);
1724	struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
1725	struct tcp_sack_block sp[TCP_NUM_SACKS];
1726	struct tcp_sack_block *cache;
1727	struct tcp_sacktag_state state;
1728	struct sk_buff *skb;
1729	int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
1730	int used_sacks;
1731	int found_dup_sack = 0;
1732	int i, j;
1733	int first_sack_index;
1734
1735	state.flag = 0;
1736	state.reord = tp->packets_out;
1737
1738	if (!tp->sacked_out) {
1739		if (WARN_ON(tp->fackets_out))
1740			tp->fackets_out = 0;
1741		tcp_highest_sack_reset(sk);
1742	}
1743
1744	found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
1745					 num_sacks, prior_snd_una);
1746	if (found_dup_sack)
1747		state.flag |= FLAG_DSACKING_ACK;
1748
1749	/* Eliminate too old ACKs, but take into
1750	 * account more or less fresh ones, they can
1751	 * contain valid SACK info.
1752	 */
1753	if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
1754		return 0;
1755
1756	if (!tp->packets_out)
1757		goto out;
1758
1759	used_sacks = 0;
1760	first_sack_index = 0;
1761	for (i = 0; i < num_sacks; i++) {
1762		int dup_sack = !i && found_dup_sack;
1763
1764		sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
1765		sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);
1766
1767		if (!tcp_is_sackblock_valid(tp, dup_sack,
1768					    sp[used_sacks].start_seq,
1769					    sp[used_sacks].end_seq)) {
1770			int mib_idx;
1771
1772			if (dup_sack) {
1773				if (!tp->undo_marker)
1774					mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;
1775				else
1776					mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;
1777			} else {
1778				/* Don't count olds caused by ACK reordering */
1779				if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
1780				    !after(sp[used_sacks].end_seq, tp->snd_una))
1781					continue;
1782				mib_idx = LINUX_MIB_TCPSACKDISCARD;
1783			}
1784
1785			NET_INC_STATS_BH(sock_net(sk), mib_idx);
1786			if (i == 0)
1787				first_sack_index = -1;
1788			continue;
1789		}
1790
1791		/* Ignore very old stuff early */
1792		if (!after(sp[used_sacks].end_seq, prior_snd_una))
1793			continue;
1794
1795		used_sacks++;
1796	}
1797
1798	/* order SACK blocks to allow in order walk of the retrans queue */
1799	for (i = used_sacks - 1; i > 0; i--) {
1800		for (j = 0; j < i; j++) {
1801			if (after(sp[j].start_seq, sp[j + 1].start_seq)) {
1802				swap(sp[j], sp[j + 1]);
1803
1804				/* Track where the first SACK block goes to */
1805				if (j == first_sack_index)
1806					first_sack_index = j + 1;
1807			}
1808		}
1809	}
1810
1811	skb = tcp_write_queue_head(sk);
1812	state.fack_count = 0;
1813	i = 0;
1814
1815	if (!tp->sacked_out) {
1816		/* It's already past, so skip checking against it */
1817		cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
1818	} else {
1819		cache = tp->recv_sack_cache;
 1820		/* Skip empty blocks at the head of the cache */
1821		while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq &&
1822		       !cache->end_seq)
1823			cache++;
1824	}
1825
1826	while (i < used_sacks) {
1827		u32 start_seq = sp[i].start_seq;
1828		u32 end_seq = sp[i].end_seq;
1829		int dup_sack = (found_dup_sack && (i == first_sack_index));
1830		struct tcp_sack_block *next_dup = NULL;
1831
1832		if (found_dup_sack && ((i + 1) == first_sack_index))
1833			next_dup = &sp[i + 1];
1834
1835		/* Event "B" in the comment above. */
1836		if (after(end_seq, tp->high_seq))
1837			state.flag |= FLAG_DATA_LOST;
1838
1839		/* Skip too early cached blocks */
1840		while (tcp_sack_cache_ok(tp, cache) &&
1841		       !before(start_seq, cache->end_seq))
1842			cache++;
1843
 1844		/* Can we skip some work by looking at recv_sack_cache? */
1845		if (tcp_sack_cache_ok(tp, cache) && !dup_sack &&
1846		    after(end_seq, cache->start_seq)) {
1847
1848			/* Head todo? */
1849			if (before(start_seq, cache->start_seq)) {
1850				skb = tcp_sacktag_skip(skb, sk, &state,
1851						       start_seq);
1852				skb = tcp_sacktag_walk(skb, sk, next_dup,
1853						       &state,
1854						       start_seq,
1855						       cache->start_seq,
1856						       dup_sack);
1857			}
1858
1859			/* Rest of the block already fully processed? */
1860			if (!after(end_seq, cache->end_seq))
1861				goto advance_sp;
1862
1863			skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
1864						       &state,
1865						       cache->end_seq);
1866
1867			/* ...tail remains todo... */
1868			if (tcp_highest_sack_seq(tp) == cache->end_seq) {
1869				/* ...but better entrypoint exists! */
1870				skb = tcp_highest_sack(sk);
1871				if (skb == NULL)
1872					break;
1873				state.fack_count = tp->fackets_out;
1874				cache++;
1875				goto walk;
1876			}
1877
1878			skb = tcp_sacktag_skip(skb, sk, &state, cache->end_seq);
1879			/* Check overlap against next cached too (past this one already) */
1880			cache++;
1881			continue;
1882		}
1883
1884		if (!before(start_seq, tcp_highest_sack_seq(tp))) {
1885			skb = tcp_highest_sack(sk);
1886			if (skb == NULL)
1887				break;
1888			state.fack_count = tp->fackets_out;
1889		}
1890		skb = tcp_sacktag_skip(skb, sk, &state, start_seq);
1891
1892walk:
1893		skb = tcp_sacktag_walk(skb, sk, next_dup, &state,
1894				       start_seq, end_seq, dup_sack);
1895
1896advance_sp:
1897		/* SACK enhanced FRTO (RFC4138, Appendix B): Clearing correct
1898		 * due to in-order walk
1899		 */
1900		if (after(end_seq, tp->frto_highmark))
1901			state.flag &= ~FLAG_ONLY_ORIG_SACKED;
1902
1903		i++;
1904	}
1905
1906	/* Clear the head of the cache sack blocks so we can skip it next time */
1907	for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) {
1908		tp->recv_sack_cache[i].start_seq = 0;
1909		tp->recv_sack_cache[i].end_seq = 0;
1910	}
1911	for (j = 0; j < used_sacks; j++)
1912		tp->recv_sack_cache[i++] = sp[j];
1913
1914	tcp_mark_lost_retrans(sk);
1915
1916	tcp_verify_left_out(tp);
1917
1918	if ((state.reord < tp->fackets_out) &&
1919	    ((icsk->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker) &&
1920	    (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
1921		tcp_update_reordering(sk, tp->fackets_out - state.reord, 0);
1922
1923out:
1924
1925#if FASTRETRANS_DEBUG > 0
1926	WARN_ON((int)tp->sacked_out < 0);
1927	WARN_ON((int)tp->lost_out < 0);
1928	WARN_ON((int)tp->retrans_out < 0);
1929	WARN_ON((int)tcp_packets_in_flight(tp) < 0);
1930#endif
1931	return state.flag;
1932}
1933
1934/* Limits sacked_out so that sum with lost_out isn't ever larger than
 1935 * packets_out. Returns zero if sacked_out adjustment wasn't necessary.
1936 */
1937static int tcp_limit_reno_sacked(struct tcp_sock *tp)
1938{
1939	u32 holes;
1940
1941	holes = max(tp->lost_out, 1U);
1942	holes = min(holes, tp->packets_out);
1943
1944	if ((tp->sacked_out + holes) > tp->packets_out) {
1945		tp->sacked_out = tp->packets_out - holes;
1946		return 1;
1947	}
1948	return 0;
1949}
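/* For illustration (hypothetical values, not from the original source): with
 * packets_out = 10, lost_out = 3 and sacked_out = 9, holes = min(max(3, 1), 10)
 * = 3, so sacked_out is clamped to 10 - 3 = 7 and the function returns 1;
 * sacked_out + lost_out can then never exceed packets_out.
 */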
1950
 1951/* If we receive more dupacks than expected while counting segments
 1952 * under the assumption of no reordering, interpret this as reordering.
 1953 * The only other reason could be a bug in the receiver's TCP.
1954 */
1955static void tcp_check_reno_reordering(struct sock *sk, const int addend)
1956{
1957	struct tcp_sock *tp = tcp_sk(sk);
1958	if (tcp_limit_reno_sacked(tp))
1959		tcp_update_reordering(sk, tp->packets_out + addend, 0);
1960}
1961
1962/* Emulate SACKs for SACKless connection: account for a new dupack. */
1963
1964static void tcp_add_reno_sack(struct sock *sk)
1965{
1966	struct tcp_sock *tp = tcp_sk(sk);
1967	tp->sacked_out++;
1968	tcp_check_reno_reordering(sk, 0);
1969	tcp_verify_left_out(tp);
1970}
1971
1972/* Account for ACK, ACKing some data in Reno Recovery phase. */
1973
1974static void tcp_remove_reno_sacks(struct sock *sk, int acked)
1975{
1976	struct tcp_sock *tp = tcp_sk(sk);
1977
1978	if (acked > 0) {
1979		/* One ACK acked hole. The rest eat duplicate ACKs. */
1980		if (acked - 1 >= tp->sacked_out)
1981			tp->sacked_out = 0;
1982		else
1983			tp->sacked_out -= acked - 1;
1984	}
1985	tcp_check_reno_reordering(sk, acked);
1986	tcp_verify_left_out(tp);
1987}
1988
1989static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
1990{
1991	tp->sacked_out = 0;
1992}
1993
1994static int tcp_is_sackfrto(const struct tcp_sock *tp)
1995{
1996	return (sysctl_tcp_frto == 0x2) && !tcp_is_reno(tp);
1997}
1998
1999/* F-RTO can only be used if TCP has never retransmitted anything other than
2000 * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here)
2001 */
2002int tcp_use_frto(struct sock *sk)
2003{
2004	const struct tcp_sock *tp = tcp_sk(sk);
2005	const struct inet_connection_sock *icsk = inet_csk(sk);
2006	struct sk_buff *skb;
2007
2008	if (!sysctl_tcp_frto)
2009		return 0;
2010
2011	/* MTU probe and F-RTO won't really play nicely along currently */
2012	if (icsk->icsk_mtup.probe_size)
2013		return 0;
2014
2015	if (tcp_is_sackfrto(tp))
2016		return 1;
2017
2018	/* Avoid expensive walking of rexmit queue if possible */
2019	if (tp->retrans_out > 1)
2020		return 0;
2021
2022	skb = tcp_write_queue_head(sk);
2023	if (tcp_skb_is_last(sk, skb))
2024		return 1;
2025	skb = tcp_write_queue_next(sk, skb);	/* Skips head */
2026	tcp_for_write_queue_from(skb, sk) {
2027		if (skb == tcp_send_head(sk))
2028			break;
2029		if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
2030			return 0;
2031		/* Short-circuit when first non-SACKed skb has been checked */
2032		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
2033			break;
2034	}
2035	return 1;
2036}
2037
2038/* RTO occurred, but do not yet enter Loss state. Instead, defer RTO
2039 * recovery a bit and use heuristics in tcp_process_frto() to detect if
2040 * the RTO was spurious. Only clear SACKED_RETRANS of the head here to
2041 * keep retrans_out counting accurate (with SACK F-RTO, other than head
2042 * may still have that bit set); TCPCB_LOST and remaining SACKED_RETRANS
2043 * bits are handled if the Loss state is really to be entered (in
2044 * tcp_enter_frto_loss).
2045 *
2046 * Do like tcp_enter_loss() would; when RTO expires the second time it
2047 * does:
2048 *  "Reduce ssthresh if it has not yet been made inside this window."
2049 */
2050void tcp_enter_frto(struct sock *sk)
2051{
2052	const struct inet_connection_sock *icsk = inet_csk(sk);
2053	struct tcp_sock *tp = tcp_sk(sk);
2054	struct sk_buff *skb;
2055
2056	if ((!tp->frto_counter && icsk->icsk_ca_state <= TCP_CA_Disorder) ||
2057	    tp->snd_una == tp->high_seq ||
2058	    ((icsk->icsk_ca_state == TCP_CA_Loss || tp->frto_counter) &&
2059	     !icsk->icsk_retransmits)) {
2060		tp->prior_ssthresh = tcp_current_ssthresh(sk);
2061		/* Our state is too optimistic in ssthresh() call because cwnd
2062		 * is not reduced until tcp_enter_frto_loss() when previous F-RTO
2063		 * recovery has not yet completed. Pattern would be this: RTO,
2064		 * Cumulative ACK, RTO (2xRTO for the same segment does not end
2065		 * up here twice).
2066		 * RFC4138 should be more specific on what to do, even though
2067		 * RTO is quite unlikely to occur after the first Cumulative ACK
2068		 * due to back-off and complexity of triggering events ...
2069		 */
2070		if (tp->frto_counter) {
2071			u32 stored_cwnd;
2072			stored_cwnd = tp->snd_cwnd;
2073			tp->snd_cwnd = 2;
2074			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
2075			tp->snd_cwnd = stored_cwnd;
2076		} else {
2077			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
2078		}
2079		/* ... in theory, cong.control module could do "any tricks" in
2080		 * ssthresh(), which means that ca_state, lost bits and lost_out
2081		 * counter would have to be faked before the call occurs. We
2082		 * consider that too expensive, unlikely and hacky, so modules
 2083		 * using these in ssthresh() must deal with these incompatibility
 2084		 * issues if they receive CA_EVENT_FRTO and frto_counter != 0
2085		 */
2086		tcp_ca_event(sk, CA_EVENT_FRTO);
2087	}
2088
2089	tp->undo_marker = tp->snd_una;
2090	tp->undo_retrans = 0;
2091
2092	skb = tcp_write_queue_head(sk);
2093	if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
2094		tp->undo_marker = 0;
2095	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2096		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
2097		tp->retrans_out -= tcp_skb_pcount(skb);
2098	}
2099	tcp_verify_left_out(tp);
2100
2101	/* Too bad if TCP was application limited */
2102	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
2103
2104	/* Earlier loss recovery underway (see RFC4138; Appendix B).
2105	 * The last condition is necessary at least in tp->frto_counter case.
2106	 */
2107	if (tcp_is_sackfrto(tp) && (tp->frto_counter ||
2108	    ((1 << icsk->icsk_ca_state) & (TCPF_CA_Recovery|TCPF_CA_Loss))) &&
2109	    after(tp->high_seq, tp->snd_una)) {
2110		tp->frto_highmark = tp->high_seq;
2111	} else {
2112		tp->frto_highmark = tp->snd_nxt;
2113	}
2114	tcp_set_ca_state(sk, TCP_CA_Disorder);
2115	tp->high_seq = tp->snd_nxt;
2116	tp->frto_counter = 1;
2117}
2118
2119/* Enter Loss state after F-RTO was applied. Dupack arrived after RTO,
2120 * which indicates that we should follow the traditional RTO recovery,
2121 * i.e. mark everything lost and do go-back-N retransmission.
2122 */
2123static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
2124{
2125	struct tcp_sock *tp = tcp_sk(sk);
2126	struct sk_buff *skb;
2127
2128	tp->lost_out = 0;
2129	tp->retrans_out = 0;
2130	if (tcp_is_reno(tp))
2131		tcp_reset_reno_sack(tp);
2132
2133	tcp_for_write_queue(skb, sk) {
2134		if (skb == tcp_send_head(sk))
2135			break;
2136
2137		TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
2138		/*
2139		 * Count the retransmission made on RTO correctly (only when
2140		 * waiting for the first ACK and did not get it)...
2141		 */
2142		if ((tp->frto_counter == 1) && !(flag & FLAG_DATA_ACKED)) {
2143			/* For some reason this R-bit might get cleared? */
2144			if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
2145				tp->retrans_out += tcp_skb_pcount(skb);
2146			/* ...enter this if branch just for the first segment */
2147			flag |= FLAG_DATA_ACKED;
2148		} else {
2149			if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
2150				tp->undo_marker = 0;
2151			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
2152		}
2153
2154		/* Marking forward transmissions that were made after RTO lost
2155		 * can cause unnecessary retransmissions in some scenarios,
2156		 * SACK blocks will mitigate that in some but not in all cases.
2157		 * We used to not mark them but it was causing break-ups with
 2158		 * receivers that only accept in-order data.
2159		 *
2160		 * TODO: we could detect presence of such receiver and select
2161		 * different behavior per flow.
2162		 */
2163		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
2164			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
2165			tp->lost_out += tcp_skb_pcount(skb);
2166			tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
2167		}
2168	}
2169	tcp_verify_left_out(tp);
2170
2171	tp->snd_cwnd = tcp_packets_in_flight(tp) + allowed_segments;
2172	tp->snd_cwnd_cnt = 0;
2173	tp->snd_cwnd_stamp = tcp_time_stamp;
2174	tp->frto_counter = 0;
2175	tp->bytes_acked = 0;
2176
2177	tp->reordering = min_t(unsigned int, tp->reordering,
2178			       sysctl_tcp_reordering);
2179	tcp_set_ca_state(sk, TCP_CA_Loss);
2180	tp->high_seq = tp->snd_nxt;
2181	TCP_ECN_queue_cwr(tp);
2182
2183	tcp_clear_all_retrans_hints(tp);
2184}
2185
2186static void tcp_clear_retrans_partial(struct tcp_sock *tp)
2187{
2188	tp->retrans_out = 0;
2189	tp->lost_out = 0;
2190
2191	tp->undo_marker = 0;
2192	tp->undo_retrans = 0;
2193}
2194
2195void tcp_clear_retrans(struct tcp_sock *tp)
2196{
2197	tcp_clear_retrans_partial(tp);
2198
2199	tp->fackets_out = 0;
2200	tp->sacked_out = 0;
2201}
2202
2203/* Enter Loss state. If "how" is not zero, forget all SACK information
2204 * and reset tags completely, otherwise preserve SACKs. If receiver
2205 * dropped its ofo queue, we will know this due to reneging detection.
2206 */
2207void tcp_enter_loss(struct sock *sk, int how)
2208{
2209	const struct inet_connection_sock *icsk = inet_csk(sk);
2210	struct tcp_sock *tp = tcp_sk(sk);
2211	struct sk_buff *skb;
2212
2213	/* Reduce ssthresh if it has not yet been made inside this window. */
2214	if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq ||
2215	    (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
2216		tp->prior_ssthresh = tcp_current_ssthresh(sk);
2217		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
2218		tcp_ca_event(sk, CA_EVENT_LOSS);
2219	}
2220	tp->snd_cwnd	   = 1;
2221	tp->snd_cwnd_cnt   = 0;
2222	tp->snd_cwnd_stamp = tcp_time_stamp;
2223
2224	tp->bytes_acked = 0;
2225	tcp_clear_retrans_partial(tp);
2226
2227	if (tcp_is_reno(tp))
2228		tcp_reset_reno_sack(tp);
2229
2230	if (!how) {
2231		/* Push undo marker, if it was plain RTO and nothing
2232		 * was retransmitted. */
2233		tp->undo_marker = tp->snd_una;
2234	} else {
2235		tp->sacked_out = 0;
2236		tp->fackets_out = 0;
2237	}
2238	tcp_clear_all_retrans_hints(tp);
2239
2240	tcp_for_write_queue(skb, sk) {
2241		if (skb == tcp_send_head(sk))
2242			break;
2243
2244		if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
2245			tp->undo_marker = 0;
2246		TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
2247		if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) {
2248			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
2249			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
2250			tp->lost_out += tcp_skb_pcount(skb);
2251			tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
2252		}
2253	}
2254	tcp_verify_left_out(tp);
2255
2256	tp->reordering = min_t(unsigned int, tp->reordering,
2257			       sysctl_tcp_reordering);
2258	tcp_set_ca_state(sk, TCP_CA_Loss);
2259	tp->high_seq = tp->snd_nxt;
2260	TCP_ECN_queue_cwr(tp);
2261	/* Abort F-RTO algorithm if one is in progress */
2262	tp->frto_counter = 0;
2263}
2264
2265/* If ACK arrived pointing to a remembered SACK, it means that our
2266 * remembered SACKs do not reflect real state of receiver i.e.
2267 * receiver _host_ is heavily congested (or buggy).
2268 *
2269 * Do processing similar to RTO timeout.
2270 */
2271static int tcp_check_sack_reneging(struct sock *sk, int flag)
2272{
2273	if (flag & FLAG_SACK_RENEGING) {
2274		struct inet_connection_sock *icsk = inet_csk(sk);
2275		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
2276
2277		tcp_enter_loss(sk, 1);
2278		icsk->icsk_retransmits++;
2279		tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
2280		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2281					  icsk->icsk_rto, TCP_RTO_MAX);
2282		return 1;
2283	}
2284	return 0;
2285}
2286
2287static inline int tcp_fackets_out(struct tcp_sock *tp)
2288{
2289	return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out;
2290}
2291
 2292/* Heuristics to calculate the number of duplicate ACKs. There's no dupACK
2293 * counter when SACK is enabled (without SACK, sacked_out is used for
2294 * that purpose).
2295 *
2296 * Instead, with FACK TCP uses fackets_out that includes both SACKed
2297 * segments up to the highest received SACK block so far and holes in
2298 * between them.
2299 *
2300 * With reordering, holes may still be in flight, so RFC3517 recovery
2301 * uses pure sacked_out (total number of SACKed segments) even though
 2302 * it violates the RFC, which counts duplicate ACKs; often these are equal
2303 * but when e.g. out-of-window ACKs or packet duplication occurs,
2304 * they differ. Since neither occurs due to loss, TCP should really
2305 * ignore them.
2306 */
2307static inline int tcp_dupack_heuristics(struct tcp_sock *tp)
2308{
2309	return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
2310}
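/* For illustration (hypothetical values, not from the original source): if
 * segments 2, 4 and 6 of an 8-segment window are SACKed, sacked_out = 3
 * while fackets_out = 6 (everything up to the highest SACK, holes included).
 * FACK therefore reports 6 here, RFC3517-style recovery reports 3 + 1 = 4.
 */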
2311
2312static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
2313{
2314	return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
2315}
2316
2317static inline int tcp_head_timedout(struct sock *sk)
2318{
2319	struct tcp_sock *tp = tcp_sk(sk);
2320
2321	return tp->packets_out &&
2322	       tcp_skb_timedout(sk, tcp_write_queue_head(sk));
2323}
2324
2325/* Linux NewReno/SACK/FACK/ECN state machine.
2326 * --------------------------------------
2327 *
2328 * "Open"	Normal state, no dubious events, fast path.
 2329 * "Disorder"   In all respects it is "Open",
 2330 *		but requires a bit more attention. It is entered when
 2331 *		we see some SACKs or dupacks. It is split off from "Open"
2332 *		mainly to move some processing from fast path to slow one.
2333 * "CWR"	CWND was reduced due to some Congestion Notification event.
2334 *		It can be ECN, ICMP source quench, local device congestion.
2335 * "Recovery"	CWND was reduced, we are fast-retransmitting.
2336 * "Loss"	CWND was reduced due to RTO timeout or SACK reneging.
2337 *
2338 * tcp_fastretrans_alert() is entered:
2339 * - each incoming ACK, if state is not "Open"
2340 * - when arrived ACK is unusual, namely:
2341 *	* SACK
2342 *	* Duplicate ACK.
2343 *	* ECN ECE.
2344 *
2345 * Counting packets in flight is pretty simple.
2346 *
2347 *	in_flight = packets_out - left_out + retrans_out
2348 *
2349 *	packets_out is SND.NXT-SND.UNA counted in packets.
2350 *
2351 *	retrans_out is number of retransmitted segments.
2352 *
 2353 *	left_out is the number of segments that left the network, but are not ACKed yet.
2354 *
2355 *		left_out = sacked_out + lost_out
2356 *
 2357 *     sacked_out: Packets which arrived at the receiver out of order
 2358 *		   and hence are not ACKed. With SACKs this number is simply
 2359 *		   the amount of SACKed data. Even without SACKs
 2360 *		   it is easy to give a pretty reliable estimate of this number
 2361 *		   by counting duplicate ACKs.
2362 *
2363 *       lost_out: Packets lost by network. TCP has no explicit
2364 *		   "loss notification" feedback from network (for now).
2365 *		   It means that this number can be only _guessed_.
2366 *		   Actually, it is the heuristics to predict lossage that
2367 *		   distinguishes different algorithms.
2368 *
2369 *	F.e. after RTO, when all the queue is considered as lost,
2370 *	lost_out = packets_out and in_flight = retrans_out.
2371 *
2372 *		Essentially, we have now two algorithms counting
2373 *		lost packets.
2374 *
 2375 *		FACK: It is the simplest heuristic. As soon as we decide
 2376 *		that something is lost, we decide that _all_ not-SACKed
 2377 *		packets until the most forward SACK are lost. I.e.
 2378 *		lost_out = fackets_out - sacked_out and left_out = fackets_out.
 2379 *		It is an absolutely correct estimate, if the network does not reorder
 2380 *		packets. And it loses any connection to reality when reordering
2381 *		takes place. We use FACK by default until reordering
2382 *		is suspected on the path to this destination.
2383 *
2384 *		NewReno: when Recovery is entered, we assume that one segment
2385 *		is lost (classic Reno). While we are in Recovery and
2386 *		a partial ACK arrives, we assume that one more packet
 2387 *		is lost (NewReno). These heuristics are the same in NewReno
2388 *		and SACK.
2389 *
2390 *  Imagine, that's all! Forget about all this shamanism about CWND inflation
2391 *  deflation etc. CWND is real congestion window, never inflated, changes
2392 *  only according to classic VJ rules.
2393 *
2394 * Really tricky (and requiring careful tuning) part of algorithm
2395 * is hidden in functions tcp_time_to_recover() and tcp_xmit_retransmit_queue().
2396 * The first determines the moment _when_ we should reduce CWND and,
2397 * hence, slow down forward transmission. In fact, it determines the moment
2398 * when we decide that hole is caused by loss, rather than by a reorder.
2399 *
2400 * tcp_xmit_retransmit_queue() decides, _what_ we should retransmit to fill
2401 * holes, caused by lost packets.
2402 *
2403 * And the most logically complicated part of algorithm is undo
2404 * heuristics. We detect false retransmits due to both too early
2405 * fast retransmit (reordering) and underestimated RTO, analyzing
2406 * timestamps and D-SACKs. When we detect that some segments were
2407 * retransmitted by mistake and CWND reduction was wrong, we undo
2408 * window reduction and abort recovery phase. This logic is hidden
2409 * inside several functions named tcp_try_undo_<something>.
2410 */
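/* For illustration (hypothetical values, not from the original source): with
 * packets_out = 10, sacked_out = 3, lost_out = 2 and retrans_out = 1,
 * left_out = 3 + 2 = 5 and in_flight = 10 - 5 + 1 = 6, i.e. six segments
 * are still assumed to occupy the pipe when the state machine runs.
 */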
2411
2412/* This function decides, when we should leave Disordered state
2413 * and enter Recovery phase, reducing congestion window.
2414 *
2415 * Main question: may we further continue forward transmission
2416 * with the same cwnd?
2417 */
2418static int tcp_time_to_recover(struct sock *sk)
2419{
2420	struct tcp_sock *tp = tcp_sk(sk);
2421	__u32 packets_out;
2422
2423	/* Do not perform any recovery during F-RTO algorithm */
2424	if (tp->frto_counter)
2425		return 0;
2426
2427	/* Trick#1: The loss is proven. */
2428	if (tp->lost_out)
2429		return 1;
2430
2431	/* Not-A-Trick#2 : Classic rule... */
2432	if (tcp_dupack_heuristics(tp) > tp->reordering)
2433		return 1;
2434
2435	/* Trick#3 : when we use RFC2988 timer restart, fast
2436	 * retransmit can be triggered by timeout of queue head.
2437	 */
2438	if (tcp_is_fack(tp) && tcp_head_timedout(sk))
2439		return 1;
2440
2441	/* Trick#4: It is still not OK... But will it be useful to delay
2442	 * recovery more?
2443	 */
2444	packets_out = tp->packets_out;
2445	if (packets_out <= tp->reordering &&
2446	    tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) &&
2447	    !tcp_may_send_now(sk)) {
2448		/* We have nothing to send. This connection is limited
2449		 * either by receiver window or by application.
2450		 */
2451		return 1;
2452	}
2453
2454	/* If a thin stream is detected, retransmit after first
2455	 * received dupack. Employ only if SACK is supported in order
 2456	 * to avoid a possible corner-case series of spurious retransmissions.
 2457	 * Use only if there is no unsent data.
2458	 */
2459	if ((tp->thin_dupack || sysctl_tcp_thin_dupack) &&
2460	    tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 &&
2461	    tcp_is_sack(tp) && !tcp_send_head(sk))
2462		return 1;
2463
2464	return 0;
2465}
2466
2467/* New heuristics: it is possible only after we switched to restart timer
 2468 * each time something is ACKed. Hence, we can detect timed-out packets
2469 * during fast retransmit without falling to slow start.
2470 *
 2471 * Usefulness of this as-is is very questionable, since we should know which of
2472 * the segments is the next to timeout which is relatively expensive to find
2473 * in general case unless we add some data structure just for that. The
2474 * current approach certainly won't find the right one too often and when it
2475 * finally does find _something_ it usually marks large part of the window
2476 * right away (because a retransmission with a larger timestamp blocks the
2477 * loop from advancing). -ij
2478 */
2479static void tcp_timeout_skbs(struct sock *sk)
2480{
2481	struct tcp_sock *tp = tcp_sk(sk);
2482	struct sk_buff *skb;
2483
2484	if (!tcp_is_fack(tp) || !tcp_head_timedout(sk))
2485		return;
2486
2487	skb = tp->scoreboard_skb_hint;
2488	if (tp->scoreboard_skb_hint == NULL)
2489		skb = tcp_write_queue_head(sk);
2490
2491	tcp_for_write_queue_from(skb, sk) {
2492		if (skb == tcp_send_head(sk))
2493			break;
2494		if (!tcp_skb_timedout(sk, skb))
2495			break;
2496
2497		tcp_skb_mark_lost(tp, skb);
2498	}
2499
2500	tp->scoreboard_skb_hint = skb;
2501
2502	tcp_verify_left_out(tp);
2503}
2504
 2505/* Mark head of queue up as lost. With RFC3517 SACK, the packet count
 2506 * is against sacked "cnt", otherwise it's against facked "cnt"
2507 */
2508static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
2509{
2510	struct tcp_sock *tp = tcp_sk(sk);
2511	struct sk_buff *skb;
2512	int cnt, oldcnt;
2513	int err;
2514	unsigned int mss;
2515
2516	WARN_ON(packets > tp->packets_out);
2517	if (tp->lost_skb_hint) {
2518		skb = tp->lost_skb_hint;
2519		cnt = tp->lost_cnt_hint;
2520		/* Head already handled? */
2521		if (mark_head && skb != tcp_write_queue_head(sk))
2522			return;
2523	} else {
2524		skb = tcp_write_queue_head(sk);
2525		cnt = 0;
2526	}
2527
2528	tcp_for_write_queue_from(skb, sk) {
2529		if (skb == tcp_send_head(sk))
2530			break;
2531		/* TODO: do this better */
2532		/* this is not the most efficient way to do this... */
2533		tp->lost_skb_hint = skb;
2534		tp->lost_cnt_hint = cnt;
2535
2536		if (after(TCP_SKB_CB(skb)->end_seq, tp->high_seq))
2537			break;
2538
2539		oldcnt = cnt;
2540		if (tcp_is_fack(tp) || tcp_is_reno(tp) ||
2541		    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
2542			cnt += tcp_skb_pcount(skb);
2543
2544		if (cnt > packets) {
2545			if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
2546			    (oldcnt >= packets))
2547				break;
2548
2549			mss = skb_shinfo(skb)->gso_size;
2550			err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, mss);
2551			if (err < 0)
2552				break;
2553			cnt = packets;
2554		}
2555
2556		tcp_skb_mark_lost(tp, skb);
2557
2558		if (mark_head)
2559			break;
2560	}
2561	tcp_verify_left_out(tp);
2562}
2563
2564/* Account newly detected lost packet(s) */
2565
2566static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
2567{
2568	struct tcp_sock *tp = tcp_sk(sk);
2569
2570	if (tcp_is_reno(tp)) {
2571		tcp_mark_head_lost(sk, 1, 1);
2572	} else if (tcp_is_fack(tp)) {
2573		int lost = tp->fackets_out - tp->reordering;
2574		if (lost <= 0)
2575			lost = 1;
2576		tcp_mark_head_lost(sk, lost, 0);
2577	} else {
2578		int sacked_upto = tp->sacked_out - tp->reordering;
2579		if (sacked_upto >= 0)
2580			tcp_mark_head_lost(sk, sacked_upto, 0);
2581		else if (fast_rexmit)
2582			tcp_mark_head_lost(sk, 1, 1);
2583	}
2584
2585	tcp_timeout_skbs(sk);
2586}
2587
2588/* CWND moderation, preventing bursts due to too big ACKs
2589 * in dubious situations.
2590 */
2591static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
2592{
2593	tp->snd_cwnd = min(tp->snd_cwnd,
2594			   tcp_packets_in_flight(tp) + tcp_max_burst(tp));
2595	tp->snd_cwnd_stamp = tcp_time_stamp;
2596}
2597
2598/* Lower bound on congestion window is slow start threshold
 2599 * unless the congestion avoidance choice decides to override it.
2600 */
2601static inline u32 tcp_cwnd_min(const struct sock *sk)
2602{
2603	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
2604
2605	return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh;
2606}
2607
2608/* Decrease cwnd each second ack. */
2609static void tcp_cwnd_down(struct sock *sk, int flag)
2610{
2611	struct tcp_sock *tp = tcp_sk(sk);
2612	int decr = tp->snd_cwnd_cnt + 1;
2613
2614	if ((flag & (FLAG_ANY_PROGRESS | FLAG_DSACKING_ACK)) ||
2615	    (tcp_is_reno(tp) && !(flag & FLAG_NOT_DUP))) {
2616		tp->snd_cwnd_cnt = decr & 1;
2617		decr >>= 1;
2618
2619		if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
2620			tp->snd_cwnd -= decr;
2621
2622		tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
2623		tp->snd_cwnd_stamp = tcp_time_stamp;
2624	}
2625}
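/* For illustration (hypothetical values, not from the original source): on
 * each qualifying ACK above, decr = snd_cwnd_cnt + 1 is halved and the
 * remainder is kept in snd_cwnd_cnt, so cwnd drops by one segment on every
 * second ACK (rate halving) but never below tcp_cwnd_min() or in-flight + 1.
 */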
2626
 2627/* Nothing was retransmitted or the returned timestamp is less
 2628 * than the timestamp of the first retransmission.
2629 */
2630static inline int tcp_packet_delayed(struct tcp_sock *tp)
2631{
2632	return !tp->retrans_stamp ||
2633		(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
2634		 before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp));
2635}
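/* For illustration (hypothetical values, not from the original source): if
 * the first retransmission was stamped at retrans_stamp = 1000 and the ACK
 * echoes a timestamp rcv_tsecr = 998, the acknowledged segment was sent
 * before any retransmission, so the retransmit was spurious and undo is
 * allowed by the check above.
 */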
2636
2637/* Undo procedures. */
2638
2639#if FASTRETRANS_DEBUG > 1
2640static void DBGUNDO(struct sock *sk, const char *msg)
2641{
2642	struct tcp_sock *tp = tcp_sk(sk);
2643	struct inet_sock *inet = inet_sk(sk);
2644
2645	if (sk->sk_family == AF_INET) {
2646		printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
2647		       msg,
2648		       &inet->inet_daddr, ntohs(inet->inet_dport),
2649		       tp->snd_cwnd, tcp_left_out(tp),
2650		       tp->snd_ssthresh, tp->prior_ssthresh,
2651		       tp->packets_out);
2652	}
2653#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
2654	else if (sk->sk_family == AF_INET6) {
2655		struct ipv6_pinfo *np = inet6_sk(sk);
2656		printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
2657		       msg,
2658		       &np->daddr, ntohs(inet->inet_dport),
2659		       tp->snd_cwnd, tcp_left_out(tp),
2660		       tp->snd_ssthresh, tp->prior_ssthresh,
2661		       tp->packets_out);
2662	}
2663#endif
2664}
2665#else
2666#define DBGUNDO(x...) do { } while (0)
2667#endif
2668
2669static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
2670{
2671	struct tcp_sock *tp = tcp_sk(sk);
2672
2673	if (tp->prior_ssthresh) {
2674		const struct inet_connection_sock *icsk = inet_csk(sk);
2675
2676		if (icsk->icsk_ca_ops->undo_cwnd)
2677			tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
2678		else
2679			tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);
2680
2681		if (undo_ssthresh && tp->prior_ssthresh > tp->snd_ssthresh) {
2682			tp->snd_ssthresh = tp->prior_ssthresh;
2683			TCP_ECN_withdraw_cwr(tp);
2684		}
2685	} else {
2686		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
2687	}
2688	tp->snd_cwnd_stamp = tcp_time_stamp;
2689}
2690
2691static inline int tcp_may_undo(struct tcp_sock *tp)
2692{
2693	return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
2694}
2695
2696/* People celebrate: "We love our President!" */
2697static int tcp_try_undo_recovery(struct sock *sk)
2698{
2699	struct tcp_sock *tp = tcp_sk(sk);
2700
2701	if (tcp_may_undo(tp)) {
2702		int mib_idx;
2703
2704		/* Happy end! We did not retransmit anything
2705		 * or our original transmission succeeded.
2706		 */
2707		DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
2708		tcp_undo_cwr(sk, true);
2709		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
2710			mib_idx = LINUX_MIB_TCPLOSSUNDO;
2711		else
2712			mib_idx = LINUX_MIB_TCPFULLUNDO;
2713
2714		NET_INC_STATS_BH(sock_net(sk), mib_idx);
2715		tp->undo_marker = 0;
2716	}
2717	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
2718		/* Hold old state until something *above* high_seq
 2719		 * is ACKed. For Reno it is a MUST to prevent false
2720		 * fast retransmits (RFC2582). SACK TCP is safe. */
2721		tcp_moderate_cwnd(tp);
2722		return 1;
2723	}
2724	tcp_set_ca_state(sk, TCP_CA_Open);
2725	return 0;
2726}
2727
2728/* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
2729static void tcp_try_undo_dsack(struct sock *sk)
2730{
2731	struct tcp_sock *tp = tcp_sk(sk);
2732
2733	if (tp->undo_marker && !tp->undo_retrans) {
2734		DBGUNDO(sk, "D-SACK");
2735		tcp_undo_cwr(sk, true);
2736		tp->undo_marker = 0;
2737		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
2738	}
2739}
2740
2741/* We can clear retrans_stamp when there are no retransmissions in the
2742 * window. It would seem that it is trivially available for us in
 2743 * tp->retrans_out, however, that kind of assumption doesn't consider
 2744 * what will happen if errors occur when sending a retransmission for the
 2745 * second time. ...It could be that such a segment has only
2746 * TCPCB_EVER_RETRANS set at the present time. It seems that checking
2747 * the head skb is enough except for some reneging corner cases that
2748 * are not worth the effort.
2749 *
2750 * Main reason for all this complexity is the fact that connection dying
2751 * time now depends on the validity of the retrans_stamp, in particular,
2752 * that successive retransmissions of a segment must not advance
2753 * retrans_stamp under any conditions.
2754 */
2755static int tcp_any_retrans_done(struct sock *sk)
2756{
2757	struct tcp_sock *tp = tcp_sk(sk);
2758	struct sk_buff *skb;
2759
2760	if (tp->retrans_out)
2761		return 1;
2762
2763	skb = tcp_write_queue_head(sk);
2764	if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
2765		return 1;
2766
2767	return 0;
2768}
2769
2770/* Undo during fast recovery after partial ACK. */
2771
2772static int tcp_try_undo_partial(struct sock *sk, int acked)
2773{
2774	struct tcp_sock *tp = tcp_sk(sk);
2775	/* Partial ACK arrived. Force Hoe's retransmit. */
2776	int failed = tcp_is_reno(tp) || (tcp_fackets_out(tp) > tp->reordering);
2777
2778	if (tcp_may_undo(tp)) {
 2779		/* Plain luck! Hole is filled with delayed
2780		 * packet, rather than with a retransmit.
2781		 */
2782		if (!tcp_any_retrans_done(sk))
2783			tp->retrans_stamp = 0;
2784
2785		tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
2786
2787		DBGUNDO(sk, "Hoe");
2788		tcp_undo_cwr(sk, false);
2789		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
2790
2791		/* So... Do not make Hoe's retransmit yet.
2792		 * If the first packet was delayed, the rest
 2793		 * are most probably delayed as well.
2794		 */
2795		failed = 0;
2796	}
2797	return failed;
2798}
2799
2800/* Undo during loss recovery after partial ACK. */
2801static int tcp_try_undo_loss(struct sock *sk)
2802{
2803	struct tcp_sock *tp = tcp_sk(sk);
2804
2805	if (tcp_may_undo(tp)) {
2806		struct sk_buff *skb;
2807		tcp_for_write_queue(skb, sk) {
2808			if (skb == tcp_send_head(sk))
2809				break;
2810			TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
2811		}
2812
2813		tcp_clear_all_retrans_hints(tp);
2814
2815		DBGUNDO(sk, "partial loss");
2816		tp->lost_out = 0;
2817		tcp_undo_cwr(sk, true);
2818		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
2819		inet_csk(sk)->icsk_retransmits = 0;
2820		tp->undo_marker = 0;
2821		if (tcp_is_sack(tp))
2822			tcp_set_ca_state(sk, TCP_CA_Open);
2823		return 1;
2824	}
2825	return 0;
2826}
2827
2828static inline void tcp_complete_cwr(struct sock *sk)
2829{
2830	struct tcp_sock *tp = tcp_sk(sk);
2831	/* Do not moderate cwnd if it's already undone in cwr or recovery */
2832	if (tp->undo_marker && tp->snd_cwnd > tp->snd_ssthresh) {
2833		tp->snd_cwnd = tp->snd_ssthresh;
2834		tp->snd_cwnd_stamp = tcp_time_stamp;
2835	}
2836	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
2837}
2838
2839static void tcp_try_keep_open(struct sock *sk)
2840{
2841	struct tcp_sock *tp = tcp_sk(sk);
2842	int state = TCP_CA_Open;
2843
2844	if (tcp_left_out(tp) || tcp_any_retrans_done(sk) || tp->undo_marker)
2845		state = TCP_CA_Disorder;
2846
2847	if (inet_csk(sk)->icsk_ca_state != state) {
2848		tcp_set_ca_state(sk, state);
2849		tp->high_seq = tp->snd_nxt;
2850	}
2851}
2852
2853static void tcp_try_to_open(struct sock *sk, int flag)
2854{
2855	struct tcp_sock *tp = tcp_sk(sk);
2856
2857	tcp_verify_left_out(tp);
2858
2859	if (!tp->frto_counter && !tcp_any_retrans_done(sk))
2860		tp->retrans_stamp = 0;
2861
2862	if (flag & FLAG_ECE)
2863		tcp_enter_cwr(sk, 1);
2864
2865	if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
2866		tcp_try_keep_open(sk);
2867		tcp_moderate_cwnd(tp);
2868	} else {
2869		tcp_cwnd_down(sk, flag);
2870	}
2871}
2872
2873static void tcp_mtup_probe_failed(struct sock *sk)
2874{
2875	struct inet_connection_sock *icsk = inet_csk(sk);
2876
2877	icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
2878	icsk->icsk_mtup.probe_size = 0;
2879}
2880
2881static void tcp_mtup_probe_success(struct sock *sk)
2882{
2883	struct tcp_sock *tp = tcp_sk(sk);
2884	struct inet_connection_sock *icsk = inet_csk(sk);
2885
2886	/* FIXME: breaks with very large cwnd */
2887	tp->prior_ssthresh = tcp_current_ssthresh(sk);
2888	tp->snd_cwnd = tp->snd_cwnd *
2889		       tcp_mss_to_mtu(sk, tp->mss_cache) /
2890		       icsk->icsk_mtup.probe_size;
2891	tp->snd_cwnd_cnt = 0;
2892	tp->snd_cwnd_stamp = tcp_time_stamp;
2893	tp->snd_ssthresh = tcp_current_ssthresh(sk);
2894
2895	icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
2896	icsk->icsk_mtup.probe_size = 0;
2897	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
2898}
2899
2900/* Do a simple retransmit without using the backoff mechanisms in
2901 * tcp_timer. This is used for path mtu discovery.
2902 * The socket is already locked here.
2903 */
2904void tcp_simple_retransmit(struct sock *sk)
2905{
2906	const struct inet_connection_sock *icsk = inet_csk(sk);
2907	struct tcp_sock *tp = tcp_sk(sk);
2908	struct sk_buff *skb;
2909	unsigned int mss = tcp_current_mss(sk);
2910	u32 prior_lost = tp->lost_out;
2911
2912	tcp_for_write_queue(skb, sk) {
2913		if (skb == tcp_send_head(sk))
2914			break;
2915		if (tcp_skb_seglen(skb) > mss &&
2916		    !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
2917			if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2918				TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
2919				tp->retrans_out -= tcp_skb_pcount(skb);
2920			}
2921			tcp_skb_mark_lost_uncond_verify(tp, skb);
2922		}
2923	}
2924
2925	tcp_clear_retrans_hints_partial(tp);
2926
2927	if (prior_lost == tp->lost_out)
2928		return;
2929
2930	if (tcp_is_reno(tp))
2931		tcp_limit_reno_sacked(tp);
2932
2933	tcp_verify_left_out(tp);
2934
2935	/* Don't muck with the congestion window here.
2936	 * Reason is that we do not increase amount of _data_
2937	 * in network, but units changed and effective
2938	 * cwnd/ssthresh really reduced now.
2939	 */
2940	if (icsk->icsk_ca_state != TCP_CA_Loss) {
2941		tp->high_seq = tp->snd_nxt;
2942		tp->snd_ssthresh = tcp_current_ssthresh(sk);
2943		tp->prior_ssthresh = 0;
2944		tp->undo_marker = 0;
2945		tcp_set_ca_state(sk, TCP_CA_Loss);
2946	}
2947	tcp_xmit_retransmit_queue(sk);
2948}
2949EXPORT_SYMBOL(tcp_simple_retransmit);
2950
2951/* Process an event, which can update packets-in-flight not trivially.
2952 * Main goal of this function is to calculate new estimate for left_out,
2953 * taking into account both packets sitting in receiver's buffer and
2954 * packets lost by network.
2955 *
2956 * Besides that it does CWND reduction, when packet loss is detected
2957 * and changes state of machine.
2958 *
2959 * It does _not_ decide what to send, it is made in function
2960 * tcp_xmit_retransmit_queue().
2961 */
2962static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
2963{
2964	struct inet_connection_sock *icsk = inet_csk(sk);
2965	struct tcp_sock *tp = tcp_sk(sk);
2966	int is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
2967	int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
2968				    (tcp_fackets_out(tp) > tp->reordering));
2969	int fast_rexmit = 0, mib_idx;
2970
2971	if (WARN_ON(!tp->packets_out && tp->sacked_out))
2972		tp->sacked_out = 0;
2973	if (WARN_ON(!tp->sacked_out && tp->fackets_out))
2974		tp->fackets_out = 0;
2975
2976	/* Now state machine starts.
2977	 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
2978	if (flag & FLAG_ECE)
2979		tp->prior_ssthresh = 0;
2980
2981	/* B. In all the states check for reneging SACKs. */
2982	if (tcp_check_sack_reneging(sk, flag))
2983		return;
2984
2985	/* C. Process data loss notification, provided it is valid. */
2986	if (tcp_is_fack(tp) && (flag & FLAG_DATA_LOST) &&
2987	    before(tp->snd_una, tp->high_seq) &&
2988	    icsk->icsk_ca_state != TCP_CA_Open &&
2989	    tp->fackets_out > tp->reordering) {
2990		tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering, 0);
2991		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS);
2992	}
2993
2994	/* D. Check consistency of the current state. */
2995	tcp_verify_left_out(tp);
2996
2997	/* E. Check state exit conditions. State can be terminated
2998	 *    when high_seq is ACKed. */
2999	if (icsk->icsk_ca_state == TCP_CA_Open) {
3000		WARN_ON(tp->retrans_out != 0);
3001		tp->retrans_stamp = 0;
3002	} else if (!before(tp->snd_una, tp->high_seq)) {
3003		switch (icsk->icsk_ca_state) {
3004		case TCP_CA_Loss:
3005			icsk->icsk_retransmits = 0;
3006			if (tcp_try_undo_recovery(sk))
3007				return;
3008			break;
3009
3010		case TCP_CA_CWR:
 3011			/* CWR is to be held until something *above* high_seq
 3012			 * is ACKed, for the CWR bit to reach the receiver. */
3013			if (tp->snd_una != tp->high_seq) {
3014				tcp_complete_cwr(sk);
3015				tcp_set_ca_state(sk, TCP_CA_Open);
3016			}
3017			break;
3018
3019		case TCP_CA_Disorder:
3020			tcp_try_undo_dsack(sk);
3021			if (!tp->undo_marker ||
 3022			    /* For the SACK case do not enter Open, to allow
 3023			     * undo catching for all duplicate ACKs. */
3024			    tcp_is_reno(tp) || tp->snd_una != tp->high_seq) {
3025				tp->undo_marker = 0;
3026				tcp_set_ca_state(sk, TCP_CA_Open);
3027			}
3028			break;
3029
3030		case TCP_CA_Recovery:
3031			if (tcp_is_reno(tp))
3032				tcp_reset_reno_sack(tp);
3033			if (tcp_try_undo_recovery(sk))
3034				return;
3035			tcp_complete_cwr(sk);
3036			break;
3037		}
3038	}
3039
3040	/* F. Process state. */
3041	switch (icsk->icsk_ca_state) {
3042	case TCP_CA_Recovery:
3043		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
3044			if (tcp_is_reno(tp) && is_dupack)
3045				tcp_add_reno_sack(sk);
3046		} else
3047			do_lost = tcp_try_undo_partial(sk, pkts_acked);
3048		break;
3049	case TCP_CA_Loss:
3050		if (flag & FLAG_DATA_ACKED)
3051			icsk->icsk_retransmits = 0;
3052		if (tcp_is_reno(tp) && flag & FLAG_SND_UNA_ADVANCED)
3053			tcp_reset_reno_sack(tp);
3054		if (!tcp_try_undo_loss(sk)) {
3055			tcp_moderate_cwnd(tp);
3056			tcp_xmit_retransmit_queue(sk);
3057			return;
3058		}
3059		if (icsk->icsk_ca_state != TCP_CA_Open)
3060			return;
3061		/* Loss is undone; fall through to processing in Open state. */
3062	default:
3063		if (tcp_is_reno(tp)) {
3064			if (flag & FLAG_SND_UNA_ADVANCED)
3065				tcp_reset_reno_sack(tp);
3066			if (is_dupack)
3067				tcp_add_reno_sack(sk);
3068		}
3069
3070		if (icsk->icsk_ca_state == TCP_CA_Disorder)
3071			tcp_try_undo_dsack(sk);
3072
3073		if (!tcp_time_to_recover(sk)) {
3074			tcp_try_to_open(sk, flag);
3075			return;
3076		}
3077
3078		/* MTU probe failure: don't reduce cwnd */
3079		if (icsk->icsk_ca_state < TCP_CA_CWR &&
3080		    icsk->icsk_mtup.probe_size &&
3081		    tp->snd_una == tp->mtu_probe.probe_seq_start) {
3082			tcp_mtup_probe_failed(sk);
3083			/* Restores the reduction we did in tcp_mtup_probe() */
3084			tp->snd_cwnd++;
3085			tcp_simple_retransmit(sk);
3086			return;
3087		}
3088
3089		/* Otherwise enter Recovery state */
3090
3091		if (tcp_is_reno(tp))
3092			mib_idx = LINUX_MIB_TCPRENORECOVERY;
3093		else
3094			mib_idx = LINUX_MIB_TCPSACKRECOVERY;
3095
3096		NET_INC_STATS_BH(sock_net(sk), mib_idx);
3097
3098		tp->high_seq = tp->snd_nxt;
3099		tp->prior_ssthresh = 0;
3100		tp->undo_marker = tp->snd_una;
3101		tp->undo_retrans = tp->retrans_out;
3102
3103		if (icsk->icsk_ca_state < TCP_CA_CWR) {
3104			if (!(flag & FLAG_ECE))
3105				tp->prior_ssthresh = tcp_current_ssthresh(sk);
3106			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
3107			TCP_ECN_queue_cwr(tp);
3108		}
3109
3110		tp->bytes_acked = 0;
3111		tp->snd_cwnd_cnt = 0;
3112		tcp_set_ca_state(sk, TCP_CA_Recovery);
3113		fast_rexmit = 1;
3114	}
3115
3116	if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk)))
3117		tcp_update_scoreboard(sk, fast_rexmit);
3118	tcp_cwnd_down(sk, flag);
3119	tcp_xmit_retransmit_queue(sk);
3120}
3121
3122void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt)
3123{
3124	tcp_rtt_estimator(sk, seq_rtt);
3125	tcp_set_rto(sk);
3126	inet_csk(sk)->icsk_backoff = 0;
3127}
3128EXPORT_SYMBOL(tcp_valid_rtt_meas);
3129
3130/* Read draft-ietf-tcplw-high-performance before mucking
3131 * with this code. (Supersedes RFC1323)
3132 */
3133static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
3134{
3135	/* RTTM Rule: A TSecr value received in a segment is used to
3136	 * update the averaged RTT measurement only if the segment
3137	 * acknowledges some new data, i.e., only if it advances the
3138	 * left edge of the send window.
3139	 *
3140	 * See draft-ietf-tcplw-high-performance-00, section 3.3.
3141	 * 1998/04/10 Andrey V. Savochkin <saw@msu.ru>
3142	 *
3143	 * Changed: reset backoff as soon as we see the first valid sample.
3144	 * If we do not, we get strongly overestimated rto. With timestamps
3145	 * samples are accepted even from very old segments: f.e., when rtt=1
3146	 * increases to 8, we retransmit 5 times and after 8 seconds delayed
3147	 * answer arrives rto becomes 120 seconds! If at least one of segments
3148	 * in window is lost... Voila.	 			--ANK (010210)
3149	 */
3150	struct tcp_sock *tp = tcp_sk(sk);
3151
3152	tcp_valid_rtt_meas(sk, tcp_time_stamp - tp->rx_opt.rcv_tsecr);
3153}
3154
3155static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
3156{
3157	/* We don't have a timestamp. Can only use
3158	 * packets that are not retransmitted to determine
3159	 * rtt estimates. Also, we must not reset the
3160	 * backoff for rto until we get a non-retransmitted
3161	 * packet. This allows us to deal with a situation
3162	 * where the network delay has increased suddenly.
3163	 * I.e. Karn's algorithm. (SIGCOMM '87, p5.)
3164	 */
3165
3166	if (flag & FLAG_RETRANS_DATA_ACKED)
3167		return;
3168
3169	tcp_valid_rtt_meas(sk, seq_rtt);
3170}
3171
3172static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
3173				      const s32 seq_rtt)
3174{
3175	const struct tcp_sock *tp = tcp_sk(sk);
3176	/* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */
3177	if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
3178		tcp_ack_saw_tstamp(sk, flag);
3179	else if (seq_rtt >= 0)
3180		tcp_ack_no_tstamp(sk, seq_rtt, flag);
3181}
3182
3183static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
3184{
3185	const struct inet_connection_sock *icsk = inet_csk(sk);
3186	icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight);
3187	tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
3188}
3189
3190/* Restart timer after forward progress on connection.
3191 * RFC2988 recommends to restart timer to now+rto.
3192 */
3193static void tcp_rearm_rto(struct sock *sk)
3194{
3195	struct tcp_sock *tp = tcp_sk(sk);
3196
3197	if (!tp->packets_out) {
3198		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
3199	} else {
3200		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
3201					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
3202	}
3203}
3204
3205/* If we get here, the whole TSO packet has not been acked. */
3206static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
3207{
3208	struct tcp_sock *tp = tcp_sk(sk);
3209	u32 packets_acked;
3210
3211	BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una));
3212
3213	packets_acked = tcp_skb_pcount(skb);
3214	if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
3215		return 0;
3216	packets_acked -= tcp_skb_pcount(skb);
3217
3218	if (packets_acked) {
3219		BUG_ON(tcp_skb_pcount(skb) == 0);
3220		BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq));
3221	}
3222
3223	return packets_acked;
3224}
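/* For illustration (hypothetical values, not from the original source): if a
 * 4-segment TSO skb covers seq 1000..5000 (mss 1000) and snd_una advances to
 * 3000, tcp_trim_head() drops the two acked segments from the front, so
 * packets_acked = 4 - 2 = 2 is returned while the rest of the skb stays on
 * the retransmission queue.
 */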
3225
3226/* Remove acknowledged frames from the retransmission queue. If our packet
3227 * is before the ack sequence we can discard it as it's confirmed to have
3228 * arrived at the other end.
3229 */
3230static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3231			       u32 prior_snd_una)
3232{
3233	struct tcp_sock *tp = tcp_sk(sk);
3234	const struct inet_connection_sock *icsk = inet_csk(sk);
3235	struct sk_buff *skb;
3236	u32 now = tcp_time_stamp;
3237	int fully_acked = 1;
3238	int flag = 0;
3239	u32 pkts_acked = 0;
3240	u32 reord = tp->packets_out;
3241	u32 prior_sacked = tp->sacked_out;
3242	s32 seq_rtt = -1;
3243	s32 ca_seq_rtt = -1;
3244	ktime_t last_ackt = net_invalid_timestamp();
3245
3246	while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
 
 
3247		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
3248		u32 acked_pcount;
3249		u8 sacked = scb->sacked;
 
3250
3251		/* Determine how many packets and what bytes were acked, TSO or otherwise */
3252		if (after(scb->end_seq, tp->snd_una)) {
3253			if (tcp_skb_pcount(skb) == 1 ||
3254			    !after(tp->snd_una, scb->seq))
3255				break;
3256
3257			acked_pcount = tcp_tso_acked(sk, skb);
3258			if (!acked_pcount)
3259				break;
3260
3261			fully_acked = 0;
3262		} else {
3263			acked_pcount = tcp_skb_pcount(skb);
3264		}
3265
3266		if (sacked & TCPCB_RETRANS) {
3267			if (sacked & TCPCB_SACKED_RETRANS)
3268				tp->retrans_out -= acked_pcount;
3269			flag |= FLAG_RETRANS_DATA_ACKED;
3270			ca_seq_rtt = -1;
3271			seq_rtt = -1;
3272			if ((flag & FLAG_DATA_ACKED) || (acked_pcount > 1))
3273				flag |= FLAG_NONHEAD_RETRANS_ACKED;
3274		} else {
3275			ca_seq_rtt = now - scb->when;
3276			last_ackt = skb->tstamp;
3277			if (seq_rtt < 0) {
3278				seq_rtt = ca_seq_rtt;
3279			}
3280			if (!(sacked & TCPCB_SACKED_ACKED))
3281				reord = min(pkts_acked, reord);
3282		}
3283
3284		if (sacked & TCPCB_SACKED_ACKED)
3285			tp->sacked_out -= acked_pcount;
3286		if (sacked & TCPCB_LOST)
3287			tp->lost_out -= acked_pcount;
3288
3289		tp->packets_out -= acked_pcount;
3290		pkts_acked += acked_pcount;
 
3291
3292		/* Initial outgoing SYN's get put onto the write_queue
3293		 * just like anything else we transmit.  It is not
3294		 * true data, and if we misinform our callers that
3295		 * this ACK acks real data, we will erroneously exit
3296		 * connection startup slow start one packet too
3297		 * quickly.  This is severely frowned upon behavior.
3298		 */
3299		if (!(scb->flags & TCPHDR_SYN)) {
3300			flag |= FLAG_DATA_ACKED;
3301		} else {
3302			flag |= FLAG_SYN_ACKED;
3303			tp->retrans_stamp = 0;
3304		}
3305
3306		if (!fully_acked)
3307			break;
3308
3309		tcp_unlink_write_queue(skb, sk);
3310		sk_wmem_free_skb(sk, skb);
3311		tp->scoreboard_skb_hint = NULL;
3312		if (skb == tp->retransmit_skb_hint)
3313			tp->retransmit_skb_hint = NULL;
3314		if (skb == tp->lost_skb_hint)
3315			tp->lost_skb_hint = NULL;
 
 
3316	}
3317
3318	if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una)))
3319		tp->snd_up = tp->snd_una;
3320
3321	if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
3322		flag |= FLAG_SACK_RENEGING;
3323
3324	if (flag & FLAG_ACKED) {
3325		const struct tcp_congestion_ops *ca_ops
3326			= inet_csk(sk)->icsk_ca_ops;
3327
3328		if (unlikely(icsk->icsk_mtup.probe_size &&
3329			     !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
3330			tcp_mtup_probe_success(sk);
3331		}
3332
3333		tcp_ack_update_rtt(sk, flag, seq_rtt);
3334		tcp_rearm_rto(sk);
3335
3336		if (tcp_is_reno(tp)) {
3337			tcp_remove_reno_sacks(sk, pkts_acked);
3338		} else {
3339			int delta;
3340
3341			/* Non-retransmitted hole got filled? That's reordering */
3342			if (reord < prior_fackets)
3343				tcp_update_reordering(sk, tp->fackets_out - reord, 0);
3344
3345			delta = tcp_is_fack(tp) ? pkts_acked :
3346						  prior_sacked - tp->sacked_out;
3347			tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
3348		}
3349
3350		tp->fackets_out -= min(pkts_acked, tp->fackets_out);
3351
3352		if (ca_ops->pkts_acked) {
3353			s32 rtt_us = -1;
3354
3355			/* Is the ACK triggering packet unambiguous? */
3356			if (!(flag & FLAG_RETRANS_DATA_ACKED)) {
3357				/* High resolution needed and available? */
3358				if (ca_ops->flags & TCP_CONG_RTT_STAMP &&
3359				    !ktime_equal(last_ackt,
3360						 net_invalid_timestamp()))
3361					rtt_us = ktime_us_delta(ktime_get_real(),
3362								last_ackt);
3363				else if (ca_seq_rtt >= 0)
3364					rtt_us = jiffies_to_usecs(ca_seq_rtt);
3365			}
3366
3367			ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
3368		}
3369	}
3370
3371#if FASTRETRANS_DEBUG > 0
3372	WARN_ON((int)tp->sacked_out < 0);
3373	WARN_ON((int)tp->lost_out < 0);
3374	WARN_ON((int)tp->retrans_out < 0);
3375	if (!tp->packets_out && tcp_is_sack(tp)) {
3376		icsk = inet_csk(sk);
3377		if (tp->lost_out) {
3378			printk(KERN_DEBUG "Leak l=%u %d\n",
3379			       tp->lost_out, icsk->icsk_ca_state);
3380			tp->lost_out = 0;
3381		}
3382		if (tp->sacked_out) {
3383			printk(KERN_DEBUG "Leak s=%u %d\n",
3384			       tp->sacked_out, icsk->icsk_ca_state);
3385			tp->sacked_out = 0;
3386		}
3387		if (tp->retrans_out) {
3388			printk(KERN_DEBUG "Leak r=%u %d\n",
3389			       tp->retrans_out, icsk->icsk_ca_state);
3390			tp->retrans_out = 0;
3391		}
3392	}
3393#endif
3394	return flag;
3395}
3396
3397static void tcp_ack_probe(struct sock *sk)
3398{
3399	const struct tcp_sock *tp = tcp_sk(sk);
3400	struct inet_connection_sock *icsk = inet_csk(sk);
 
 
3401
3402	/* Was a usable window opened? */
3403
3404	if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tcp_wnd_end(tp))) {
 
3405		icsk->icsk_backoff = 0;
 
3406		inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
3407		/* Socket must be woken up by a subsequent tcp_data_snd_check().
3408		 * This function is not for general use!
3409		 */
3410	} else {
3411		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
3412					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
3413					  TCP_RTO_MAX);
 
3414	}
3415}
3416
3417static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag)
3418{
3419	return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
3420		inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
3421}
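/* Note (rough summary): an ACK is "dubious" when it looks like a
 * possible duplicate (it carries no data, does not update the window
 * and acks nothing new), when it carries a congestion alert such as an
 * ECE echo or SACK/D-SACK information, or when the connection is not
 * in the Open congestion state.  Dubious ACKs are routed through
 * tcp_fastretrans_alert() in tcp_ack().
 */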
3422
3423static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 
3424{
3425	const struct tcp_sock *tp = tcp_sk(sk);
3426	return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
3427		!((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR));
3428}
3429
3430/* Check that window update is acceptable.
3431 * The function assumes that snd_una<=ack<=snd_next.
3432 */
3433static inline int tcp_may_update_window(const struct tcp_sock *tp,
3434					const u32 ack, const u32 ack_seq,
3435					const u32 nwin)
3436{
3437	return	after(ack, tp->snd_una) ||
3438		after(ack_seq, tp->snd_wl1) ||
3439		(ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd);
3440}
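/* Illustration (hypothetical numbers): with snd_una=1000, snd_wl1=900
 * and snd_wnd=10000, an ACK for 1500 is accepted by the first test
 * (it advances snd_una), a segment with ack=1000 but a newer sequence
 * number, say 950, is accepted by the second, and a segment with
 * ack=1000 and seq=900 is accepted only if it advertises a window
 * larger than 10000.
 */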
3441
3442/* Update our send window.
3443 *
3444 * The window update algorithm described in RFC793/RFC1122 (used in linux-2.2
3445 * and in FreeBSD; NetBSD's is even worse) is wrong.
3446 */
3447static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack,
3448				 u32 ack_seq)
3449{
3450	struct tcp_sock *tp = tcp_sk(sk);
3451	int flag = 0;
3452	u32 nwin = ntohs(tcp_hdr(skb)->window);
3453
3454	if (likely(!tcp_hdr(skb)->syn))
3455		nwin <<= tp->rx_opt.snd_wscale;
3456
3457	if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
3458		flag |= FLAG_WIN_UPDATE;
3459		tcp_update_wl(tp, ack_seq);
3460
3461		if (tp->snd_wnd != nwin) {
3462			tp->snd_wnd = nwin;
3463
3464			/* Note: this is the only place where the
3465			 * fast path is re-enabled for the sending side.
3466			 */
3467			tp->pred_flags = 0;
3468			tcp_fast_path_check(sk);
3469
3470			if (nwin > tp->max_window) {
3471				tp->max_window = nwin;
3472				tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie);
3473			}
3474		}
3475	}
3476
3477	tp->snd_una = ack;
3478
3479	return flag;
3480}
3481
3482/* A very conservative spurious RTO response algorithm: reduce cwnd and
3483 * continue in congestion avoidance.
3484 */
3485static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
3486{
3487	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
3488	tp->snd_cwnd_cnt = 0;
3489	tp->bytes_acked = 0;
3490	TCP_ECN_queue_cwr(tp);
3491	tcp_moderate_cwnd(tp);
3492}
3493
3494/* A conservative spurious RTO response algorithm: reduce cwnd using
3495 * rate halving and continue in congestion avoidance.
3496 */
3497static void tcp_ratehalving_spur_to_response(struct sock *sk)
3498{
3499	tcp_enter_cwr(sk, 0);
3500}
3501
3502static void tcp_undo_spur_to_response(struct sock *sk, int flag)
3503{
3504	if (flag & FLAG_ECE)
3505		tcp_ratehalving_spur_to_response(sk);
3506	else
3507		tcp_undo_cwr(sk, true);
3508}
3509
3510/* F-RTO spurious RTO detection algorithm (RFC4138)
3511 *
3512 * F-RTO is in effect during the two new ACKs following an RTO (well, almost; see inline
3513 * comments). State (ACK number) is kept in frto_counter. When ACK advances
3514 * window (but not to or beyond highest sequence sent before RTO):
3515 *   On First ACK,  send two new segments out.
3516 *   On Second ACK, RTO was likely spurious. Do spurious response (response
3517 *                  algorithm is not part of the F-RTO detection algorithm
3518 *                  given in RFC4138 but can be selected separately).
3519 * Otherwise (basically on duplicate ACK), RTO was (likely) caused by a loss
3520 * and TCP falls back to conventional RTO recovery. F-RTO allows overriding
3521 * of Nagle; this is done using frto_counter states 2 and 3: when a new data
3522 * segment of any size is sent during F-RTO, state 2 is upgraded to 3.
3523 *
3524 * Rationale: if the RTO was spurious, new ACKs should arrive from the
3525 * original window even after we transmit two new data segments.
3526 *
3527 * SACK version:
3528 *   on first step, wait until first cumulative ACK arrives, then move to
3529 *   the second step. In second step, the next ACK decides.
3530 *
3531 * F-RTO is implemented (mainly) in four functions:
3532 *   - tcp_use_frto() is used to determine if TCP can use F-RTO
3533 *   - tcp_enter_frto() prepares TCP state on RTO if F-RTO is used, it is
3534 *     called when tcp_use_frto() showed green light
3535 *   - tcp_process_frto() handles incoming ACKs during F-RTO algorithm
3536 *   - tcp_enter_frto_loss() is called if there is not enough evidence
3537 *     to prove that the RTO is indeed spurious. It transfers the control
3538 *     from F-RTO to the conventional RTO recovery
3539 */
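/* Illustration (sketch of the timeline described above): an RTO fires
 * and the head segment is retransmitted.  When the first new ACK
 * advances the window, two previously unsent segments go out and
 * frto_counter moves from 1 to 2.  If the next ACK also advances the
 * window while acking only never-retransmitted data, the RTO is judged
 * spurious and one of the sysctl_tcp_frto_response handlers above is
 * invoked; otherwise tcp_enter_frto_loss() falls back to conventional
 * loss recovery.
 */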
3540static int tcp_process_frto(struct sock *sk, int flag)
3541{
3542	struct tcp_sock *tp = tcp_sk(sk);
3543
3544	tcp_verify_left_out(tp);
 
3545
3546	/* Duplicate the behavior from Loss state (fastretrans_alert) */
3547	if (flag & FLAG_DATA_ACKED)
3548		inet_csk(sk)->icsk_retransmits = 0;
3549
3550	if ((flag & FLAG_NONHEAD_RETRANS_ACKED) ||
3551	    ((tp->frto_counter >= 2) && (flag & FLAG_RETRANS_DATA_ACKED)))
3552		tp->undo_marker = 0;
3553
3554	if (!before(tp->snd_una, tp->frto_highmark)) {
3555		tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag);
3556		return 1;
3557	}
3558
3559	if (!tcp_is_sackfrto(tp)) {
3560		/* RFC4138 shortcoming in step 2; should also have case c):
3561		 * ACK isn't duplicate nor advances window, e.g., opposite dir
3562		 * data, winupdate
3563		 */
3564		if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP))
3565			return 1;
3566
3567		if (!(flag & FLAG_DATA_ACKED)) {
3568			tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3),
3569					    flag);
3570			return 1;
3571		}
3572	} else {
3573		if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
3574			/* Prevent sending of new data. */
3575			tp->snd_cwnd = min(tp->snd_cwnd,
3576					   tcp_packets_in_flight(tp));
3577			return 1;
3578		}
3579
3580		if ((tp->frto_counter >= 2) &&
3581		    (!(flag & FLAG_FORWARD_PROGRESS) ||
3582		     ((flag & FLAG_DATA_SACKED) &&
3583		      !(flag & FLAG_ONLY_ORIG_SACKED)))) {
3584			/* RFC4138 shortcoming (see comment above) */
3585			if (!(flag & FLAG_FORWARD_PROGRESS) &&
3586			    (flag & FLAG_NOT_DUP))
3587				return 1;
3588
3589			tcp_enter_frto_loss(sk, 3, flag);
3590			return 1;
3591		}
3592	}
 
 
3593
3594	if (tp->frto_counter == 1) {
3595		/* tcp_may_send_now needs to see updated state */
3596		tp->snd_cwnd = tcp_packets_in_flight(tp) + 2;
3597		tp->frto_counter = 2;
 
 
3598
3599		if (!tcp_may_send_now(sk))
3600			tcp_enter_frto_loss(sk, 2, flag);
 
 
3601
3602		return 1;
3603	} else {
3604		switch (sysctl_tcp_frto_response) {
3605		case 2:
3606			tcp_undo_spur_to_response(sk, flag);
3607			break;
3608		case 1:
3609			tcp_conservative_spur_to_response(tp);
3610			break;
3611		default:
3612			tcp_ratehalving_spur_to_response(sk);
3613			break;
3614		}
3615		tp->frto_counter = 0;
3616		tp->undo_marker = 0;
3617		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS);
3618	}
3619	return 0;
3620}
3621
3622/* This routine deals with incoming acks, but not outgoing ones. */
3623static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
3624{
3625	struct inet_connection_sock *icsk = inet_csk(sk);
3626	struct tcp_sock *tp = tcp_sk(sk);
 
 
3627	u32 prior_snd_una = tp->snd_una;
 
3628	u32 ack_seq = TCP_SKB_CB(skb)->seq;
3629	u32 ack = TCP_SKB_CB(skb)->ack_seq;
3630	u32 prior_in_flight;
3631	u32 prior_fackets;
3632	int prior_packets;
3633	int frto_cwnd = 0;
3634
3635	/* If the ack is older than previous acks
3636	 * then we can probably ignore it.
3637	 */
3638	if (before(ack, prior_snd_una))
3639		goto old_ack;
 
3640
3641	/* If the ack includes data we haven't sent yet, discard
3642	 * this segment (RFC793 Section 3.9).
3643	 */
3644	if (after(ack, tp->snd_nxt))
3645		goto invalid_ack;
3646
3647	if (after(ack, prior_snd_una))
3648		flag |= FLAG_SND_UNA_ADVANCED;
 
3649
3650	if (sysctl_tcp_abc) {
3651		if (icsk->icsk_ca_state < TCP_CA_CWR)
3652			tp->bytes_acked += ack - prior_snd_una;
3653		else if (icsk->icsk_ca_state == TCP_CA_Loss)
3654			/* we assume just one segment left network */
3655			tp->bytes_acked += min(ack - prior_snd_una,
3656					       tp->mss_cache);
3657	}
3658
3659	prior_fackets = tp->fackets_out;
3660	prior_in_flight = tcp_packets_in_flight(tp);
3661
3662	if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
3663		/* Window is constant, pure forward advance.
3664		 * No more checks are required.
3665		 * Note, we use the fact that SND.UNA>=SND.WL2.
3666		 */
3667		tcp_update_wl(tp, ack_seq);
3668		tp->snd_una = ack;
3669		flag |= FLAG_WIN_UPDATE;
3670
3671		tcp_ca_event(sk, CA_EVENT_FAST_ACK);
3672
3673		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS);
3674	} else {
 
 
3675		if (ack_seq != TCP_SKB_CB(skb)->end_seq)
3676			flag |= FLAG_DATA;
3677		else
3678			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS);
3679
3680		flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
3681
3682		if (TCP_SKB_CB(skb)->sacked)
3683			flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
 
3684
3685		if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
3686			flag |= FLAG_ECE;
 
 
3687
3688		tcp_ca_event(sk, CA_EVENT_SLOW_ACK);
3689	}
3690
3691	/* We passed data and got it acked, remove any soft error
3692	 * log. Something worked...
3693	 */
3694	sk->sk_err_soft = 0;
3695	icsk->icsk_probes_out = 0;
3696	tp->rcv_tstamp = tcp_time_stamp;
3697	prior_packets = tp->packets_out;
3698	if (!prior_packets)
3699		goto no_queue;
3700
3701	/* See if we can take anything off of the retransmit queue. */
3702	flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
3703
3704	if (tp->frto_counter)
3705		frto_cwnd = tcp_process_frto(sk, flag);
3706	/* Guarantee sacktag reordering detection against wrap-arounds */
3707	if (before(tp->frto_highmark, tp->snd_una))
3708		tp->frto_highmark = 0;
3709
3710	if (tcp_ack_is_dubious(sk, flag)) {
3711		/* Advance CWND, if state allows this. */
3712		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
3713		    tcp_may_raise_cwnd(sk, flag))
3714			tcp_cong_avoid(sk, ack, prior_in_flight);
3715		tcp_fastretrans_alert(sk, prior_packets - tp->packets_out,
3716				      flag);
3717	} else {
3718		if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
3719			tcp_cong_avoid(sk, ack, prior_in_flight);
3720	}
3721
3722	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
3723		dst_confirm(__sk_dst_get(sk));
3724
3725	return 1;
3726
3727no_queue:
3728	/* If this ack opens up a zero window, clear backoff.  It was
3729	 * being used to time the probes, and is probably far higher than
3730	 * it needs to be for normal retransmission.
3731	 */
3732	if (tcp_send_head(sk))
3733		tcp_ack_probe(sk);
3734	return 1;
3735
3736invalid_ack:
3737	SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
3738	return -1;
3739
3740old_ack:
3741	if (TCP_SKB_CB(skb)->sacked) {
3742		tcp_sacktag_write_queue(sk, skb, prior_snd_una);
3743		if (icsk->icsk_ca_state == TCP_CA_Open)
3744			tcp_try_keep_open(sk);
3745	}
3746
3747	SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
3748	return 0;
3749}
3750
3751/* Look for tcp options. Normally only called on SYN and SYNACK packets.
3752 * But, this can also be called on packets in the established flow when
3753 * the fast version below fails.
3754 */
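/* Illustration (typical byte values): options form a kind/length/value
 * sequence, e.g. on a SYN:
 *	02 04 05 b4		MSS = 1460
 *	01			NOP padding
 *	03 03 07		window scale = 7
 *	04 02			SACK permitted
 *	08 0a ...		timestamps (TSval, TSecr)
 * The loop below walks these bytes and stops on EOL (kind 0) or on a
 * malformed length.
 */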
3755void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
3756		       u8 **hvpp, int estab)
 
 
3757{
3758	unsigned char *ptr;
3759	struct tcphdr *th = tcp_hdr(skb);
3760	int length = (th->doff * 4) - sizeof(struct tcphdr);
3761
3762	ptr = (unsigned char *)(th + 1);
3763	opt_rx->saw_tstamp = 0;
 
3764
3765	while (length > 0) {
3766		int opcode = *ptr++;
3767		int opsize;
3768
3769		switch (opcode) {
3770		case TCPOPT_EOL:
3771			return;
3772		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
3773			length--;
3774			continue;
3775		default:
 
 
3776			opsize = *ptr++;
3777			if (opsize < 2) /* "silly options" */
3778				return;
3779			if (opsize > length)
3780				return;	/* don't parse partial options */
3781			switch (opcode) {
3782			case TCPOPT_MSS:
3783				if (opsize == TCPOLEN_MSS && th->syn && !estab) {
3784					u16 in_mss = get_unaligned_be16(ptr);
3785					if (in_mss) {
3786						if (opt_rx->user_mss &&
3787						    opt_rx->user_mss < in_mss)
3788							in_mss = opt_rx->user_mss;
3789						opt_rx->mss_clamp = in_mss;
3790					}
3791				}
3792				break;
3793			case TCPOPT_WINDOW:
3794				if (opsize == TCPOLEN_WINDOW && th->syn &&
3795				    !estab && sysctl_tcp_window_scaling) {
3796					__u8 snd_wscale = *(__u8 *)ptr;
3797					opt_rx->wscale_ok = 1;
3798					if (snd_wscale > 14) {
3799						if (net_ratelimit())
3800							printk(KERN_INFO "tcp_parse_options: Illegal window "
3801							       "scaling value %d >14 received.\n",
3802							       snd_wscale);
3803						snd_wscale = 14;
3804					}
3805					opt_rx->snd_wscale = snd_wscale;
3806				}
3807				break;
3808			case TCPOPT_TIMESTAMP:
3809				if ((opsize == TCPOLEN_TIMESTAMP) &&
3810				    ((estab && opt_rx->tstamp_ok) ||
3811				     (!estab && sysctl_tcp_timestamps))) {
3812					opt_rx->saw_tstamp = 1;
3813					opt_rx->rcv_tsval = get_unaligned_be32(ptr);
3814					opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
3815				}
3816				break;
3817			case TCPOPT_SACK_PERM:
3818				if (opsize == TCPOLEN_SACK_PERM && th->syn &&
3819				    !estab && sysctl_tcp_sack) {
3820					opt_rx->sack_ok = 1;
3821					tcp_sack_reset(opt_rx);
3822				}
3823				break;
3824
3825			case TCPOPT_SACK:
3826				if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
3827				   !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
3828				   opt_rx->sack_ok) {
3829					TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
3830				}
3831				break;
3832#ifdef CONFIG_TCP_MD5SIG
3833			case TCPOPT_MD5SIG:
3834				/*
3835				 * The MD5 Hash has already been
3836				 * checked (see tcp_v{4,6}_do_rcv()).
3837				 */
3838				break;
3839#endif
3840			case TCPOPT_COOKIE:
3841				/* This option is variable length.
 
 
3842				 */
3843				switch (opsize) {
3844				case TCPOLEN_COOKIE_BASE:
3845					/* not yet implemented */
3846					break;
3847				case TCPOLEN_COOKIE_PAIR:
3848					/* not yet implemented */
3849					break;
3850				case TCPOLEN_COOKIE_MIN+0:
3851				case TCPOLEN_COOKIE_MIN+2:
3852				case TCPOLEN_COOKIE_MIN+4:
3853				case TCPOLEN_COOKIE_MIN+6:
3854				case TCPOLEN_COOKIE_MAX:
3855					/* 16-bit multiple */
3856					opt_rx->cookie_plus = opsize;
3857					*hvpp = ptr;
3858					break;
3859				default:
3860					/* ignore option */
3861					break;
3862				}
3863				break;
3864			}
3865
3866			ptr += opsize-2;
3867			length -= opsize;
3868		}
3869	}
3870}
3871EXPORT_SYMBOL(tcp_parse_options);
3872
3873static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, struct tcphdr *th)
3874{
3875	__be32 *ptr = (__be32 *)(th + 1);
3876
3877	if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
3878			  | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
3879		tp->rx_opt.saw_tstamp = 1;
3880		++ptr;
3881		tp->rx_opt.rcv_tsval = ntohl(*ptr);
3882		++ptr;
3883		tp->rx_opt.rcv_tsecr = ntohl(*ptr);
3884		return 1;
3885	}
3886	return 0;
3887}
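/* Note: the single 32-bit compare above matches the option layout
 * recommended by RFC 1323 for established connections -- the bytes
 * 01 01 08 0a (NOP, NOP, TIMESTAMP, length 10) followed by the two
 * timestamp words -- which is also what our own output path emits.
 */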
3888
3889/* Fast parse options. This hopes to only see timestamps.
3890 * If it is wrong it falls back on tcp_parse_options().
3891 */
3892static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
3893				  struct tcp_sock *tp, u8 **hvpp)
 
3894{
3895	/* In the spirit of fast parsing, compare doff directly to constant
3896	 * values.  Because equality is used, short doff can be ignored here.
3897	 */
3898	if (th->doff == (sizeof(*th) / 4)) {
3899		tp->rx_opt.saw_tstamp = 0;
3900		return 0;
3901	} else if (tp->rx_opt.tstamp_ok &&
3902		   th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) {
3903		if (tcp_parse_aligned_timestamp(tp, th))
3904			return 1;
3905	}
3906	tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
3907	return 1;
3908}
3909
3910#ifdef CONFIG_TCP_MD5SIG
3911/*
3912 * Parse MD5 Signature option
3913 */
3914u8 *tcp_parse_md5sig_option(struct tcphdr *th)
 
3915{
3916	int length = (th->doff << 2) - sizeof (*th);
3917	u8 *ptr = (u8*)(th + 1);
 
3918
3919	/* If the TCP option is too short, we can short cut */
3920	if (length < TCPOLEN_MD5SIG)
3921		return NULL;
3922
3923	while (length > 0) {
3924		int opcode = *ptr++;
3925		int opsize;
3926
3927		switch(opcode) {
3928		case TCPOPT_EOL:
3929			return NULL;
3930		case TCPOPT_NOP:
3931			length--;
3932			continue;
3933		default:
3934			opsize = *ptr++;
3935			if (opsize < 2 || opsize > length)
3936				return NULL;
3937			if (opcode == TCPOPT_MD5SIG)
3938				return opsize == TCPOLEN_MD5SIG ? ptr : NULL;
3939		}
3940		ptr += opsize - 2;
3941		length -= opsize;
3942	}
3943	return NULL;
3944}
3945EXPORT_SYMBOL(tcp_parse_md5sig_option);
3946#endif
3947
3948static inline void tcp_store_ts_recent(struct tcp_sock *tp)
3949{
3950	tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
3951	tp->rx_opt.ts_recent_stamp = get_seconds();
3952}
3953
3954static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
3955{
3956	if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
3957		/* PAWS bug workaround wrt. ACK frames, the PAWS discard
3958		 * extra check below makes sure this can only happen
3959		 * for pure ACK frames.  -DaveM
3960		 *
3961		 * Not only that; it also occurs for expired timestamps.
3962		 */
3963
3964		if (tcp_paws_check(&tp->rx_opt, 0))
3965			tcp_store_ts_recent(tp);
3966	}
3967}
3968
3969/* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM
3970 *
3971 * It is not fatal. If this ACK does _not_ change critical state (seqs, window)
3972 * it can pass through stack. So, the following predicate verifies that
3973 * this segment is not used for anything but congestion avoidance or
3974 * fast retransmit. Moreover, we are even able to eliminate most such
3975 * second order effects, if we apply some small "replay" window (~RTO)
3976 * to timestamp space.
3977 *
3978 * All these measures still do not guarantee that we reject wrapped ACKs
3979 * on networks with high bandwidth, when sequence space is recycled quickly,
3980 * but it guarantees that such events will be very rare and do not affect
3981 * the connection seriously. This doesn't look nice, but alas, PAWS is a really
3982 * buggy extension.
3983 *
3984 * [ Later note. Even worse! It is buggy for segments _with_ data. RFC
3985 * states that events when retransmit arrives after original data are rare.
3986 * It is a blatant lie. VJ forgot about fast retransmit! 8)8) It is
3987 * the biggest problem on large power networks even with minor reordering.
3988 * OK, let's give it small replay window. If peer clock is even 1hz, it is safe
3989 * up to bandwidth of 18Gigabit/sec. 8) ]
3990 */
3991
3992static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
3993{
3994	struct tcp_sock *tp = tcp_sk(sk);
3995	struct tcphdr *th = tcp_hdr(skb);
3996	u32 seq = TCP_SKB_CB(skb)->seq;
3997	u32 ack = TCP_SKB_CB(skb)->ack_seq;
3998
3999	return (/* 1. Pure ACK with correct sequence number. */
4000		(th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) &&
4001
4002		/* 2. ... and duplicate ACK. */
4003		ack == tp->snd_una &&
4004
4005		/* 3. ... and does not update window. */
4006		!tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) &&
4007
4008		/* 4. ... and sits in replay window. */
4009		(s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
 
4010}
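/* Note (rough arithmetic): icsk_rto is in jiffies, so
 * (icsk_rto * 1024) / HZ above is approximately the RTO expressed in
 * milliseconds.  Assuming roughly millisecond timestamp ticks on the
 * peer, a duplicate ACK whose timestamp lags ts_recent by less than
 * about one RTO is still treated as harmless reordering.
 */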
4011
4012static inline int tcp_paws_discard(const struct sock *sk,
4013				   const struct sk_buff *skb)
4014{
4015	const struct tcp_sock *tp = tcp_sk(sk);
4016
4017	return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) &&
4018	       !tcp_disordered_ack(sk, skb);
4019}
4020
4021/* Check segment sequence number for validity.
4022 *
4023 * Segment controls are considered valid, if the segment
4024 * fits to the window after truncation to the window. Acceptability
4025 * of data (and SYN, FIN, of course) is checked separately.
4026 * See tcp_data_queue(), for example.
4027 *
4028 * Also, controls (RST is main one) are accepted using RCV.WUP instead
4029 * of RCV.NXT. Peer still did not advance his SND.UNA when we
4030 * delayed ACK, so that his SND.UNA <= our RCV.WUP.
4031 * (borrowed from freebsd)
4032 */
4033
4034static inline int tcp_sequence(struct tcp_sock *tp, u32 seq, u32 end_seq)
 
4035{
4036	return	!before(end_seq, tp->rcv_wup) &&
4037		!after(seq, tp->rcv_nxt + tcp_receive_window(tp));
4038}
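/* Illustration (hypothetical numbers): with rcv_wup=1000, rcv_nxt=1200
 * and a 5000-byte receive window, a segment ending at 950 is rejected
 * (it ends before rcv_wup), a segment starting at 6300 is rejected (it
 * begins past rcv_nxt + window = 6200), and anything overlapping that
 * range is considered acceptable here.
 */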
4039
4040/* When we get a reset we do this. */
4041static void tcp_reset(struct sock *sk)
4042{
4043	/* We want the right error as BSD sees it (and indeed as we do). */
4044	switch (sk->sk_state) {
4045	case TCP_SYN_SENT:
4046		sk->sk_err = ECONNREFUSED;
4047		break;
4048	case TCP_CLOSE_WAIT:
4049		sk->sk_err = EPIPE;
4050		break;
4051	case TCP_CLOSE:
4052		return;
4053	default:
4054		sk->sk_err = ECONNRESET;
4055	}
4056	/* This barrier is coupled with smp_rmb() in tcp_poll() */
4057	smp_wmb();
4058
4059	if (!sock_flag(sk, SOCK_DEAD))
4060		sk->sk_error_report(sk);
4061
4062	tcp_done(sk);
4063}
4064
4065/*
4066 * 	Process the FIN bit. This now behaves as it is supposed to work
4067 *	and the FIN takes effect when it is validly part of sequence
4068 *	space. Not before when we get holes.
4069 *
4070 *	If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT
4071 *	(and thence onto LAST-ACK and finally, CLOSE, we never enter
4072 *	TIME-WAIT)
4073 *
4074 *	If we are in FINWAIT-1, a received FIN indicates simultaneous
4075 *	close and we go into CLOSING (and later onto TIME-WAIT)
4076 *
4077 *	If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT.
4078 */
4079static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
4080{
4081	struct tcp_sock *tp = tcp_sk(sk);
4082
4083	inet_csk_schedule_ack(sk);
4084
4085	sk->sk_shutdown |= RCV_SHUTDOWN;
4086	sock_set_flag(sk, SOCK_DONE);
4087
4088	switch (sk->sk_state) {
4089	case TCP_SYN_RECV:
4090	case TCP_ESTABLISHED:
4091		/* Move to CLOSE_WAIT */
4092		tcp_set_state(sk, TCP_CLOSE_WAIT);
4093		inet_csk(sk)->icsk_ack.pingpong = 1;
4094		break;
4095
4096	case TCP_CLOSE_WAIT:
4097	case TCP_CLOSING:
4098		/* Received a retransmission of the FIN, do
4099		 * nothing.
4100		 */
4101		break;
4102	case TCP_LAST_ACK:
4103		/* RFC793: Remain in the LAST-ACK state. */
4104		break;
4105
4106	case TCP_FIN_WAIT1:
4107		/* This case occurs when a simultaneous close
4108		 * happens, we must ack the received FIN and
4109		 * enter the CLOSING state.
4110		 */
4111		tcp_send_ack(sk);
4112		tcp_set_state(sk, TCP_CLOSING);
4113		break;
4114	case TCP_FIN_WAIT2:
4115		/* Received a FIN -- send ACK and enter TIME_WAIT. */
4116		tcp_send_ack(sk);
4117		tcp_time_wait(sk, TCP_TIME_WAIT, 0);
4118		break;
4119	default:
4120		/* Only TCP_LISTEN and TCP_CLOSE are left, in these
4121		 * cases we should never reach this piece of code.
4122		 */
4123		printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n",
4124		       __func__, sk->sk_state);
4125		break;
4126	}
4127
4128	/* It _is_ possible, that we have something out-of-order _after_ FIN.
4129	 * Probably, we should reset in this case. For now drop them.
4130	 */
4131	__skb_queue_purge(&tp->out_of_order_queue);
4132	if (tcp_is_sack(tp))
4133		tcp_sack_reset(&tp->rx_opt);
4134	sk_mem_reclaim(sk);
4135
4136	if (!sock_flag(sk, SOCK_DEAD)) {
4137		sk->sk_state_change(sk);
4138
4139		/* Do not send POLL_HUP for half duplex close. */
4140		if (sk->sk_shutdown == SHUTDOWN_MASK ||
4141		    sk->sk_state == TCP_CLOSE)
4142			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
4143		else
4144			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
4145	}
4146}
4147
4148static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
4149				  u32 end_seq)
4150{
4151	if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
4152		if (before(seq, sp->start_seq))
4153			sp->start_seq = seq;
4154		if (after(end_seq, sp->end_seq))
4155			sp->end_seq = end_seq;
4156		return 1;
4157	}
4158	return 0;
4159}
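/* Illustration (hypothetical numbers): extending a SACK block
 * 2000..3000 with a segment 2800..3500 grows it to 2000..3500 and
 * returns 1; a disjoint segment 4000..4500 leaves the block untouched
 * and returns 0, so the caller starts a new block instead.
 */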
4160
4161static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
4162{
4163	struct tcp_sock *tp = tcp_sk(sk);
4164
4165	if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
4166		int mib_idx;
4167
4168		if (before(seq, tp->rcv_nxt))
4169			mib_idx = LINUX_MIB_TCPDSACKOLDSENT;
4170		else
4171			mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
4172
4173		NET_INC_STATS_BH(sock_net(sk), mib_idx);
4174
4175		tp->rx_opt.dsack = 1;
4176		tp->duplicate_sack[0].start_seq = seq;
4177		tp->duplicate_sack[0].end_seq = end_seq;
4178	}
4179}
4180
4181static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
4182{
4183	struct tcp_sock *tp = tcp_sk(sk);
4184
4185	if (!tp->rx_opt.dsack)
4186		tcp_dsack_set(sk, seq, end_seq);
4187	else
4188		tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
4189}
4190
4191static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
4192{
4193	struct tcp_sock *tp = tcp_sk(sk);
4194
4195	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
4196	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4197		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
4198		tcp_enter_quickack_mode(sk);
4199
4200		if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
4201			u32 end_seq = TCP_SKB_CB(skb)->end_seq;
4202
 
4203			if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
4204				end_seq = tp->rcv_nxt;
4205			tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq);
4206		}
4207	}
4208
4209	tcp_send_ack(sk);
4210}
4211
4212/* These routines update the SACK block as out-of-order packets arrive or
4213 * in-order packets close up the sequence space.
4214 */
4215static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
4216{
4217	int this_sack;
4218	struct tcp_sack_block *sp = &tp->selective_acks[0];
4219	struct tcp_sack_block *swalk = sp + 1;
4220
4221	/* See if the recent change to the first SACK eats into
4222	 * or hits the sequence space of other SACK blocks, if so coalesce.
4223	 */
4224	for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) {
4225		if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) {
4226			int i;
4227
4228			/* Zap SWALK, by moving every further SACK up by one slot.
4229			 * Decrease num_sacks.
4230			 */
4231			tp->rx_opt.num_sacks--;
4232			for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
4233				sp[i] = sp[i + 1];
4234			continue;
4235		}
4236		this_sack++, swalk++;
 
4237	}
4238}
4239
4240static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
4241{
4242	struct tcp_sock *tp = tcp_sk(sk);
4243	struct tcp_sack_block *sp = &tp->selective_acks[0];
4244	int cur_sacks = tp->rx_opt.num_sacks;
4245	int this_sack;
4246
4247	if (!cur_sacks)
4248		goto new_sack;
4249
4250	for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) {
4251		if (tcp_sack_extend(sp, seq, end_seq)) {
 
 
4252			/* Rotate this_sack to the first one. */
4253			for (; this_sack > 0; this_sack--, sp--)
4254				swap(*sp, *(sp - 1));
4255			if (cur_sacks > 1)
4256				tcp_sack_maybe_coalesce(tp);
4257			return;
4258		}
4259	}
4260
4261	/* Could not find an adjacent existing SACK, build a new one,
4262	 * put it at the front, and shift everyone else down.  We
4263	 * always know there is at least one SACK present already here.
4264	 *
4265	 * If the sack array is full, forget about the last one.
4266	 */
4267	if (this_sack >= TCP_NUM_SACKS) {
4268		this_sack--;
4269		tp->rx_opt.num_sacks--;
4270		sp--;
4271	}
4272	for (; this_sack > 0; this_sack--, sp--)
4273		*sp = *(sp - 1);
4274
4275new_sack:
4276	/* Build the new head SACK, and we're done. */
4277	sp->start_seq = seq;
4278	sp->end_seq = end_seq;
4279	tp->rx_opt.num_sacks++;
4280}
4281
4282/* RCV.NXT advances, some SACKs should be eaten. */
4283
4284static void tcp_sack_remove(struct tcp_sock *tp)
4285{
4286	struct tcp_sack_block *sp = &tp->selective_acks[0];
4287	int num_sacks = tp->rx_opt.num_sacks;
4288	int this_sack;
4289
4290	/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
4291	if (skb_queue_empty(&tp->out_of_order_queue)) {
4292		tp->rx_opt.num_sacks = 0;
4293		return;
4294	}
4295
4296	for (this_sack = 0; this_sack < num_sacks;) {
4297		/* Check if the start of the sack is covered by RCV.NXT. */
4298		if (!before(tp->rcv_nxt, sp->start_seq)) {
4299			int i;
4300
4301			/* RCV.NXT must cover all the block! */
4302			WARN_ON(before(tp->rcv_nxt, sp->end_seq));
4303
4304			/* Zap this SACK, by moving forward any other SACKS. */
4305			for (i=this_sack+1; i < num_sacks; i++)
4306				tp->selective_acks[i-1] = tp->selective_acks[i];
4307			num_sacks--;
4308			continue;
4309		}
4310		this_sack++;
4311		sp++;
4312	}
4313	tp->rx_opt.num_sacks = num_sacks;
4314}
4315
4316/* This one checks to see if we can put data from the
4317 * out_of_order queue into the receive_queue.
4318 */
4319static void tcp_ofo_queue(struct sock *sk)
4320{
4321	struct tcp_sock *tp = tcp_sk(sk);
4322	__u32 dsack_high = tp->rcv_nxt;
4323	struct sk_buff *skb;
4324
4325	while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) {
4326		if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
4327			break;
4328
4329		if (before(TCP_SKB_CB(skb)->seq, dsack_high)) {
4330			__u32 dsack = dsack_high;
4331			if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
4332				dsack_high = TCP_SKB_CB(skb)->end_seq;
4333			tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
4334		}
 
 
4335
4336		if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
4337			SOCK_DEBUG(sk, "ofo packet was already received\n");
4338			__skb_unlink(skb, &tp->out_of_order_queue);
4339			__kfree_skb(skb);
4340			continue;
4341		}
4342		SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n",
4343			   tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
4344			   TCP_SKB_CB(skb)->end_seq);
4345
4346		__skb_unlink(skb, &tp->out_of_order_queue);
4347		__skb_queue_tail(&sk->sk_receive_queue, skb);
4348		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
4349		if (tcp_hdr(skb)->fin)
4350			tcp_fin(skb, sk, tcp_hdr(skb));
4351	}
4352}
4353
4354static int tcp_prune_ofo_queue(struct sock *sk);
4355static int tcp_prune_queue(struct sock *sk);
4356
4357static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
 
4358{
4359	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
4360	    !sk_rmem_schedule(sk, size)) {
4361
4362		if (tcp_prune_queue(sk) < 0)
4363			return -1;
4364
4365		if (!sk_rmem_schedule(sk, size)) {
4366			if (!tcp_prune_ofo_queue(sk))
4367				return -1;
4368
4369			if (!sk_rmem_schedule(sk, size))
4370				return -1;
4371		}
4372	}
4373	return 0;
4374}
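/* Note (rough summary): receive-memory pressure is handled in stages
 * above -- tcp_prune_queue() first tries collapsing the queues, then
 * the out-of-order queue is purged outright, and only if the new skb
 * still cannot be charged does the caller end up dropping it.
 */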
4375
4376static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
4377{
4378	struct tcphdr *th = tcp_hdr(skb);
4379	struct tcp_sock *tp = tcp_sk(sk);
4380	int eaten = -1;
4381
4382	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
4383		goto drop;
4384
4385	skb_dst_drop(skb);
4386	__skb_pull(skb, th->doff * 4);
4387
4388	TCP_ECN_accept_cwr(tp, skb);
4389
 
4390	tp->rx_opt.dsack = 0;
4391
4392	/*  Queue data for delivery to the user.
4393	 *  Packets in sequence go to the receive queue.
4394	 *  Out of sequence packets to the out_of_order_queue.
4395	 */
4396	if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
4397		if (tcp_receive_window(tp) == 0)
4398			goto out_of_window;
 
4399
4400		/* Ok. In sequence. In window. */
4401		if (tp->ucopy.task == current &&
4402		    tp->copied_seq == tp->rcv_nxt && tp->ucopy.len &&
4403		    sock_owned_by_user(sk) && !tp->urg_data) {
4404			int chunk = min_t(unsigned int, skb->len,
4405					  tp->ucopy.len);
4406
4407			__set_current_state(TASK_RUNNING);
4408
4409			local_bh_enable();
4410			if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
4411				tp->ucopy.len -= chunk;
4412				tp->copied_seq += chunk;
4413				eaten = (chunk == skb->len);
4414				tcp_rcv_space_adjust(sk);
4415			}
4416			local_bh_disable();
4417		}
4418
4419		if (eaten <= 0) {
4420queue_and_out:
4421			if (eaten < 0 &&
4422			    tcp_try_rmem_schedule(sk, skb->truesize))
4423				goto drop;
4424
4425			skb_set_owner_r(skb, sk);
4426			__skb_queue_tail(&sk->sk_receive_queue, skb);
4427		}
4428		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
4429		if (skb->len)
4430			tcp_event_data_recv(sk, skb);
4431		if (th->fin)
4432			tcp_fin(skb, sk, th);
4433
4434		if (!skb_queue_empty(&tp->out_of_order_queue)) {
4435			tcp_ofo_queue(sk);
4436
4437			/* RFC2581. 4.2. SHOULD send immediate ACK, when
4438			 * gap in queue is filled.
4439			 */
4440			if (skb_queue_empty(&tp->out_of_order_queue))
4441				inet_csk(sk)->icsk_ack.pingpong = 0;
4442		}
4443
4444		if (tp->rx_opt.num_sacks)
4445			tcp_sack_remove(tp);
4446
4447		tcp_fast_path_check(sk);
4448
4449		if (eaten > 0)
4450			__kfree_skb(skb);
4451		else if (!sock_flag(sk, SOCK_DEAD))
4452			sk->sk_data_ready(sk, 0);
4453		return;
4454	}
4455
4456	if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
 
4457		/* A retransmit, 2nd most common case.  Force an immediate ack. */
4458		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 
4459		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
4460
4461out_of_window:
4462		tcp_enter_quickack_mode(sk);
4463		inet_csk_schedule_ack(sk);
4464drop:
4465		__kfree_skb(skb);
4466		return;
4467	}
4468
4469	/* Out of window. F.e. zero window probe. */
4470	if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
 
 
4471		goto out_of_window;
4472
4473	tcp_enter_quickack_mode(sk);
4474
4475	if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4476		/* Partial packet, seq < rcv_next < end_seq */
4477		SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n",
4478			   tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
4479			   TCP_SKB_CB(skb)->end_seq);
4480
4481		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
4482
4483		/* If window is closed, drop tail of packet. But only after
4484		 * remembering a D-SACK for its head, as done on the line above.
4485		 */
4486		if (!tcp_receive_window(tp))
 
 
4487			goto out_of_window;
 
4488		goto queue_and_out;
4489	}
4490
4491	TCP_ECN_check_ce(tp, skb);
4492
4493	if (tcp_try_rmem_schedule(sk, skb->truesize))
4494		goto drop;
4495
4496	/* Disable header prediction. */
4497	tp->pred_flags = 0;
4498	inet_csk_schedule_ack(sk);
4499
4500	SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
4501		   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
4502
4503	skb_set_owner_r(skb, sk);
4504
4505	if (!skb_peek(&tp->out_of_order_queue)) {
4506		/* Initial out of order segment, build 1 SACK. */
4507		if (tcp_is_sack(tp)) {
4508			tp->rx_opt.num_sacks = 1;
4509			tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
4510			tp->selective_acks[0].end_seq =
4511						TCP_SKB_CB(skb)->end_seq;
4512		}
4513		__skb_queue_head(&tp->out_of_order_queue, skb);
4514	} else {
4515		struct sk_buff *skb1 = skb_peek_tail(&tp->out_of_order_queue);
4516		u32 seq = TCP_SKB_CB(skb)->seq;
4517		u32 end_seq = TCP_SKB_CB(skb)->end_seq;
4518
4519		if (seq == TCP_SKB_CB(skb1)->end_seq) {
4520			__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
4521
4522			if (!tp->rx_opt.num_sacks ||
4523			    tp->selective_acks[0].end_seq != seq)
4524				goto add_sack;
4525
4526			/* Common case: data arrive in order after hole. */
4527			tp->selective_acks[0].end_seq = end_seq;
4528			return;
4529		}
4530
4531		/* Find place to insert this segment. */
4532		while (1) {
4533			if (!after(TCP_SKB_CB(skb1)->seq, seq))
4534				break;
4535			if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
4536				skb1 = NULL;
4537				break;
4538			}
4539			skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
4540		}
4541
4542		/* Does skb overlap the previous one? */
4543		if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
4544			if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4545				/* All the bits are present. Drop. */
4546				__kfree_skb(skb);
4547				tcp_dsack_set(sk, seq, end_seq);
4548				goto add_sack;
4549			}
4550			if (after(seq, TCP_SKB_CB(skb1)->seq)) {
4551				/* Partial overlap. */
4552				tcp_dsack_set(sk, seq,
4553					      TCP_SKB_CB(skb1)->end_seq);
4554			} else {
4555				if (skb_queue_is_first(&tp->out_of_order_queue,
4556						       skb1))
4557					skb1 = NULL;
4558				else
4559					skb1 = skb_queue_prev(
4560						&tp->out_of_order_queue,
4561						skb1);
4562			}
4563		}
4564		if (!skb1)
4565			__skb_queue_head(&tp->out_of_order_queue, skb);
4566		else
4567			__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
4568
4569		/* And clean segments covered by new one as whole. */
4570		while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) {
4571			skb1 = skb_queue_next(&tp->out_of_order_queue, skb);
4572
4573			if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
4574				break;
4575			if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
4576				tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4577						 end_seq);
4578				break;
4579			}
4580			__skb_unlink(skb1, &tp->out_of_order_queue);
4581			tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
4582					 TCP_SKB_CB(skb1)->end_seq);
4583			__kfree_skb(skb1);
4584		}
4585
4586add_sack:
4587		if (tcp_is_sack(tp))
4588			tcp_sack_new_ofo_skb(sk, seq, end_seq);
4589	}
4590}
4591
4592static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
4593					struct sk_buff_head *list)
 
4594{
4595	struct sk_buff *next = NULL;
4596
4597	if (!skb_queue_is_last(list, skb))
4598		next = skb_queue_next(list, skb);
 
 
4599
4600	__skb_unlink(skb, list);
4601	__kfree_skb(skb);
4602	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
4603
4604	return next;
4605}
4606
4607/* Collapse contiguous sequence of skbs head..tail with
4608 * sequence numbers start..end.
4609 *
4610 * If tail is NULL, this means until the end of the list.
4611 *
4612 * Segments with FIN/SYN are not collapsed (only because this
4613 * simplifies code)
4614 */
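/* Illustration (hypothetical numbers): three in-sequence 100-byte
 * segments that each occupy a 4 KB truesize skb can be copied below
 * into tightly sized replacement skbs and the bloated originals freed,
 * reclaiming most of the receive-buffer charge without changing the
 * byte stream seen by the application.
 */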
4615static void
4616tcp_collapse(struct sock *sk, struct sk_buff_head *list,
4617	     struct sk_buff *head, struct sk_buff *tail,
4618	     u32 start, u32 end)
4619{
4620	struct sk_buff *skb, *n;
 
4621	bool end_of_skbs;
4622
4623	/* First, check that queue is collapsible and find
4624	 * the point where collapsing can be useful. */
4625	skb = head;
4626restart:
4627	end_of_skbs = true;
4628	skb_queue_walk_from_safe(list, skb, n) {
4629		if (skb == tail)
4630			break;
 
 
4631		/* No new bits? It is possible on ofo queue. */
4632		if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
4633			skb = tcp_collapse_one(sk, skb, list);
4634			if (!skb)
4635				break;
4636			goto restart;
4637		}
4638
4639		/* The first skb to collapse is:
4640		 * - not SYN/FIN and
4641		 * - bloated or contains data before "start" or
4642		 *   overlaps to the next one.
4643		 */
4644		if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin &&
4645		    (tcp_win_from_space(skb->truesize) > skb->len ||
4646		     before(TCP_SKB_CB(skb)->seq, start))) {
4647			end_of_skbs = false;
4648			break;
4649		}
4650
4651		if (!skb_queue_is_last(list, skb)) {
4652			struct sk_buff *next = skb_queue_next(list, skb);
4653			if (next != tail &&
4654			    TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) {
4655				end_of_skbs = false;
4656				break;
4657			}
4658		}
4659
 
4660		/* Decided to skip this, advance start seq. */
4661		start = TCP_SKB_CB(skb)->end_seq;
4662	}
4663	if (end_of_skbs || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
 
 
4664		return;
4665
 
 
4666	while (before(start, end)) {
 
4667		struct sk_buff *nskb;
4668		unsigned int header = skb_headroom(skb);
4669		int copy = SKB_MAX_ORDER(header, 0);
4670
4671		/* Too big header? This can happen with IPv6. */
4672		if (copy < 0)
4673			return;
4674		if (end - start < copy)
4675			copy = end - start;
4676		nskb = alloc_skb(copy + header, GFP_ATOMIC);
4677		if (!nskb)
4678			return;
4679
4680		skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head);
4681		skb_set_network_header(nskb, (skb_network_header(skb) -
4682					      skb->head));
4683		skb_set_transport_header(nskb, (skb_transport_header(skb) -
4684						skb->head));
4685		skb_reserve(nskb, header);
4686		memcpy(nskb->head, skb->head, header);
4687		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
 
4688		TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
4689		__skb_queue_before(list, skb, nskb);
4690		skb_set_owner_r(nskb, sk);
 
4691
4692		/* Copy data, releasing collapsed skbs. */
4693		while (copy > 0) {
4694			int offset = start - TCP_SKB_CB(skb)->seq;
4695			int size = TCP_SKB_CB(skb)->end_seq - start;
4696
4697			BUG_ON(offset < 0);
4698			if (size > 0) {
4699				size = min(copy, size);
4700				if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
4701					BUG();
4702				TCP_SKB_CB(nskb)->end_seq += size;
4703				copy -= size;
4704				start += size;
4705			}
4706			if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
4707				skb = tcp_collapse_one(sk, skb, list);
4708				if (!skb ||
4709				    skb == tail ||
4710				    tcp_hdr(skb)->syn ||
4711				    tcp_hdr(skb)->fin)
4712					return;
 
4713			}
4714		}
4715	}
4716}
4717
4718/* Collapse ofo queue. Algorithm: select contiguous sequence of skbs
4719 * and tcp_collapse() them until all the queue is collapsed.
4720 */
4721static void tcp_collapse_ofo_queue(struct sock *sk)
4722{
4723	struct tcp_sock *tp = tcp_sk(sk);
4724	struct sk_buff *skb = skb_peek(&tp->out_of_order_queue);
4725	struct sk_buff *head;
4726	u32 start, end;
4727
4728	if (skb == NULL)
4729		return;
4730
4731	start = TCP_SKB_CB(skb)->seq;
4732	end = TCP_SKB_CB(skb)->end_seq;
4733	head = skb;
4734
4735	for (;;) {
4736		struct sk_buff *next = NULL;
4737
4738		if (!skb_queue_is_last(&tp->out_of_order_queue, skb))
4739			next = skb_queue_next(&tp->out_of_order_queue, skb);
4740		skb = next;
4741
4742		/* Segment is terminated when we see a gap or when
4743		 * we are at the end of the queue. */
 
4744		if (!skb ||
4745		    after(TCP_SKB_CB(skb)->seq, end) ||
4746		    before(TCP_SKB_CB(skb)->end_seq, start)) {
4747			tcp_collapse(sk, &tp->out_of_order_queue,
4748				     head, skb, start, end);
4749			head = skb;
4750			if (!skb)
4751				break;
4752			/* Start new segment */
4753			start = TCP_SKB_CB(skb)->seq;
 
4754			end = TCP_SKB_CB(skb)->end_seq;
4755		} else {
4756			if (before(TCP_SKB_CB(skb)->seq, start))
4757				start = TCP_SKB_CB(skb)->seq;
4758			if (after(TCP_SKB_CB(skb)->end_seq, end))
4759				end = TCP_SKB_CB(skb)->end_seq;
4760		}
4761	}
4762}
4763
4764/*
4765 * Purge the out-of-order queue.
4766 * Return true if queue was pruned.
4767 */
4768static int tcp_prune_ofo_queue(struct sock *sk)
4769{
4770	struct tcp_sock *tp = tcp_sk(sk);
4771	int res = 0;
4772
4773	if (!skb_queue_empty(&tp->out_of_order_queue)) {
4774		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
4775		__skb_queue_purge(&tp->out_of_order_queue);
4776
 
 
4777		/* Reset SACK state.  A conforming SACK implementation will
4778		 * do the same at a timeout based retransmit.  When a connection
4779		 * is in a sad state like this, we care only about integrity
4780		 * of the connection not performance.
4781		 */
4782		if (tp->rx_opt.sack_ok)
4783			tcp_sack_reset(&tp->rx_opt);
4784		sk_mem_reclaim(sk);
4785		res = 1;
4786	}
4787	return res;
4788}
4789
4790/* Reduce allocated memory if we can, trying to get
4791 * the socket within its memory limits again.
4792 *
4793 * Return less than zero if we should start dropping frames
4794 * until the socket owning process reads some of the data
4795 * to stabilize the situation.
4796 */
4797static int tcp_prune_queue(struct sock *sk)
4798{
4799	struct tcp_sock *tp = tcp_sk(sk);
4800
4801	SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
4802
4803	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED);
4804
4805	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
4806		tcp_clamp_window(sk);
4807	else if (tcp_memory_pressure)
4808		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
4809
4810	tcp_collapse_ofo_queue(sk);
4811	if (!skb_queue_empty(&sk->sk_receive_queue))
4812		tcp_collapse(sk, &sk->sk_receive_queue,
4813			     skb_peek(&sk->sk_receive_queue),
4814			     NULL,
4815			     tp->copied_seq, tp->rcv_nxt);
4816	sk_mem_reclaim(sk);
4817
4818	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
4819		return 0;
4820
4821	/* Collapsing did not help, destructive actions follow.
4822	 * This must not ever occur. */
4823
4824	tcp_prune_ofo_queue(sk);
4825
4826	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
4827		return 0;
4828
4829	/* If we are really being abused, tell the caller to silently
4830	 * drop receive data on the floor.  It will get retransmitted
4831	 * and hopefully then we'll have sufficient space.
4832	 */
4833	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED);
4834
4835	/* Massive buffer overcommit. */
4836	tp->pred_flags = 0;
4837	return -1;
4838}
4839
4840/* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
4841 * As additional protections, we do not touch cwnd in retransmission phases,
4842 * and if application hit its sndbuf limit recently.
4843 */
4844void tcp_cwnd_application_limited(struct sock *sk)
4845{
4846	struct tcp_sock *tp = tcp_sk(sk);
4847
4848	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
4849	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
4850		/* Limited by application or receiver window. */
4851		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
4852		u32 win_used = max(tp->snd_cwnd_used, init_win);
4853		if (win_used < tp->snd_cwnd) {
4854			tp->snd_ssthresh = tcp_current_ssthresh(sk);
4855			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
4856		}
4857		tp->snd_cwnd_used = 0;
4858	}
4859	tp->snd_cwnd_stamp = tcp_time_stamp;
4860}
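/* Illustration (hypothetical numbers): if snd_cwnd is 40 but the
 * application never kept more than 10 packets in flight, win_used is
 * 10 (assuming init_win <= 10) and cwnd is decayed to (40 + 10) / 2 =
 * 25 while the current ssthresh is remembered, along the lines of
 * RFC 2861.
 */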
4861
4862static int tcp_should_expand_sndbuf(struct sock *sk)
4863{
4864	struct tcp_sock *tp = tcp_sk(sk);
4865
4866	/* If the user specified a specific send buffer setting, do
4867	 * not modify it.
4868	 */
4869	if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
4870		return 0;
4871
4872	/* If we are under global TCP memory pressure, do not expand.  */
4873	if (tcp_memory_pressure)
4874		return 0;
4875
4876	/* If we are under soft global TCP memory pressure, do not expand.  */
4877	if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
4878		return 0;
4879
4880	/* If we filled the congestion window, do not expand.  */
4881	if (tp->packets_out >= tp->snd_cwnd)
4882		return 0;
4883
4884	return 1;
4885}
4886
4887/* When incoming ACK allowed to free some skb from write_queue,
4888 * we remember this event in flag SOCK_QUEUE_SHRUNK and wake up socket
4889 * on the exit from tcp input handler.
4890 *
4891 * PROBLEM: sndbuf expansion does not work well with largesend.
4892 */
4893static void tcp_new_space(struct sock *sk)
4894{
4895	struct tcp_sock *tp = tcp_sk(sk);
4896
4897	if (tcp_should_expand_sndbuf(sk)) {
4898		int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
4899			MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);
4900		int demanded = max_t(unsigned int, tp->snd_cwnd,
4901				     tp->reordering + 1);
4902		sndmem *= 2 * demanded;
4903		if (sndmem > sk->sk_sndbuf)
4904			sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
4905		tp->snd_cwnd_stamp = tcp_time_stamp;
4906	}
4907
4908	sk->sk_write_space(sk);
4909}
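/* Illustration (rough numbers): with an MSS near 1460, per-skb
 * overhead of a few hundred bytes, snd_cwnd = 20 and reordering = 3,
 * sndmem above works out to roughly 2 KB * 2 * 20 = 80 KB, and
 * sk_sndbuf is grown toward that figure, capped by sysctl_tcp_wmem[2].
 */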
4910
4911static void tcp_check_space(struct sock *sk)
4912{
4913	if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
4914		sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
4915		if (sk->sk_socket &&
4916		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
4917			tcp_new_space(sk);
4918	}
4919}
4920
4921static inline void tcp_data_snd_check(struct sock *sk)
4922{
4923	tcp_push_pending_frames(sk);
4924	tcp_check_space(sk);
4925}
4926
4927/*
4928 * Check if sending an ack is needed.
4929 */
4930static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
4931{
4932	struct tcp_sock *tp = tcp_sk(sk);
 
4933
4934	    /* More than one full frame received... */
4935	if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
4936	     /* ... and right edge of window advances far enough.
4937	      * (tcp_recvmsg() will send ACK otherwise). Or...
 
 
4938	      */
4939	     __tcp_select_window(sk) >= tp->rcv_wnd) ||
 
4940	    /* We ACK each frame or... */
4941	    tcp_in_quickack_mode(sk) ||
4942	    /* We have out of order data. */
4943	    (ofo_possible && skb_peek(&tp->out_of_order_queue))) {
4944		/* Then ack it now */
4945		tcp_send_ack(sk);
4946	} else {
4947		/* Else, send delayed ack. */
 
 
4948		tcp_send_delayed_ack(sk);
4949	}
4950}
4951
4952static inline void tcp_ack_snd_check(struct sock *sk)
4953{
4954	if (!inet_csk_ack_scheduled(sk)) {
4955		/* We sent a data segment already. */
4956		return;
4957	}
4958	__tcp_ack_snd_check(sk, 1);
4959}
4960
4961/*
4962 *	This routine is only called when we have urgent data
4963 *	signaled. It's the 'slow' part of tcp_urg. It could be
4964 *	moved inline now as tcp_urg is only called from one
4965 *	place. We handle URGent data wrong. We have to - as
4966 *	BSD still doesn't use the correction from RFC961.
4967 *	For 1003.1g we should support a new option TCP_STDURG to permit
4968 *	either form (or just set the sysctl tcp_stdurg).
4969 */
4970
4971static void tcp_check_urg(struct sock *sk, struct tcphdr *th)
4972{
4973	struct tcp_sock *tp = tcp_sk(sk);
4974	u32 ptr = ntohs(th->urg_ptr);
4975
4976	if (ptr && !sysctl_tcp_stdurg)
4977		ptr--;
4978	ptr += ntohl(th->seq);
4979
4980	/* Ignore urgent data that we've already seen and read. */
4981	if (after(tp->copied_seq, ptr))
4982		return;
4983
4984	/* Do not replay urg ptr.
4985	 *
4986	 * NOTE: interesting situation not covered by the specs.
4987	 * A misbehaving sender may send an urg ptr pointing into a segment
4988	 * which we already have in the ofo queue. We are not able to fetch
4989	 * such data and will stay in TCP_URG_NOTYET until it is eaten
4990	 * by recvmsg(). It seems we are not obliged to handle such wicked
4991	 * situations, but it is worth thinking about the possibility of a
4992	 * DoS using some hypothetical application-level deadlock.
4993	 */
4994	if (before(ptr, tp->rcv_nxt))
4995		return;
4996
4997	/* Do we already have a newer (or duplicate) urgent pointer? */
4998	if (tp->urg_data && !after(ptr, tp->urg_seq))
4999		return;
5000
5001	/* Tell the world about our new urgent pointer. */
5002	sk_send_sigurg(sk);
5003
5004	/* We may be adding urgent data when the last byte read was
5005	 * urgent. To do this requires some care. We cannot just ignore
5006	 * tp->copied_seq since we would read the last urgent byte again
5007	 * as data, nor can we alter copied_seq until this data arrives
5008	 * or we break the semantics of SIOCATMARK (and thus sockatmark())
5009	 *
5010	 * NOTE. The above is somewhat Double Dutch; rendered into plain English:
5011	 * the author of that comment did something like send("A", MSG_OOB);
5012	 * send("B", MSG_OOB); and expected both A and B to disappear from the
5013	 * stream. This is _wrong_. Though BSD behaves this way with high
5014	 * probability, it is not guaranteed, and any application relying on it
5015	 * is buggy. Note also that the "fix" works only in this artificial test;
5016	 * insert some normal data between A and B and we diverge from BSD
5017	 * again. Verdict: it is better to remove it, to trap buggy users.
5018	 */
5019	if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
5020	    !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) {
5021		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
5022		tp->copied_seq++;
5023		if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
5024			__skb_unlink(skb, &sk->sk_receive_queue);
5025			__kfree_skb(skb);
5026		}
5027	}
5028
5029	tp->urg_data = TCP_URG_NOTYET;
5030	tp->urg_seq = ptr;
5031
5032	/* Disable header prediction. */
5033	tp->pred_flags = 0;
5034}
5035
5036/* This is the 'fast' part of urgent handling. */
5037static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
5038{
5039	struct tcp_sock *tp = tcp_sk(sk);
5040
5041	/* Check if we get a new urgent pointer - normally not. */
5042	if (th->urg)
5043		tcp_check_urg(sk, th);
5044
5045	/* Do we wait for any urgent data? - normally not... */
5046	if (tp->urg_data == TCP_URG_NOTYET) {
5047		u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
5048			  th->syn;
5049
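		/* ptr is the offset of the urgent byte from the start of the
		 * TCP header in this skb: the sequence distance from the
		 * segment's first sequence number to urg_seq, plus the header
		 * length (doff * 4), minus one if a SYN occupies sequence space.
		 */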
5050		/* Is the urgent pointer pointing into this packet? */
5051		if (ptr < skb->len) {
5052			u8 tmp;
5053			if (skb_copy_bits(skb, ptr, &tmp, 1))
5054				BUG();
5055			tp->urg_data = TCP_URG_VALID | tmp;
5056			if (!sock_flag(sk, SOCK_DEAD))
5057				sk->sk_data_ready(sk, 0);
5058		}
5059	}
5060}
5061
5062static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
5063{
5064	struct tcp_sock *tp = tcp_sk(sk);
5065	int chunk = skb->len - hlen;
5066	int err;
5067
5068	local_bh_enable();
5069	if (skb_csum_unnecessary(skb))
5070		err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk);
5071	else
5072		err = skb_copy_and_csum_datagram_iovec(skb, hlen,
5073						       tp->ucopy.iov);
5074
5075	if (!err) {
5076		tp->ucopy.len -= chunk;
5077		tp->copied_seq += chunk;
5078		tcp_rcv_space_adjust(sk);
5079	}
5080
5081	local_bh_disable();
5082	return err;
5083}
5084
5085static __sum16 __tcp_checksum_complete_user(struct sock *sk,
5086					    struct sk_buff *skb)
5087{
5088	__sum16 result;
5089
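	/* Completing the checksum over a large segment can take a while; when
	 * the socket is owned by the user we are in process context, so
	 * re-enable bottom halves around it rather than keeping softirqs
	 * blocked for the whole computation.
	 */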
5090	if (sock_owned_by_user(sk)) {
5091		local_bh_enable();
5092		result = __tcp_checksum_complete(skb);
5093		local_bh_disable();
5094	} else {
5095		result = __tcp_checksum_complete(skb);
5096	}
5097	return result;
5098}
5099
5100static inline int tcp_checksum_complete_user(struct sock *sk,
5101					     struct sk_buff *skb)
5102{
5103	return !skb_csum_unnecessary(skb) &&
5104	       __tcp_checksum_complete_user(sk, skb);
5105}
5106
5107#ifdef CONFIG_NET_DMA
5108static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
5109				  int hlen)
5110{
5111	struct tcp_sock *tp = tcp_sk(sk);
5112	int chunk = skb->len - hlen;
5113	int dma_cookie;
5114	int copied_early = 0;
5115
5116	if (tp->ucopy.wakeup)
5117		return 0;
5118
5119	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
5120		tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
5121
5122	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
5123
5124		dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan,
5125							 skb, hlen,
5126							 tp->ucopy.iov, chunk,
5127							 tp->ucopy.pinned_list);
5128
5129		if (dma_cookie < 0)
5130			goto out;
5131
5132		tp->ucopy.dma_cookie = dma_cookie;
5133		copied_early = 1;
5134
5135		tp->ucopy.len -= chunk;
5136		tp->copied_seq += chunk;
5137		tcp_rcv_space_adjust(sk);
5138
5139		if ((tp->ucopy.len == 0) ||
5140		    (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) ||
5141		    (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
5142			tp->ucopy.wakeup = 1;
5143			sk->sk_data_ready(sk, 0);
5144		}
5145	} else if (chunk > 0) {
5146		tp->ucopy.wakeup = 1;
5147		sk->sk_data_ready(sk, 0);
5148	}
5149out:
5150	return copied_early;
5151}
5152#endif /* CONFIG_NET_DMA */
5153
5154/* Does PAWS and seqno-based validation of an incoming segment; flags play
5155 * a significant role here.
5156 */
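/* Return convention (as used by the callers below): a positive value means
 * keep processing the segment, 0 means the skb was consumed and freed here,
 * and -1 means the connection was reset because of an in-window SYN.
 */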
5157static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
5158			      struct tcphdr *th, int syn_inerr)
5159{
5160	u8 *hash_location;
5161	struct tcp_sock *tp = tcp_sk(sk);
 
5162
5163	/* RFC1323: H1. Apply PAWS check first. */
5164	if (tcp_fast_parse_options(skb, th, tp, &hash_location) &&
5165	    tp->rx_opt.saw_tstamp &&
5166	    tcp_paws_discard(sk, skb)) {
5167		if (!th->rst) {
5168			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
5169			tcp_send_dupack(sk, skb);
5170			goto discard;
5171		}
5172		/* Reset is accepted even if it did not pass PAWS. */
5173	}
5174
5175	/* Step 1: check sequence number */
5176	if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
 
5177		/* RFC793, page 37: "In all states except SYN-SENT, all reset
5178		 * (RST) segments are validated by checking their SEQ-fields."
5179		 * And page 69: "If an incoming segment is not acceptable,
5180		 * an acknowledgment should be sent in reply (unless the RST
5181		 * bit is set, if so drop the segment and return)".
5182		 */
5183		if (!th->rst)
5184			tcp_send_dupack(sk, skb);
5185		goto discard;
5186	}
5187
5188	/* Step 2: check RST bit */
5189	if (th->rst) {
5190		tcp_reset(sk);
5191		goto discard;
5192	}
5193
5194	/* ts_recent update must be made after we are sure that the packet
5195	 * is in window.
5196	 */
5197	tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
5198
5199	/* step 3: check security and precedence [ignored] */
5200
5201	/* step 4: Check for a SYN in window. */
5202	if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
5203		if (syn_inerr)
5204			TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
5205		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
5206		tcp_reset(sk);
5207		return -1;
 
5208	}
5209
5210	return 1;
 
 
 
5211
5212discard:
5213	__kfree_skb(skb);
5214	return 0;
5215}
5216
5217/*
5218 *	TCP receive function for the ESTABLISHED state.
5219 *
5220 *	It is split into a fast path and a slow path. The fast path is
5221 * 	disabled when:
5222 *	- A zero window was announced from us - zero window probing
5223 *        is only handled properly in the slow path.
5224 *	- Out of order segments arrived.
5225 *	- Urgent data is expected.
5226 *	- There is no buffer space left
5227 *	- Unexpected TCP flags/window values/header lengths are received
5228 *	  (detected by checking the TCP header against pred_flags)
5229 *	- Data is sent in both directions. Fast path only supports pure senders
5230 *	  or pure receivers (this means either the sequence number or the ack
5231 *	  value must stay constant)
5232 *	- Unexpected TCP option.
5233 *
5234 *	When these conditions are not satisfied it drops into a standard
5235 *	receive procedure patterned after RFC793 to handle all cases.
5236 *	The first three cases are guaranteed by proper pred_flags setting,
5237 *	the rest is checked inline. Fast processing is turned on in
5238 *	tcp_data_queue when everything is OK.
5239 */
5240int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
5241			struct tcphdr *th, unsigned len)
5242{
 
 
5243	struct tcp_sock *tp = tcp_sk(sk);
5244	int res;
 
 
 
5245
 
 
 
5246	/*
5247	 *	Header prediction.
5248	 *	The code loosely follows the one in the famous
5249	 *	"30 instruction TCP receive" Van Jacobson mail.
5250	 *
5251	 *	Van's trick is to deposit buffers into socket queue
5252	 *	on a device interrupt, to call tcp_recv function
5253	 *	on the receive process context and checksum and copy
5254	 *	the buffer to user space. smart...
5255	 *
5256	 *	Our current scheme is not silly either but we take the
5257	 *	extra cost of the net_bh soft interrupt processing...
5258	 *	We do checksum and copy also but from device to kernel.
5259	 */
5260
5261	tp->rx_opt.saw_tstamp = 0;
5262
5263	/*	pred_flags is 0xS?10 << 16 + snd_wnd
5264	 *	if header_prediction is to be made
5265	 *	'S' will always be tp->tcp_header_len >> 2
5266	 *	'?' will be 0 for the fast path, otherwise pred_flags is 0 to
5267	 *	turn it off (when there are holes in the receive
5268	 *	space, for instance)
5269	 *	PSH flag is ignored.
5270	 */
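	/* For example: with timestamps in use, tcp_header_len is 20 + 12 = 32
	 * bytes, so 'S' is 32 >> 2 = 8 and the expected flag word is
	 * 0x8010 << 16 (data offset 8, only ACK set) plus the current snd_wnd
	 * in the low 16 bits.
	 */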
5271
5272	if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
5273	    TCP_SKB_CB(skb)->seq == tp->rcv_nxt &&
5274	    !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
5275		int tcp_header_len = tp->tcp_header_len;
5276
5277		/* Timestamp header prediction: tcp_header_len
5278		 * is automatically equal to th->doff*4 due to pred_flags
5279		 * match.
5280		 */
5281
5282		/* Check timestamp */
5283		if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
5284			/* No? Slow path! */
5285			if (!tcp_parse_aligned_timestamp(tp, th))
5286				goto slow_path;
5287
5288			/* If PAWS failed, check it more carefully in slow path */
5289			if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0)
5290				goto slow_path;
5291
5292			/* DO NOT update ts_recent here: if the checksum fails
5293			 * and the timestamp was the corrupted part, doing so
5294			 * would result in a hung connection, since we would drop
5295			 * all future packets due to the PAWS test.
5296			 */
5297		}
5298
5299		if (len <= tcp_header_len) {
5300			/* Bulk data transfer: sender */
5301			if (len == tcp_header_len) {
5302				/* Predicted packet is in window by definition.
5303				 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
5304				 * Hence, check seq<=rcv_wup reduces to:
5305				 */
5306				if (tcp_header_len ==
5307				    (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
5308				    tp->rcv_nxt == tp->rcv_wup)
5309					tcp_store_ts_recent(tp);
5310
5311				/* We know that such packets are checksummed
5312				 * on entry.
5313				 */
5314				tcp_ack(sk, skb, 0);
5315				__kfree_skb(skb);
5316				tcp_data_snd_check(sk);
5317				return 0;
5318			} else { /* Header too small */
5319				TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
 
5320				goto discard;
5321			}
5322		} else {
5323			int eaten = 0;
5324			int copied_early = 0;
5325
5326			if (tp->copied_seq == tp->rcv_nxt &&
5327			    len - tcp_header_len <= tp->ucopy.len) {
5328#ifdef CONFIG_NET_DMA
5329				if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
5330					copied_early = 1;
5331					eaten = 1;
5332				}
5333#endif
5334				if (tp->ucopy.task == current &&
5335				    sock_owned_by_user(sk) && !copied_early) {
5336					__set_current_state(TASK_RUNNING);
5337
5338					if (!tcp_copy_to_iovec(sk, skb, tcp_header_len))
5339						eaten = 1;
5340				}
5341				if (eaten) {
5342					/* Predicted packet is in window by definition.
5343					 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
5344					 * Hence, check seq<=rcv_wup reduces to:
5345					 */
5346					if (tcp_header_len ==
5347					    (sizeof(struct tcphdr) +
5348					     TCPOLEN_TSTAMP_ALIGNED) &&
5349					    tp->rcv_nxt == tp->rcv_wup)
5350						tcp_store_ts_recent(tp);
5351
5352					tcp_rcv_rtt_measure_ts(sk, skb);
5353
5354					__skb_pull(skb, tcp_header_len);
5355					tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
5356					NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
5357				}
5358				if (copied_early)
5359					tcp_cleanup_rbuf(sk, skb->len);
5360			}
5361			if (!eaten) {
5362				if (tcp_checksum_complete_user(sk, skb))
5363					goto csum_error;
5364
5365				/* Predicted packet is in window by definition.
5366				 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
5367				 * Hence, check seq<=rcv_wup reduces to:
5368				 */
5369				if (tcp_header_len ==
5370				    (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
5371				    tp->rcv_nxt == tp->rcv_wup)
5372					tcp_store_ts_recent(tp);
5373
5374				tcp_rcv_rtt_measure_ts(sk, skb);
5375
5376				if ((int)skb->truesize > sk->sk_forward_alloc)
5377					goto step5;
5378
5379				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
 
5380
5381				/* Bulk data transfer: receiver */
5382				__skb_pull(skb, tcp_header_len);
5383				__skb_queue_tail(&sk->sk_receive_queue, skb);
5384				skb_set_owner_r(skb, sk);
5385				tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
5386			}
5387
5388			tcp_event_data_recv(sk, skb);
5389
5390			if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
5391				/* Well, only one small jumplet in fast path... */
5392				tcp_ack(sk, skb, FLAG_DATA);
5393				tcp_data_snd_check(sk);
5394				if (!inet_csk_ack_scheduled(sk))
5395					goto no_ack;
 
 
5396			}
5397
5398			if (!copied_early || tp->rcv_nxt != tp->rcv_wup)
5399				__tcp_ack_snd_check(sk, 0);
5400no_ack:
5401#ifdef CONFIG_NET_DMA
5402			if (copied_early)
5403				__skb_queue_tail(&sk->sk_async_wait_queue, skb);
5404			else
5405#endif
5406			if (eaten)
5407				__kfree_skb(skb);
5408			else
5409				sk->sk_data_ready(sk, 0);
5410			return 0;
5411		}
5412	}
5413
5414slow_path:
5415	if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
5416		goto csum_error;
5417
5418	/*
5419	 *	Standard slow path.
5420	 */
5421
5422	res = tcp_validate_incoming(sk, skb, th, 1);
5423	if (res <= 0)
5424		return -res;
5425
5426step5:
5427	if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
 
 
5428		goto discard;
5429
5430	tcp_rcv_rtt_measure_ts(sk, skb);
5431
5432	/* Process urgent data. */
5433	tcp_urg(sk, skb, th);
5434
5435	/* step 7: process the segment text */
5436	tcp_data_queue(sk, skb);
5437
5438	tcp_data_snd_check(sk);
5439	tcp_ack_snd_check(sk);
5440	return 0;
5441
5442csum_error:
5443	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
 
 
 
5444
5445discard:
5446	__kfree_skb(skb);
5447	return 0;
5448}
5449EXPORT_SYMBOL(tcp_rcv_established);
5450
5451static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5452					 struct tcphdr *th, unsigned len)
5453{
5454	u8 *hash_location;
5455	struct inet_connection_sock *icsk = inet_csk(sk);
5456	struct tcp_sock *tp = tcp_sk(sk);
5457	struct tcp_cookie_values *cvp = tp->cookie_values;
5458	int saved_clamp = tp->rx_opt.mss_clamp;
 
 
5459
5460	tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);
 
 
5461
5462	if (th->ack) {
5463		/* rfc793:
5464		 * "If the state is SYN-SENT then
5465		 *    first check the ACK bit
5466		 *      If the ACK bit is set
5467		 *	  If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send
5468		 *        a reset (unless the RST bit is set, if so drop
5469		 *        the segment and return)"
5470		 *
5471		 *  We do not send data with SYN, so that RFC-correct
5472		 *  test reduces to:
5473		 */
5474		if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt)
5475			goto reset_and_undo;
 
5476
5477		if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
5478		    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
5479			     tcp_time_stamp)) {
5480			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED);
 
 
5481			goto reset_and_undo;
5482		}
5483
5484		/* Now ACK is acceptable.
5485		 *
5486		 * "If the RST bit is set
5487		 *    If the ACK was acceptable then signal the user "error:
5488		 *    connection reset", drop the segment, enter CLOSED state,
5489		 *    delete TCB, and return."
5490		 */
5491
5492		if (th->rst) {
5493			tcp_reset(sk);
5494			goto discard;
 
 
5495		}
5496
5497		/* rfc793:
5498		 *   "fifth, if neither of the SYN or RST bits is set then
5499		 *    drop the segment and return."
5500		 *
5501		 *    See note below!
5502		 *                                        --ANK(990513)
5503		 */
5504		if (!th->syn)
 
5505			goto discard_and_undo;
5506
5507		/* rfc793:
5508		 *   "If the SYN bit is on ...
5509		 *    are acceptable then ...
5510		 *    (our SYN has been ACKed), change the connection
5511		 *    state to ESTABLISHED..."
5512		 */
5513
5514		TCP_ECN_rcv_synack(tp, th);
5515
5516		tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
 
5517		tcp_ack(sk, skb, FLAG_SLOWPATH);
5518
5519		/* Ok.. it's good. Set up sequence numbers and
5520		 * move to established.
5521		 */
5522		tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
5523		tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
5524
5525		/* RFC1323: The window in SYN & SYN/ACK segments is
5526		 * never scaled.
5527		 */
5528		tp->snd_wnd = ntohs(th->window);
5529		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
5530
5531		if (!tp->rx_opt.wscale_ok) {
5532			tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
5533			tp->window_clamp = min(tp->window_clamp, 65535U);
 
5534		}
5535
5536		if (tp->rx_opt.saw_tstamp) {
5537			tp->rx_opt.tstamp_ok	   = 1;
5538			tp->tcp_header_len =
5539				sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
5540			tp->advmss	    -= TCPOLEN_TSTAMP_ALIGNED;
5541			tcp_store_ts_recent(tp);
5542		} else {
5543			tp->tcp_header_len = sizeof(struct tcphdr);
5544		}
5545
5546		if (tcp_is_sack(tp) && sysctl_tcp_fack)
5547			tcp_enable_fack(tp);
5548
5549		tcp_mtup_init(sk);
5550		tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
5551		tcp_initialize_rcv_mss(sk);
5552
5553		/* Remember, tcp_poll() does not lock socket!
5554		 * Change state from SYN-SENT only after copied_seq
5555		 * is initialized. */
5556		tp->copied_seq = tp->rcv_nxt;
5557
5558		if (cvp != NULL &&
5559		    cvp->cookie_pair_size > 0 &&
5560		    tp->rx_opt.cookie_plus > 0) {
5561			int cookie_size = tp->rx_opt.cookie_plus
5562					- TCPOLEN_COOKIE_BASE;
5563			int cookie_pair_size = cookie_size
5564					     + cvp->cookie_desired;
5565
5566			/* A cookie extension option was sent and returned.
5567			 * Note that each incoming SYNACK replaces the
5568			 * Responder cookie.  The initial exchange is most
5569			 * fragile, as protection against spoofing relies
5570			 * entirely upon the sequence and timestamp (above).
5571			 * This replacement strategy allows the correct pair to
5572			 * pass through, while any others will be filtered via
5573			 * Responder verification later.
5574			 */
5575			if (sizeof(cvp->cookie_pair) >= cookie_pair_size) {
5576				memcpy(&cvp->cookie_pair[cvp->cookie_desired],
5577				       hash_location, cookie_size);
5578				cvp->cookie_pair_size = cookie_pair_size;
5579			}
5580		}
5581
5582		smp_mb();
5583		tcp_set_state(sk, TCP_ESTABLISHED);
5584
5585		security_inet_conn_established(sk, skb);
5586
5587		/* Make sure socket is routed, for correct metrics.  */
5588		icsk->icsk_af_ops->rebuild_header(sk);
5589
5590		tcp_init_metrics(sk);
5591
5592		tcp_init_congestion_control(sk);
5593
5594		/* Prevent spurious tcp_cwnd_restart() on first data
5595		 * packet.
5596		 */
5597		tp->lsndtime = tcp_time_stamp;
5598
5599		tcp_init_buffer_space(sk);
5600
5601		if (sock_flag(sk, SOCK_KEEPOPEN))
5602			inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
5603
5604		if (!tp->rx_opt.snd_wscale)
5605			__tcp_fast_path_on(tp, tp->snd_wnd);
5606		else
5607			tp->pred_flags = 0;
5608
5609		if (!sock_flag(sk, SOCK_DEAD)) {
5610			sk->sk_state_change(sk);
5611			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
5612		}
5613
 
5614		if (sk->sk_write_pending ||
5615		    icsk->icsk_accept_queue.rskq_defer_accept ||
5616		    icsk->icsk_ack.pingpong) {
5617			/* Save one ACK. Data will be ready after
5618			 * several ticks, if write_pending is set.
5619			 *
5620			 * This could be removed, but with this feature tcpdumps
5621			 * look so _wonderfully_ clever that I was not able
5622			 * to resist the temptation 8)     --ANK
5623			 */
5624			inet_csk_schedule_ack(sk);
5625			icsk->icsk_ack.lrcvtime = tcp_time_stamp;
5626			icsk->icsk_ack.ato	 = TCP_ATO_MIN;
5627			tcp_incr_quickack(sk);
5628			tcp_enter_quickack_mode(sk);
5629			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
5630						  TCP_DELACK_MAX, TCP_RTO_MAX);
5631
5632discard:
5633			__kfree_skb(skb);
5634			return 0;
5635		} else {
5636			tcp_send_ack(sk);
5637		}
 
5638		return -1;
5639	}
5640
5641	/* No ACK in the segment */
5642
5643	if (th->rst) {
5644		/* rfc793:
5645		 * "If the RST bit is set
5646		 *
5647		 *      Otherwise (no ACK) drop the segment and return."
5648		 */
5649
5650		goto discard_and_undo;
5651	}
5652
5653	/* PAWS check. */
5654	if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp &&
5655	    tcp_paws_reject(&tp->rx_opt, 0))
 
5656		goto discard_and_undo;
5657
5658	if (th->syn) {
5659		/* We see a SYN without an ACK. It is an attempt at a
5660		 * simultaneous connect with crossed SYNs.
5661		 * In particular, it can be a connect to self.
5662		 */
5663		tcp_set_state(sk, TCP_SYN_RECV);
5664
5665		if (tp->rx_opt.saw_tstamp) {
5666			tp->rx_opt.tstamp_ok = 1;
5667			tcp_store_ts_recent(tp);
5668			tp->tcp_header_len =
5669				sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
5670		} else {
5671			tp->tcp_header_len = sizeof(struct tcphdr);
5672		}
5673
5674		tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
 
5675		tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
5676
5677		/* RFC1323: The window in SYN & SYN/ACK segments is
5678		 * never scaled.
5679		 */
5680		tp->snd_wnd    = ntohs(th->window);
5681		tp->snd_wl1    = TCP_SKB_CB(skb)->seq;
5682		tp->max_window = tp->snd_wnd;
5683
5684		TCP_ECN_rcv_syn(tp, th);
5685
5686		tcp_mtup_init(sk);
5687		tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
5688		tcp_initialize_rcv_mss(sk);
5689
5690		tcp_send_synack(sk);
5691#if 0
5692		/* Note, we could accept data and URG from this segment.
5693		 * There is nothing to prevent this.
5694		 *
5695		 * However, if we sometimes ignore data in ACK-less segments,
5696		 * there is no reason to accept it at other times.
5697		 * Also, the code doing this in step6 of tcp_rcv_state_process
5698		 * does not seem flawless. So, discard the packet for sanity.
5699		 * Uncomment this return to process the data.
5700		 */
5701		return -1;
5702#else
5703		goto discard;
5704#endif
5705	}
5706	/* "fifth, if neither of the SYN or RST bits is set then
5707	 * drop the segment and return."
5708	 */
5709
5710discard_and_undo:
5711	tcp_clear_options(&tp->rx_opt);
5712	tp->rx_opt.mss_clamp = saved_clamp;
5713	goto discard;
 
5714
5715reset_and_undo:
5716	tcp_clear_options(&tp->rx_opt);
5717	tp->rx_opt.mss_clamp = saved_clamp;
5718	return 1;
5719}
5720
5721/*
5722 *	This function implements the receiving procedure of RFC 793 for
5723 *	all states except ESTABLISHED and TIME_WAIT.
5724 *	It's called from both tcp_v4_rcv and tcp_v6_rcv and should be
5725 *	address independent.
5726 */
5727
5728int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5729			  struct tcphdr *th, unsigned len)
5730{
5731	struct tcp_sock *tp = tcp_sk(sk);
5732	struct inet_connection_sock *icsk = inet_csk(sk);
 
 
5733	int queued = 0;
5734	int res;
5735
5736	tp->rx_opt.saw_tstamp = 0;
5737
5738	switch (sk->sk_state) {
5739	case TCP_CLOSE:
 
5740		goto discard;
5741
5742	case TCP_LISTEN:
5743		if (th->ack)
5744			return 1;
5745
5746		if (th->rst)
 
5747			goto discard;
5748
5749		if (th->syn) {
5750			if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
5751				return 1;
5752
5753			/* Now we have several options: In theory there is
5754			 * nothing else in the frame. KA9Q has an option to
5755			 * send data with the syn, BSD accepts data with the
5756			 * syn up to the [to be] advertised window and
5757			 * Solaris 2.1 gives you a protocol error. For now
5758			 * we just ignore it; that fits the spec precisely
5759			 * and avoids incompatibilities. It would be nice in
5760			 * the future to drop through and process the data.
5761			 *
5762			 * Now that TTCP is starting to be used we ought to
5763			 * queue this data.
5764			 * But, this leaves one open to an easy denial of
5765			 * service attack, and SYN cookies can't defend
5766			 * against this problem. So, we drop the data
5767			 * in the interest of security over speed unless
5768			 * it's still in use.
5769			 */
5770			kfree_skb(skb);
5771			return 0;
5772		}
 
5773		goto discard;
5774
5775	case TCP_SYN_SENT:
5776		queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
 
 
5777		if (queued >= 0)
5778			return queued;
5779
5780		/* Do step6 onward by hand. */
5781		tcp_urg(sk, skb, th);
5782		__kfree_skb(skb);
5783		tcp_data_snd_check(sk);
5784		return 0;
5785	}
5786
5787	res = tcp_validate_incoming(sk, skb, th, 0);
5788	if (res <= 0)
5789		return -res;
5790
5791	/* step 5: check the ACK field */
5792	if (th->ack) {
5793		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
5794
5795		switch (sk->sk_state) {
5796		case TCP_SYN_RECV:
5797			if (acceptable) {
5798				tp->copied_seq = tp->rcv_nxt;
5799				smp_mb();
5800				tcp_set_state(sk, TCP_ESTABLISHED);
5801				sk->sk_state_change(sk);
5802
5803				/* Note that this wakeup is only for the marginal
5804				 * crossed-SYN case. Passively opened sockets
5805				 * are not woken up, because sk->sk_sleep ==
5806				 * NULL and sk->sk_socket == NULL.
5807				 */
5808				if (sk->sk_socket)
5809					sk_wake_async(sk,
5810						      SOCK_WAKE_IO, POLL_OUT);
5811
5812				tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
5813				tp->snd_wnd = ntohs(th->window) <<
5814					      tp->rx_opt.snd_wscale;
5815				tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
5816
5817				if (tp->rx_opt.tstamp_ok)
5818					tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
5819
5820				/* Make sure socket is routed, for
5821				 * correct metrics.
5822				 */
5823				icsk->icsk_af_ops->rebuild_header(sk);
5824
5825				tcp_init_metrics(sk);
 
5826
5827				tcp_init_congestion_control(sk);
 
5828
5829				/* Prevent spurious tcp_cwnd_restart() on
5830				 * first data packet.
5831				 */
5832				tp->lsndtime = tcp_time_stamp;
5833
5834				tcp_mtup_init(sk);
5835				tcp_initialize_rcv_mss(sk);
5836				tcp_init_buffer_space(sk);
5837				tcp_fast_path_on(tp);
5838			} else {
5839				return 1;
5840			}
5841			break;
5842
5843		case TCP_FIN_WAIT1:
5844			if (tp->snd_una == tp->write_seq) {
5845				tcp_set_state(sk, TCP_FIN_WAIT2);
5846				sk->sk_shutdown |= SEND_SHUTDOWN;
5847				dst_confirm(__sk_dst_get(sk));
5848
5849				if (!sock_flag(sk, SOCK_DEAD))
5850					/* Wake up lingering close() */
5851					sk->sk_state_change(sk);
5852				else {
5853					int tmo;
5854
5855					if (tp->linger2 < 0 ||
5856					    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
5857					     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
5858						tcp_done(sk);
5859						NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
5860						return 1;
5861					}
5862
5863					tmo = tcp_fin_time(sk);
5864					if (tmo > TCP_TIMEWAIT_LEN) {
5865						inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
5866					} else if (th->fin || sock_owned_by_user(sk)) {
5867						/* Bad case. We could lose such a FIN otherwise.
5868						 * It is not a big problem, but it looks confusing
5869						 * and is not such a rare event. We can still lose it now,
5870						 * if it spins in bh_lock_sock(), but that is a really
5871						 * marginal case.
5872						 */
5873						inet_csk_reset_keepalive_timer(sk, tmo);
5874					} else {
5875						tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
5876						goto discard;
5877					}
5878				}
5879			}
5880			break;
5881
5882		case TCP_CLOSING:
5883			if (tp->snd_una == tp->write_seq) {
5884				tcp_time_wait(sk, TCP_TIME_WAIT, 0);
5885				goto discard;
5886			}
5887			break;
5888
5889		case TCP_LAST_ACK:
5890			if (tp->snd_una == tp->write_seq) {
5891				tcp_update_metrics(sk);
5892				tcp_done(sk);
5893				goto discard;
5894			}
 
 
5895			break;
5896		}
5897	} else
5898		goto discard;
5899
5900	/* step 6: check the URG bit */
5901	tcp_urg(sk, skb, th);
5902
5903	/* step 7: process the segment text */
5904	switch (sk->sk_state) {
5905	case TCP_CLOSE_WAIT:
5906	case TCP_CLOSING:
5907	case TCP_LAST_ACK:
5908		if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
5909			break;
 
 
5910	case TCP_FIN_WAIT1:
5911	case TCP_FIN_WAIT2:
5912		/* RFC 793 says to queue data in these states,
5913		 * RFC 1122 says we MUST send a reset.
5914		 * BSD 4.4 also does reset.
5915		 */
5916		if (sk->sk_shutdown & RCV_SHUTDOWN) {
5917			if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
5918			    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
5919				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
5920				tcp_reset(sk);
5921				return 1;
5922			}
5923		}
5924		/* Fall through */
5925	case TCP_ESTABLISHED:
5926		tcp_data_queue(sk, skb);
5927		queued = 1;
5928		break;
5929	}
5930
5931	/* tcp_data could move socket to TIME-WAIT */
5932	if (sk->sk_state != TCP_CLOSE) {
5933		tcp_data_snd_check(sk);
5934		tcp_ack_snd_check(sk);
5935	}
5936
5937	if (!queued) {
5938discard:
5939		__kfree_skb(skb);
5940	}
5941	return 0;
5942}
5943EXPORT_SYMBOL(tcp_rcv_state_process);
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
  65#define pr_fmt(fmt) "TCP: " fmt
  66
  67#include <linux/mm.h>
  68#include <linux/slab.h>
  69#include <linux/module.h>
  70#include <linux/sysctl.h>
  71#include <linux/kernel.h>
  72#include <linux/prefetch.h>
  73#include <net/dst.h>
  74#include <net/tcp.h>
  75#include <net/proto_memory.h>
  76#include <net/inet_common.h>
  77#include <linux/ipsec.h>
  78#include <linux/unaligned.h>
  79#include <linux/errqueue.h>
  80#include <trace/events/tcp.h>
  81#include <linux/jump_label_ratelimit.h>
  82#include <net/busy_poll.h>
  83#include <net/mptcp.h>
  84
 
 
  85int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
  86
  87#define FLAG_DATA		0x01 /* Incoming frame contained data.		*/
  88#define FLAG_WIN_UPDATE		0x02 /* Incoming ACK was a window update.	*/
  89#define FLAG_DATA_ACKED		0x04 /* This ACK acknowledged new data.		*/
  90#define FLAG_RETRANS_DATA_ACKED	0x08 /* "" "" some of which was retransmitted.	*/
  91#define FLAG_SYN_ACKED		0x10 /* This ACK acknowledged SYN.		*/
  92#define FLAG_DATA_SACKED	0x20 /* New SACK.				*/
  93#define FLAG_ECE		0x40 /* ECE in this ACK				*/
  94#define FLAG_LOST_RETRANS	0x80 /* This ACK marks some retransmission lost */
  95#define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/
  96#define FLAG_ORIG_SACK_ACKED	0x200 /* Never retransmitted data are (s)acked	*/
  97#define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
  98#define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
  99#define FLAG_SET_XMIT_TIMER	0x1000 /* Set TLP or RTO timer */
 100#define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */
 101#define FLAG_UPDATE_TS_RECENT	0x4000 /* tcp_replace_ts_recent() */
 102#define FLAG_NO_CHALLENGE_ACK	0x8000 /* do not call tcp_send_challenge_ack()	*/
 103#define FLAG_ACK_MAYBE_DELAYED	0x10000 /* Likely a delayed ACK */
 104#define FLAG_DSACK_TLP		0x20000 /* DSACK for tail loss probe */
 105
 106#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
 107#define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
 108#define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE|FLAG_DSACKING_ACK)
 109#define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)
 
 110
 111#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
 112#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))
 113
 114#define REXMIT_NONE	0 /* no loss recovery to do */
 115#define REXMIT_LOST	1 /* retransmit packets marked lost */
 116#define REXMIT_NEW	2 /* FRTO-style transmit of unsent/new packets */
 117
 118#if IS_ENABLED(CONFIG_TLS_DEVICE)
 119static DEFINE_STATIC_KEY_DEFERRED_FALSE(clean_acked_data_enabled, HZ);
 120
 121void clean_acked_data_enable(struct inet_connection_sock *icsk,
 122			     void (*cad)(struct sock *sk, u32 ack_seq))
 123{
 124	icsk->icsk_clean_acked = cad;
 125	static_branch_deferred_inc(&clean_acked_data_enabled);
 126}
 127EXPORT_SYMBOL_GPL(clean_acked_data_enable);
 128
 129void clean_acked_data_disable(struct inet_connection_sock *icsk)
 130{
 131	static_branch_slow_dec_deferred(&clean_acked_data_enabled);
 132	icsk->icsk_clean_acked = NULL;
 133}
 134EXPORT_SYMBOL_GPL(clean_acked_data_disable);
 135
 136void clean_acked_data_flush(void)
 137{
 138	static_key_deferred_flush(&clean_acked_data_enabled);
 139}
 140EXPORT_SYMBOL_GPL(clean_acked_data_flush);
 141#endif
 142
 143#ifdef CONFIG_CGROUP_BPF
 144static void bpf_skops_parse_hdr(struct sock *sk, struct sk_buff *skb)
 145{
 146	bool unknown_opt = tcp_sk(sk)->rx_opt.saw_unknown &&
 147		BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
 148				       BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG);
 149	bool parse_all_opt = BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
 150						    BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG);
 151	struct bpf_sock_ops_kern sock_ops;
 152
 153	if (likely(!unknown_opt && !parse_all_opt))
 154		return;
 155
 156	/* The skb will be handled in the
 157	 * bpf_skops_established() or
 158	 * bpf_skops_write_hdr_opt().
 159	 */
 160	switch (sk->sk_state) {
 161	case TCP_SYN_RECV:
 162	case TCP_SYN_SENT:
 163	case TCP_LISTEN:
 164		return;
 165	}
 166
 167	sock_owned_by_me(sk);
 168
 169	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
 170	sock_ops.op = BPF_SOCK_OPS_PARSE_HDR_OPT_CB;
 171	sock_ops.is_fullsock = 1;
 172	sock_ops.sk = sk;
 173	bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));
 174
 175	BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
 176}
 177
 178static void bpf_skops_established(struct sock *sk, int bpf_op,
 179				  struct sk_buff *skb)
 180{
 181	struct bpf_sock_ops_kern sock_ops;
 182
 183	sock_owned_by_me(sk);
 184
 185	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
 186	sock_ops.op = bpf_op;
 187	sock_ops.is_fullsock = 1;
 188	sock_ops.sk = sk;
 189	/* sk with TCP_REPAIR_ON does not have skb in tcp_finish_connect */
 190	if (skb)
 191		bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));
 192
 193	BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
 194}
 195#else
 196static void bpf_skops_parse_hdr(struct sock *sk, struct sk_buff *skb)
 197{
 198}
 199
 200static void bpf_skops_established(struct sock *sk, int bpf_op,
 201				  struct sk_buff *skb)
 202{
 203}
 204#endif
 205
 206static __cold void tcp_gro_dev_warn(const struct sock *sk, const struct sk_buff *skb,
 207				    unsigned int len)
 208{
 209	struct net_device *dev;
 210
 211	rcu_read_lock();
 212	dev = dev_get_by_index_rcu(sock_net(sk), skb->skb_iif);
 213	if (!dev || len >= READ_ONCE(dev->mtu))
 214		pr_warn("%s: Driver has suspect GRO implementation, TCP performance may be compromised.\n",
 215			dev ? dev->name : "Unknown driver");
 216	rcu_read_unlock();
 217}
 218
  219/* Adapt the MSS value used to make delayed ACK decisions to the
  220 * real world.
 221 */
 222static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
 223{
 224	struct inet_connection_sock *icsk = inet_csk(sk);
 225	const unsigned int lss = icsk->icsk_ack.last_seg_size;
 226	unsigned int len;
 227
 228	icsk->icsk_ack.last_seg_size = 0;
 229
 230	/* skb->len may jitter because of SACKs, even if peer
 231	 * sends good full-sized frames.
 232	 */
 233	len = skb_shinfo(skb)->gso_size ? : skb->len;
 234	if (len >= icsk->icsk_ack.rcv_mss) {
 235		/* Note: divides are still a bit expensive.
 236		 * For the moment, only adjust scaling_ratio
 237		 * when we update icsk_ack.rcv_mss.
 238		 */
 239		if (unlikely(len != icsk->icsk_ack.rcv_mss)) {
 240			u64 val = (u64)skb->len << TCP_RMEM_TO_WIN_SCALE;
 241			u8 old_ratio = tcp_sk(sk)->scaling_ratio;
 242
 243			do_div(val, skb->truesize);
 244			tcp_sk(sk)->scaling_ratio = val ? val : 1;
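			/* scaling_ratio now holds len / truesize scaled by
			 * 2^TCP_RMEM_TO_WIN_SCALE, i.e. roughly the fraction of
			 * receive-buffer memory that is actual TCP payload for
			 * skbs shaped like this one.
			 */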
 245
 246			if (old_ratio != tcp_sk(sk)->scaling_ratio) {
 247				struct tcp_sock *tp = tcp_sk(sk);
 248
 249				val = tcp_win_from_space(sk, sk->sk_rcvbuf);
 250				tcp_set_window_clamp(sk, val);
 251
 252				if (tp->window_clamp < tp->rcvq_space.space)
 253					tp->rcvq_space.space = tp->window_clamp;
 254			}
 255		}
 256		icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
 257					       tcp_sk(sk)->advmss);
 258		/* Account for possibly-removed options */
 259		DO_ONCE_LITE_IF(len > icsk->icsk_ack.rcv_mss + MAX_TCP_OPTION_SPACE,
 260				tcp_gro_dev_warn, sk, skb, len);
 261		/* If the skb has a len of exactly 1*MSS and has the PSH bit
 262		 * set then it is likely the end of an application write. So
 263		 * more data may not be arriving soon, and yet the data sender
 264		 * may be waiting for an ACK if cwnd-bound or using TX zero
 265		 * copy. So we set ICSK_ACK_PUSHED here so that
 266		 * tcp_cleanup_rbuf() will send an ACK immediately if the app
 267		 * reads all of the data and is not ping-pong. If len > MSS
 268		 * then this logic does not matter (and does not hurt) because
 269		 * tcp_cleanup_rbuf() will always ACK immediately if the app
 270		 * reads data and there is more than an MSS of unACKed data.
 271		 */
 272		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_PSH)
 273			icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
 274	} else {
  275		/* Otherwise, we make a more careful check, taking into account
  276		 * that the size of the SACK blocks is variable.
 277		 *
 278		 * "len" is invariant segment length, including TCP header.
 279		 */
 280		len += skb->data - skb_transport_header(skb);
 281		if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) ||
 282		    /* If PSH is not set, packet should be
 283		     * full sized, provided peer TCP is not badly broken.
  284		     * This observation (if it is correct 8)) allows us
  285		     * to handle super-low MTU links fairly.
 286		     */
 287		    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
 288		     !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
  289			/* Also subtract the invariant part (if the peer is RFC compliant):
  290			 * the TCP header plus the fixed timestamp option length.
 291			 * Resulting "len" is MSS free of SACK jitter.
 292			 */
 293			len -= tcp_sk(sk)->tcp_header_len;
 294			icsk->icsk_ack.last_seg_size = len;
 295			if (len == lss) {
 296				icsk->icsk_ack.rcv_mss = len;
 297				return;
 298			}
 299		}
 300		if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
 301			icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
 302		icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
 303	}
 304}
 305
 306static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
 307{
 308	struct inet_connection_sock *icsk = inet_csk(sk);
 309	unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
 310
 311	if (quickacks == 0)
 312		quickacks = 2;
 313	quickacks = min(quickacks, max_quickacks);
 314	if (quickacks > icsk->icsk_ack.quick)
 315		icsk->icsk_ack.quick = quickacks;
 316}
 317
 318static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
 319{
 320	struct inet_connection_sock *icsk = inet_csk(sk);
 321
 322	tcp_incr_quickack(sk, max_quickacks);
 323	inet_csk_exit_pingpong_mode(sk);
 324	icsk->icsk_ack.ato = TCP_ATO_MIN;
 325}
 326
 327/* Send ACKs quickly, if "quick" count is not exhausted
 328 * and the session is not interactive.
 329 */
 330
 331static bool tcp_in_quickack_mode(struct sock *sk)
 332{
 333	const struct inet_connection_sock *icsk = inet_csk(sk);
 334	const struct dst_entry *dst = __sk_dst_get(sk);
 335
 336	return (dst && dst_metric(dst, RTAX_QUICKACK)) ||
 337		(icsk->icsk_ack.quick && !inet_csk_in_pingpong_mode(sk));
 338}
 339
 340static void tcp_ecn_queue_cwr(struct tcp_sock *tp)
 341{
 342	if (tp->ecn_flags & TCP_ECN_OK)
 343		tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
 344}
 345
 346static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
 347{
 348	if (tcp_hdr(skb)->cwr) {
 349		tcp_sk(sk)->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
 350
 351		/* If the sender is telling us it has entered CWR, then its
 352		 * cwnd may be very low (even just 1 packet), so we should ACK
 353		 * immediately.
 354		 */
 355		if (TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq)
 356			inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
 357	}
 358}
 359
 360static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
 361{
 362	tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
 363}
 364
 365static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
 366{
 367	struct tcp_sock *tp = tcp_sk(sk);
 368
 369	switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
 370	case INET_ECN_NOT_ECT:
 371		/* Funny extension: if ECT is not set on a segment,
  372		 * and we have already seen ECT on a previous segment,
 373		 * it is probably a retransmit.
 374		 */
 375		if (tp->ecn_flags & TCP_ECN_SEEN)
 376			tcp_enter_quickack_mode(sk, 2);
 377		break;
 378	case INET_ECN_CE:
 379		if (tcp_ca_needs_ecn(sk))
 380			tcp_ca_event(sk, CA_EVENT_ECN_IS_CE);
 381
 382		if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
 383			/* Better not delay acks, sender can have a very low cwnd */
 384			tcp_enter_quickack_mode(sk, 2);
 385			tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
 386		}
 387		tp->ecn_flags |= TCP_ECN_SEEN;
 388		break;
 389	default:
 390		if (tcp_ca_needs_ecn(sk))
 391			tcp_ca_event(sk, CA_EVENT_ECN_NO_CE);
 392		tp->ecn_flags |= TCP_ECN_SEEN;
 393		break;
 394	}
 395}
 396
 397static void tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
 398{
 399	if (tcp_sk(sk)->ecn_flags & TCP_ECN_OK)
 400		__tcp_ecn_check_ce(sk, skb);
 401}
 402
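/* Per RFC 3168, a peer that agrees to use ECN answers our ECN-setup SYN with
 * ECE set and CWR clear in the SYN-ACK; any other combination means ECN is
 * not in use for this connection.
 */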
 403static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
 404{
 405	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
 406		tp->ecn_flags &= ~TCP_ECN_OK;
 407}
 408
 409static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
 410{
 411	if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
 412		tp->ecn_flags &= ~TCP_ECN_OK;
 413}
 414
 415static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
 416{
 417	if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
 418		return true;
 419	return false;
 420}
 421
 422/* Buffer size and advertised window tuning.
 423 *
 424 * 1. Tuning sk->sk_sndbuf, when connection enters established state.
 425 */
 426
 427static void tcp_sndbuf_expand(struct sock *sk)
 428{
 429	const struct tcp_sock *tp = tcp_sk(sk);
 430	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
 431	int sndmem, per_mss;
 432	u32 nr_segs;
 433
  434	/* Worst case is non GSO/TSO: each frame consumes one skb
  435	 * and skb->head is kmalloced from a power-of-two area of memory
 436	 */
 437	per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
 438		  MAX_TCP_HEADER +
 439		  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 440
 441	per_mss = roundup_pow_of_two(per_mss) +
 442		  SKB_DATA_ALIGN(sizeof(struct sk_buff));
 443
 444	nr_segs = max_t(u32, TCP_INIT_CWND, tcp_snd_cwnd(tp));
 445	nr_segs = max_t(u32, nr_segs, tp->reordering + 1);
 446
 447	/* Fast Recovery (RFC 5681 3.2) :
 448	 * Cubic needs 1.7 factor, rounded to 2 to include
 449	 * extra cushion (application might react slowly to EPOLLOUT)
 450	 */
 451	sndmem = ca_ops->sndbuf_expand ? ca_ops->sndbuf_expand(sk) : 2;
 452	sndmem *= nr_segs * per_mss;
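	/* In other words, sndmem is (expansion factor) * nr_segs * per_mss:
	 * room for 'factor' congestion windows of worst-case, non-GSO skbs
	 * (a factor of 2 by default, per the comment above).
	 */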
 453
 454	if (sk->sk_sndbuf < sndmem)
 455		WRITE_ONCE(sk->sk_sndbuf,
 456			   min(sndmem, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_wmem[2])));
 457}
 458
 459/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
 460 *
  461 * tcp_full_space() is split into two parts: a "network" buffer, allocated
  462 * forward and advertised in the receive window (tp->rcv_wnd), and an
  463 * "application buffer", required to isolate scheduling/application
  464 * latencies from the network.
  465 * window_clamp is the maximal advertised window. It can be less than
  466 * tcp_full_space(); in that case tcp_full_space() - window_clamp
  467 * is reserved for the "application" buffer. The smaller window_clamp is,
  468 * the smoother our behaviour from the network's point of view, but the
  469 * lower the throughput and the higher the connection's sensitivity to losses. 8)
  470 *
  471 * rcv_ssthresh is a stricter window_clamp used during the "slow start"
  472 * phase to predict the further behaviour of this connection.
  473 * It is used for two goals:
  474 * - to enforce header prediction at the sender, even when the application
  475 *   requires some significant "application buffer". This is check #1.
  476 * - to prevent pruning of the receive queue because of misprediction
  477 *   of the receiver window. Check #2.
  478 *
  479 * The scheme does not work when the sender sends good segments opening
  480 * the window and then starts to feed us spaghetti. But it should work
  481 * in common situations. Otherwise, we have to rely on queue collapsing.
 482 */
 483
 484/* Slow part of check#2. */
 485static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb,
 486			     unsigned int skbtruesize)
 487{
 488	const struct tcp_sock *tp = tcp_sk(sk);
 489	/* Optimize this! */
 490	int truesize = tcp_win_from_space(sk, skbtruesize) >> 1;
 491	int window = tcp_win_from_space(sk, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])) >> 1;
 492
 493	while (tp->rcv_ssthresh <= window) {
 494		if (truesize <= skb->len)
 495			return 2 * inet_csk(sk)->icsk_ack.rcv_mss;
 496
 497		truesize >>= 1;
 498		window >>= 1;
 499	}
 500	return 0;
 501}
 502
 503/* Even if skb appears to have a bad len/truesize ratio, TCP coalescing
 504 * can play nice with us, as sk_buff and skb->head might be either
 505 * freed or shared with up to MAX_SKB_FRAGS segments.
 506 * Only give a boost to drivers using page frag(s) to hold the frame(s),
 507 * and if no payload was pulled in skb->head before reaching us.
 508 */
 509static u32 truesize_adjust(bool adjust, const struct sk_buff *skb)
 510{
 511	u32 truesize = skb->truesize;
 512
 513	if (adjust && !skb_headlen(skb)) {
 514		truesize -= SKB_TRUESIZE(skb_end_offset(skb));
 515		/* paranoid check, some drivers might be buggy */
 516		if (unlikely((int)truesize < (int)skb->len))
 517			truesize = skb->truesize;
 518	}
 519	return truesize;
 520}
 521
 522static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb,
 523			    bool adjust)
 524{
 525	struct tcp_sock *tp = tcp_sk(sk);
 526	int room;
 527
 528	room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
 529
 530	if (room <= 0)
 531		return;
 532
 533	/* Check #1 */
 534	if (!tcp_under_memory_pressure(sk)) {
 535		unsigned int truesize = truesize_adjust(adjust, skb);
 
 536		int incr;
 537
 538		/* Check #2. Increase window, if skb with such overhead
 539		 * will fit to rcvbuf in future.
 540		 */
 541		if (tcp_win_from_space(sk, truesize) <= skb->len)
 542			incr = 2 * tp->advmss;
 543		else
 544			incr = __tcp_grow_window(sk, skb, truesize);
 545
 546		if (incr) {
 547			incr = max_t(int, incr, 2 * skb->len);
 548			tp->rcv_ssthresh += min(room, incr);
 549			inet_csk(sk)->icsk_ack.quick |= 1;
 550		}
 551	} else {
 552		/* Under pressure:
 553		 * Adjust rcv_ssthresh according to reserved mem
 554		 */
 555		tcp_adjust_rcv_ssthresh(sk);
 556	}
 557}
 558
 559/* 3. Try to fixup all. It is made immediately after connection enters
 560 *    established state.
 561 */
 562static void tcp_init_buffer_space(struct sock *sk)
 563{
 564	int tcp_app_win = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_app_win);
 565	struct tcp_sock *tp = tcp_sk(sk);
 566	int maxwin;
 567
 
 
 568	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
 569		tcp_sndbuf_expand(sk);
 570
 571	tcp_mstamp_refresh(tp);
 572	tp->rcvq_space.time = tp->tcp_mstamp;
 573	tp->rcvq_space.seq = tp->copied_seq;
 574
 575	maxwin = tcp_full_space(sk);
 576
 577	if (tp->window_clamp >= maxwin) {
 578		WRITE_ONCE(tp->window_clamp, maxwin);
 579
 580		if (tcp_app_win && maxwin > 4 * tp->advmss)
 581			WRITE_ONCE(tp->window_clamp,
 582				   max(maxwin - (maxwin >> tcp_app_win),
 583				       4 * tp->advmss));
 584	}
 585
 586	/* Force reservation of one segment. */
 587	if (tcp_app_win &&
 588	    tp->window_clamp > 2 * tp->advmss &&
 589	    tp->window_clamp + tp->advmss > maxwin)
 590		WRITE_ONCE(tp->window_clamp,
 591			   max(2 * tp->advmss, maxwin - tp->advmss));
 592
 593	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
 594	tp->snd_cwnd_stamp = tcp_jiffies32;
 595	tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd,
 596				    (u32)TCP_INIT_CWND * tp->advmss);
 597}
 598
 599/* 4. Recalculate window clamp after socket hit its memory bounds. */
 600static void tcp_clamp_window(struct sock *sk)
 601{
 602	struct tcp_sock *tp = tcp_sk(sk);
 603	struct inet_connection_sock *icsk = inet_csk(sk);
 604	struct net *net = sock_net(sk);
 605	int rmem2;
 606
 607	icsk->icsk_ack.quick = 0;
 608	rmem2 = READ_ONCE(net->ipv4.sysctl_tcp_rmem[2]);
 609
 610	if (sk->sk_rcvbuf < rmem2 &&
 611	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
 612	    !tcp_under_memory_pressure(sk) &&
 613	    sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
 614		WRITE_ONCE(sk->sk_rcvbuf,
 615			   min(atomic_read(&sk->sk_rmem_alloc), rmem2));
 616	}
 617	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
 618		tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
 619}
 620
 621/* Initialize RCV_MSS value.
  622 * RCV_MSS is our guess about the MSS used by the peer.
  623 * We don't have any direct information about the MSS.
  624 * It's better to underestimate the RCV_MSS than to overestimate it.
  625 * Overestimations make us ACK less frequently than needed.
  626 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
 627 */
 628void tcp_initialize_rcv_mss(struct sock *sk)
 629{
 630	const struct tcp_sock *tp = tcp_sk(sk);
 631	unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
 632
 633	hint = min(hint, tp->rcv_wnd / 2);
 634	hint = min(hint, TCP_MSS_DEFAULT);
 635	hint = max(hint, TCP_MIN_MSS);
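	/* The guess ends up clamped to [TCP_MIN_MSS, TCP_MSS_DEFAULT] and,
	 * within that range, never exceeds half of the current receive window
	 * or our own advmss/mss_cache.
	 */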
 636
 637	inet_csk(sk)->icsk_ack.rcv_mss = hint;
 638}
 639EXPORT_SYMBOL(tcp_initialize_rcv_mss);
 640
 641/* Receiver "autotuning" code.
 642 *
 643 * The algorithm for RTT estimation w/o timestamps is based on
 644 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
 645 * <https://public.lanl.gov/radiant/pubs.html#DRS>
 646 *
 647 * More detail on this code can be found at
 648 * <http://staff.psc.edu/jheffner/>,
 649 * though this reference is out of date.  A new paper
 650 * is pending.
 651 */
 652static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
 653{
 654	u32 new_sample = tp->rcv_rtt_est.rtt_us;
 655	long m = sample;
 656
 
 
 
 657	if (new_sample != 0) {
  658		/* If we averaged in larger samples in the non-timestamp
  659		 * case, we could grossly overestimate the RTT, especially
  660		 * with chatty applications or bulk transfer apps which
  661		 * are stalled on filesystem I/O.
 662		 *
 663		 * Also, since we are only going for a minimum in the
 664		 * non-timestamp case, we do not smooth things out
 665		 * else with timestamps disabled convergence takes too
 666		 * long.
 667		 */
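		/* rtt_us is kept scaled by 8: the !win_dep branch below is the
		 * usual 7/8 * old + 1/8 * new moving average, while the win_dep
		 * branch simply tracks the minimum sample seen.
		 */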
 668		if (!win_dep) {
 669			m -= (new_sample >> 3);
 670			new_sample += m;
 671		} else {
 672			m <<= 3;
 673			if (m < new_sample)
 674				new_sample = m;
 675		}
 676	} else {
 677		/* No previous measure. */
 678		new_sample = m << 3;
 679	}
 680
 681	tp->rcv_rtt_est.rtt_us = new_sample;
 
 682}
 683
 684static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
 685{
 686	u32 delta_us;
 687
 688	if (tp->rcv_rtt_est.time == 0)
 689		goto new_measure;
 690	if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
 691		return;
 692	delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time);
 693	if (!delta_us)
 694		delta_us = 1;
 695	tcp_rcv_rtt_update(tp, delta_us, 1);
 696
 697new_measure:
 698	tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
 699	tp->rcv_rtt_est.time = tp->tcp_mstamp;
 700}
 701
 702static s32 tcp_rtt_tsopt_us(const struct tcp_sock *tp)
 703{
 704	u32 delta, delta_us;
 705
 706	delta = tcp_time_stamp_ts(tp) - tp->rx_opt.rcv_tsecr;
 707	if (tp->tcp_usec_ts)
 708		return delta;
 709
 710	if (likely(delta < INT_MAX / (USEC_PER_SEC / TCP_TS_HZ))) {
 711		if (!delta)
 712			delta = 1;
 713		delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
 714		return delta_us;
 715	}
 716	return -1;
 717}
 718
 719static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
 720					  const struct sk_buff *skb)
 721{
 722	struct tcp_sock *tp = tcp_sk(sk);
 723
 724	if (tp->rx_opt.rcv_tsecr == tp->rcv_rtt_last_tsecr)
 725		return;
 726	tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr;
 727
 728	if (TCP_SKB_CB(skb)->end_seq -
 729	    TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss) {
 730		s32 delta = tcp_rtt_tsopt_us(tp);
 731
 732		if (delta >= 0)
 733			tcp_rcv_rtt_update(tp, delta, 0);
 734	}
 735}
 736
 737/*
 738 * This function should be called every time data is copied to user space.
 739 * It calculates the appropriate TCP receive buffer space.
 740 */
 741void tcp_rcv_space_adjust(struct sock *sk)
 742{
 743	struct tcp_sock *tp = tcp_sk(sk);
 744	u32 copied;
 745	int time;
 
 746
 747	trace_tcp_rcv_space_adjust(sk);
 748
 749	tcp_mstamp_refresh(tp);
 750	time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
 751	if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0)
 752		return;
 753
 754	/* Number of bytes copied to user in last RTT */
 755	copied = tp->copied_seq - tp->rcvq_space.seq;
 756	if (copied <= tp->rcvq_space.space)
 757		goto new_measure;
 758
 759	/* A bit of theory :
 760	 * copied = bytes received in previous RTT, our base window
 761	 * To cope with packet losses, we need a 2x factor
 762	 * To cope with slow start, and sender growing its cwnd by 100 %
 763	 * every RTT, we need a 4x factor, because the ACK we are sending
 764	 * now is for the next RTT, not the current one :
 765	 * <prev RTT . ><current RTT .. ><next RTT .... >
 766	 */
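	/* Worked example (illustrative numbers): if 100,000 bytes were
	 * copied to user space in the last RTT and advmss is 1448, the
	 * base below is rcvwin = 2 * 100000 + 16 * 1448 ~= 223 KB; the
	 * "grow" term then scales it further by the fraction by which
	 * "copied" exceeded the previous rcvq_space.space.
	 */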
 767
 768	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
 769	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
 770		u64 rcvwin, grow;
 771		int rcvbuf;
 772
 773		/* minimal window to cope with packet losses, assuming
 774		 * steady state. Add some cushion because of small variations.
 775		 */
 776		rcvwin = ((u64)copied << 1) + 16 * tp->advmss;
 777
 778		/* Accommodate for sender rate increase (eg. slow start) */
 779		grow = rcvwin * (copied - tp->rcvq_space.space);
 780		do_div(grow, tp->rcvq_space.space);
 781		rcvwin += (grow << 1);
 782
 783		rcvbuf = min_t(u64, tcp_space_from_win(sk, rcvwin),
 784			       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
 785		if (rcvbuf > sk->sk_rcvbuf) {
 786			WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
 787
 788			/* Make the window clamp follow along.  */
 789			WRITE_ONCE(tp->window_clamp,
 790				   tcp_win_from_space(sk, rcvbuf));
 791		}
 792	}
 793	tp->rcvq_space.space = copied;
 794
 795new_measure:
 796	tp->rcvq_space.seq = tp->copied_seq;
 797	tp->rcvq_space.time = tp->tcp_mstamp;
 798}
 799
 800static void tcp_save_lrcv_flowlabel(struct sock *sk, const struct sk_buff *skb)
 801{
 802#if IS_ENABLED(CONFIG_IPV6)
 803	struct inet_connection_sock *icsk = inet_csk(sk);
 804
 805	if (skb->protocol == htons(ETH_P_IPV6))
 806		icsk->icsk_ack.lrcv_flowlabel = ntohl(ip6_flowlabel(ipv6_hdr(skb)));
 807#endif
 808}
 809
 810/* There is something which you must keep in mind when you analyze the
 811 * behavior of the tp->ato delayed ack timeout interval.  When a
 812 * connection starts up, we want to ack as quickly as possible.  The
 813 * problem is that "good" TCP's do slow start at the beginning of data
 814 * transmission.  This means that until we send the first few ACKs the
 815 * sender will sit on his end and only queue most of his data, because
 816 * he can only send snd_cwnd unacked packets at any given time.  For
 817 * each ACK we send, he increments snd_cwnd and transmits more of his
 818 * queue.  -DaveM
 819 */
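/* Sketch of the ato adaptation done below: after the first data packet,
 * ato starts at TCP_ATO_MIN.  A quick follow-up (inter-arrival gap of at
 * most ATO_MIN/2) halves ato and adds ATO_MIN/2, pulling it towards
 * ATO_MIN; a gap shorter than the current ato pulls ato towards that gap
 * (capped at the RTO); a gap longer than the RTO re-enters quickack mode.
 */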
 820static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 821{
 822	struct tcp_sock *tp = tcp_sk(sk);
 823	struct inet_connection_sock *icsk = inet_csk(sk);
 824	u32 now;
 825
 826	inet_csk_schedule_ack(sk);
 827
 828	tcp_measure_rcv_mss(sk, skb);
 829
 830	tcp_rcv_rtt_measure(tp);
 831
 832	now = tcp_jiffies32;
 833
 834	if (!icsk->icsk_ack.ato) {
 835		/* The _first_ data packet received, initialize
 836		 * delayed ACK engine.
 837		 */
 838		tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
 839		icsk->icsk_ack.ato = TCP_ATO_MIN;
 840	} else {
 841		int m = now - icsk->icsk_ack.lrcvtime;
 842
 843		if (m <= TCP_ATO_MIN / 2) {
 844			/* The fastest case is the first. */
 845			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
 846		} else if (m < icsk->icsk_ack.ato) {
 847			icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
 848			if (icsk->icsk_ack.ato > icsk->icsk_rto)
 849				icsk->icsk_ack.ato = icsk->icsk_rto;
 850		} else if (m > icsk->icsk_rto) {
 851			/* Too long gap. Apparently sender failed to
 852			 * restart window, so that we send ACKs quickly.
 853			 */
 854			tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
 855		}
 856	}
 857	icsk->icsk_ack.lrcvtime = now;
 858	tcp_save_lrcv_flowlabel(sk, skb);
 859
 860	tcp_ecn_check_ce(sk, skb);
 861
 862	if (skb->len >= 128)
 863		tcp_grow_window(sk, skb, true);
 864}
 865
 866/* Called to compute a smoothed rtt estimate. The data fed to this
 867 * routine either comes from timestamps, or from segments that were
 868 * known _not_ to have been retransmitted [see Karn/Partridge
 869 * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
 870 * piece by Van Jacobson.
 871 * NOTE: the next three routines used to be one big routine.
 872 * To save cycles in the RFC 1323 implementation it was better to break
 873 * it up into three procedures. -- erics
 874 */
 875static void tcp_rtt_estimator(struct sock *sk, long mrtt_us)
 876{
 877	struct tcp_sock *tp = tcp_sk(sk);
 878	long m = mrtt_us; /* RTT */
 879	u32 srtt = tp->srtt_us;
 880
 881	/*	The following amusing code comes from Jacobson's
 882	 *	article in SIGCOMM '88.  Note that rtt and mdev
 883	 *	are scaled versions of rtt and mean deviation.
 884	 *	This is designed to be as fast as possible
 885	 *	m stands for "measurement".
 886	 *
 887	 *	In a 1990 paper the RTO value was changed to:
 888	 *	RTO = rtt + 4 * mdev
 889	 *
 890	 * Funny. This algorithm seems to be very broken.
 891	 * These formulae increase RTO when it should be decreased, increase
 892	 * it too slowly when it should be increased quickly, decrease it too
 893	 * quickly, etc. I guess in BSD RTO takes ONE value, so it absolutely
 894	 * does not matter how you _calculate_ it. It seems this was a trap
 895	 * that VJ failed to avoid. 8)
 896	 */
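	/* Fixed-point sketch (illustrative numbers): srtt_us holds 8 * SRTT
	 * and mdev_us holds 4 * mdev.  For a 100ms sample against an 80ms
	 * SRTT (srtt = 640 in these units): m = 100 - 80 = 20, srtt becomes
	 * 660, i.e. SRTT = 82.5ms, matching 7/8 * 80 + 1/8 * 100.
	 */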
 897	if (srtt != 0) {
 898		m -= (srtt >> 3);	/* m is now error in rtt est */
 899		srtt += m;		/* rtt = 7/8 rtt + 1/8 new */
 900		if (m < 0) {
 901			m = -m;		/* m is now abs(error) */
 902			m -= (tp->mdev_us >> 2);   /* similar update on mdev */
 903			/* This is similar to one of Eifel findings.
 904			 * Eifel blocks mdev updates when rtt decreases.
 905			 * This solution is a bit different: we use finer gain
 906			 * for mdev in this case (alpha*beta).
 907			 * Like Eifel it also prevents growth of rto,
 908			 * but also it limits too fast rto decreases,
 909			 * happening in pure Eifel.
 910			 */
 911			if (m > 0)
 912				m >>= 3;
 913		} else {
 914			m -= (tp->mdev_us >> 2);   /* similar update on mdev */
 915		}
 916		tp->mdev_us += m;		/* mdev = 3/4 mdev + 1/4 new */
 917		if (tp->mdev_us > tp->mdev_max_us) {
 918			tp->mdev_max_us = tp->mdev_us;
 919			if (tp->mdev_max_us > tp->rttvar_us)
 920				tp->rttvar_us = tp->mdev_max_us;
 921		}
 922		if (after(tp->snd_una, tp->rtt_seq)) {
 923			if (tp->mdev_max_us < tp->rttvar_us)
 924				tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2;
 925			tp->rtt_seq = tp->snd_nxt;
 926			tp->mdev_max_us = tcp_rto_min_us(sk);
 927
 928			tcp_bpf_rtt(sk, mrtt_us, srtt);
 929		}
 930	} else {
 931		/* no previous measure. */
 932		srtt = m << 3;		/* take the measured time to be rtt */
 933		tp->mdev_us = m << 1;	/* make sure rto = 3*rtt */
 934		tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk));
 935		tp->mdev_max_us = tp->rttvar_us;
 936		tp->rtt_seq = tp->snd_nxt;
 937
 938		tcp_bpf_rtt(sk, mrtt_us, srtt);
 939	}
 940	tp->srtt_us = max(1U, srtt);
 941}
 942
 943static void tcp_update_pacing_rate(struct sock *sk)
 944{
 945	const struct tcp_sock *tp = tcp_sk(sk);
 946	u64 rate;
 947
 948	/* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
 949	rate = (u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3);
 950
 951	/* current rate is (cwnd * mss) / srtt
 952	 * In Slow Start [1], set sk_pacing_rate to 200 % the current rate.
 953	 * In Congestion Avoidance phase, set it to 120 % the current rate.
 954	 *
 955	 * [1] : Normal Slow Start condition is (tp->snd_cwnd < tp->snd_ssthresh)
 956	 *	 If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching
 957	 *	 end of slow start and should slow down.
 958	 */
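	/* Illustrative numbers, assuming the default 200%/120% ratios:
	 * with mss = 1448, cwnd = 10 and srtt = 20ms, slow start pacing
	 * works out to about 2 * (1448 * 10) / 0.02 ~= 1.45 MB/s, while
	 * congestion avoidance gives roughly 0.87 MB/s.
	 */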
 959	if (tcp_snd_cwnd(tp) < tp->snd_ssthresh / 2)
 960		rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio);
 961	else
 962		rate *= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio);
 963
 964	rate *= max(tcp_snd_cwnd(tp), tp->packets_out);
 965
 966	if (likely(tp->srtt_us))
 967		do_div(rate, tp->srtt_us);
 968
 969	/* WRITE_ONCE() is needed because sch_fq fetches sk_pacing_rate
 970	 * without any lock. We want to make sure compiler wont store
 971	 * intermediate values in this location.
 972	 */
 973	WRITE_ONCE(sk->sk_pacing_rate,
 974		   min_t(u64, rate, READ_ONCE(sk->sk_max_pacing_rate)));
 975}
 976
 977/* Calculate rto without backoff.  This is the second half of Van Jacobson's
 978 * routine referred to above.
 979 */
 980static void tcp_set_rto(struct sock *sk)
 981{
 982	const struct tcp_sock *tp = tcp_sk(sk);
 983	/* Old crap is replaced with new one. 8)
 984	 *
 985	 * More seriously:
 986	 * 1. If rtt variance happened to be less than 50 msec, it is hallucination.
 987	 *    It cannot be less due to utterly erratic ACK generation made
 988	 *    at least by solaris and freebsd. "Erratic ACKs" have _nothing_
 989	 *    to do with delayed acks, because at cwnd>2 true delack timeout
 990	 *    is invisible. Actually, Linux-2.4 also generates erratic
 991	 *    ACKs in some circumstances.
 992	 */
 993	inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);
 994
 995	/* 2. Fixups made earlier cannot be right.
 996	 *    If we do not estimate RTO correctly without them,
 997	 *    all the algo is pure shit and should be replaced
 998	 *    with a correct one, which is exactly what we try to do here.
 999	 */
1000
1001	/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
1002	 * guarantees that rto is higher.
1003	 */
1004	tcp_bound_rto(sk);
1005}
1006
1007__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
1008{
1009	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
1010
1011	if (!cwnd)
1012		cwnd = TCP_INIT_CWND;
1013	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
1014}
1015
1016struct tcp_sacktag_state {
1017	/* Timestamps for earliest and latest never-retransmitted segment
1018	 * that was SACKed. RTO needs the earliest RTT to stay conservative,
1019	 * but congestion control should still get an accurate delay signal.
1020	 */
1021	u64	first_sackt;
1022	u64	last_sackt;
1023	u32	reord;
1024	u32	sack_delivered;
1025	int	flag;
1026	unsigned int mss_now;
1027	struct rate_sample *rate;
1028};
1029
1030/* Take a notice that peer is sending D-SACKs. Skip update of data delivery
1031 * and spurious retransmission information if this DSACK is unlikely caused by
1032 * sender's action:
1033 * - DSACKed sequence range is larger than maximum receiver's window.
1034 * - Total no. of DSACKed segments exceeds the total no. of retransmitted segs.
1035 */
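/* Example: a D-SACK covering three full-sized segments (seq_len = 3 * mss)
 * is counted as dup_segs = 3 below, whereas a range wider than max_window,
 * or more D-SACKed segments than were ever retransmitted, is treated as
 * dubious and ignored.
 */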
1036static u32 tcp_dsack_seen(struct tcp_sock *tp, u32 start_seq,
1037			  u32 end_seq, struct tcp_sacktag_state *state)
1038{
1039	u32 seq_len, dup_segs = 1;
1040
1041	if (!before(start_seq, end_seq))
1042		return 0;
1043
1044	seq_len = end_seq - start_seq;
1045	/* Dubious DSACK: DSACKed range greater than maximum advertised rwnd */
1046	if (seq_len > tp->max_window)
1047		return 0;
1048	if (seq_len > tp->mss_cache)
1049		dup_segs = DIV_ROUND_UP(seq_len, tp->mss_cache);
1050	else if (tp->tlp_high_seq && tp->tlp_high_seq == end_seq)
1051		state->flag |= FLAG_DSACK_TLP;
1052
1053	tp->dsack_dups += dup_segs;
1054	/* Skip the DSACK if dup segs weren't retransmitted by sender */
1055	if (tp->dsack_dups > tp->total_retrans)
1056		return 0;
1057
1058	tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
1059	/* We increase the RACK ordering window in rounds where we receive
1060	 * DSACKs that may have been due to reordering causing RACK to trigger
1061	 * a spurious fast recovery. Thus RACK ignores DSACKs that happen
1062	 * without having seen reordering, or that match TLP probes (TLP
1063	 * is timer-driven, not triggered by RACK).
1064	 */
1065	if (tp->reord_seen && !(state->flag & FLAG_DSACK_TLP))
1066		tp->rack.dsack_seen = 1;
1067
1068	state->flag |= FLAG_DSACKING_ACK;
1069	/* A spurious retransmission is delivered */
1070	state->sack_delivered += dup_segs;
1071
1072	return dup_segs;
1073}
1074
1075/* It's reordering when higher sequence was delivered (i.e. sacked) before
1076 * some lower never-retransmitted sequence ("low_seq"). The maximum reordering
1077 * distance is approximated in full-mss packet distance ("reordering").
1078 */
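/* Example: if the highest SACKed sequence sits 30 * mss above a newly
 * SACKed, never-retransmitted low_seq, the reordering metric below becomes
 * 30 packets (capped by sysctl_tcp_max_reordering).
 */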
1079static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq,
1080				      const int ts)
1081{
1082	struct tcp_sock *tp = tcp_sk(sk);
1083	const u32 mss = tp->mss_cache;
1084	u32 fack, metric;
1085
1086	fack = tcp_highest_sack_seq(tp);
1087	if (!before(low_seq, fack))
1088		return;
1089
1090	metric = fack - low_seq;
1091	if ((metric > tp->reordering * mss) && mss) {
1092#if FASTRETRANS_DEBUG > 1
1093		pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
1094			 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
1095			 tp->reordering,
1096			 0,
1097			 tp->sacked_out,
1098			 tp->undo_marker ? tp->undo_retrans : 0);
1099#endif
1100		tp->reordering = min_t(u32, (metric + mss - 1) / mss,
1101				       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
1102	}
1103
1104	/* This exciting event is worth remembering. 8) */
1105	tp->reord_seen++;
1106	NET_INC_STATS(sock_net(sk),
1107		      ts ? LINUX_MIB_TCPTSREORDER : LINUX_MIB_TCPSACKREORDER);
1108}
1109
1110 /* This must be called before lost_out or retrans_out are updated
1111  * on a new loss, because we want to know if all skbs previously
1112  * known to be lost have already been retransmitted, indicating
1113  * that this newly lost skb is our next skb to retransmit.
1114  */
1115static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
1116{
1117	if ((!tp->retransmit_skb_hint && tp->retrans_out >= tp->lost_out) ||
1118	    (tp->retransmit_skb_hint &&
1119	     before(TCP_SKB_CB(skb)->seq,
1120		    TCP_SKB_CB(tp->retransmit_skb_hint)->seq)))
1121		tp->retransmit_skb_hint = skb;
1122}
1123
1124/* Sum the number of packets on the wire we have marked as lost, and
1125 * notify the congestion control module that the given skb was marked lost.
1126 */
1127static void tcp_notify_skb_loss_event(struct tcp_sock *tp, const struct sk_buff *skb)
1128{
1129	tp->lost += tcp_skb_pcount(skb);
1130}
1131
1132void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
1133{
1134	__u8 sacked = TCP_SKB_CB(skb)->sacked;
1135	struct tcp_sock *tp = tcp_sk(sk);
1136
1137	if (sacked & TCPCB_SACKED_ACKED)
1138		return;
1139
1140	tcp_verify_retransmit_hint(tp, skb);
1141	if (sacked & TCPCB_LOST) {
1142		if (sacked & TCPCB_SACKED_RETRANS) {
1143			/* Account for retransmits that are lost again */
1144			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1145			tp->retrans_out -= tcp_skb_pcount(skb);
1146			NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
1147				      tcp_skb_pcount(skb));
1148			tcp_notify_skb_loss_event(tp, skb);
1149		}
1150	} else {
1151		tp->lost_out += tcp_skb_pcount(skb);
1152		TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1153		tcp_notify_skb_loss_event(tp, skb);
1154	}
1155}
1156
1157/* Updates the delivered and delivered_ce counts */
1158static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered,
1159				bool ece_ack)
1160{
1161	tp->delivered += delivered;
1162	if (ece_ack)
1163		tp->delivered_ce += delivered;
 
 
 
1164}
1165
1166/* This procedure tags the retransmission queue when SACKs arrive.
1167 *
1168 * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
1169 * Packets in queue with these bits set are counted in variables
1170 * sacked_out, retrans_out and lost_out, correspondingly.
1171 *
1172 * Valid combinations are:
1173 * Tag  InFlight	Description
1174 * 0	1		- orig segment is in flight.
1175 * S	0		- nothing flies, orig reached receiver.
1176 * L	0		- nothing flies, orig lost by net.
1177 * R	2		- both orig and retransmit are in flight.
1178 * L|R	1		- orig is lost, retransmit is in flight.
1179 * S|R  1		- orig reached receiver, retrans is still in flight.
1180 * (L|S|R is logically valid, it could occur when L|R is sacked,
1181 *  but it is equivalent to plain S and code short-circuits it to S.
1182 *  L|S is logically invalid, it would mean -1 packet in flight 8))
1183 *
1184 * These 6 states form a finite state machine, controlled by the following events:
1185 * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
1186 * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
1187 * 3. Loss detection event of two flavors:
1188 *	A. Scoreboard estimator decided the packet is lost.
1189 *	   A'. Reno "three dupacks" marks head of queue lost.
1190 *	B. SACK arrives sacking SND.NXT at the moment, when the
 
 
 
1191 *	   segment was retransmitted.
1192 * 4. D-SACK added new rule: D-SACK changes any tag to S.
1193 *
1194 * It is pleasant to note that the state diagram turns out to be commutative,
1195 * so we need not be bothered by the order of our actions
1196 * when multiple events arrive simultaneously (see the function below).
1197 *
1198 * Reordering detection.
1199 * --------------------
1200 * Reordering metric is maximal distance, which a packet can be displaced
1201 * in packet stream. With SACKs we can estimate it:
1202 *
1203 * 1. SACK fills old hole and the corresponding segment was not
1204 *    ever retransmitted -> reordering. Alas, we cannot use it
1205 *    when segment was retransmitted.
1206 * 2. The last flaw is solved with D-SACK. D-SACK arrives
1207 *    for retransmitted and already SACKed segment -> reordering..
1208 * Both of these heuristics are not used in Loss state, when we cannot
1209 * account for retransmits accurately.
1210 *
1211 * SACK block validation.
1212 * ----------------------
1213 *
1214 * SACK block range validation checks that the received SACK block fits to
1215 * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT.
1216 * Note that SND.UNA is not included in the range even though it would be
1217 * valid, because it means that the receiver is rather inconsistent with
1218 * itself, reporting SACK reneging when it should advance SND.UNA. Such a
1219 * SACK block is, however, perfectly valid in light of RFC 2018, which
1220 * explicitly states that "SACK block MUST reflect the newest segment.
1221 * Even if the newest segment is going to be discarded ...", not that it
1222 * looks very clever in case of the head skb. Due to potential
1223 * receiver-driven attacks, we choose to avoid an immediate walk of the
1224 * write queue on such reneging and defer the head skb's loss recovery to
1225 * the standard loss recovery procedure that eventually triggers (nothing forbids us doing this).
1226 *
1227 * This also blocks start_seq wrap-around. The problem lies in the
1228 * fact that though start_seq (s) is before end_seq (i.e., not reversed),
1229 * there's no guarantee that it will be before snd_nxt (n). The problem
1230 * happens when start_seq resides between end_seq wrap (e_w) and snd_nxt
1231 * wrap (s_w):
1232 *
1233 *         <- outs wnd ->                          <- wrapzone ->
1234 *         u     e      n                         u_w   e_w  s n_w
1235 *         |     |      |                          |     |   |  |
1236 * |<------------+------+----- TCP seqno space --------------+---------->|
1237 * ...-- <2^31 ->|                                           |<--------...
1238 * ...---- >2^31 ------>|                                    |<--------...
1239 *
1240 * Current code wouldn't be vulnerable but it's better still to discard such
1241 * crazy SACK blocks. Doing this check for start_seq alone closes somewhat
1242 * similar case (end_seq after snd_nxt wrap) as earlier reversed check in
1243 * snd_nxt wrap -> snd_una region will then become "well defined", i.e.,
1244 * equal to the ideal case (infinite seqno space without wrap caused issues).
1245 *
1246 * With D-SACK the lower bound is extended to cover sequence space below
1247 * SND.UNA down to undo_marker, which is the last point of interest. Yet
1248 * again, a D-SACK block must not go across snd_una (for the same reason as
1249 * for the normal SACK blocks, explained above). But there all simplicity
1250 * ends, TCP might receive valid D-SACKs below that. As long as they reside
1251 * fully below undo_marker they do not affect behavior in any way and can
1252 * therefore be safely ignored. In rare cases (which are more or less
1253 * theoretical ones), the D-SACK will nicely cross that boundary due to skb
1254 * fragmentation and packet reordering past skb's retransmission. To consider
1255 * them correctly, the acceptable range must be extended even more though
1256 * the exact amount is rather hard to quantify. However, tp->max_window can
1257 * be used as an exaggerated estimate.
1258 */
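/* Example walk through the tag machine above: an original segment in
 * flight (tag 0) that is retransmitted becomes R with two copies in the
 * network; a SACK reporting the original turns it into S|R; if the
 * receiver later D-SACKs the redundant retransmission, rule 4 clears the
 * R bit so only S remains and retrans_out is decremented accordingly.
 */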
1259static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
1260				   u32 start_seq, u32 end_seq)
1261{
1262	/* Too far in future, or reversed (interpretation is ambiguous) */
1263	if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
1264		return false;
1265
1266	/* Nasty start_seq wrap-around check (see comments above) */
1267	if (!before(start_seq, tp->snd_nxt))
1268		return false;
1269
1270	/* In outstanding window? ...This is valid exit for D-SACKs too.
1271	 * start_seq == snd_una is non-sensical (see comments above)
1272	 */
1273	if (after(start_seq, tp->snd_una))
1274		return true;
1275
1276	if (!is_dsack || !tp->undo_marker)
1277		return false;
1278
1279	/* ...Then it's D-SACK, and must reside below snd_una completely */
1280	if (after(end_seq, tp->snd_una))
1281		return false;
1282
1283	if (!before(start_seq, tp->undo_marker))
1284		return true;
1285
1286	/* Too old */
1287	if (!after(end_seq, tp->undo_marker))
1288		return false;
1289
1290	/* Undo_marker boundary crossing (overestimates a lot). Known already:
1291	 *   start_seq < undo_marker and end_seq >= undo_marker.
1292	 */
1293	return !before(start_seq, end_seq - tp->max_window);
1294}
1295
1296static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
1297			    struct tcp_sack_block_wire *sp, int num_sacks,
1298			    u32 prior_snd_una, struct tcp_sacktag_state *state)
1299{
1300	struct tcp_sock *tp = tcp_sk(sk);
1301	u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
1302	u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
1303	u32 dup_segs;
1304
1305	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
1306		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
1307	} else if (num_sacks > 1) {
1308		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
1309		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
1310
1311		if (after(end_seq_0, end_seq_1) || before(start_seq_0, start_seq_1))
1312			return false;
1313		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKOFORECV);
1314	} else {
1315		return false;
1316	}
1317
1318	dup_segs = tcp_dsack_seen(tp, start_seq_0, end_seq_0, state);
1319	if (!dup_segs) {	/* Skip dubious DSACK */
1320		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKIGNOREDDUBIOUS);
1321		return false;
1322	}
1323
1324	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECVSEGS, dup_segs);
1325
1326	/* D-SACK for already forgotten data... Do dumb counting. */
1327	if (tp->undo_marker && tp->undo_retrans > 0 &&
1328	    !after(end_seq_0, prior_snd_una) &&
1329	    after(end_seq_0, tp->undo_marker))
1330		tp->undo_retrans = max_t(int, 0, tp->undo_retrans - dup_segs);
1331
1332	return true;
1333}
1334
1335/* Check if skb is fully within the SACK block. In presence of GSO skbs,
1336 * the incoming SACK may not exactly match but we can find smaller MSS
1337 * aligned portion of it that matches. Therefore we might need to fragment
1338 * which may fail and creates some hassle (caller must handle error case
1339 * which may fail and create some hassle (caller must handle error case
1340 *
1341 * FIXME: this could be merged to shift decision code
1342 */
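/* Example: a GSO skb carrying two 1448-byte segments (seq 1000..3896)
 * hit by a SACK for 1000..2500 is not fully covered; the code below
 * rounds the covered length down to the MSS boundary (1448 bytes) and
 * fragments the skb there, so that only whole segments get tagged.
 */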
1343static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1344				  u32 start_seq, u32 end_seq)
1345{
1346	int err;
1347	bool in_sack;
1348	unsigned int pkt_len;
1349	unsigned int mss;
1350
1351	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
1352		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);
1353
1354	if (tcp_skb_pcount(skb) > 1 && !in_sack &&
1355	    after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
1356		mss = tcp_skb_mss(skb);
1357		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
1358
1359		if (!in_sack) {
1360			pkt_len = start_seq - TCP_SKB_CB(skb)->seq;
1361			if (pkt_len < mss)
1362				pkt_len = mss;
1363		} else {
1364			pkt_len = end_seq - TCP_SKB_CB(skb)->seq;
1365			if (pkt_len < mss)
1366				return -EINVAL;
1367		}
1368
1369		/* Round if necessary so that SACKs cover only full MSSes
1370		 * and/or the remaining small portion (if present)
1371		 */
1372		if (pkt_len > mss) {
1373			unsigned int new_len = (pkt_len / mss) * mss;
1374			if (!in_sack && new_len < pkt_len)
1375				new_len += mss;
1376			pkt_len = new_len;
1377		}
1378
1379		if (pkt_len >= skb->len && !in_sack)
1380			return 0;
1381
1382		err = tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
1383				   pkt_len, mss, GFP_ATOMIC);
1384		if (err < 0)
1385			return err;
1386	}
1387
1388	return in_sack;
1389}
1390
1391/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
1392static u8 tcp_sacktag_one(struct sock *sk,
1393			  struct tcp_sacktag_state *state, u8 sacked,
1394			  u32 start_seq, u32 end_seq,
1395			  int dup_sack, int pcount,
1396			  u64 xmit_time)
1397{
1398	struct tcp_sock *tp = tcp_sk(sk);
1399
1400	/* Account D-SACK for retransmitted packet. */
1401	if (dup_sack && (sacked & TCPCB_RETRANS)) {
1402		if (tp->undo_marker && tp->undo_retrans > 0 &&
1403		    after(end_seq, tp->undo_marker))
1404			tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount);
1405		if ((sacked & TCPCB_SACKED_ACKED) &&
1406		    before(start_seq, state->reord))
1407				state->reord = start_seq;
1408	}
1409
1410	/* Nothing to do; acked frame is about to be dropped (was ACKed). */
1411	if (!after(end_seq, tp->snd_una))
1412		return sacked;
1413
1414	if (!(sacked & TCPCB_SACKED_ACKED)) {
1415		tcp_rack_advance(tp, sacked, end_seq, xmit_time);
1416
1417		if (sacked & TCPCB_SACKED_RETRANS) {
1418			/* If the segment is not tagged as lost,
1419			 * we do not clear RETRANS, believing
1420			 * that retransmission is still in flight.
1421			 */
1422			if (sacked & TCPCB_LOST) {
1423				sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
1424				tp->lost_out -= pcount;
1425				tp->retrans_out -= pcount;
1426			}
1427		} else {
1428			if (!(sacked & TCPCB_RETRANS)) {
1429				/* New sack for not retransmitted frame,
1430				 * which was in hole. It is reordering.
1431				 */
1432				if (before(start_seq,
1433					   tcp_highest_sack_seq(tp)) &&
1434				    before(start_seq, state->reord))
1435					state->reord = start_seq;
1436
1437				if (!after(end_seq, tp->high_seq))
1438					state->flag |= FLAG_ORIG_SACK_ACKED;
1439				if (state->first_sackt == 0)
1440					state->first_sackt = xmit_time;
1441				state->last_sackt = xmit_time;
1442			}
1443
1444			if (sacked & TCPCB_LOST) {
1445				sacked &= ~TCPCB_LOST;
1446				tp->lost_out -= pcount;
1447			}
1448		}
1449
1450		sacked |= TCPCB_SACKED_ACKED;
1451		state->flag |= FLAG_DATA_SACKED;
1452		tp->sacked_out += pcount;
1453		/* Out-of-order packets delivered */
1454		state->sack_delivered += pcount;
1455
1456		/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
1457		if (tp->lost_skb_hint &&
1458		    before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
1459			tp->lost_cnt_hint += pcount;
1460	}
1461
1462	/* D-SACK. We can detect redundant retransmission in S|R and plain R
1463	 * frames and clear it. undo_retrans is decreased above, L|R frames
1464	 * are accounted above as well.
1465	 */
1466	if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) {
1467		sacked &= ~TCPCB_SACKED_RETRANS;
1468		tp->retrans_out -= pcount;
1469	}
1470
1471	return sacked;
1472}
1473
1474/* Shift newly-SACKed bytes from this skb to the immediately previous
1475 * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
1476 */
1477static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *prev,
1478			    struct sk_buff *skb,
1479			    struct tcp_sacktag_state *state,
1480			    unsigned int pcount, int shifted, int mss,
1481			    bool dup_sack)
1482{
1483	struct tcp_sock *tp = tcp_sk(sk);
1484	u32 start_seq = TCP_SKB_CB(skb)->seq;	/* start of newly-SACKed */
1485	u32 end_seq = start_seq + shifted;	/* end of newly-SACKed */
1486
1487	BUG_ON(!pcount);
1488
1489	/* Adjust counters and hints for the newly sacked sequence
1490	 * range but discard the return value since prev is already
1491	 * marked. We must tag the range first because the seq
1492	 * advancement below implicitly advances
1493	 * tcp_highest_sack_seq() when skb is highest_sack.
1494	 */
1495	tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
1496			start_seq, end_seq, dup_sack, pcount,
1497			tcp_skb_timestamp_us(skb));
1498	tcp_rate_skb_delivered(sk, skb, state->rate);
1499
1500	if (skb == tp->lost_skb_hint)
1501		tp->lost_cnt_hint += pcount;
1502
1503	TCP_SKB_CB(prev)->end_seq += shifted;
1504	TCP_SKB_CB(skb)->seq += shifted;
1505
1506	tcp_skb_pcount_add(prev, pcount);
1507	WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount);
1508	tcp_skb_pcount_add(skb, -pcount);
1509
1510	/* When we're adding to gso_segs == 1, gso_size will be zero,
1511	 * in theory this shouldn't be necessary but as long as DSACK
1512	 * code can come after this skb later on it's better to keep
1513	 * setting gso_size to something.
1514	 */
1515	if (!TCP_SKB_CB(prev)->tcp_gso_size)
1516		TCP_SKB_CB(prev)->tcp_gso_size = mss;
1517
1518	/* CHECKME: To clear or not to clear? Mimics normal skb currently */
1519	if (tcp_skb_pcount(skb) <= 1)
1520		TCP_SKB_CB(skb)->tcp_gso_size = 0;
1521
1522	/* Difference in this won't matter, both ACKed by the same cumul. ACK */
1523	TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
1524
1525	if (skb->len > 0) {
1526		BUG_ON(!tcp_skb_pcount(skb));
1527		NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED);
1528		return false;
1529	}
1530
1531	/* Whole SKB was eaten :-) */
1532
1533	if (skb == tp->retransmit_skb_hint)
1534		tp->retransmit_skb_hint = prev;
1535	if (skb == tp->lost_skb_hint) {
1536		tp->lost_skb_hint = prev;
1537		tp->lost_cnt_hint -= tcp_skb_pcount(prev);
1538	}
1539
1540	TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1541	TCP_SKB_CB(prev)->eor = TCP_SKB_CB(skb)->eor;
1542	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1543		TCP_SKB_CB(prev)->end_seq++;
1544
1545	if (skb == tcp_highest_sack(sk))
1546		tcp_advance_highest_sack(sk, skb);
1547
1548	tcp_skb_collapse_tstamp(prev, skb);
1549	if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp))
1550		TCP_SKB_CB(prev)->tx.delivered_mstamp = 0;
1551
1552	tcp_rtx_queue_unlink_and_free(skb, sk);
1553
1554	NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED);
1555
1556	return true;
1557}
1558
1559/* I wish gso_size would have a bit more sane initialization than
1560 * something-or-zero which complicates things
1561 */
1562static int tcp_skb_seglen(const struct sk_buff *skb)
1563{
1564	return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
1565}
1566
1567/* Shifting pages past head area doesn't work */
1568static int skb_can_shift(const struct sk_buff *skb)
1569{
1570	return !skb_headlen(skb) && skb_is_nonlinear(skb);
1571}
1572
1573int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from,
1574		  int pcount, int shiftlen)
1575{
1576	/* TCP min gso_size is 8 bytes (TCP_MIN_GSO_SIZE)
1577	 * Since TCP_SKB_CB(skb)->tcp_gso_segs is 16 bits, we need
1578	 * to make sure not storing more than 65535 * 8 bytes per skb,
1579	 * even if current MSS is bigger.
1580	 */
1581	if (unlikely(to->len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE))
1582		return 0;
1583	if (unlikely(tcp_skb_pcount(to) + pcount > 65535))
1584		return 0;
1585	return skb_shift(to, from, shiftlen);
1586}
1587
1588/* Try collapsing SACK blocks spanning across multiple skbs to a single
1589 * skb.
1590 */
1591static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
1592					  struct tcp_sacktag_state *state,
1593					  u32 start_seq, u32 end_seq,
1594					  bool dup_sack)
1595{
1596	struct tcp_sock *tp = tcp_sk(sk);
1597	struct sk_buff *prev;
1598	int mss;
1599	int pcount = 0;
1600	int len;
1601	int in_sack;
1602
1603	/* Normally R but no L won't result in plain S */
1604	if (!dup_sack &&
1605	    (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS)
1606		goto fallback;
1607	if (!skb_can_shift(skb))
1608		goto fallback;
1609	/* This frame is about to be dropped (was ACKed). */
1610	if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
1611		goto fallback;
1612
1613	/* Can only happen with delayed DSACK + discard craziness */
1614	prev = skb_rb_prev(skb);
1615	if (!prev)
1616		goto fallback;
1617
1618	if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED)
1619		goto fallback;
1620
1621	if (!tcp_skb_can_collapse(prev, skb))
1622		goto fallback;
1623
1624	in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
1625		  !before(end_seq, TCP_SKB_CB(skb)->end_seq);
1626
1627	if (in_sack) {
1628		len = skb->len;
1629		pcount = tcp_skb_pcount(skb);
1630		mss = tcp_skb_seglen(skb);
1631
1632		/* TODO: Fix DSACKs to not fragment already SACKed and we can
1633		 * drop this restriction as unnecessary
1634		 */
1635		if (mss != tcp_skb_seglen(prev))
1636			goto fallback;
1637	} else {
1638		if (!after(TCP_SKB_CB(skb)->end_seq, start_seq))
1639			goto noop;
1640		/* CHECKME: Is this the non-MSS split case only? BTW, this will
1641		 * cause skipped skbs due to the advancing loop; the original
1642		 * has that feature too
1643		 */
1644		if (tcp_skb_pcount(skb) <= 1)
1645			goto noop;
1646
1647		in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
1648		if (!in_sack) {
1649			/* TODO: head merge to next could be attempted here
1650			 * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)),
1651			 * though it might not be worth of the additional hassle
1652			 *
1653			 * ...we can probably just fall back to what was done
1654			 * previously. We could try merging non-SACKed ones
1655			 * as well but it probably isn't going to pay off
1656			 * because later SACKs might again split them, and
1657			 * it would make skb timestamp tracking a considerably
1658			 * harder problem.
1659			 */
1660			goto fallback;
1661		}
1662
1663		len = end_seq - TCP_SKB_CB(skb)->seq;
1664		BUG_ON(len < 0);
1665		BUG_ON(len > skb->len);
1666
1667		/* MSS boundaries should be honoured or else pcount will
1668		 * severely break even though it makes things a bit trickier.
1669		 * Optimize common case to avoid most of the divides
1670		 */
1671		mss = tcp_skb_mss(skb);
1672
1673		/* TODO: Fix DSACKs to not fragment already SACKed and we can
1674		 * drop this restriction as unnecessary
1675		 */
1676		if (mss != tcp_skb_seglen(prev))
1677			goto fallback;
1678
1679		if (len == mss) {
1680			pcount = 1;
1681		} else if (len < mss) {
1682			goto noop;
1683		} else {
1684			pcount = len / mss;
1685			len = pcount * mss;
1686		}
1687	}
1688
1689	/* tcp_sacktag_one() won't SACK-tag ranges below snd_una */
1690	if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
1691		goto fallback;
1692
1693	if (!tcp_skb_shift(prev, skb, pcount, len))
1694		goto fallback;
1695	if (!tcp_shifted_skb(sk, prev, skb, state, pcount, len, mss, dup_sack))
1696		goto out;
1697
1698	/* A filled hole allows collapsing with the next skb as well; this is
1699	 * very useful when the "hole on every nth skb" pattern happens
1700	 */
1701	skb = skb_rb_next(prev);
1702	if (!skb)
1703		goto out;
1704
1705	if (!skb_can_shift(skb) ||
1706	    ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
1707	    (mss != tcp_skb_seglen(skb)))
1708		goto out;
1709
1710	if (!tcp_skb_can_collapse(prev, skb))
1711		goto out;
1712	len = skb->len;
1713	pcount = tcp_skb_pcount(skb);
1714	if (tcp_skb_shift(prev, skb, pcount, len))
1715		tcp_shifted_skb(sk, prev, skb, state, pcount,
1716				len, mss, 0);
1717
1718out:
1719	return prev;
1720
1721noop:
1722	return skb;
1723
1724fallback:
1725	NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
1726	return NULL;
1727}
1728
1729static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
1730					struct tcp_sack_block *next_dup,
1731					struct tcp_sacktag_state *state,
1732					u32 start_seq, u32 end_seq,
1733					bool dup_sack_in)
1734{
1735	struct tcp_sock *tp = tcp_sk(sk);
1736	struct sk_buff *tmp;
1737
1738	skb_rbtree_walk_from(skb) {
1739		int in_sack = 0;
1740		bool dup_sack = dup_sack_in;
1741
1742		/* queue is in-order => we can short-circuit the walk early */
1743		if (!before(TCP_SKB_CB(skb)->seq, end_seq))
1744			break;
1745
1746		if (next_dup  &&
1747		    before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
1748			in_sack = tcp_match_skb_to_sack(sk, skb,
1749							next_dup->start_seq,
1750							next_dup->end_seq);
1751			if (in_sack > 0)
1752				dup_sack = true;
1753		}
1754
1755		/* skb reference here is a bit tricky to get right, since
1756		 * shifting can eat and free both this skb and the next,
1757		 * so not even _safe variant of the loop is enough.
1758		 */
1759		if (in_sack <= 0) {
1760			tmp = tcp_shift_skb_data(sk, skb, state,
1761						 start_seq, end_seq, dup_sack);
1762			if (tmp) {
1763				if (tmp != skb) {
1764					skb = tmp;
1765					continue;
1766				}
1767
1768				in_sack = 0;
1769			} else {
1770				in_sack = tcp_match_skb_to_sack(sk, skb,
1771								start_seq,
1772								end_seq);
1773			}
1774		}
1775
1776		if (unlikely(in_sack < 0))
1777			break;
1778
1779		if (in_sack) {
1780			TCP_SKB_CB(skb)->sacked =
1781				tcp_sacktag_one(sk,
1782						state,
1783						TCP_SKB_CB(skb)->sacked,
1784						TCP_SKB_CB(skb)->seq,
1785						TCP_SKB_CB(skb)->end_seq,
1786						dup_sack,
1787						tcp_skb_pcount(skb),
1788						tcp_skb_timestamp_us(skb));
1789			tcp_rate_skb_delivered(sk, skb, state->rate);
1790			if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1791				list_del_init(&skb->tcp_tsorted_anchor);
1792
1793			if (!before(TCP_SKB_CB(skb)->seq,
1794				    tcp_highest_sack_seq(tp)))
1795				tcp_advance_highest_sack(sk, skb);
1796		}
1797	}
1798	return skb;
1799}
1800
1801static struct sk_buff *tcp_sacktag_bsearch(struct sock *sk, u32 seq)
1802{
1803	struct rb_node *parent, **p = &sk->tcp_rtx_queue.rb_node;
1804	struct sk_buff *skb;
1805
1806	while (*p) {
1807		parent = *p;
1808		skb = rb_to_skb(parent);
1809		if (before(seq, TCP_SKB_CB(skb)->seq)) {
1810			p = &parent->rb_left;
1811			continue;
1812		}
1813		if (!before(seq, TCP_SKB_CB(skb)->end_seq)) {
1814			p = &parent->rb_right;
1815			continue;
1816		}
1817		return skb;
1818	}
1819	return NULL;
1820}
1821
1822static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
1823					u32 skip_to_seq)
1824{
1825	if (skb && after(TCP_SKB_CB(skb)->seq, skip_to_seq))
1826		return skb;
1827
1828	return tcp_sacktag_bsearch(sk, skip_to_seq);
1829}
1830
1831static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
1832						struct sock *sk,
1833						struct tcp_sack_block *next_dup,
1834						struct tcp_sacktag_state *state,
1835						u32 skip_to_seq)
1836{
1837	if (!next_dup)
1838		return skb;
1839
1840	if (before(next_dup->start_seq, skip_to_seq)) {
1841		skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq);
1842		skb = tcp_sacktag_walk(skb, sk, NULL, state,
1843				       next_dup->start_seq, next_dup->end_seq,
1844				       1);
1845	}
1846
1847	return skb;
1848}
1849
1850static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache)
1851{
1852	return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
1853}
1854
1855static int
1856tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
1857			u32 prior_snd_una, struct tcp_sacktag_state *state)
1858{
1859	struct tcp_sock *tp = tcp_sk(sk);
1860	const unsigned char *ptr = (skb_transport_header(ack_skb) +
1861				    TCP_SKB_CB(ack_skb)->sacked);
1862	struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
1863	struct tcp_sack_block sp[TCP_NUM_SACKS];
1864	struct tcp_sack_block *cache;
1865	struct sk_buff *skb;
1866	int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
1867	int used_sacks;
1868	bool found_dup_sack = false;
1869	int i, j;
1870	int first_sack_index;
1871
1872	state->flag = 0;
1873	state->reord = tp->snd_nxt;
1874
1875	if (!tp->sacked_out)
1876		tcp_highest_sack_reset(sk);
1877
1878	found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
1879					 num_sacks, prior_snd_una, state);
1880
1881	/* Eliminate too old ACKs, but take into
1882	 * account more or less fresh ones, they can
1883	 * contain valid SACK info.
1884	 */
1885	if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
1886		return 0;
1887
1888	if (!tp->packets_out)
1889		goto out;
1890
1891	used_sacks = 0;
1892	first_sack_index = 0;
1893	for (i = 0; i < num_sacks; i++) {
1894		bool dup_sack = !i && found_dup_sack;
1895
1896		sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
1897		sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);
1898
1899		if (!tcp_is_sackblock_valid(tp, dup_sack,
1900					    sp[used_sacks].start_seq,
1901					    sp[used_sacks].end_seq)) {
1902			int mib_idx;
1903
1904			if (dup_sack) {
1905				if (!tp->undo_marker)
1906					mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;
1907				else
1908					mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;
1909			} else {
1910				/* Don't count olds caused by ACK reordering */
1911				if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
1912				    !after(sp[used_sacks].end_seq, tp->snd_una))
1913					continue;
1914				mib_idx = LINUX_MIB_TCPSACKDISCARD;
1915			}
1916
1917			NET_INC_STATS(sock_net(sk), mib_idx);
1918			if (i == 0)
1919				first_sack_index = -1;
1920			continue;
1921		}
1922
1923		/* Ignore very old stuff early */
1924		if (!after(sp[used_sacks].end_seq, prior_snd_una)) {
1925			if (i == 0)
1926				first_sack_index = -1;
1927			continue;
1928		}
1929
1930		used_sacks++;
1931	}
1932
1933	/* order SACK blocks to allow in order walk of the retrans queue */
1934	for (i = used_sacks - 1; i > 0; i--) {
1935		for (j = 0; j < i; j++) {
1936			if (after(sp[j].start_seq, sp[j + 1].start_seq)) {
1937				swap(sp[j], sp[j + 1]);
1938
1939				/* Track where the first SACK block goes to */
1940				if (j == first_sack_index)
1941					first_sack_index = j + 1;
1942			}
1943		}
1944	}
1945
1946	state->mss_now = tcp_current_mss(sk);
1947	skb = NULL;
1948	i = 0;
1949
1950	if (!tp->sacked_out) {
1951		/* It's already past, so skip checking against it */
1952		cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
1953	} else {
1954		cache = tp->recv_sack_cache;
1955		/* Skip empty blocks at the head of the cache */
1956		while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq &&
1957		       !cache->end_seq)
1958			cache++;
1959	}
1960
1961	while (i < used_sacks) {
1962		u32 start_seq = sp[i].start_seq;
1963		u32 end_seq = sp[i].end_seq;
1964		bool dup_sack = (found_dup_sack && (i == first_sack_index));
1965		struct tcp_sack_block *next_dup = NULL;
1966
1967		if (found_dup_sack && ((i + 1) == first_sack_index))
1968			next_dup = &sp[i + 1];
1969
1970		/* Skip too early cached blocks */
1971		while (tcp_sack_cache_ok(tp, cache) &&
1972		       !before(start_seq, cache->end_seq))
1973			cache++;
1974
1975		/* Can skip some work by looking recv_sack_cache? */
1976		if (tcp_sack_cache_ok(tp, cache) && !dup_sack &&
1977		    after(end_seq, cache->start_seq)) {
1978
1979			/* Head todo? */
1980			if (before(start_seq, cache->start_seq)) {
1981				skb = tcp_sacktag_skip(skb, sk, start_seq);
1982				skb = tcp_sacktag_walk(skb, sk, next_dup,
1983						       state,
1984						       start_seq,
1985						       cache->start_seq,
1986						       dup_sack);
1987			}
1988
1989			/* Rest of the block already fully processed? */
1990			if (!after(end_seq, cache->end_seq))
1991				goto advance_sp;
1992
1993			skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
1994						       state,
1995						       cache->end_seq);
1996
1997			/* ...tail remains todo... */
1998			if (tcp_highest_sack_seq(tp) == cache->end_seq) {
1999				/* ...but better entrypoint exists! */
2000				skb = tcp_highest_sack(sk);
2001				if (!skb)
2002					break;
2003				cache++;
2004				goto walk;
2005			}
2006
2007			skb = tcp_sacktag_skip(skb, sk, cache->end_seq);
2008			/* Check overlap against next cached too (past this one already) */
2009			cache++;
2010			continue;
2011		}
2012
2013		if (!before(start_seq, tcp_highest_sack_seq(tp))) {
2014			skb = tcp_highest_sack(sk);
2015			if (!skb)
2016				break;
2017		}
2018		skb = tcp_sacktag_skip(skb, sk, start_seq);
2019
2020walk:
2021		skb = tcp_sacktag_walk(skb, sk, next_dup, state,
2022				       start_seq, end_seq, dup_sack);
2023
2024advance_sp:
2025		i++;
2026	}
2027
2028	/* Clear the head of the cache sack blocks so we can skip it next time */
2029	for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) {
2030		tp->recv_sack_cache[i].start_seq = 0;
2031		tp->recv_sack_cache[i].end_seq = 0;
2032	}
2033	for (j = 0; j < used_sacks; j++)
2034		tp->recv_sack_cache[i++] = sp[j];
2035
2036	if (inet_csk(sk)->icsk_ca_state != TCP_CA_Loss || tp->undo_marker)
2037		tcp_check_sack_reordering(sk, state->reord, 0);
2038
2039	tcp_verify_left_out(tp);
2040out:
2041
2042#if FASTRETRANS_DEBUG > 0
2043	WARN_ON((int)tp->sacked_out < 0);
2044	WARN_ON((int)tp->lost_out < 0);
2045	WARN_ON((int)tp->retrans_out < 0);
2046	WARN_ON((int)tcp_packets_in_flight(tp) < 0);
2047#endif
2048	return state->flag;
2049}
2050
2051/* Limits sacked_out so that sum with lost_out isn't ever larger than
2052 * packets_out. Returns false if sacked_out adjustment wasn't necessary.
2053 */
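/* Example: with packets_out = 10, lost_out = 3 and sacked_out = 9 the sum
 * would exceed packets_out, so sacked_out is clamped to 10 - 3 = 7.
 */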
2054static bool tcp_limit_reno_sacked(struct tcp_sock *tp)
2055{
2056	u32 holes;
2057
2058	holes = max(tp->lost_out, 1U);
2059	holes = min(holes, tp->packets_out);
2060
2061	if ((tp->sacked_out + holes) > tp->packets_out) {
2062		tp->sacked_out = tp->packets_out - holes;
2063		return true;
2064	}
2065	return false;
2066}
2067
2068/* If we receive more dupacks than expected while counting segments
2069 * under the assumption of no reordering, interpret this as reordering.
2070 * The only other explanation is a bug in the receiver TCP.
2071 */
2072static void tcp_check_reno_reordering(struct sock *sk, const int addend)
2073{
2074	struct tcp_sock *tp = tcp_sk(sk);
2075
2076	if (!tcp_limit_reno_sacked(tp))
2077		return;
2078
2079	tp->reordering = min_t(u32, tp->packets_out + addend,
2080			       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
2081	tp->reord_seen++;
2082	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER);
2083}
2084
2085/* Emulate SACKs for SACKless connection: account for a new dupack. */
2086
2087static void tcp_add_reno_sack(struct sock *sk, int num_dupack, bool ece_ack)
2088{
2089	if (num_dupack) {
2090		struct tcp_sock *tp = tcp_sk(sk);
2091		u32 prior_sacked = tp->sacked_out;
2092		s32 delivered;
2093
2094		tp->sacked_out += num_dupack;
2095		tcp_check_reno_reordering(sk, 0);
2096		delivered = tp->sacked_out - prior_sacked;
2097		if (delivered > 0)
2098			tcp_count_delivered(tp, delivered, ece_ack);
2099		tcp_verify_left_out(tp);
2100	}
2101}
2102
2103/* Account for ACK, ACKing some data in Reno Recovery phase. */
2104
2105static void tcp_remove_reno_sacks(struct sock *sk, int acked, bool ece_ack)
2106{
2107	struct tcp_sock *tp = tcp_sk(sk);
2108
2109	if (acked > 0) {
2110		/* One ACK acked hole. The rest eat duplicate ACKs. */
2111		tcp_count_delivered(tp, max_t(int, acked - tp->sacked_out, 1),
2112				    ece_ack);
2113		if (acked - 1 >= tp->sacked_out)
2114			tp->sacked_out = 0;
2115		else
2116			tp->sacked_out -= acked - 1;
2117	}
2118	tcp_check_reno_reordering(sk, acked);
2119	tcp_verify_left_out(tp);
2120}
2121
2122static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
2123{
2124	tp->sacked_out = 0;
2125}
2126
2127void tcp_clear_retrans(struct tcp_sock *tp)
2128{
2129	tp->retrans_out = 0;
2130	tp->lost_out = 0;
2131	tp->undo_marker = 0;
2132	tp->undo_retrans = -1;
2133	tp->sacked_out = 0;
2134	tp->rto_stamp = 0;
2135	tp->total_rto = 0;
2136	tp->total_rto_recoveries = 0;
2137	tp->total_rto_time = 0;
2138}
2139
2140static inline void tcp_init_undo(struct tcp_sock *tp)
2141{
2142	tp->undo_marker = tp->snd_una;
2143
2144	/* Retransmission still in flight may cause DSACKs later. */
2145	/* First, account for regular retransmits in flight: */
2146	tp->undo_retrans = tp->retrans_out;
2147	/* Next, account for TLP retransmits in flight: */
2148	if (tp->tlp_high_seq && tp->tlp_retrans)
2149		tp->undo_retrans++;
2150	/* Finally, avoid 0, because undo_retrans==0 means "can undo now": */
2151	if (!tp->undo_retrans)
2152		tp->undo_retrans = -1;
2153}
2154
2155static bool tcp_is_rack(const struct sock *sk)
2156{
2157	return READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
2158		TCP_RACK_LOSS_DETECTION;
2159}
2160
2161/* If we detect SACK reneging, forget all SACK information
2162 * and reset tags completely, otherwise preserve SACKs. If receiver
2163 * dropped its ofo queue, we will know this due to reneging detection.
2164 */
2165static void tcp_timeout_mark_lost(struct sock *sk)
2166{
2167	struct tcp_sock *tp = tcp_sk(sk);
2168	struct sk_buff *skb, *head;
2169	bool is_reneg;			/* is receiver reneging on SACKs? */
2170
2171	head = tcp_rtx_queue_head(sk);
2172	is_reneg = head && (TCP_SKB_CB(head)->sacked & TCPCB_SACKED_ACKED);
2173	if (is_reneg) {
2174		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
2175		tp->sacked_out = 0;
2176		/* Mark SACK reneging until we recover from this loss event. */
2177		tp->is_sack_reneg = 1;
2178	} else if (tcp_is_reno(tp)) {
2179		tcp_reset_reno_sack(tp);
2180	}
2181
2182	skb = head;
2183	skb_rbtree_walk_from(skb) {
2184		if (is_reneg)
2185			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
2186		else if (tcp_is_rack(sk) && skb != head &&
2187			 tcp_rack_skb_timeout(tp, skb, 0) > 0)
2188			continue; /* Don't mark recently sent ones lost yet */
2189		tcp_mark_skb_lost(sk, skb);
2190	}
2191	tcp_verify_left_out(tp);
2192	tcp_clear_all_retrans_hints(tp);
2193}
2194
2195/* Enter Loss state. */
2196void tcp_enter_loss(struct sock *sk)
2197{
2198	const struct inet_connection_sock *icsk = inet_csk(sk);
2199	struct tcp_sock *tp = tcp_sk(sk);
2200	struct net *net = sock_net(sk);
2201	bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
2202	u8 reordering;
2203
2204	tcp_timeout_mark_lost(sk);
2205
2206	/* Reduce ssthresh if it has not yet been made inside this window. */
2207	if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
2208	    !after(tp->high_seq, tp->snd_una) ||
2209	    (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
2210		tp->prior_ssthresh = tcp_current_ssthresh(sk);
2211		tp->prior_cwnd = tcp_snd_cwnd(tp);
2212		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
2213		tcp_ca_event(sk, CA_EVENT_LOSS);
2214		tcp_init_undo(tp);
2215	}
2216	tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + 1);
2217	tp->snd_cwnd_cnt   = 0;
2218	tp->snd_cwnd_stamp = tcp_jiffies32;
2219
2220	/* Timeout in disordered state after receiving substantial DUPACKs
2221	 * suggests that the degree of reordering is over-estimated.
2222	 */
2223	reordering = READ_ONCE(net->ipv4.sysctl_tcp_reordering);
2224	if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
2225	    tp->sacked_out >= reordering)
2226		tp->reordering = min_t(unsigned int, tp->reordering,
2227				       reordering);
2228
2229	tcp_set_ca_state(sk, TCP_CA_Loss);
2230	tp->high_seq = tp->snd_nxt;
2231	tp->tlp_high_seq = 0;
2232	tcp_ecn_queue_cwr(tp);
2233
2234	/* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
2235	 * loss recovery is underway except recurring timeout(s) on
2236	 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
2237	 */
2238	tp->frto = READ_ONCE(net->ipv4.sysctl_tcp_frto) &&
2239		   (new_recovery || icsk->icsk_retransmits) &&
2240		   !inet_csk(sk)->icsk_mtup.probe_size;
2241}
2242
2243/* If an ACK arrived pointing to a remembered SACK, it means that our
2244 * remembered SACKs do not reflect the real state of the receiver, i.e.
2245 * the receiver _host_ is heavily congested (or buggy).
2246 *
2247 * To avoid big spurious retransmission bursts due to transient SACK
2248 * scoreboard oddities that look like reneging, we give the receiver a
2249 * little time (max(RTT/2, 10ms)) to send us some more ACKs that will
2250 * restore sanity to the SACK scoreboard. If the apparent reneging
2251 * persists until this RTO then we'll clear the SACK scoreboard.
2252 */
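/* Note on the delay computed below: srtt_us stores 8 * SRTT, so
 * (srtt_us >> 4) is SRTT/2; e.g. a 40ms SRTT yields a 20ms grace period,
 * while an SRTT below 20ms falls back to the 10ms floor.
 */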
2253static bool tcp_check_sack_reneging(struct sock *sk, int *ack_flag)
2254{
2255	if (*ack_flag & FLAG_SACK_RENEGING &&
2256	    *ack_flag & FLAG_SND_UNA_ADVANCED) {
2257		struct tcp_sock *tp = tcp_sk(sk);
2258		unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4),
2259					  msecs_to_jiffies(10));
2260
2261		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2262					  delay, TCP_RTO_MAX);
2263		*ack_flag &= ~FLAG_SET_XMIT_TIMER;
2264		return true;
2265	}
2266	return false;
2267}
2268
2269/* Heuristics to calculate the number of duplicate ACKs. There's no dupACK
2270 * counter when SACK is enabled (without SACK, sacked_out is used for
2271 * that purpose).
2272 *
2273 * With reordering, holes may still be in flight, so RFC3517 recovery
2274 * uses pure sacked_out (total number of SACKed segments) even though
2275 * it violates the RFC that uses duplicate ACKs, often these are equal
2276 * but when e.g. out-of-window ACKs or packet duplication occurs,
2277 * they differ. Since neither occurs due to loss, TCP should really
2278 * ignore them.
2279 */
2280static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
2281{
2282	return tp->sacked_out + 1;
2283}
2284
2285/* Linux NewReno/SACK/ECN state machine.
2286 * --------------------------------------
2287 *
2288 * "Open"	Normal state, no dubious events, fast path.
2289 * "Disorder"   In all respects it is "Open",
2290 *		but requires a bit more attention. It is entered when
2291 *		we see some SACKs or dupacks. It is split off from "Open"
2292 *		mainly to move some processing from the fast path to the slow one.
2293 * "CWR"	CWND was reduced due to some Congestion Notification event.
2294 *		It can be ECN, ICMP source quench, local device congestion.
2295 * "Recovery"	CWND was reduced, we are fast-retransmitting.
2296 * "Loss"	CWND was reduced due to RTO timeout or SACK reneging.
2297 *
2298 * tcp_fastretrans_alert() is entered:
2299 * - each incoming ACK, if state is not "Open"
2300 * - when the arriving ACK is unusual, namely:
2301 *	* SACK
2302 *	* Duplicate ACK.
2303 *	* ECN ECE.
2304 *
2305 * Counting packets in flight is pretty simple.
2306 *
2307 *	in_flight = packets_out - left_out + retrans_out
2308 *
2309 *	packets_out is SND.NXT-SND.UNA counted in packets.
2310 *
2311 *	retrans_out is number of retransmitted segments.
2312 *
2313 *	left_out is the number of segments that left the network but are not ACKed yet.
2314 *
2315 *		left_out = sacked_out + lost_out
2316 *
2317 *     sacked_out: Packets which arrived at the receiver out of order
2318 *		   and hence were not ACKed. With SACKs this number is simply
2319 *		   the amount of SACKed data. Even without SACKs
2320 *		   it is easy to give a pretty reliable estimate of this number
2321 *		   by counting duplicate ACKs.
2322 *
2323 *       lost_out: Packets lost by network. TCP has no explicit
2324 *		   "loss notification" feedback from network (for now).
2325 *		   It means that this number can be only _guessed_.
2326 *		   Actually, it is the heuristics to predict lossage that
2327 *		   distinguishes different algorithms.
2328 *
2329 *	F.e. after RTO, when all the queue is considered as lost,
2330 *	lost_out = packets_out and in_flight = retrans_out.
2331 *
2332 *		Essentially, we have now a few algorithms detecting
2333 *		lost packets.
2334 *
2335 *		If the receiver supports SACK:
2336 *
2337 *		RFC6675/3517: It is the conventional algorithm. A packet is
2338 *		considered lost if the number of higher sequence packets
2339 * SACKed is greater than or equal to the DUPACK threshold
2340 *		(reordering). This is implemented in tcp_mark_head_lost and
2341 *		tcp_update_scoreboard.
2342 *
2343 *		RACK (draft-ietf-tcpm-rack-01): it is a newer algorithm
2344 *		(2017-) that checks timing instead of counting DUPACKs.
2345 *		Essentially a packet is considered lost if it's not S/ACKed
2346 *		after RTT + reordering_window, where both metrics are
2347 *		dynamically measured and adjusted. This is implemented in
2348 *		tcp_rack_mark_lost.
2349 *
2350 *		If the receiver does not support SACK:
2351 *
2352 *		NewReno (RFC6582): in Recovery we assume that one segment
2353 *		is lost (classic Reno). While we are in Recovery and
2354 *		a partial ACK arrives, we assume that one more packet
2355 *		is lost (NewReno). These heuristics are the same in NewReno
2356 *		and SACK.
2357 *
2358 * The really tricky part of the algorithm (requiring careful tuning)
2359 * is hidden in tcp_time_to_recover() and tcp_xmit_retransmit_queue().
2360 * The first determines the moment _when_ we should reduce CWND and,
2361 * hence, slow down forward transmission. In fact, it determines the moment
2362 * when we decide that a hole is caused by loss rather than by reordering.
2363 *
2364 * tcp_xmit_retransmit_queue() decides _what_ we should retransmit to fill
2365 * the holes caused by lost packets.
2366 *
2367 * And the most logically complicated part of the algorithm is the undo
2368 * heuristics. We detect false retransmits due to both too early
2369 * fast retransmit (reordering) and underestimated RTO, analyzing
2370 * timestamps and D-SACKs. When we detect that some segments were
2371 * retransmitted by mistake and CWND reduction was wrong, we undo
2372 * window reduction and abort recovery phase. This logic is hidden
2373 * inside several functions named tcp_try_undo_<something>.
2374 */
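/* For reference, a minimal sketch of the in_flight bookkeeping described
 * above. These helpers are assumed to live in include/net/tcp.h and look
 * roughly like:
 *
 *	static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
 *	{
 *		return tp->sacked_out + tp->lost_out;
 *	}
 *
 *	static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
 *	{
 *		return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
 *	}
 */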
2375
2376/* This function decides when we should leave the Disorder state
2377 * and enter the Recovery phase, reducing the congestion window.
2378 *
2379 * Main question: may we further continue forward transmission
2380 * with the same cwnd?
2381 */
2382static bool tcp_time_to_recover(struct sock *sk, int flag)
2383{
2384	struct tcp_sock *tp = tcp_sk(sk);
2385
2386	/* Trick#1: The loss is proven. */
2387	if (tp->lost_out)
2388		return true;
2389
2390	/* Not-A-Trick#2 : Classic rule... */
2391	if (!tcp_is_rack(sk) && tcp_dupack_heuristics(tp) > tp->reordering)
2392		return true;
2393
2394	return false;
2395}
2396
2397/* Detect loss in event "A" above by marking head of queue up as lost.
2398 * For RFC3517 SACK, a segment is considered lost if it
2399 * has at least tp->reordering SACKed segments above it; "packets" refers to
2400 * the maximum SACKed segments to pass before reaching this limit.
2401 */
2402static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
2403{
2404	struct tcp_sock *tp = tcp_sk(sk);
2405	struct sk_buff *skb;
2406	int cnt;
2407	/* Use SACK to deduce losses of new sequences sent during recovery */
2408	const u32 loss_high = tp->snd_nxt;
2409
2410	WARN_ON(packets > tp->packets_out);
2411	skb = tp->lost_skb_hint;
2412	if (skb) {
2413		/* Head already handled? */
2414		if (mark_head && after(TCP_SKB_CB(skb)->seq, tp->snd_una))
2415			return;
2416		cnt = tp->lost_cnt_hint;
2417	} else {
2418		skb = tcp_rtx_queue_head(sk);
2419		cnt = 0;
2420	}
2421
2422	skb_rbtree_walk_from(skb) {
2423		/* TODO: do this better */
2424		/* this is not the most efficient way to do this... */
2425		tp->lost_skb_hint = skb;
2426		tp->lost_cnt_hint = cnt;
2427
2428		if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
2429			break;
2430
2431		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2432			cnt += tcp_skb_pcount(skb);
2433
2434		if (cnt > packets)
2435			break;
2436
2437		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_LOST))
2438			tcp_mark_skb_lost(sk, skb);
2439
2440		if (mark_head)
2441			break;
2442	}
2443	tcp_verify_left_out(tp);
2444}
2445
2446/* Account newly detected lost packet(s) */
2447
2448static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
2449{
2450	struct tcp_sock *tp = tcp_sk(sk);
2451
2452	if (tcp_is_sack(tp)) {
2453		int sacked_upto = tp->sacked_out - tp->reordering;
2454		if (sacked_upto >= 0)
2455			tcp_mark_head_lost(sk, sacked_upto, 0);
2456		else if (fast_rexmit)
2457			tcp_mark_head_lost(sk, 1, 1);
2458	}
2459}
2460
2461static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when)
2462{
2463	return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
2464	       before(tp->rx_opt.rcv_tsecr, when);
2465}
2466
2467/* skb is spurious retransmitted if the returned timestamp echo
2468 * reply is prior to the skb transmission time
2469 */
2470static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp,
2471				     const struct sk_buff *skb)
2472{
2473	return (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) &&
2474	       tcp_tsopt_ecr_before(tp, tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb));
2475}
2476
2477/* Nothing was retransmitted, or the returned timestamp is less
2478 * than the timestamp of the first retransmission.
2479 */
2480static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
2481{
2482	const struct sock *sk = (const struct sock *)tp;
2483
2484	if (tp->retrans_stamp &&
2485	    tcp_tsopt_ecr_before(tp, tp->retrans_stamp))
2486		return true;  /* got echoed TS before first retransmission */
2487
2488	/* Check if nothing was retransmitted (retrans_stamp==0), which may
2489	 * happen in fast recovery due to TSQ. But we ignore zero retrans_stamp
2490	 * in TCP_SYN_SENT, since when we set FLAG_SYN_ACKED we also clear
2491	 * retrans_stamp even if we had retransmitted the SYN.
2492	 */
2493	if (!tp->retrans_stamp &&	   /* no record of a retransmit/SYN? */
2494	    sk->sk_state != TCP_SYN_SENT)  /* not the FLAG_SYN_ACKED case? */
2495		return true;  /* nothing was retransmitted */
2496
2497	return false;
2498}
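/* Illustrative summary (not from the original file): the echoed-timestamp
 * test above is an Eifel-style detection -- if the peer echoes a TSecr that
 * predates our first retransmission, the ACK was triggered by the original
 * transmission, so the retransmit was spurious and the cwnd reduction can
 * be undone.
 */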
2499
2500/* Undo procedures. */
2501
2502/* We can clear retrans_stamp when there are no retransmissions in the
2503 * window. It would seem that it is trivially available for us in
2504 * tp->retrans_out; however, that kind of assumption doesn't consider
2505 * what will happen if errors occur when sending a retransmission for the
2506 * second time. ...It could be that such a segment has only
2507 * TCPCB_EVER_RETRANS set at the present time. It seems that checking
2508 * the head skb is enough except for some reneging corner cases that
2509 * are not worth the effort.
2510 *
2511 * Main reason for all this complexity is the fact that connection dying
2512 * time now depends on the validity of the retrans_stamp, in particular,
2513 * that successive retransmissions of a segment must not advance
2514 * retrans_stamp under any conditions.
2515 */
2516static bool tcp_any_retrans_done(const struct sock *sk)
2517{
2518	const struct tcp_sock *tp = tcp_sk(sk);
2519	struct sk_buff *skb;
2520
2521	if (tp->retrans_out)
2522		return true;
2523
2524	skb = tcp_rtx_queue_head(sk);
2525	if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
2526		return true;
2527
2528	return false;
2529}
2530
2531/* If loss recovery is finished and there are no retransmits out in the
2532 * network, then we clear retrans_stamp so that upon the next loss recovery
2533 * retransmits_timed_out() and timestamp-undo are using the correct value.
2534 */
2535static void tcp_retrans_stamp_cleanup(struct sock *sk)
2536{
2537	if (!tcp_any_retrans_done(sk))
2538		tcp_sk(sk)->retrans_stamp = 0;
2539}
2540
2541static void DBGUNDO(struct sock *sk, const char *msg)
2542{
2543#if FASTRETRANS_DEBUG > 1
2544	struct tcp_sock *tp = tcp_sk(sk);
2545	struct inet_sock *inet = inet_sk(sk);
2546
2547	if (sk->sk_family == AF_INET) {
2548		pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
2549			 msg,
2550			 &inet->inet_daddr, ntohs(inet->inet_dport),
2551			 tcp_snd_cwnd(tp), tcp_left_out(tp),
2552			 tp->snd_ssthresh, tp->prior_ssthresh,
2553			 tp->packets_out);
2554	}
2555#if IS_ENABLED(CONFIG_IPV6)
2556	else if (sk->sk_family == AF_INET6) {
2557		pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
2558			 msg,
2559			 &sk->sk_v6_daddr, ntohs(inet->inet_dport),
2560			 tcp_snd_cwnd(tp), tcp_left_out(tp),
2561			 tp->snd_ssthresh, tp->prior_ssthresh,
2562			 tp->packets_out);
2563	}
2564#endif
2565#endif
2566}
2567
2568static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
2569{
2570	struct tcp_sock *tp = tcp_sk(sk);
2571
2572	if (unmark_loss) {
2573		struct sk_buff *skb;
2574
2575		skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
2576			TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
2577		}
2578		tp->lost_out = 0;
2579		tcp_clear_all_retrans_hints(tp);
2580	}
2581
2582	if (tp->prior_ssthresh) {
2583		const struct inet_connection_sock *icsk = inet_csk(sk);
2584
2585		tcp_snd_cwnd_set(tp, icsk->icsk_ca_ops->undo_cwnd(sk));
2586
2587		if (tp->prior_ssthresh > tp->snd_ssthresh) {
2588			tp->snd_ssthresh = tp->prior_ssthresh;
2589			tcp_ecn_withdraw_cwr(tp);
2590		}
2591	}
2592	tp->snd_cwnd_stamp = tcp_jiffies32;
2593	tp->undo_marker = 0;
2594	tp->rack.advanced = 1; /* Force RACK to re-exam losses */
2595}
2596
2597static inline bool tcp_may_undo(const struct tcp_sock *tp)
2598{
2599	return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
2600}
2601
2602static bool tcp_is_non_sack_preventing_reopen(struct sock *sk)
2603{
2604	struct tcp_sock *tp = tcp_sk(sk);
2605
2606	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
2607		/* Hold old state until something *above* high_seq
2608		 * is ACKed. For Reno it is a MUST to prevent false
2609		 * fast retransmits (RFC2582). SACK TCP is safe. */
2610		if (!tcp_any_retrans_done(sk))
2611			tp->retrans_stamp = 0;
2612		return true;
2613	}
2614	return false;
2615}
2616
2617/* People celebrate: "We love our President!" */
2618static bool tcp_try_undo_recovery(struct sock *sk)
2619{
2620	struct tcp_sock *tp = tcp_sk(sk);
2621
2622	if (tcp_may_undo(tp)) {
2623		int mib_idx;
2624
2625		/* Happy end! We did not retransmit anything
2626		 * or our original transmission succeeded.
2627		 */
2628		DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
2629		tcp_undo_cwnd_reduction(sk, false);
2630		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
2631			mib_idx = LINUX_MIB_TCPLOSSUNDO;
2632		else
2633			mib_idx = LINUX_MIB_TCPFULLUNDO;
2634
2635		NET_INC_STATS(sock_net(sk), mib_idx);
2636	} else if (tp->rack.reo_wnd_persist) {
2637		tp->rack.reo_wnd_persist--;
2638	}
2639	if (tcp_is_non_sack_preventing_reopen(sk))
2640		return true;
2641	tcp_set_ca_state(sk, TCP_CA_Open);
2642	tp->is_sack_reneg = 0;
2643	return false;
2644}
2645
2646/* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
2647static bool tcp_try_undo_dsack(struct sock *sk)
2648{
2649	struct tcp_sock *tp = tcp_sk(sk);
2650
2651	if (tp->undo_marker && !tp->undo_retrans) {
2652		tp->rack.reo_wnd_persist = min(TCP_RACK_RECOVERY_THRESH,
2653					       tp->rack.reo_wnd_persist + 1);
2654		DBGUNDO(sk, "D-SACK");
2655		tcp_undo_cwnd_reduction(sk, false);
2656		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
2657		return true;
2658	}
2659	return false;
2660}
2661
2662/* Undo during loss recovery after partial ACK or using F-RTO. */
2663static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
2664{
2665	struct tcp_sock *tp = tcp_sk(sk);
2666
2667	if (frto_undo || tcp_may_undo(tp)) {
2668		tcp_undo_cwnd_reduction(sk, true);
2669
2670		DBGUNDO(sk, "partial loss");
2671		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
2672		if (frto_undo)
2673			NET_INC_STATS(sock_net(sk),
2674					LINUX_MIB_TCPSPURIOUSRTOS);
2675		inet_csk(sk)->icsk_retransmits = 0;
2676		if (tcp_is_non_sack_preventing_reopen(sk))
2677			return true;
2678		if (frto_undo || tcp_is_sack(tp)) {
2679			tcp_set_ca_state(sk, TCP_CA_Open);
2680			tp->is_sack_reneg = 0;
2681		}
2682		return true;
2683	}
2684	return false;
2685}
2686
2687/* The cwnd reduction in CWR and Recovery uses the PRR algorithm in RFC 6937.
2688 * It computes the number of packets to send (sndcnt) based on packets newly
2689 * delivered:
2690 *   1) If the number of packets in flight is larger than ssthresh, PRR
2691 *	spreads the cwnd reduction across a full RTT.
2692 *   2) Otherwise PRR uses packet conservation to send as much as delivered.
2693 *      But when SND_UNA is acked without further losses, it slow-starts
2694 *      cwnd up to ssthresh to speed up the recovery.
2695 */
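/* A worked example of the PRR proportional phase (illustrative numbers,
 * not taken from this file): with prior_cwnd = 10 and ssthresh = 7,
 * case 1) sends sndcnt = ceil(prr_delivered * 7 / 10) - prr_out segments
 * per ACK, so cwnd glides from 10 down to 7 over roughly one RTT instead
 * of collapsing to 7 immediately.
 */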
2696static void tcp_init_cwnd_reduction(struct sock *sk)
2697{
2698	struct tcp_sock *tp = tcp_sk(sk);
2699
2700	tp->high_seq = tp->snd_nxt;
2701	tp->tlp_high_seq = 0;
2702	tp->snd_cwnd_cnt = 0;
2703	tp->prior_cwnd = tcp_snd_cwnd(tp);
2704	tp->prr_delivered = 0;
2705	tp->prr_out = 0;
2706	tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
2707	tcp_ecn_queue_cwr(tp);
2708}
2709
2710void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int newly_lost, int flag)
2711{
2712	struct tcp_sock *tp = tcp_sk(sk);
2713	int sndcnt = 0;
2714	int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
2715
2716	if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd))
2717		return;
2718
2719	tp->prr_delivered += newly_acked_sacked;
2720	if (delta < 0) {
2721		u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
2722			       tp->prior_cwnd - 1;
2723		sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
2724	} else {
2725		sndcnt = max_t(int, tp->prr_delivered - tp->prr_out,
2726			       newly_acked_sacked);
2727		if (flag & FLAG_SND_UNA_ADVANCED && !newly_lost)
2728			sndcnt++;
2729		sndcnt = min(delta, sndcnt);
2730	}
2731	/* Force a fast retransmit upon entering fast recovery */
2732	sndcnt = max(sndcnt, (tp->prr_out ? 0 : 1));
2733	tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + sndcnt);
2734}
2735
2736static inline void tcp_end_cwnd_reduction(struct sock *sk)
2737{
2738	struct tcp_sock *tp = tcp_sk(sk);
2739
2740	if (inet_csk(sk)->icsk_ca_ops->cong_control)
2741		return;
2742
2743	/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
2744	if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
2745	    (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
2746		tcp_snd_cwnd_set(tp, tp->snd_ssthresh);
2747		tp->snd_cwnd_stamp = tcp_jiffies32;
2748	}
2749	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
2750}
2751
2752/* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */
2753void tcp_enter_cwr(struct sock *sk)
2754{
2755	struct tcp_sock *tp = tcp_sk(sk);
2756
2757	tp->prior_ssthresh = 0;
2758	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
2759		tp->undo_marker = 0;
2760		tcp_init_cwnd_reduction(sk);
2761		tcp_set_ca_state(sk, TCP_CA_CWR);
2762	}
2763}
2764EXPORT_SYMBOL(tcp_enter_cwr);
2765
2766static void tcp_try_keep_open(struct sock *sk)
2767{
2768	struct tcp_sock *tp = tcp_sk(sk);
2769	int state = TCP_CA_Open;
2770
2771	if (tcp_left_out(tp) || tcp_any_retrans_done(sk))
2772		state = TCP_CA_Disorder;
2773
2774	if (inet_csk(sk)->icsk_ca_state != state) {
2775		tcp_set_ca_state(sk, state);
2776		tp->high_seq = tp->snd_nxt;
2777	}
2778}
2779
2780static void tcp_try_to_open(struct sock *sk, int flag)
2781{
2782	struct tcp_sock *tp = tcp_sk(sk);
2783
2784	tcp_verify_left_out(tp);
2785
2786	if (!tcp_any_retrans_done(sk))
2787		tp->retrans_stamp = 0;
2788
2789	if (flag & FLAG_ECE)
2790		tcp_enter_cwr(sk);
2791
2792	if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
2793		tcp_try_keep_open(sk);
2794	}
2795}
2796
2797static void tcp_mtup_probe_failed(struct sock *sk)
2798{
2799	struct inet_connection_sock *icsk = inet_csk(sk);
2800
2801	icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
2802	icsk->icsk_mtup.probe_size = 0;
2803	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
2804}
2805
2806static void tcp_mtup_probe_success(struct sock *sk)
2807{
2808	struct tcp_sock *tp = tcp_sk(sk);
2809	struct inet_connection_sock *icsk = inet_csk(sk);
2810	u64 val;
2811
2812	tp->prior_ssthresh = tcp_current_ssthresh(sk);
2813
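	/* Scale cwnd so that roughly the same number of bytes stays in flight
	 * after the MTU grows: new_cwnd ~= old_cwnd * old_mtu / probe_size
	 * (illustrative example: doubling the MTU roughly halves cwnd).
	 */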
2814	val = (u64)tcp_snd_cwnd(tp) * tcp_mss_to_mtu(sk, tp->mss_cache);
2815	do_div(val, icsk->icsk_mtup.probe_size);
2816	DEBUG_NET_WARN_ON_ONCE((u32)val != val);
2817	tcp_snd_cwnd_set(tp, max_t(u32, 1U, val));
2818
2819	tp->snd_cwnd_cnt = 0;
2820	tp->snd_cwnd_stamp = tcp_jiffies32;
2821	tp->snd_ssthresh = tcp_current_ssthresh(sk);
2822
2823	icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
2824	icsk->icsk_mtup.probe_size = 0;
2825	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
2826	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
2827}
2828
2829/* Sometimes we deduce that packets have been dropped due to reasons other than
2830 * congestion, like path MTU reductions or failed client TFO attempts. In these
2831 * cases we call this function to retransmit as many packets as cwnd allows,
2832 * without reducing cwnd. Given that retransmits will set retrans_stamp to a
2833 * non-zero value (and may do so in a later calling context due to TSQ), we
2834 * also enter CA_Loss so that we track when all retransmitted packets are ACKed
2835 * and clear retrans_stamp when that happens (to ensure later recurring RTOs
2836 * are using the correct retrans_stamp and don't declare ETIMEDOUT
2837 * prematurely).
2838 */
2839static void tcp_non_congestion_loss_retransmit(struct sock *sk)
2840{
2841	const struct inet_connection_sock *icsk = inet_csk(sk);
2842	struct tcp_sock *tp = tcp_sk(sk);
2843
2844	if (icsk->icsk_ca_state != TCP_CA_Loss) {
2845		tp->high_seq = tp->snd_nxt;
2846		tp->snd_ssthresh = tcp_current_ssthresh(sk);
2847		tp->prior_ssthresh = 0;
2848		tp->undo_marker = 0;
2849		tcp_set_ca_state(sk, TCP_CA_Loss);
2850	}
2851	tcp_xmit_retransmit_queue(sk);
2852}
2853
2854/* Do a simple retransmit without using the backoff mechanisms in
2855 * tcp_timer. This is used for path mtu discovery.
2856 * The socket is already locked here.
2857 */
2858void tcp_simple_retransmit(struct sock *sk)
2859{
2860	struct tcp_sock *tp = tcp_sk(sk);
2861	struct sk_buff *skb;
2862	int mss;
2863
2864	/* A fastopen SYN request is stored as two separate packets within
2865	 * the retransmit queue, this is done by tcp_send_syn_data().
2866	 * As a result simply checking the MSS of the frames in the queue
2867	 * will not work for the SYN packet.
2868	 *
2869	 * Us being here is an indication of a path MTU issue so we can
2870	 * assume that the fastopen SYN was lost and just mark all the
2871	 * frames in the retransmit queue as lost. We will use an MSS of
2872	 * -1 to mark all frames as lost, otherwise compute the current MSS.
2873	 */
2874	if (tp->syn_data && sk->sk_state == TCP_SYN_SENT)
2875		mss = -1;
2876	else
2877		mss = tcp_current_mss(sk);
2878
2879	skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
2880		if (tcp_skb_seglen(skb) > mss)
2881			tcp_mark_skb_lost(sk, skb);
2882	}
2883
2884	tcp_clear_retrans_hints_partial(tp);
2885
2886	if (!tp->lost_out)
2887		return;
2888
2889	if (tcp_is_reno(tp))
2890		tcp_limit_reno_sacked(tp);
2891
2892	tcp_verify_left_out(tp);
2893
2894	/* Don't muck with the congestion window here.
2895	 * Reason is that we do not increase amount of _data_
2896	 * in network, but units changed and effective
2897	 * cwnd/ssthresh really reduced now.
2898	 */
2899	tcp_non_congestion_loss_retransmit(sk);
2900}
2901EXPORT_SYMBOL(tcp_simple_retransmit);
2902
2903void tcp_enter_recovery(struct sock *sk, bool ece_ack)
2904{
2905	struct tcp_sock *tp = tcp_sk(sk);
2906	int mib_idx;
2907
2908	/* Start the clock with our fast retransmit, for undo and ETIMEDOUT. */
2909	tcp_retrans_stamp_cleanup(sk);
2910
2911	if (tcp_is_reno(tp))
2912		mib_idx = LINUX_MIB_TCPRENORECOVERY;
2913	else
2914		mib_idx = LINUX_MIB_TCPSACKRECOVERY;
2915
2916	NET_INC_STATS(sock_net(sk), mib_idx);
2917
2918	tp->prior_ssthresh = 0;
2919	tcp_init_undo(tp);
2920
2921	if (!tcp_in_cwnd_reduction(sk)) {
2922		if (!ece_ack)
2923			tp->prior_ssthresh = tcp_current_ssthresh(sk);
2924		tcp_init_cwnd_reduction(sk);
2925	}
2926	tcp_set_ca_state(sk, TCP_CA_Recovery);
2927}
2928
2929static void tcp_update_rto_time(struct tcp_sock *tp)
2930{
2931	if (tp->rto_stamp) {
2932		tp->total_rto_time += tcp_time_stamp_ms(tp) - tp->rto_stamp;
2933		tp->rto_stamp = 0;
2934	}
2935}
2936
2937/* Process an ACK in CA_Loss state. Move to CA_Open if lost data are
2938 * recovered or spurious. Otherwise retransmits more on partial ACKs.
2939 */
2940static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
2941			     int *rexmit)
2942{
2943	struct tcp_sock *tp = tcp_sk(sk);
2944	bool recovered = !before(tp->snd_una, tp->high_seq);
2945
2946	if ((flag & FLAG_SND_UNA_ADVANCED || rcu_access_pointer(tp->fastopen_rsk)) &&
2947	    tcp_try_undo_loss(sk, false))
2948		return;
2949
2950	if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
2951		/* Step 3.b. A timeout is spurious if not all data are
2952		 * lost, i.e., never-retransmitted data are (s)acked.
2953		 */
2954		if ((flag & FLAG_ORIG_SACK_ACKED) &&
2955		    tcp_try_undo_loss(sk, true))
2956			return;
2957
2958		if (after(tp->snd_nxt, tp->high_seq)) {
2959			if (flag & FLAG_DATA_SACKED || num_dupack)
2960				tp->frto = 0; /* Step 3.a. loss was real */
2961		} else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
2962			tp->high_seq = tp->snd_nxt;
2963			/* Step 2.b. Try send new data (but deferred until cwnd
2964			 * is updated in tcp_ack()). Otherwise fall back to
2965			 * the conventional recovery.
2966			 */
2967			if (!tcp_write_queue_empty(sk) &&
2968			    after(tcp_wnd_end(tp), tp->snd_nxt)) {
2969				*rexmit = REXMIT_NEW;
2970				return;
2971			}
2972			tp->frto = 0;
2973		}
2974	}
2975
2976	if (recovered) {
2977		/* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */
2978		tcp_try_undo_recovery(sk);
2979		return;
2980	}
2981	if (tcp_is_reno(tp)) {
2982		/* A Reno DUPACK means new data in F-RTO step 2.b above are
2983		 * delivered. Lower inflight to clock out (re)transmissions.
2984		 */
2985		if (after(tp->snd_nxt, tp->high_seq) && num_dupack)
2986			tcp_add_reno_sack(sk, num_dupack, flag & FLAG_ECE);
2987		else if (flag & FLAG_SND_UNA_ADVANCED)
2988			tcp_reset_reno_sack(tp);
2989	}
2990	*rexmit = REXMIT_LOST;
2991}
2992
2993static bool tcp_force_fast_retransmit(struct sock *sk)
2994{
2995	struct tcp_sock *tp = tcp_sk(sk);
2996
2997	return after(tcp_highest_sack_seq(tp),
2998		     tp->snd_una + tp->reordering * tp->mss_cache);
2999}
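/* Illustrative note: this is the RFC 6675-style trigger -- fast retransmit
 * is forced once the highest SACKed sequence runs more than
 * reordering * MSS beyond SND.UNA, i.e. at least a dupACK-threshold's worth
 * of data above the hole has been SACKed.
 */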
3000
3001/* Undo during fast recovery after partial ACK. */
3002static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una,
3003				 bool *do_lost)
3004{
3005	struct tcp_sock *tp = tcp_sk(sk);
3006
3007	if (tp->undo_marker && tcp_packet_delayed(tp)) {
3008		/* Plain luck! The hole is filled with a delayed
3009		 * packet, rather than with a retransmit. Check reordering.
3010		 */
3011		tcp_check_sack_reordering(sk, prior_snd_una, 1);
3012
3013		/* We are getting evidence that the reordering degree is higher
3014		 * than we realized. If there are no retransmits out then we
3015		 * can undo. Otherwise we clock out new packets but do not
3016		 * mark more packets lost or retransmit more.
3017		 */
3018		if (tp->retrans_out)
3019			return true;
3020
3021		if (!tcp_any_retrans_done(sk))
3022			tp->retrans_stamp = 0;
3023
3024		DBGUNDO(sk, "partial recovery");
3025		tcp_undo_cwnd_reduction(sk, true);
3026		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
3027		tcp_try_keep_open(sk);
3028	} else {
3029		/* Partial ACK arrived. Force fast retransmit. */
3030		*do_lost = tcp_force_fast_retransmit(sk);
3031	}
3032	return false;
3033}
3034
3035static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag)
3036{
3037	struct tcp_sock *tp = tcp_sk(sk);
3038
3039	if (tcp_rtx_queue_empty(sk))
3040		return;
3041
3042	if (unlikely(tcp_is_reno(tp))) {
3043		tcp_newreno_mark_lost(sk, *ack_flag & FLAG_SND_UNA_ADVANCED);
3044	} else if (tcp_is_rack(sk)) {
3045		u32 prior_retrans = tp->retrans_out;
3046
3047		if (tcp_rack_mark_lost(sk))
3048			*ack_flag &= ~FLAG_SET_XMIT_TIMER;
3049		if (prior_retrans > tp->retrans_out)
3050			*ack_flag |= FLAG_LOST_RETRANS;
3051	}
3052}
3053
3054/* Process an event, which can update packets-in-flight not trivially.
3055 * Main goal of this function is to calculate new estimate for left_out,
3056 * taking into account both packets sitting in receiver's buffer and
3057 * packets lost by network.
3058 *
3059 * Besides that it updates the congestion state when packet loss or ECN
3060 * is detected. But it does not reduce the cwnd, it is done by the
3061 * congestion control later.
3062 *
3063 * It does _not_ decide what to send; that is done in
3064 * tcp_xmit_retransmit_queue().
3065 */
3066static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
3067				  int num_dupack, int *ack_flag, int *rexmit)
3068{
3069	struct inet_connection_sock *icsk = inet_csk(sk);
3070	struct tcp_sock *tp = tcp_sk(sk);
3071	int fast_rexmit = 0, flag = *ack_flag;
3072	bool ece_ack = flag & FLAG_ECE;
3073	bool do_lost = num_dupack || ((flag & FLAG_DATA_SACKED) &&
3074				      tcp_force_fast_retransmit(sk));
3075
3076	if (!tp->packets_out && tp->sacked_out)
3077		tp->sacked_out = 0;
3078
3079	/* Now state machine starts.
3080	 * A. ECE, hence prohibit cwnd undoing, the reduction is required. */
3081	if (ece_ack)
3082		tp->prior_ssthresh = 0;
3083
3084	/* B. In all the states check for reneging SACKs. */
3085	if (tcp_check_sack_reneging(sk, ack_flag))
3086		return;
3087
3088	/* C. Check consistency of the current state. */
3089	tcp_verify_left_out(tp);
3090
3091	/* D. Check state exit conditions. State can be terminated
3092	 *    when high_seq is ACKed. */
3093	if (icsk->icsk_ca_state == TCP_CA_Open) {
3094		WARN_ON(tp->retrans_out != 0 && !tp->syn_data);
3095		tp->retrans_stamp = 0;
3096	} else if (!before(tp->snd_una, tp->high_seq)) {
3097		switch (icsk->icsk_ca_state) {
3098		case TCP_CA_CWR:
3099			/* CWR is to be held until something *above* high_seq
3100			 * is ACKed, for the CWR bit to reach the receiver. */
3101			if (tp->snd_una != tp->high_seq) {
3102				tcp_end_cwnd_reduction(sk);
3103				tcp_set_ca_state(sk, TCP_CA_Open);
3104			}
3105			break;
3106
3107		case TCP_CA_Recovery:
3108			if (tcp_is_reno(tp))
3109				tcp_reset_reno_sack(tp);
3110			if (tcp_try_undo_recovery(sk))
3111				return;
3112			tcp_end_cwnd_reduction(sk);
3113			break;
3114		}
3115	}
3116
3117	/* E. Process state. */
3118	switch (icsk->icsk_ca_state) {
3119	case TCP_CA_Recovery:
3120		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
3121			if (tcp_is_reno(tp))
3122				tcp_add_reno_sack(sk, num_dupack, ece_ack);
3123		} else if (tcp_try_undo_partial(sk, prior_snd_una, &do_lost))
3124			return;
3125
3126		if (tcp_try_undo_dsack(sk))
3127			tcp_try_to_open(sk, flag);
3128
3129		tcp_identify_packet_loss(sk, ack_flag);
3130		if (icsk->icsk_ca_state != TCP_CA_Recovery) {
3131			if (!tcp_time_to_recover(sk, flag))
3132				return;
3133			/* Undo reverts the recovery state. If loss is evident,
3134			 * start a new recovery (e.g. reordering then loss).
3135			 */
3136			tcp_enter_recovery(sk, ece_ack);
3137		}
3138		break;
3139	case TCP_CA_Loss:
3140		tcp_process_loss(sk, flag, num_dupack, rexmit);
3141		if (icsk->icsk_ca_state != TCP_CA_Loss)
3142			tcp_update_rto_time(tp);
3143		tcp_identify_packet_loss(sk, ack_flag);
3144		if (!(icsk->icsk_ca_state == TCP_CA_Open ||
3145		      (*ack_flag & FLAG_LOST_RETRANS)))
3146			return;
3147		/* Change state if cwnd is undone or retransmits are lost */
3148		fallthrough;
3149	default:
3150		if (tcp_is_reno(tp)) {
3151			if (flag & FLAG_SND_UNA_ADVANCED)
3152				tcp_reset_reno_sack(tp);
3153			tcp_add_reno_sack(sk, num_dupack, ece_ack);
3154		}
3155
3156		if (icsk->icsk_ca_state <= TCP_CA_Disorder)
3157			tcp_try_undo_dsack(sk);
3158
3159		tcp_identify_packet_loss(sk, ack_flag);
3160		if (!tcp_time_to_recover(sk, flag)) {
3161			tcp_try_to_open(sk, flag);
3162			return;
3163		}
3164
3165		/* MTU probe failure: don't reduce cwnd */
3166		if (icsk->icsk_ca_state < TCP_CA_CWR &&
3167		    icsk->icsk_mtup.probe_size &&
3168		    tp->snd_una == tp->mtu_probe.probe_seq_start) {
3169			tcp_mtup_probe_failed(sk);
3170			/* Restores the reduction we did in tcp_mtup_probe() */
3171			tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
3172			tcp_simple_retransmit(sk);
3173			return;
3174		}
3175
3176		/* Otherwise enter Recovery state */
3177		tcp_enter_recovery(sk, ece_ack);
3178		fast_rexmit = 1;
3179	}
3180
3181	if (!tcp_is_rack(sk) && do_lost)
3182		tcp_update_scoreboard(sk, fast_rexmit);
3183	*rexmit = REXMIT_LOST;
3184}
3185
3186static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us, const int flag)
3187{
3188	u32 wlen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_rtt_wlen) * HZ;
3189	struct tcp_sock *tp = tcp_sk(sk);
3190
3191	if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) {
3192		/* If the remote keeps returning delayed ACKs, eventually
3193		 * the min filter would pick it up and overestimate the
3194		 * prop. delay when it expires. Skip suspected delayed ACKs.
3195		 */
3196		return;
3197	}
3198	minmax_running_min(&tp->rtt_min, wlen, tcp_jiffies32,
3199			   rtt_us ? : jiffies_to_usecs(1));
3200}
3201
3202static bool tcp_ack_update_rtt(struct sock *sk, const int flag,
3203			       long seq_rtt_us, long sack_rtt_us,
3204			       long ca_rtt_us, struct rate_sample *rs)
3205{
3206	const struct tcp_sock *tp = tcp_sk(sk);
3207
3208	/* Prefer RTT measured from ACK's timing to TS-ECR. This is because
3209	 * broken middle-boxes or peers may corrupt TS-ECR fields. But
3210	 * Karn's algorithm forbids taking RTT if some retransmitted data
3211	 * is acked (RFC6298).
3212	 */
3213	if (seq_rtt_us < 0)
3214		seq_rtt_us = sack_rtt_us;
3215
3216	/* RTTM Rule: A TSecr value received in a segment is used to
3217	 * update the averaged RTT measurement only if the segment
3218	 * acknowledges some new data, i.e., only if it advances the
3219	 * left edge of the send window.
3220	 * See draft-ietf-tcplw-high-performance-00, section 3.3.
3221	 */
3222	if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp &&
3223	    tp->rx_opt.rcv_tsecr && flag & FLAG_ACKED)
3224		seq_rtt_us = ca_rtt_us = tcp_rtt_tsopt_us(tp);
3225
3226	rs->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet (or -1) */
3227	if (seq_rtt_us < 0)
3228		return false;
3229
3230	/* ca_rtt_us >= 0 is counting on the invariant that ca_rtt_us is
3231	 * always taken together with ACK, SACK, or TS-opts. Any negative
3232	 * values will be skipped with the seq_rtt_us < 0 check above.
3233	 */
3234	tcp_update_rtt_min(sk, ca_rtt_us, flag);
3235	tcp_rtt_estimator(sk, seq_rtt_us);
3236	tcp_set_rto(sk);
3237
3238	/* RFC6298: only reset backoff on valid RTT measurement. */
3239	inet_csk(sk)->icsk_backoff = 0;
3240	return true;
3241}
3242
3243/* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */
3244void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
3245{
3246	struct rate_sample rs;
3247	long rtt_us = -1L;
3248
3249	if (req && !req->num_retrans && tcp_rsk(req)->snt_synack)
3250		rtt_us = tcp_stamp_us_delta(tcp_clock_us(), tcp_rsk(req)->snt_synack);
3251
3252	tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us, &rs);
3253}
3254
3255
3256static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
3257{
3258	const struct inet_connection_sock *icsk = inet_csk(sk);
3259
3260	icsk->icsk_ca_ops->cong_avoid(sk, ack, acked);
3261	tcp_sk(sk)->snd_cwnd_stamp = tcp_jiffies32;
3262}
3263
3264/* Restart timer after forward progress on connection.
3265 * RFC2988 recommends to restart timer to now+rto.
3266 */
3267void tcp_rearm_rto(struct sock *sk)
3268{
3269	const struct inet_connection_sock *icsk = inet_csk(sk);
3270	struct tcp_sock *tp = tcp_sk(sk);
3271
3272	/* If the retrans timer is currently being used by Fast Open
3273	 * for SYN-ACK retrans purpose, stay put.
3274	 */
3275	if (rcu_access_pointer(tp->fastopen_rsk))
3276		return;
3277
3278	if (!tp->packets_out) {
3279		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
3280	} else {
3281		u32 rto = inet_csk(sk)->icsk_rto;
3282		/* Offset the time elapsed after installing regular RTO */
3283		if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
3284		    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
3285			s64 delta_us = tcp_rto_delta_us(sk);
3286			/* delta_us may not be positive if the socket is locked
3287			 * when the retrans timer fires and is rescheduled.
3288			 */
3289			rto = usecs_to_jiffies(max_t(int, delta_us, 1));
3290		}
3291		tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
3292				     TCP_RTO_MAX);
3293	}
3294}
3295
3296/* Try to schedule a loss probe; if that doesn't work, then schedule an RTO. */
3297static void tcp_set_xmit_timer(struct sock *sk)
3298{
3299	if (!tcp_schedule_loss_probe(sk, true))
3300		tcp_rearm_rto(sk);
3301}
3302
3303/* If we get here, the whole TSO packet has not been acked. */
3304static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
3305{
3306	struct tcp_sock *tp = tcp_sk(sk);
3307	u32 packets_acked;
3308
3309	BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una));
3310
3311	packets_acked = tcp_skb_pcount(skb);
3312	if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
3313		return 0;
3314	packets_acked -= tcp_skb_pcount(skb);
3315
3316	if (packets_acked) {
3317		BUG_ON(tcp_skb_pcount(skb) == 0);
3318		BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq));
3319	}
3320
3321	return packets_acked;
3322}
3323
3324static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
3325			   const struct sk_buff *ack_skb, u32 prior_snd_una)
3326{
3327	const struct skb_shared_info *shinfo;
3328
3329	/* Avoid cache line misses to get skb_shinfo() and shinfo->tx_flags */
3330	if (likely(!TCP_SKB_CB(skb)->txstamp_ack))
3331		return;
3332
3333	shinfo = skb_shinfo(skb);
3334	if (!before(shinfo->tskey, prior_snd_una) &&
3335	    before(shinfo->tskey, tcp_sk(sk)->snd_una)) {
3336		tcp_skb_tsorted_save(skb) {
3337			__skb_tstamp_tx(skb, ack_skb, NULL, sk, SCM_TSTAMP_ACK);
3338		} tcp_skb_tsorted_restore(skb);
3339	}
3340}
3341
3342/* Remove acknowledged frames from the retransmission queue. If our packet
3343 * is before the ack sequence we can discard it as it's confirmed to have
3344 * arrived at the other end.
3345 */
3346static int tcp_clean_rtx_queue(struct sock *sk, const struct sk_buff *ack_skb,
3347			       u32 prior_fack, u32 prior_snd_una,
3348			       struct tcp_sacktag_state *sack, bool ece_ack)
3349{
3350	const struct inet_connection_sock *icsk = inet_csk(sk);
3351	u64 first_ackt, last_ackt;
3352	struct tcp_sock *tp = tcp_sk(sk);
3353	u32 prior_sacked = tp->sacked_out;
3354	u32 reord = tp->snd_nxt; /* lowest acked un-retx un-sacked seq */
3355	struct sk_buff *skb, *next;
3356	bool fully_acked = true;
3357	long sack_rtt_us = -1L;
3358	long seq_rtt_us = -1L;
3359	long ca_rtt_us = -1L;
3360	u32 pkts_acked = 0;
3361	bool rtt_update;
3362	int flag = 0;
3363
3364	first_ackt = 0;
3365
3366	for (skb = skb_rb_first(&sk->tcp_rtx_queue); skb; skb = next) {
3367		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
3368		const u32 start_seq = scb->seq;
3369		u8 sacked = scb->sacked;
3370		u32 acked_pcount;
3371
3372		/* Determine how many packets and what bytes were acked, tso and else */
3373		if (after(scb->end_seq, tp->snd_una)) {
3374			if (tcp_skb_pcount(skb) == 1 ||
3375			    !after(tp->snd_una, scb->seq))
3376				break;
3377
3378			acked_pcount = tcp_tso_acked(sk, skb);
3379			if (!acked_pcount)
3380				break;
3381			fully_acked = false;
3382		} else {
3383			acked_pcount = tcp_skb_pcount(skb);
3384		}
3385
3386		if (unlikely(sacked & TCPCB_RETRANS)) {
3387			if (sacked & TCPCB_SACKED_RETRANS)
3388				tp->retrans_out -= acked_pcount;
3389			flag |= FLAG_RETRANS_DATA_ACKED;
3390		} else if (!(sacked & TCPCB_SACKED_ACKED)) {
3391			last_ackt = tcp_skb_timestamp_us(skb);
3392			WARN_ON_ONCE(last_ackt == 0);
3393			if (!first_ackt)
3394				first_ackt = last_ackt;
3395
3396			if (before(start_seq, reord))
3397				reord = start_seq;
3398			if (!after(scb->end_seq, tp->high_seq))
3399				flag |= FLAG_ORIG_SACK_ACKED;
3400		}
3401
3402		if (sacked & TCPCB_SACKED_ACKED) {
3403			tp->sacked_out -= acked_pcount;
3404		} else if (tcp_is_sack(tp)) {
3405			tcp_count_delivered(tp, acked_pcount, ece_ack);
3406			if (!tcp_skb_spurious_retrans(tp, skb))
3407				tcp_rack_advance(tp, sacked, scb->end_seq,
3408						 tcp_skb_timestamp_us(skb));
3409		}
3410		if (sacked & TCPCB_LOST)
3411			tp->lost_out -= acked_pcount;
3412
3413		tp->packets_out -= acked_pcount;
3414		pkts_acked += acked_pcount;
3415		tcp_rate_skb_delivered(sk, skb, sack->rate);
3416
3417		/* Initial outgoing SYNs get put onto the write_queue
3418		 * just like anything else we transmit.  It is not
3419		 * true data, and if we misinform our callers that
3420		 * this ACK acks real data, we will erroneously exit
3421		 * connection startup slow start one packet too
3422		 * quickly.  This is severely frowned upon behavior.
3423		 */
3424		if (likely(!(scb->tcp_flags & TCPHDR_SYN))) {
3425			flag |= FLAG_DATA_ACKED;
3426		} else {
3427			flag |= FLAG_SYN_ACKED;
3428			tp->retrans_stamp = 0;
3429		}
3430
3431		if (!fully_acked)
3432			break;
3433
3434		tcp_ack_tstamp(sk, skb, ack_skb, prior_snd_una);
3435
3436		next = skb_rb_next(skb);
3437		if (unlikely(skb == tp->retransmit_skb_hint))
3438			tp->retransmit_skb_hint = NULL;
3439		if (unlikely(skb == tp->lost_skb_hint))
3440			tp->lost_skb_hint = NULL;
3441		tcp_highest_sack_replace(sk, skb, next);
3442		tcp_rtx_queue_unlink_and_free(skb, sk);
3443	}
3444
3445	if (!skb)
3446		tcp_chrono_stop(sk, TCP_CHRONO_BUSY);
3447
3448	if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una)))
3449		tp->snd_up = tp->snd_una;
3450
3451	if (skb) {
3452		tcp_ack_tstamp(sk, skb, ack_skb, prior_snd_una);
3453		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
3454			flag |= FLAG_SACK_RENEGING;
3455	}
3456
3457	if (likely(first_ackt) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
3458		seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt);
3459		ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt);
3460
3461		if (pkts_acked == 1 && fully_acked && !prior_sacked &&
3462		    (tp->snd_una - prior_snd_una) < tp->mss_cache &&
3463		    sack->rate->prior_delivered + 1 == tp->delivered &&
3464		    !(flag & (FLAG_CA_ALERT | FLAG_SYN_ACKED))) {
3465			/* Conservatively mark a delayed ACK. It's typically
3466			 * from a lone runt packet over the round trip to
3467			 * a receiver w/o out-of-order or CE events.
3468			 */
3469			flag |= FLAG_ACK_MAYBE_DELAYED;
3470		}
3471	}
3472	if (sack->first_sackt) {
3473		sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt);
3474		ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->last_sackt);
3475	}
3476	rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us,
3477					ca_rtt_us, sack->rate);
3478
3479	if (flag & FLAG_ACKED) {
3480		flag |= FLAG_SET_XMIT_TIMER;  /* set TLP or RTO timer */
3481		if (unlikely(icsk->icsk_mtup.probe_size &&
3482			     !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
3483			tcp_mtup_probe_success(sk);
3484		}
3485
3486		if (tcp_is_reno(tp)) {
3487			tcp_remove_reno_sacks(sk, pkts_acked, ece_ack);
3488
3489			/* If any of the cumulatively ACKed segments was
3490			 * retransmitted, non-SACK case cannot confirm that
3491			 * progress was due to original transmission due to
3492			 * lack of TCPCB_SACKED_ACKED bits even if some of
3493			 * the packets may have been never retransmitted.
3494			 */
3495			if (flag & FLAG_RETRANS_DATA_ACKED)
3496				flag &= ~FLAG_ORIG_SACK_ACKED;
3497		} else {
3498			int delta;
3499
3500			/* Non-retransmitted hole got filled? That's reordering */
3501			if (before(reord, prior_fack))
3502				tcp_check_sack_reordering(sk, reord, 0);
3503
3504			delta = prior_sacked - tp->sacked_out;
3505			tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
3506		}
3507	} else if (skb && rtt_update && sack_rtt_us >= 0 &&
3508		   sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp,
3509						    tcp_skb_timestamp_us(skb))) {
3510		/* Do not re-arm RTO if the sack RTT is measured from data sent
3511		 * after when the head was last (re)transmitted. Otherwise the
3512		 * timeout may continue to extend in loss recovery.
3513		 */
3514		flag |= FLAG_SET_XMIT_TIMER;  /* set TLP or RTO timer */
3515	}
3516
3517	if (icsk->icsk_ca_ops->pkts_acked) {
3518		struct ack_sample sample = { .pkts_acked = pkts_acked,
3519					     .rtt_us = sack->rate->rtt_us };
3520
3521		sample.in_flight = tp->mss_cache *
3522			(tp->delivered - sack->rate->prior_delivered);
3523		icsk->icsk_ca_ops->pkts_acked(sk, &sample);
3524	}
3525
3526#if FASTRETRANS_DEBUG > 0
3527	WARN_ON((int)tp->sacked_out < 0);
3528	WARN_ON((int)tp->lost_out < 0);
3529	WARN_ON((int)tp->retrans_out < 0);
3530	if (!tp->packets_out && tcp_is_sack(tp)) {
3531		icsk = inet_csk(sk);
3532		if (tp->lost_out) {
3533			pr_debug("Leak l=%u %d\n",
3534				 tp->lost_out, icsk->icsk_ca_state);
3535			tp->lost_out = 0;
3536		}
3537		if (tp->sacked_out) {
3538			pr_debug("Leak s=%u %d\n",
3539				 tp->sacked_out, icsk->icsk_ca_state);
3540			tp->sacked_out = 0;
3541		}
3542		if (tp->retrans_out) {
3543			pr_debug("Leak r=%u %d\n",
3544				 tp->retrans_out, icsk->icsk_ca_state);
3545			tp->retrans_out = 0;
3546		}
3547	}
3548#endif
3549	return flag;
3550}
3551
3552static void tcp_ack_probe(struct sock *sk)
3553{
3554	struct inet_connection_sock *icsk = inet_csk(sk);
3555	struct sk_buff *head = tcp_send_head(sk);
3556	const struct tcp_sock *tp = tcp_sk(sk);
3557
3558	/* Did a usable window open? */
3559	if (!head)
3560		return;
3561	if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) {
3562		icsk->icsk_backoff = 0;
3563		icsk->icsk_probes_tstamp = 0;
3564		inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
3565		/* Socket must be woken up by a subsequent tcp_data_snd_check().
3566		 * This function is not for arbitrary use!
3567		 */
3568	} else {
3569		unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);
3570
3571		when = tcp_clamp_probe0_to_user_timeout(sk, when);
3572		tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, when, TCP_RTO_MAX);
3573	}
3574}
3575
3576static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
3577{
3578	return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
3579		inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
3580}
3581
3582/* Decide whether to run the increase function of congestion control. */
3583static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
3584{
3585	/* If reordering is high then always grow cwnd whenever data is
3586	 * delivered regardless of its ordering. Otherwise stay conservative
3587	 * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/
3588	 * new SACK or ECE mark may first advance cwnd here and later reduce
3589	 * cwnd in tcp_fastretrans_alert() based on more states.
3590	 */
3591	if (tcp_sk(sk)->reordering >
3592	    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering))
3593		return flag & FLAG_FORWARD_PROGRESS;
3594
3595	return flag & FLAG_DATA_ACKED;
3596}
3597
3598/* The "ultimate" congestion control function that aims to replace the rigid
3599 * cwnd increase and decrease control (tcp_cong_avoid,tcp_*cwnd_reduction).
3600 * It's called toward the end of processing an ACK with precise rate
3601 * information. All transmission or retransmission are delayed afterwards.
3602 */
3603static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked,
3604			     int flag, const struct rate_sample *rs)
3605{
3606	const struct inet_connection_sock *icsk = inet_csk(sk);
3607
3608	if (icsk->icsk_ca_ops->cong_control) {
3609		icsk->icsk_ca_ops->cong_control(sk, ack, flag, rs);
3610		return;
3611	}
3612
3613	if (tcp_in_cwnd_reduction(sk)) {
3614		/* Reduce cwnd if state mandates */
3615		tcp_cwnd_reduction(sk, acked_sacked, rs->losses, flag);
3616	} else if (tcp_may_raise_cwnd(sk, flag)) {
3617		/* Advance cwnd if state allows */
3618		tcp_cong_avoid(sk, ack, acked_sacked);
3619	}
3620	tcp_update_pacing_rate(sk);
3621}
3622
3623/* Check that window update is acceptable.
3624 * The function assumes that snd_una<=ack<=snd_next.
3625 */
3626static inline bool tcp_may_update_window(const struct tcp_sock *tp,
3627					const u32 ack, const u32 ack_seq,
3628					const u32 nwin)
3629{
3630	return	after(ack, tp->snd_una) ||
3631		after(ack_seq, tp->snd_wl1) ||
3632		(ack_seq == tp->snd_wl1 && (nwin > tp->snd_wnd || !nwin));
3633}
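/* Informal reading of the check above: accept the update if the ACK
 * acknowledges new data, if it carries a newer sequence than the last
 * window update (SND.WL1), or if it is for the same sequence but advertises
 * a larger (or a zero) window.
 */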
3634
3635static void tcp_snd_sne_update(struct tcp_sock *tp, u32 ack)
3636{
3637#ifdef CONFIG_TCP_AO
3638	struct tcp_ao_info *ao;
3639
3640	if (!static_branch_unlikely(&tcp_ao_needed.key))
3641		return;
3642
3643	ao = rcu_dereference_protected(tp->ao_info,
3644				       lockdep_sock_is_held((struct sock *)tp));
3645	if (ao && ack < tp->snd_una) {
3646		ao->snd_sne++;
3647		trace_tcp_ao_snd_sne_update((struct sock *)tp, ao->snd_sne);
3648	}
3649#endif
3650}
3651
3652/* If we update tp->snd_una, also update tp->bytes_acked */
3653static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
3654{
3655	u32 delta = ack - tp->snd_una;
3656
3657	sock_owned_by_me((struct sock *)tp);
3658	tp->bytes_acked += delta;
3659	tcp_snd_sne_update(tp, ack);
3660	tp->snd_una = ack;
3661}
3662
3663static void tcp_rcv_sne_update(struct tcp_sock *tp, u32 seq)
3664{
3665#ifdef CONFIG_TCP_AO
3666	struct tcp_ao_info *ao;
3667
3668	if (!static_branch_unlikely(&tcp_ao_needed.key))
3669		return;
3670
3671	ao = rcu_dereference_protected(tp->ao_info,
3672				       lockdep_sock_is_held((struct sock *)tp));
3673	if (ao && seq < tp->rcv_nxt) {
3674		ao->rcv_sne++;
3675		trace_tcp_ao_rcv_sne_update((struct sock *)tp, ao->rcv_sne);
3676	}
3677#endif
3678}
3679
3680/* If we update tp->rcv_nxt, also update tp->bytes_received */
3681static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
3682{
3683	u32 delta = seq - tp->rcv_nxt;
3684
3685	sock_owned_by_me((struct sock *)tp);
3686	tp->bytes_received += delta;
3687	tcp_rcv_sne_update(tp, seq);
3688	WRITE_ONCE(tp->rcv_nxt, seq);
3689}
3690
3691/* Update our send window.
3692 *
3693 * The window update algorithm described in RFC793/RFC1122 (used in linux-2.2
3694 * and in FreeBSD; NetBSD's is even worse) is wrong.
3695 */
3696static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack,
3697				 u32 ack_seq)
3698{
3699	struct tcp_sock *tp = tcp_sk(sk);
3700	int flag = 0;
3701	u32 nwin = ntohs(tcp_hdr(skb)->window);
3702
3703	if (likely(!tcp_hdr(skb)->syn))
3704		nwin <<= tp->rx_opt.snd_wscale;
3705
3706	if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
3707		flag |= FLAG_WIN_UPDATE;
3708		tcp_update_wl(tp, ack_seq);
3709
3710		if (tp->snd_wnd != nwin) {
3711			tp->snd_wnd = nwin;
3712
3713			/* Note, it is the only place, where
3714			 * fast path is recovered for sending TCP.
3715			 */
3716			tp->pred_flags = 0;
3717			tcp_fast_path_check(sk);
3718
3719			if (!tcp_write_queue_empty(sk))
3720				tcp_slow_start_after_idle_check(sk);
3721
3722			if (nwin > tp->max_window) {
3723				tp->max_window = nwin;
3724				tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie);
3725			}
3726		}
3727	}
3728
3729	tcp_snd_una_update(tp, ack);
3730
3731	return flag;
3732}
3733
3734static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
3735				   u32 *last_oow_ack_time)
3736{
3737	/* Paired with the WRITE_ONCE() in this function. */
3738	u32 val = READ_ONCE(*last_oow_ack_time);
3739
3740	if (val) {
3741		s32 elapsed = (s32)(tcp_jiffies32 - val);
3742
3743		if (0 <= elapsed &&
3744		    elapsed < READ_ONCE(net->ipv4.sysctl_tcp_invalid_ratelimit)) {
3745			NET_INC_STATS(net, mib_idx);
3746			return true;	/* rate-limited: don't send yet! */
3747		}
3748	}
3749
3750	/* Paired with the prior READ_ONCE() and with itself,
3751	 * as we might be lockless.
3752	 */
3753	WRITE_ONCE(*last_oow_ack_time, tcp_jiffies32);
3754
3755	return false;	/* not rate-limited: go ahead, send dupack now! */
3756}
3757
3758/* Return true if we're currently rate-limiting out-of-window ACKs and
3759 * thus shouldn't send a dupack right now. We rate-limit dupacks in
3760 * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
3761 * attacks that send repeated SYNs or ACKs for the same connection. To
3762 * do this, we do not send a duplicate SYNACK or ACK if the remote
3763 * endpoint is sending out-of-window SYNs or pure ACKs at a high rate.
3764 */
3765bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
3766			  int mib_idx, u32 *last_oow_ack_time)
3767{
3768	/* Data packets without SYNs are not likely part of an ACK loop. */
3769	if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
3770	    !tcp_hdr(skb)->syn)
3771		return false;
3772
3773	return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time);
3774}
3775
3776/* RFC 5961 7 [ACK Throttling] */
3777static void tcp_send_challenge_ack(struct sock *sk)
3778{
3779	struct tcp_sock *tp = tcp_sk(sk);
3780	struct net *net = sock_net(sk);
3781	u32 count, now, ack_limit;
3782
3783	/* First check our per-socket dupack rate limit. */
3784	if (__tcp_oow_rate_limited(net,
3785				   LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
3786				   &tp->last_oow_ack_time))
3787		return;
3788
3789	ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
3790	if (ack_limit == INT_MAX)
3791		goto send_ack;
3792
3793	/* Then check host-wide RFC 5961 rate limit. */
3794	now = jiffies / HZ;
3795	if (now != READ_ONCE(net->ipv4.tcp_challenge_timestamp)) {
3796		u32 half = (ack_limit + 1) >> 1;
3797
3798		WRITE_ONCE(net->ipv4.tcp_challenge_timestamp, now);
3799		WRITE_ONCE(net->ipv4.tcp_challenge_count,
3800			   get_random_u32_inclusive(half, ack_limit + half - 1));
3801	}
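	/* Illustrative note: drawing the per-second budget uniformly from
	 * [half, ack_limit + half - 1] keeps the average near the configured
	 * limit while making the global counter harder to probe as a
	 * side channel.
	 */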
3802	count = READ_ONCE(net->ipv4.tcp_challenge_count);
3803	if (count > 0) {
3804		WRITE_ONCE(net->ipv4.tcp_challenge_count, count - 1);
3805send_ack:
3806		NET_INC_STATS(net, LINUX_MIB_TCPCHALLENGEACK);
3807		tcp_send_ack(sk);
3808	}
3809}
3810
3811static void tcp_store_ts_recent(struct tcp_sock *tp)
3812{
3813	tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
3814	tp->rx_opt.ts_recent_stamp = ktime_get_seconds();
3815}
3816
3817static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
3818{
3819	if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
3820		/* PAWS bug workaround wrt. ACK frames, the PAWS discard
3821		 * extra check below makes sure this can only happen
3822		 * for pure ACK frames.  -DaveM
3823		 *
3824		 * Not only that, it also occurs for expired timestamps.
3825		 */
3826
3827		if (tcp_paws_check(&tp->rx_opt, 0))
3828			tcp_store_ts_recent(tp);
3829	}
3830}
3831
3832/* This routine deals with acks during a TLP episode and ends an episode by
3833 * resetting tlp_high_seq. Ref: TLP algorithm in draft-ietf-tcpm-rack
3834 */
3835static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
3836{
3837	struct tcp_sock *tp = tcp_sk(sk);
3838
3839	if (before(ack, tp->tlp_high_seq))
3840		return;
3841
3842	if (!tp->tlp_retrans) {
3843		/* TLP of new data has been acknowledged */
3844		tp->tlp_high_seq = 0;
3845	} else if (flag & FLAG_DSACK_TLP) {
3846		/* This DSACK means original and TLP probe arrived; no loss */
3847		tp->tlp_high_seq = 0;
3848	} else if (after(ack, tp->tlp_high_seq)) {
3849		/* ACK advances: there was a loss, so reduce cwnd. Reset
3850		 * tlp_high_seq in tcp_init_cwnd_reduction()
3851		 */
3852		tcp_init_cwnd_reduction(sk);
3853		tcp_set_ca_state(sk, TCP_CA_CWR);
3854		tcp_end_cwnd_reduction(sk);
3855		tcp_try_keep_open(sk);
3856		NET_INC_STATS(sock_net(sk),
3857				LINUX_MIB_TCPLOSSPROBERECOVERY);
3858	} else if (!(flag & (FLAG_SND_UNA_ADVANCED |
3859			     FLAG_NOT_DUP | FLAG_DATA_SACKED))) {
3860		/* Pure dupack: original and TLP probe arrived; no loss */
3861		tp->tlp_high_seq = 0;
3862	}
3863}
3864
3865static inline void tcp_in_ack_event(struct sock *sk, u32 flags)
3866{
3867	const struct inet_connection_sock *icsk = inet_csk(sk);
3868
3869	if (icsk->icsk_ca_ops->in_ack_event)
3870		icsk->icsk_ca_ops->in_ack_event(sk, flags);
3871}
3872
3873/* Congestion control has updated the cwnd already. So if we're in
3874 * loss recovery then now we do any new sends (for FRTO) or
3875 * retransmits (for CA_Loss or CA_recovery) that make sense.
3876 */
3877static void tcp_xmit_recovery(struct sock *sk, int rexmit)
3878{
3879	struct tcp_sock *tp = tcp_sk(sk);
3880
3881	if (rexmit == REXMIT_NONE || sk->sk_state == TCP_SYN_SENT)
3882		return;
3883
3884	if (unlikely(rexmit == REXMIT_NEW)) {
3885		__tcp_push_pending_frames(sk, tcp_current_mss(sk),
3886					  TCP_NAGLE_OFF);
3887		if (after(tp->snd_nxt, tp->high_seq))
3888			return;
3889		tp->frto = 0;
3890	}
3891	tcp_xmit_retransmit_queue(sk);
3892}
3893
3894/* Returns the number of packets newly acked or sacked by the current ACK */
3895static u32 tcp_newly_delivered(struct sock *sk, u32 prior_delivered, int flag)
3896{
3897	const struct net *net = sock_net(sk);
3898	struct tcp_sock *tp = tcp_sk(sk);
3899	u32 delivered;
3900
3901	delivered = tp->delivered - prior_delivered;
3902	NET_ADD_STATS(net, LINUX_MIB_TCPDELIVERED, delivered);
3903	if (flag & FLAG_ECE)
3904		NET_ADD_STATS(net, LINUX_MIB_TCPDELIVEREDCE, delivered);
3905
3906	return delivered;
3907}
3908
3909/* This routine deals with incoming acks, but not outgoing ones. */
3910static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3911{
3912	struct inet_connection_sock *icsk = inet_csk(sk);
3913	struct tcp_sock *tp = tcp_sk(sk);
3914	struct tcp_sacktag_state sack_state;
3915	struct rate_sample rs = { .prior_delivered = 0 };
3916	u32 prior_snd_una = tp->snd_una;
3917	bool is_sack_reneg = tp->is_sack_reneg;
3918	u32 ack_seq = TCP_SKB_CB(skb)->seq;
3919	u32 ack = TCP_SKB_CB(skb)->ack_seq;
3920	int num_dupack = 0;
3921	int prior_packets = tp->packets_out;
3922	u32 delivered = tp->delivered;
3923	u32 lost = tp->lost;
3924	int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
3925	u32 prior_fack;
3926
3927	sack_state.first_sackt = 0;
3928	sack_state.rate = &rs;
3929	sack_state.sack_delivered = 0;
3930
3931	/* We very likely will need to access rtx queue. */
3932	prefetch(sk->tcp_rtx_queue.rb_node);
3933
3934	/* If the ack is older than previous acks
3935	 * then we can probably ignore it.
3936	 */
3937	if (before(ack, prior_snd_una)) {
3938		u32 max_window;
3939
3940		/* do not accept ACK for bytes we never sent. */
3941		max_window = min_t(u64, tp->max_window, tp->bytes_acked);
3942		/* RFC 5961 5.2 [Blind Data Injection Attack].[Mitigation] */
3943		if (before(ack, prior_snd_una - max_window)) {
3944			if (!(flag & FLAG_NO_CHALLENGE_ACK))
3945				tcp_send_challenge_ack(sk);
3946			return -SKB_DROP_REASON_TCP_TOO_OLD_ACK;
3947		}
3948		goto old_ack;
3949	}
3950
3951	/* If the ack includes data we haven't sent yet, discard
3952	 * this segment (RFC793 Section 3.9).
3953	 */
3954	if (after(ack, tp->snd_nxt))
3955		return -SKB_DROP_REASON_TCP_ACK_UNSENT_DATA;
3956
3957	if (after(ack, prior_snd_una)) {
3958		flag |= FLAG_SND_UNA_ADVANCED;
3959		icsk->icsk_retransmits = 0;
3960
3961#if IS_ENABLED(CONFIG_TLS_DEVICE)
3962		if (static_branch_unlikely(&clean_acked_data_enabled.key))
3963			if (icsk->icsk_clean_acked)
3964				icsk->icsk_clean_acked(sk, ack);
3965#endif
3966	}
3967
3968	prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una;
3969	rs.prior_in_flight = tcp_packets_in_flight(tp);
3970
3971	/* ts_recent update must be made after we are sure that the packet
3972	 * is in window.
3973	 */
3974	if (flag & FLAG_UPDATE_TS_RECENT)
3975		tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
3976
3977	if ((flag & (FLAG_SLOWPATH | FLAG_SND_UNA_ADVANCED)) ==
3978	    FLAG_SND_UNA_ADVANCED) {
3979		/* Window is constant, pure forward advance.
3980		 * No more checks are required.
3981		 * Note, we use the fact that SND.UNA>=SND.WL2.
3982		 */
3983		tcp_update_wl(tp, ack_seq);
3984		tcp_snd_una_update(tp, ack);
3985		flag |= FLAG_WIN_UPDATE;
3986
3987		tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE);
3988
3989		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS);
3990	} else {
3991		u32 ack_ev_flags = CA_ACK_SLOWPATH;
3992
3993		if (ack_seq != TCP_SKB_CB(skb)->end_seq)
3994			flag |= FLAG_DATA;
3995		else
3996			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS);
3997
3998		flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
3999
4000		if (TCP_SKB_CB(skb)->sacked)
4001			flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
4002							&sack_state);
4003
4004		if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) {
4005			flag |= FLAG_ECE;
4006			ack_ev_flags |= CA_ACK_ECE;
4007		}
4008
4009		if (sack_state.sack_delivered)
4010			tcp_count_delivered(tp, sack_state.sack_delivered,
4011					    flag & FLAG_ECE);
4012
4013		if (flag & FLAG_WIN_UPDATE)
4014			ack_ev_flags |= CA_ACK_WIN_UPDATE;
4015
4016		tcp_in_ack_event(sk, ack_ev_flags);
4017	}
4018
4019	/* This is a deviation from RFC3168 since it states that:
4020	 * "When the TCP data sender is ready to set the CWR bit after reducing
4021	 * the congestion window, it SHOULD set the CWR bit only on the first
4022	 * new data packet that it transmits."
4023	 * We accept CWR on pure ACKs to be more robust
4024	 * with widely-deployed TCP implementations that do this.
4025	 */
4026	tcp_ecn_accept_cwr(sk, skb);
4027
4028	/* We passed data and got it acked, remove any soft error
4029	 * log. Something worked...
4030	 */
4031	WRITE_ONCE(sk->sk_err_soft, 0);
4032	icsk->icsk_probes_out = 0;
4033	tp->rcv_tstamp = tcp_jiffies32;
4034	if (!prior_packets)
4035		goto no_queue;
4036
4037	/* See if we can take anything off of the retransmit queue. */
4038	flag |= tcp_clean_rtx_queue(sk, skb, prior_fack, prior_snd_una,
4039				    &sack_state, flag & FLAG_ECE);
4040
4041	tcp_rack_update_reo_wnd(sk, &rs);
4042
4043	if (tp->tlp_high_seq)
4044		tcp_process_tlp_ack(sk, ack, flag);
4045
4046	if (tcp_ack_is_dubious(sk, flag)) {
4047		if (!(flag & (FLAG_SND_UNA_ADVANCED |
4048			      FLAG_NOT_DUP | FLAG_DSACKING_ACK))) {
4049			num_dupack = 1;
4050			/* Consider if pure acks were aggregated in tcp_add_backlog() */
4051			if (!(flag & FLAG_DATA))
4052				num_dupack = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
4053		}
4054		tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
4055				      &rexmit);
4056	}
4057
4058	/* If needed, reset TLP/RTO timer when RACK doesn't set it. */
4059	if (flag & FLAG_SET_XMIT_TIMER)
4060		tcp_set_xmit_timer(sk);
4061
4062	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
4063		sk_dst_confirm(sk);
4064
4065	delivered = tcp_newly_delivered(sk, delivered, flag);
4066	lost = tp->lost - lost;			/* freshly marked lost */
4067	rs.is_ack_delayed = !!(flag & FLAG_ACK_MAYBE_DELAYED);
4068	tcp_rate_gen(sk, delivered, lost, is_sack_reneg, sack_state.rate);
4069	tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
4070	tcp_xmit_recovery(sk, rexmit);
4071	return 1;
4072
4073no_queue:
4074	/* If data was DSACKed, see if we can undo a cwnd reduction. */
4075	if (flag & FLAG_DSACKING_ACK) {
4076		tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
4077				      &rexmit);
4078		tcp_newly_delivered(sk, delivered, flag);
4079	}
4080	/* If this ack opens up a zero window, clear backoff.  It was
4081	 * being used to time the probes, and is probably far higher than
4082	 * it needs to be for normal retransmission.
4083	 */
4084	tcp_ack_probe(sk);
4085
4086	if (tp->tlp_high_seq)
4087		tcp_process_tlp_ack(sk, ack, flag);
4088	return 1;
4089
4090old_ack:
4091	/* If data was SACKed, tag it and see if we should send more data.
4092	 * If data was DSACKed, see if we can undo a cwnd reduction.
4093	 */
4094	if (TCP_SKB_CB(skb)->sacked) {
4095		flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
4096						&sack_state);
4097		tcp_fastretrans_alert(sk, prior_snd_una, num_dupack, &flag,
4098				      &rexmit);
4099		tcp_newly_delivered(sk, delivered, flag);
4100		tcp_xmit_recovery(sk, rexmit);
4101	}
4102
4103	return 0;
4104}
4105
4106static void tcp_parse_fastopen_option(int len, const unsigned char *cookie,
4107				      bool syn, struct tcp_fastopen_cookie *foc,
4108				      bool exp_opt)
4109{
4110	/* Valid only in SYN or SYN-ACK with an even length.  */
4111	if (!foc || !syn || len < 0 || (len & 1))
4112		return;
4113
4114	if (len >= TCP_FASTOPEN_COOKIE_MIN &&
4115	    len <= TCP_FASTOPEN_COOKIE_MAX)
4116		memcpy(foc->val, cookie, len);
4117	else if (len != 0)
4118		len = -1;
4119	foc->len = len;
4120	foc->exp = exp_opt;
4121}
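/* Editor's note: a summary of the checks above, with the usual constants
 * assumed (TCP_FASTOPEN_COOKIE_MIN = 4, TCP_FASTOPEN_COOKIE_MAX = 16).
 * A cookie is copied only from a SYN/SYN-ACK, only when its length is even
 * and within 4..16 bytes; any other non-zero length is stored as -1 so the
 * caller can distinguish "invalid cookie" from "no cookie" (len == 0).
 */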
4122
4123static bool smc_parse_options(const struct tcphdr *th,
4124			      struct tcp_options_received *opt_rx,
4125			      const unsigned char *ptr,
4126			      int opsize)
4127{
4128#if IS_ENABLED(CONFIG_SMC)
4129	if (static_branch_unlikely(&tcp_have_smc)) {
4130		if (th->syn && !(opsize & 1) &&
4131		    opsize >= TCPOLEN_EXP_SMC_BASE &&
4132		    get_unaligned_be32(ptr) == TCPOPT_SMC_MAGIC) {
4133			opt_rx->smc_ok = 1;
4134			return true;
4135		}
4136	}
4137#endif
4138	return false;
4139}
4140
4141/* Try to parse the MSS option from the TCP header. Return 0 on failure, clamped
4142 * value on success.
4143 */
4144u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss)
4145{
4146	const unsigned char *ptr = (const unsigned char *)(th + 1);
4147	int length = (th->doff * 4) - sizeof(struct tcphdr);
4148	u16 mss = 0;
4149
4150	while (length > 0) {
4151		int opcode = *ptr++;
4152		int opsize;
4153
4154		switch (opcode) {
4155		case TCPOPT_EOL:
4156			return mss;
4157		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
4158			length--;
4159			continue;
4160		default:
4161			if (length < 2)
4162				return mss;
4163			opsize = *ptr++;
4164			if (opsize < 2) /* "silly options" */
4165				return mss;
4166			if (opsize > length)
4167				return mss;	/* fail on partial options */
4168			if (opcode == TCPOPT_MSS && opsize == TCPOLEN_MSS) {
4169				u16 in_mss = get_unaligned_be16(ptr);
4170
4171				if (in_mss) {
4172					if (user_mss && user_mss < in_mss)
4173						in_mss = user_mss;
4174					mss = in_mss;
4175				}
4176			}
4177			ptr += opsize - 2;
4178			length -= opsize;
4179		}
4180	}
4181	return mss;
4182}
4183EXPORT_SYMBOL_GPL(tcp_parse_mss_option);
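/* Editor's note: a minimal, hypothetical caller of the helper above.  The
 * function name syn_mss_or_default() is illustrative only and not kernel
 * API; it assumes th points at a linear TCP header taken from a SYN.
 */
static u16 __maybe_unused syn_mss_or_default(const struct tcphdr *th,
					     u16 user_mss)
{
	u16 mss = tcp_parse_mss_option(th, user_mss);

	/* 0 means no valid MSS option was present; fall back to RFC 1122 default. */
	return mss ? : TCP_MSS_DEFAULT;
}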
4184
4185/* Look for tcp options. Normally only called on SYN and SYNACK packets.
4186 * But, this can also be called on packets in the established flow when
4187 * the fast version below fails.
4188 */
4189void tcp_parse_options(const struct net *net,
4190		       const struct sk_buff *skb,
4191		       struct tcp_options_received *opt_rx, int estab,
4192		       struct tcp_fastopen_cookie *foc)
4193{
4194	const unsigned char *ptr;
4195	const struct tcphdr *th = tcp_hdr(skb);
4196	int length = (th->doff * 4) - sizeof(struct tcphdr);
4197
4198	ptr = (const unsigned char *)(th + 1);
4199	opt_rx->saw_tstamp = 0;
4200	opt_rx->saw_unknown = 0;
4201
4202	while (length > 0) {
4203		int opcode = *ptr++;
4204		int opsize;
4205
4206		switch (opcode) {
4207		case TCPOPT_EOL:
4208			return;
4209		case TCPOPT_NOP:	/* Ref: RFC 793 section 3.1 */
4210			length--;
4211			continue;
4212		default:
4213			if (length < 2)
4214				return;
4215			opsize = *ptr++;
4216			if (opsize < 2) /* "silly options" */
4217				return;
4218			if (opsize > length)
4219				return;	/* don't parse partial options */
4220			switch (opcode) {
4221			case TCPOPT_MSS:
4222				if (opsize == TCPOLEN_MSS && th->syn && !estab) {
4223					u16 in_mss = get_unaligned_be16(ptr);
4224					if (in_mss) {
4225						if (opt_rx->user_mss &&
4226						    opt_rx->user_mss < in_mss)
4227							in_mss = opt_rx->user_mss;
4228						opt_rx->mss_clamp = in_mss;
4229					}
4230				}
4231				break;
4232			case TCPOPT_WINDOW:
4233				if (opsize == TCPOLEN_WINDOW && th->syn &&
4234				    !estab && READ_ONCE(net->ipv4.sysctl_tcp_window_scaling)) {
4235					__u8 snd_wscale = *(__u8 *)ptr;
4236					opt_rx->wscale_ok = 1;
4237					if (snd_wscale > TCP_MAX_WSCALE) {
4238						net_info_ratelimited("%s: Illegal window scaling value %d > %u received\n",
4239								     __func__,
4240								     snd_wscale,
4241								     TCP_MAX_WSCALE);
4242						snd_wscale = TCP_MAX_WSCALE;
4243					}
4244					opt_rx->snd_wscale = snd_wscale;
4245				}
4246				break;
4247			case TCPOPT_TIMESTAMP:
4248				if ((opsize == TCPOLEN_TIMESTAMP) &&
4249				    ((estab && opt_rx->tstamp_ok) ||
4250				     (!estab && READ_ONCE(net->ipv4.sysctl_tcp_timestamps)))) {
4251					opt_rx->saw_tstamp = 1;
4252					opt_rx->rcv_tsval = get_unaligned_be32(ptr);
4253					opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
4254				}
4255				break;
4256			case TCPOPT_SACK_PERM:
4257				if (opsize == TCPOLEN_SACK_PERM && th->syn &&
4258				    !estab && READ_ONCE(net->ipv4.sysctl_tcp_sack)) {
4259					opt_rx->sack_ok = TCP_SACK_SEEN;
4260					tcp_sack_reset(opt_rx);
4261				}
4262				break;
4263
4264			case TCPOPT_SACK:
4265				if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
4266				   !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
4267				   opt_rx->sack_ok) {
4268					TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
4269				}
4270				break;
4271#ifdef CONFIG_TCP_MD5SIG
4272			case TCPOPT_MD5SIG:
4273				/* The MD5 Hash has already been
4274				 * checked (see tcp_v{4,6}_rcv()).
4275				 */
4276				break;
4277#endif
4278#ifdef CONFIG_TCP_AO
4279			case TCPOPT_AO:
4280				/* TCP AO has already been checked
4281				 * (see tcp_inbound_ao_hash()).
4282				 */
4283				break;
4284#endif
4285			case TCPOPT_FASTOPEN:
4286				tcp_parse_fastopen_option(
4287					opsize - TCPOLEN_FASTOPEN_BASE,
4288					ptr, th->syn, foc, false);
4289				break;
4290
4291			case TCPOPT_EXP:
4292				/* Fast Open option shares code 254 using a
4293				 * 16-bit magic number.
4294				 */
4295				if (opsize >= TCPOLEN_EXP_FASTOPEN_BASE &&
4296				    get_unaligned_be16(ptr) ==
4297				    TCPOPT_FASTOPEN_MAGIC) {
4298					tcp_parse_fastopen_option(opsize -
4299						TCPOLEN_EXP_FASTOPEN_BASE,
4300						ptr + 2, th->syn, foc, true);
4301					break;
4302				}
4303
4304				if (smc_parse_options(th, opt_rx, ptr, opsize))
4305					break;
4306
4307				opt_rx->saw_unknown = 1;
4308				break;
4309
4310			default:
4311				opt_rx->saw_unknown = 1;
4312			}
4313			ptr += opsize-2;
4314			length -= opsize;
4315		}
4316	}
4317}
4318EXPORT_SYMBOL(tcp_parse_options);
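/* Editor's note: a sketch, under stated assumptions, of how a listener-side
 * caller typically drives the parser above on an incoming SYN (compare
 * tcp_conn_request()).  example_parse_syn_options() is illustrative only.
 */
static void __maybe_unused example_parse_syn_options(const struct net *net,
						     const struct sk_buff *skb)
{
	struct tcp_options_received tmp_opt;
	struct tcp_fastopen_cookie foc = { .len = -1 };

	tcp_clear_options(&tmp_opt);
	tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
	tcp_parse_options(net, skb, &tmp_opt, 0, &foc);
	/* tmp_opt now carries mss_clamp, wscale, SACK-permitted and timestamps. */
}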
4319
4320static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
4321{
4322	const __be32 *ptr = (const __be32 *)(th + 1);
4323
4324	if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
4325			  | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
4326		tp->rx_opt.saw_tstamp = 1;
4327		++ptr;
4328		tp->rx_opt.rcv_tsval = ntohl(*ptr);
4329		++ptr;
4330		if (*ptr)
4331			tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
4332		else
4333			tp->rx_opt.rcv_tsecr = 0;
4334		return true;
4335	}
4336	return false;
4337}
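/* Editor's note: the fast path above matches the aligned layout Linux itself
 * emits on established flows: NOP(1), NOP(1), TIMESTAMP(8), length 10.  As a
 * single 32-bit word in network byte order that is
 *   (1 << 24) | (1 << 16) | (8 << 8) | 10 == 0x0101080a,
 * so one word compare replaces the generic option walk.
 */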
4338
4339/* Fast parse options. This hopes to only see timestamps.
4340 * If it is wrong it falls back on tcp_parse_options().
4341 */
4342static bool tcp_fast_parse_options(const struct net *net,
4343				   const struct sk_buff *skb,
4344				   const struct tcphdr *th, struct tcp_sock *tp)
4345{
4346	/* In the spirit of fast parsing, compare doff directly to constant
4347	 * values.  Because equality is used, short doff can be ignored here.
4348	 */
4349	if (th->doff == (sizeof(*th) / 4)) {
4350		tp->rx_opt.saw_tstamp = 0;
4351		return false;
4352	} else if (tp->rx_opt.tstamp_ok &&
4353		   th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) {
4354		if (tcp_parse_aligned_timestamp(tp, th))
4355			return true;
4356	}
4357
4358	tcp_parse_options(net, skb, &tp->rx_opt, 1, NULL);
4359	if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
4360		tp->rx_opt.rcv_tsecr -= tp->tsoffset;
4361
4362	return true;
4363}
4364
4365#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
4366/*
4367 * Parse Signature options
4368 */
4369int tcp_do_parse_auth_options(const struct tcphdr *th,
4370			      const u8 **md5_hash, const u8 **ao_hash)
4371{
4372	int length = (th->doff << 2) - sizeof(*th);
4373	const u8 *ptr = (const u8 *)(th + 1);
4374	unsigned int minlen = TCPOLEN_MD5SIG;
4375
4376	if (IS_ENABLED(CONFIG_TCP_AO))
4377		minlen = sizeof(struct tcp_ao_hdr) + 1;
4378
4379	*md5_hash = NULL;
4380	*ao_hash = NULL;
4381
4382	/* If not enough data remaining, we can short cut */
4383	while (length >= minlen) {
4384		int opcode = *ptr++;
4385		int opsize;
4386
4387		switch (opcode) {
4388		case TCPOPT_EOL:
4389			return 0;
4390		case TCPOPT_NOP:
4391			length--;
4392			continue;
4393		default:
4394			opsize = *ptr++;
4395			if (opsize < 2 || opsize > length)
4396				return -EINVAL;
4397			if (opcode == TCPOPT_MD5SIG) {
4398				if (opsize != TCPOLEN_MD5SIG)
4399					return -EINVAL;
4400				if (unlikely(*md5_hash || *ao_hash))
4401					return -EEXIST;
4402				*md5_hash = ptr;
4403			} else if (opcode == TCPOPT_AO) {
4404				if (opsize <= sizeof(struct tcp_ao_hdr))
4405					return -EINVAL;
4406				if (unlikely(*md5_hash || *ao_hash))
4407					return -EEXIST;
4408				*ao_hash = ptr;
4409			}
4410		}
4411		ptr += opsize - 2;
4412		length -= opsize;
4413	}
4414	return 0;
4415}
4416EXPORT_SYMBOL(tcp_do_parse_auth_options);
4417#endif
4418
4419/* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM
4420 *
4421 * It is not fatal. If this ACK does _not_ change critical state (seqs, window)
4422 * it can pass through stack. So, the following predicate verifies that
4423 * this segment is not used for anything but congestion avoidance or
4424 * fast retransmit. Moreover, we even are able to eliminate most of such
4425 * second order effects, if we apply some small "replay" window (~RTO)
4426 * to timestamp space.
4427 *
4428 * All these measures still do not guarantee that we reject wrapped ACKs
4429 * on networks with high bandwidth, when sequence space is recycled quickly,
4430 * but it guarantees that such events will be very rare and do not affect
4431 * connection seriously. This doesn't look nice, but alas, PAWS is really
4432 * buggy extension.
4433 *
4434 * [ Later note. Even worse! It is buggy for segments _with_ data. RFC
4435 * states that cases where a retransmit arrives after the original data are rare.
4436 * It is a blatant lie. VJ forgot about fast retransmit! 8)8) It is
4437 * the biggest problem on large power networks even with minor reordering.
4438 * OK, let's give it a small replay window. Even if the peer clock is only 1 Hz, it is safe
4439 * up to a bandwidth of 18 Gigabit/sec. 8) ]
4440 */
4441
4442/* Estimates max number of increments of remote peer TSval in
4443 * a replay window (based on our current RTO estimation).
4444 */
4445static u32 tcp_tsval_replay(const struct sock *sk)
4446{
4447	/* If we use usec TS resolution,
4448	 * then expect the remote peer to use the same resolution.
4449	 */
4450	if (tcp_sk(sk)->tcp_usec_ts)
4451		return inet_csk(sk)->icsk_rto * (USEC_PER_SEC / HZ);
4452
4453	/* RFC 7323 recommends a TSval clock between 1ms and 1sec.
4454	 * We know that some OS (including old linux) can use 1200 Hz.
4455	 */
4456	return inet_csk(sk)->icsk_rto * 1200 / HZ;
4457}
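/* Editor's note: a worked example of the estimate above, with assumed values.
 * With HZ = 1000 and icsk_rto = 200 jiffies (200 ms), a peer using a 1200 Hz
 * TSval clock can advance by at most 200 * 1200 / 1000 = 240 ticks within one
 * RTO, so a received TSval may lag ts_recent by up to ~240 ticks and still be
 * treated as a benign replay by tcp_disordered_ack() below.
 */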
4458
4459static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
4460{
4461	const struct tcp_sock *tp = tcp_sk(sk);
4462	const struct tcphdr *th = tcp_hdr(skb);
4463	u32 seq = TCP_SKB_CB(skb)->seq;
4464	u32 ack = TCP_SKB_CB(skb)->ack_seq;
4465
4466	return	/* 1. Pure ACK with correct sequence number. */
4467		(th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) &&
4468
4469		/* 2. ... and duplicate ACK. */
4470		ack == tp->snd_una &&
4471
4472		/* 3. ... and does not update window. */
4473		!tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) &&
4474
4475		/* 4. ... and sits in replay window. */
4476		(s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <=
4477		tcp_tsval_replay(sk);
4478}
4479
4480static inline bool tcp_paws_discard(const struct sock *sk,
4481				   const struct sk_buff *skb)
4482{
4483	const struct tcp_sock *tp = tcp_sk(sk);
4484
4485	return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) &&
4486	       !tcp_disordered_ack(sk, skb);
4487}
4488
4489/* Check segment sequence number for validity.
4490 *
4491 * Segment controls are considered valid, if the segment
4492 * fits to the window after truncation to the window. Acceptability
4493 * of data (and SYN, FIN, of course) is checked separately.
4494 * See tcp_data_queue(), for example.
4495 *
4496 * Also, controls (RST is main one) are accepted using RCV.WUP instead
4497 * of RCV.NXT. Peer still did not advance his SND.UNA when we
4498 * delayed the ACK, so that his SND.UNA <= our RCV.WUP.
4499 * (borrowed from freebsd)
4500 */
4501
4502static enum skb_drop_reason tcp_sequence(const struct tcp_sock *tp,
4503					 u32 seq, u32 end_seq)
4504{
4505	if (before(end_seq, tp->rcv_wup))
4506		return SKB_DROP_REASON_TCP_OLD_SEQUENCE;
4507
4508	if (after(seq, tp->rcv_nxt + tcp_receive_window(tp)))
4509		return SKB_DROP_REASON_TCP_INVALID_SEQUENCE;
4510
4511	return SKB_NOT_DROPPED_YET;
4512}
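/* Editor's note: illustrative numbers for the check above (assumed values).
 * With rcv_wup = 1000, rcv_nxt = 1500 and a 1000-byte receive window, a
 * segment whose end_seq is below 1000 is dropped as an old sequence, one
 * whose seq is beyond 2500 is dropped as invalid, and everything else is
 * left for the finer-grained checks in tcp_data_queue().
 */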
4513
4514
4515void tcp_done_with_error(struct sock *sk, int err)
4516{
4517	/* This barrier is coupled with smp_rmb() in tcp_poll() */
4518	WRITE_ONCE(sk->sk_err, err);
4519	smp_wmb();
4520
4521	tcp_write_queue_purge(sk);
4522	tcp_done(sk);
4523
4524	if (!sock_flag(sk, SOCK_DEAD))
4525		sk_error_report(sk);
4526}
4527EXPORT_SYMBOL(tcp_done_with_error);
4528
4529/* When we get a reset we do this. */
4530void tcp_reset(struct sock *sk, struct sk_buff *skb)
4531{
4532	int err;
4533
4534	trace_tcp_receive_reset(sk);
4535
4536	/* mptcp can't tell us to ignore reset pkts,
4537	 * so just ignore the return value of mptcp_incoming_options().
4538	 */
4539	if (sk_is_mptcp(sk))
4540		mptcp_incoming_options(sk, skb);
4541
4542	/* We want the right error as BSD sees it (and indeed as we do). */
4543	switch (sk->sk_state) {
4544	case TCP_SYN_SENT:
4545		err = ECONNREFUSED;
4546		break;
4547	case TCP_CLOSE_WAIT:
4548		err = EPIPE;
4549		break;
4550	case TCP_CLOSE:
4551		return;
4552	default:
4553		err = ECONNRESET;
4554	}
4555	tcp_done_with_error(sk, err);
4556}
4557
4558/*
4559 * 	Process the FIN bit. This now behaves as it is supposed to work
4560 *	and the FIN takes effect when it is validly part of sequence
4561 *	space. Not before when we get holes.
4562 *
4563 *	If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT
4564 *	(and thence onto LAST-ACK and finally, CLOSE, we never enter
4565 *	TIME-WAIT)
4566 *
4567 *	If we are in FINWAIT-1, a received FIN indicates simultaneous
4568 *	close and we go into CLOSING (and later onto TIME-WAIT)
4569 *
4570 *	If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT.
4571 */
4572void tcp_fin(struct sock *sk)
4573{
4574	struct tcp_sock *tp = tcp_sk(sk);
4575
4576	inet_csk_schedule_ack(sk);
4577
4578	WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN);
4579	sock_set_flag(sk, SOCK_DONE);
4580
4581	switch (sk->sk_state) {
4582	case TCP_SYN_RECV:
4583	case TCP_ESTABLISHED:
4584		/* Move to CLOSE_WAIT */
4585		tcp_set_state(sk, TCP_CLOSE_WAIT);
4586		inet_csk_enter_pingpong_mode(sk);
4587		break;
4588
4589	case TCP_CLOSE_WAIT:
4590	case TCP_CLOSING:
4591		/* Received a retransmission of the FIN, do
4592		 * nothing.
4593		 */
4594		break;
4595	case TCP_LAST_ACK:
4596		/* RFC793: Remain in the LAST-ACK state. */
4597		break;
4598
4599	case TCP_FIN_WAIT1:
4600		/* This case occurs when a simultaneous close
4601		 * happens, we must ack the received FIN and
4602		 * enter the CLOSING state.
4603		 */
4604		tcp_send_ack(sk);
4605		tcp_set_state(sk, TCP_CLOSING);
4606		break;
4607	case TCP_FIN_WAIT2:
4608		/* Received a FIN -- send ACK and enter TIME_WAIT. */
4609		tcp_send_ack(sk);
4610		tcp_time_wait(sk, TCP_TIME_WAIT, 0);
4611		break;
4612	default:
4613		/* Only TCP_LISTEN and TCP_CLOSE are left, in these
4614		 * cases we should never reach this piece of code.
4615		 */
4616		pr_err("%s: Impossible, sk->sk_state=%d\n",
4617		       __func__, sk->sk_state);
4618		break;
4619	}
4620
4621	/* It _is_ possible that we have something out-of-order _after_ FIN.
4622	 * Probably, we should reset in this case. For now drop them.
4623	 */
4624	skb_rbtree_purge(&tp->out_of_order_queue);
4625	if (tcp_is_sack(tp))
4626		tcp_sack_reset(&tp->rx_opt);
4627
4628	if (!sock_flag(sk, SOCK_DEAD)) {
4629		sk->sk_state_change(sk);
4630
4631		/* Do not send POLL_HUP for half duplex close. */
4632		if (sk->sk_shutdown == SHUTDOWN_MASK ||
4633		    sk->sk_state == TCP_CLOSE)
4634			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
4635		else
4636			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
4637	}
4638}
4639
4640static inline bool tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
4641				  u32 end_seq)
4642{
4643	if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
4644		if (before(seq, sp->start_seq))
4645			sp->start_seq = seq;
4646		if (after(end_seq, sp->end_seq))
4647			sp->end_seq = end_seq;
4648		return true;
4649	}
4650	return false;
4651}
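/* Editor's note: a quick example of the merge above.  An existing block
 * [100, 200) extended with [150, 250) becomes [100, 250) and the function
 * returns true; a disjoint range such as [300, 400) leaves the block
 * untouched and returns false, so the caller starts a new block instead.
 */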
4652
4653static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
4654{
4655	struct tcp_sock *tp = tcp_sk(sk);
4656
4657	if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
4658		int mib_idx;
4659
4660		if (before(seq, tp->rcv_nxt))
4661			mib_idx = LINUX_MIB_TCPDSACKOLDSENT;
4662		else
4663			mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
4664
4665		NET_INC_STATS(sock_net(sk), mib_idx);
4666
4667		tp->rx_opt.dsack = 1;
4668		tp->duplicate_sack[0].start_seq = seq;
4669		tp->duplicate_sack[0].end_seq = end_seq;
4670	}
4671}
4672
4673static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
4674{
4675	struct tcp_sock *tp = tcp_sk(sk);
4676
4677	if (!tp->rx_opt.dsack)
4678		tcp_dsack_set(sk, seq, end_seq);
4679	else
4680		tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
4681}
4682
4683static void tcp_rcv_spurious_retrans(struct sock *sk, const struct sk_buff *skb)
4684{
4685	/* When the ACK path fails or drops most ACKs, the sender would
4686	 * time out and spuriously retransmit the same segment repeatedly.
4687	 * If it seems our ACKs are not reaching the other side,
4688	 * based on receiving a duplicate data segment with new flowlabel
4689	 * (suggesting the sender suffered an RTO), and we are not already
4690	 * repathing due to our own RTO, then rehash the socket to repath our
4691	 * packets.
4692	 */
4693#if IS_ENABLED(CONFIG_IPV6)
4694	if (inet_csk(sk)->icsk_ca_state != TCP_CA_Loss &&
4695	    skb->protocol == htons(ETH_P_IPV6) &&
4696	    (tcp_sk(sk)->inet_conn.icsk_ack.lrcv_flowlabel !=
4697	     ntohl(ip6_flowlabel(ipv6_hdr(skb)))) &&
4698	    sk_rethink_txhash(sk))
4699		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDUPLICATEDATAREHASH);
4700
4701	/* Save last flowlabel after a spurious retrans. */
4702	tcp_save_lrcv_flowlabel(sk, skb);
4703#endif
4704}
4705
4706static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
4707{
4708	struct tcp_sock *tp = tcp_sk(sk);
4709
4710	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
4711	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
4712		NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
4713		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
4714
4715		if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) {
4716			u32 end_seq = TCP_SKB_CB(skb)->end_seq;
4717
4718			tcp_rcv_spurious_retrans(sk, skb);
4719			if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
4720				end_seq = tp->rcv_nxt;
4721			tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq);
4722		}
4723	}
4724
4725	tcp_send_ack(sk);
4726}
4727
4728/* These routines update the SACK block as out-of-order packets arrive or
4729 * in-order packets close up the sequence space.
4730 */
4731static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
4732{
4733	int this_sack;
4734	struct tcp_sack_block *sp = &tp->selective_acks[0];
4735	struct tcp_sack_block *swalk = sp + 1;
4736
4737	/* See if the recent change to the first SACK eats into
4738	 * or hits the sequence space of other SACK blocks, if so coalesce.
4739	 */
4740	for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) {
4741		if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) {
4742			int i;
4743
4744			/* Zap SWALK, by moving every further SACK up by one slot.
4745			 * Decrease num_sacks.
4746			 */
4747			tp->rx_opt.num_sacks--;
4748			for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
4749				sp[i] = sp[i + 1];
4750			continue;
4751		}
4752		this_sack++;
4753		swalk++;
4754	}
4755}
4756
4757void tcp_sack_compress_send_ack(struct sock *sk)
4758{
4759	struct tcp_sock *tp = tcp_sk(sk);
4760
4761	if (!tp->compressed_ack)
4762		return;
4763
4764	if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
4765		__sock_put(sk);
4766
4767	/* Since we have to send one ack finally,
4768	 * subtract one from tp->compressed_ack to keep
4769	 * LINUX_MIB_TCPACKCOMPRESSED accurate.
4770	 */
4771	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
4772		      tp->compressed_ack - 1);
4773
4774	tp->compressed_ack = 0;
4775	tcp_send_ack(sk);
4776}
4777
4778/* Reasonable amount of sack blocks included in TCP SACK option
4779 * The max is 4, but this becomes 3 if TCP timestamps are there.
4780 * Given that SACK packets might be lost, be conservative and use 2.
4781 */
4782#define TCP_SACK_BLOCKS_EXPECTED 2
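/* Editor's note on the arithmetic behind the limit above: the TCP header
 * offers 40 bytes of option space.  A SACK option costs 2 bytes of header
 * plus 8 bytes per block, so without timestamps (40 - 2) / 8 allows 4
 * blocks; with the 12 bytes taken by NOP+NOP+TIMESTAMP only
 * (40 - 12 - 2) / 8 = 3 blocks fit.
 */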
4783
4784static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
4785{
4786	struct tcp_sock *tp = tcp_sk(sk);
4787	struct tcp_sack_block *sp = &tp->selective_acks[0];
4788	int cur_sacks = tp->rx_opt.num_sacks;
4789	int this_sack;
4790
4791	if (!cur_sacks)
4792		goto new_sack;
4793
4794	for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) {
4795		if (tcp_sack_extend(sp, seq, end_seq)) {
4796			if (this_sack >= TCP_SACK_BLOCKS_EXPECTED)
4797				tcp_sack_compress_send_ack(sk);
4798			/* Rotate this_sack to the first one. */
4799			for (; this_sack > 0; this_sack--, sp--)
4800				swap(*sp, *(sp - 1));
4801			if (cur_sacks > 1)
4802				tcp_sack_maybe_coalesce(tp);
4803			return;
4804		}
4805	}
4806
4807	if (this_sack >= TCP_SACK_BLOCKS_EXPECTED)
4808		tcp_sack_compress_send_ack(sk);
4809
4810	/* Could not find an adjacent existing SACK, build a new one,
4811	 * put it at the front, and shift everyone else down.  We
4812	 * always know there is at least one SACK present already here.
4813	 *
4814	 * If the sack array is full, forget about the last one.
4815	 */
4816	if (this_sack >= TCP_NUM_SACKS) {
4817		this_sack--;
4818		tp->rx_opt.num_sacks--;
4819		sp--;
4820	}
4821	for (; this_sack > 0; this_sack--, sp--)
4822		*sp = *(sp - 1);
4823
4824new_sack:
4825	/* Build the new head SACK, and we're done. */
4826	sp->start_seq = seq;
4827	sp->end_seq = end_seq;
4828	tp->rx_opt.num_sacks++;
4829}
4830
4831/* RCV.NXT advances, some SACKs should be eaten. */
4832
4833static void tcp_sack_remove(struct tcp_sock *tp)
4834{
4835	struct tcp_sack_block *sp = &tp->selective_acks[0];
4836	int num_sacks = tp->rx_opt.num_sacks;
4837	int this_sack;
4838
4839	/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
4840	if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
4841		tp->rx_opt.num_sacks = 0;
4842		return;
4843	}
4844
4845	for (this_sack = 0; this_sack < num_sacks;) {
4846		/* Check if the start of the sack is covered by RCV.NXT. */
4847		if (!before(tp->rcv_nxt, sp->start_seq)) {
4848			int i;
4849
4850			/* RCV.NXT must cover the whole block! */
4851			WARN_ON(before(tp->rcv_nxt, sp->end_seq));
4852
4853			/* Zap this SACK, by moving forward any other SACKS. */
4854			for (i = this_sack+1; i < num_sacks; i++)
4855				tp->selective_acks[i-1] = tp->selective_acks[i];
4856			num_sacks--;
4857			continue;
4858		}
4859		this_sack++;
4860		sp++;
4861	}
4862	tp->rx_opt.num_sacks = num_sacks;
4863}
4864
4865/**
4866 * tcp_try_coalesce - try to merge skb to prior one
4867 * @sk: socket
4868 * @to: prior buffer
4869 * @from: buffer to add in queue
4870 * @fragstolen: pointer to boolean
4871 *
4872 * Before queueing skb @from after @to, try to merge them
4873 * to reduce overall memory use and queue lengths, if cost is small.
4874 * Packets in ofo or receive queues can stay a long time.
4875 * Better try to coalesce them right now to avoid future collapses.
4876 * Returns true if caller should free @from instead of queueing it
4877 */
4878static bool tcp_try_coalesce(struct sock *sk,
4879			     struct sk_buff *to,
4880			     struct sk_buff *from,
4881			     bool *fragstolen)
4882{
4883	int delta;
4884
4885	*fragstolen = false;
4886
4887	/* It's possible this segment overlaps with a prior segment in the queue */
4888	if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq)
4889		return false;
4890
4891	if (!tcp_skb_can_collapse_rx(to, from))
4892		return false;
4893
4894	if (!skb_try_coalesce(to, from, fragstolen, &delta))
4895		return false;
4896
4897	atomic_add(delta, &sk->sk_rmem_alloc);
4898	sk_mem_charge(sk, delta);
4899	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
4900	TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
4901	TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
4902	TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags;
4903
4904	if (TCP_SKB_CB(from)->has_rxtstamp) {
4905		TCP_SKB_CB(to)->has_rxtstamp = true;
4906		to->tstamp = from->tstamp;
4907		skb_hwtstamps(to)->hwtstamp = skb_hwtstamps(from)->hwtstamp;
4908	}
4909
4910	return true;
4911}
4912
4913static bool tcp_ooo_try_coalesce(struct sock *sk,
4914			     struct sk_buff *to,
4915			     struct sk_buff *from,
4916			     bool *fragstolen)
4917{
4918	bool res = tcp_try_coalesce(sk, to, from, fragstolen);
4919
4920	/* In case tcp_drop_reason() is called later, update to->gso_segs */
4921	if (res) {
4922		u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
4923			       max_t(u16, 1, skb_shinfo(from)->gso_segs);
4924
4925		skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
4926	}
4927	return res;
4928}
4929
4930noinline_for_tracing static void
4931tcp_drop_reason(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason)
4932{
4933	sk_drops_add(sk, skb);
4934	sk_skb_reason_drop(sk, skb, reason);
4935}
4936
4937/* This one checks to see if we can put data from the
4938 * out_of_order queue into the receive_queue.
4939 */
4940static void tcp_ofo_queue(struct sock *sk)
4941{
4942	struct tcp_sock *tp = tcp_sk(sk);
4943	__u32 dsack_high = tp->rcv_nxt;
4944	bool fin, fragstolen, eaten;
4945	struct sk_buff *skb, *tail;
4946	struct rb_node *p;
4947
4948	p = rb_first(&tp->out_of_order_queue);
4949	while (p) {
4950		skb = rb_to_skb(p);
4951		if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
4952			break;
4953
4954		if (before(TCP_SKB_CB(skb)->seq, dsack_high)) {
4955			__u32 dsack = dsack_high;
4956			if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
4957				dsack_high = TCP_SKB_CB(skb)->end_seq;
4958			tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
4959		}
4960		p = rb_next(p);
4961		rb_erase(&skb->rbnode, &tp->out_of_order_queue);
4962
4963		if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) {
4964			tcp_drop_reason(sk, skb, SKB_DROP_REASON_TCP_OFO_DROP);
4965			continue;
4966		}
4967
4968		tail = skb_peek_tail(&sk->sk_receive_queue);
4969		eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen);
4970		tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
4971		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
4972		if (!eaten)
4973			tcp_add_receive_queue(sk, skb);
4974		else
4975			kfree_skb_partial(skb, fragstolen);
4976
4977		if (unlikely(fin)) {
4978			tcp_fin(sk);
4979			/* tcp_fin() purges tp->out_of_order_queue,
4980			 * so we must end this loop right now.
4981			 */
4982			break;
4983		}
4984	}
4985}
4986
4987static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb);
4988static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb);
4989
4990static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
4991				 unsigned int size)
4992{
4993	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
4994	    !sk_rmem_schedule(sk, skb, size)) {
4995
4996		if (tcp_prune_queue(sk, skb) < 0)
4997			return -1;
4998
4999		while (!sk_rmem_schedule(sk, skb, size)) {
5000			if (!tcp_prune_ofo_queue(sk, skb))
5001				return -1;
5002		}
5003	}
5004	return 0;
5005}
5006
5007static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
5008{
5009	struct tcp_sock *tp = tcp_sk(sk);
5010	struct rb_node **p, *parent;
5011	struct sk_buff *skb1;
5012	u32 seq, end_seq;
5013	bool fragstolen;
5014
5015	tcp_save_lrcv_flowlabel(sk, skb);
5016	tcp_ecn_check_ce(sk, skb);
5017
5018	if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
5019		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
5020		sk->sk_data_ready(sk);
5021		tcp_drop_reason(sk, skb, SKB_DROP_REASON_PROTO_MEM);
5022		return;
5023	}
5024
5025	/* Disable header prediction. */
5026	tp->pred_flags = 0;
5027	inet_csk_schedule_ack(sk);
5028
5029	tp->rcv_ooopack += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
5030	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
5031	seq = TCP_SKB_CB(skb)->seq;
5032	end_seq = TCP_SKB_CB(skb)->end_seq;
5033
5034	p = &tp->out_of_order_queue.rb_node;
5035	if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
5036		/* Initial out of order segment, build 1 SACK. */
5037		if (tcp_is_sack(tp)) {
5038			tp->rx_opt.num_sacks = 1;
5039			tp->selective_acks[0].start_seq = seq;
5040			tp->selective_acks[0].end_seq = end_seq;
5041		}
5042		rb_link_node(&skb->rbnode, NULL, p);
5043		rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
5044		tp->ooo_last_skb = skb;
5045		goto end;
5046	}
5047
5048	/* In the typical case, we are adding an skb to the end of the list.
5049	 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
5050	 */
5051	if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
5052				 skb, &fragstolen)) {
5053coalesce_done:
5054		/* For non sack flows, do not grow window to force DUPACK
5055		 * and trigger fast retransmit.
5056		 */
5057		if (tcp_is_sack(tp))
5058			tcp_grow_window(sk, skb, true);
5059		kfree_skb_partial(skb, fragstolen);
5060		skb = NULL;
5061		goto add_sack;
5062	}
5063	/* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
5064	if (!before(seq, TCP_SKB_CB(tp->ooo_last_skb)->end_seq)) {
5065		parent = &tp->ooo_last_skb->rbnode;
5066		p = &parent->rb_right;
5067		goto insert;
5068	}
5069
5070	/* Find place to insert this segment. Handle overlaps on the way. */
5071	parent = NULL;
5072	while (*p) {
5073		parent = *p;
5074		skb1 = rb_to_skb(parent);
5075		if (before(seq, TCP_SKB_CB(skb1)->seq)) {
5076			p = &parent->rb_left;
5077			continue;
5078		}
5079		if (before(seq, TCP_SKB_CB(skb1)->end_seq)) {
5080			if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
5081				/* All the bits are present. Drop. */
5082				NET_INC_STATS(sock_net(sk),
5083					      LINUX_MIB_TCPOFOMERGE);
5084				tcp_drop_reason(sk, skb,
5085						SKB_DROP_REASON_TCP_OFOMERGE);
5086				skb = NULL;
5087				tcp_dsack_set(sk, seq, end_seq);
5088				goto add_sack;
5089			}
5090			if (after(seq, TCP_SKB_CB(skb1)->seq)) {
5091				/* Partial overlap. */
5092				tcp_dsack_set(sk, seq, TCP_SKB_CB(skb1)->end_seq);
5093			} else {
5094				/* skb's seq == skb1's seq and skb covers skb1.
5095				 * Replace skb1 with skb.
5096				 */
5097				rb_replace_node(&skb1->rbnode, &skb->rbnode,
5098						&tp->out_of_order_queue);
5099				tcp_dsack_extend(sk,
5100						 TCP_SKB_CB(skb1)->seq,
5101						 TCP_SKB_CB(skb1)->end_seq);
5102				NET_INC_STATS(sock_net(sk),
5103					      LINUX_MIB_TCPOFOMERGE);
5104				tcp_drop_reason(sk, skb1,
5105						SKB_DROP_REASON_TCP_OFOMERGE);
5106				goto merge_right;
5107			}
5108		} else if (tcp_ooo_try_coalesce(sk, skb1,
5109						skb, &fragstolen)) {
5110			goto coalesce_done;
5111		}
5112		p = &parent->rb_right;
5113	}
5114insert:
5115	/* Insert segment into RB tree. */
5116	rb_link_node(&skb->rbnode, parent, p);
5117	rb_insert_color(&skb->rbnode, &tp->out_of_order_queue);
5118
5119merge_right:
5120	/* Remove other segments covered by skb. */
5121	while ((skb1 = skb_rb_next(skb)) != NULL) {
5122		if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
5123			break;
5124		if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
5125			tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
5126					 end_seq);
5127			break;
5128		}
5129		rb_erase(&skb1->rbnode, &tp->out_of_order_queue);
5130		tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
5131				 TCP_SKB_CB(skb1)->end_seq);
5132		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
5133		tcp_drop_reason(sk, skb1, SKB_DROP_REASON_TCP_OFOMERGE);
5134	}
5135	/* If there is no skb after us, we are the last_skb ! */
5136	if (!skb1)
5137		tp->ooo_last_skb = skb;
5138
5139add_sack:
5140	if (tcp_is_sack(tp))
5141		tcp_sack_new_ofo_skb(sk, seq, end_seq);
5142end:
5143	if (skb) {
5144		/* For non sack flows, do not grow window to force DUPACK
5145		 * and trigger fast retransmit.
5146		 */
5147		if (tcp_is_sack(tp))
5148			tcp_grow_window(sk, skb, false);
5149		skb_condense(skb);
5150		skb_set_owner_r(skb, sk);
5151	}
5152}
5153
5154static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
5155				      bool *fragstolen)
5156{
5157	int eaten;
5158	struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue);
5159
5160	eaten = (tail &&
5161		 tcp_try_coalesce(sk, tail,
5162				  skb, fragstolen)) ? 1 : 0;
5163	tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
5164	if (!eaten) {
5165		tcp_add_receive_queue(sk, skb);
5166		skb_set_owner_r(skb, sk);
5167	}
5168	return eaten;
5169}
5170
5171int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
5172{
5173	struct sk_buff *skb;
5174	int err = -ENOMEM;
5175	int data_len = 0;
5176	bool fragstolen;
5177
5178	if (size == 0)
5179		return 0;
5180
5181	if (size > PAGE_SIZE) {
5182		int npages = min_t(size_t, size >> PAGE_SHIFT, MAX_SKB_FRAGS);
5183
5184		data_len = npages << PAGE_SHIFT;
5185		size = data_len + (size & ~PAGE_MASK);
5186	}
5187	skb = alloc_skb_with_frags(size - data_len, data_len,
5188				   PAGE_ALLOC_COSTLY_ORDER,
5189				   &err, sk->sk_allocation);
5190	if (!skb)
5191		goto err;
5192
5193	skb_put(skb, size - data_len);
5194	skb->data_len = data_len;
5195	skb->len = size;
5196
5197	if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
5198		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
5199		goto err_free;
5200	}
5201
5202	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
5203	if (err)
5204		goto err_free;
5205
5206	TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
5207	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size;
5208	TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1;
5209
5210	if (tcp_queue_rcv(sk, skb, &fragstolen)) {
5211		WARN_ON_ONCE(fragstolen); /* should not happen */
5212		__kfree_skb(skb);
5213	}
5214	return size;
5215
5216err_free:
5217	kfree_skb(skb);
5218err:
5219	return err;
5220
5221}
5222
5223void tcp_data_ready(struct sock *sk)
5224{
5225	if (tcp_epollin_ready(sk, sk->sk_rcvlowat) || sock_flag(sk, SOCK_DONE))
5226		sk->sk_data_ready(sk);
5227}
5228
5229static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
5230{
5231	struct tcp_sock *tp = tcp_sk(sk);
5232	enum skb_drop_reason reason;
5233	bool fragstolen;
5234	int eaten;
5235
5236	/* If a subflow has been reset, the packet should not continue
5237	 * to be processed, drop the packet.
5238	 */
5239	if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb)) {
5240		__kfree_skb(skb);
5241		return;
5242	}
5243
5244	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
5245		__kfree_skb(skb);
5246		return;
5247	}
5248	tcp_cleanup_skb(skb);
5249	__skb_pull(skb, tcp_hdr(skb)->doff * 4);
5250
5251	reason = SKB_DROP_REASON_NOT_SPECIFIED;
5252	tp->rx_opt.dsack = 0;
5253
5254	/*  Queue data for delivery to the user.
5255	 *  Packets in sequence go to the receive queue.
5256	 *  Out of sequence packets to the out_of_order_queue.
5257	 */
5258	if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
5259		if (tcp_receive_window(tp) == 0) {
5260			/* Some stacks are known to send bare FIN packets
5261			 * in a loop even if we send RWIN 0 in our ACK.
5262			 * Accepting this FIN does not hurt memory pressure
5263			 * because the FIN flag will simply be merged to the
5264			 * receive queue tail skb in most cases.
5265			 */
5266			if (!skb->len &&
5267			    (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
5268				goto queue_and_out;
5269
5270			reason = SKB_DROP_REASON_TCP_ZEROWINDOW;
5271			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
5272			goto out_of_window;
5273		}
5274
5275		/* Ok. In sequence. In window. */
5276queue_and_out:
5277		if (tcp_try_rmem_schedule(sk, skb, skb->truesize)) {
5278			/* TODO: maybe ratelimit these WIN 0 ACK ? */
5279			inet_csk(sk)->icsk_ack.pending |=
5280					(ICSK_ACK_NOMEM | ICSK_ACK_NOW);
5281			inet_csk_schedule_ack(sk);
5282			sk->sk_data_ready(sk);
5283
5284			if (skb_queue_len(&sk->sk_receive_queue) && skb->len) {
5285				reason = SKB_DROP_REASON_PROTO_MEM;
5286				NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVQDROP);
5287				goto drop;
5288			}
5289			sk_forced_mem_schedule(sk, skb->truesize);
5290		}
5291
5292		eaten = tcp_queue_rcv(sk, skb, &fragstolen);
5293		if (skb->len)
5294			tcp_event_data_recv(sk, skb);
5295		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
5296			tcp_fin(sk);
5297
5298		if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
5299			tcp_ofo_queue(sk);
5300
5301			/* RFC5681. 4.2. SHOULD send immediate ACK, when
5302			 * gap in queue is filled.
5303			 */
5304			if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
5305				inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
5306		}
5307
5308		if (tp->rx_opt.num_sacks)
5309			tcp_sack_remove(tp);
5310
5311		tcp_fast_path_check(sk);
5312
5313		if (eaten > 0)
5314			kfree_skb_partial(skb, fragstolen);
5315		if (!sock_flag(sk, SOCK_DEAD))
5316			tcp_data_ready(sk);
5317		return;
5318	}
5319
5320	if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
5321		tcp_rcv_spurious_retrans(sk, skb);
5322		/* A retransmit, 2nd most common case.  Force an immediate ack. */
5323		reason = SKB_DROP_REASON_TCP_OLD_DATA;
5324		NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
5325		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
5326
5327out_of_window:
5328		tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
5329		inet_csk_schedule_ack(sk);
5330drop:
5331		tcp_drop_reason(sk, skb, reason);
5332		return;
5333	}
5334
5335	/* Out of window. F.e. zero window probe. */
5336	if (!before(TCP_SKB_CB(skb)->seq,
5337		    tp->rcv_nxt + tcp_receive_window(tp))) {
5338		reason = SKB_DROP_REASON_TCP_OVERWINDOW;
5339		goto out_of_window;
5340	}
5341
5342	if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
5343		/* Partial packet, seq < rcv_next < end_seq */
5344		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
5345
5346		/* If window is closed, drop tail of packet. But after
5347		 * remembering the D-SACK for its head, set in the previous line.
5348		 */
5349		if (!tcp_receive_window(tp)) {
5350			reason = SKB_DROP_REASON_TCP_ZEROWINDOW;
5351			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP);
5352			goto out_of_window;
5353		}
5354		goto queue_and_out;
5355	}
5356
5357	tcp_data_queue_ofo(sk, skb);
5358}
5359
5360static struct sk_buff *tcp_skb_next(struct sk_buff *skb, struct sk_buff_head *list)
5361{
5362	if (list)
5363		return !skb_queue_is_last(list, skb) ? skb->next : NULL;
5364
5365	return skb_rb_next(skb);
5366}
5367
5368static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
5369					struct sk_buff_head *list,
5370					struct rb_root *root)
5371{
5372	struct sk_buff *next = tcp_skb_next(skb, list);
5373
5374	if (list)
5375		__skb_unlink(skb, list);
5376	else
5377		rb_erase(&skb->rbnode, root);
5378
5379	__kfree_skb(skb);
5380	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
5381
5382	return next;
5383}
5384
5385/* Insert skb into rb tree, ordered by TCP_SKB_CB(skb)->seq */
5386void tcp_rbtree_insert(struct rb_root *root, struct sk_buff *skb)
5387{
5388	struct rb_node **p = &root->rb_node;
5389	struct rb_node *parent = NULL;
5390	struct sk_buff *skb1;
5391
5392	while (*p) {
5393		parent = *p;
5394		skb1 = rb_to_skb(parent);
5395		if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
5396			p = &parent->rb_left;
5397		else
5398			p = &parent->rb_right;
5399	}
5400	rb_link_node(&skb->rbnode, parent, p);
5401	rb_insert_color(&skb->rbnode, root);
5402}
5403
5404/* Collapse contiguous sequence of skbs head..tail with
5405 * sequence numbers start..end.
5406 *
5407 * If tail is NULL, this means until the end of the queue.
5408 *
5409 * Segments with FIN/SYN are not collapsed (only because this
5410 * simplifies code)
5411 */
5412static void
5413tcp_collapse(struct sock *sk, struct sk_buff_head *list, struct rb_root *root,
5414	     struct sk_buff *head, struct sk_buff *tail, u32 start, u32 end)
5415{
5416	struct sk_buff *skb = head, *n;
5417	struct sk_buff_head tmp;
5418	bool end_of_skbs;
5419
5420	/* First, check that queue is collapsible and find
5421	 * the point where collapsing can be useful.
5422	 */
5423restart:
5424	for (end_of_skbs = true; skb != NULL && skb != tail; skb = n) {
5425		n = tcp_skb_next(skb, list);
5426
5427		if (!skb_frags_readable(skb))
5428			goto skip_this;
5429
5430		/* No new bits? It is possible on ofo queue. */
5431		if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
5432			skb = tcp_collapse_one(sk, skb, list, root);
5433			if (!skb)
5434				break;
5435			goto restart;
5436		}
5437
5438		/* The first skb to collapse is:
5439		 * - not SYN/FIN and
5440		 * - bloated or contains data before "start" or
5441		 *   overlaps the next one and mptcp allows collapsing.
5442		 */
5443		if (!(TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) &&
5444		    (tcp_win_from_space(sk, skb->truesize) > skb->len ||
5445		     before(TCP_SKB_CB(skb)->seq, start))) {
5446			end_of_skbs = false;
5447			break;
5448		}
5449
5450		if (n && n != tail && skb_frags_readable(n) &&
5451		    tcp_skb_can_collapse_rx(skb, n) &&
5452		    TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) {
5453			end_of_skbs = false;
5454			break;
5455		}
5456
5457skip_this:
5458		/* Decided to skip this, advance start seq. */
5459		start = TCP_SKB_CB(skb)->end_seq;
5460	}
5461	if (end_of_skbs ||
5462	    (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) ||
5463	    !skb_frags_readable(skb))
5464		return;
5465
5466	__skb_queue_head_init(&tmp);
5467
5468	while (before(start, end)) {
5469		int copy = min_t(int, SKB_MAX_ORDER(0, 0), end - start);
5470		struct sk_buff *nskb;
5471
5472		nskb = alloc_skb(copy, GFP_ATOMIC);
5473		if (!nskb)
5474			break;
5475
5476		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
5477		skb_copy_decrypted(nskb, skb);
5478		TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
5479		if (list)
5480			__skb_queue_before(list, skb, nskb);
5481		else
5482			__skb_queue_tail(&tmp, nskb); /* defer rbtree insertion */
5483		skb_set_owner_r(nskb, sk);
5484		mptcp_skb_ext_move(nskb, skb);
5485
5486		/* Copy data, releasing collapsed skbs. */
5487		while (copy > 0) {
5488			int offset = start - TCP_SKB_CB(skb)->seq;
5489			int size = TCP_SKB_CB(skb)->end_seq - start;
5490
5491			BUG_ON(offset < 0);
5492			if (size > 0) {
5493				size = min(copy, size);
5494				if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
5495					BUG();
5496				TCP_SKB_CB(nskb)->end_seq += size;
5497				copy -= size;
5498				start += size;
5499			}
5500			if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
5501				skb = tcp_collapse_one(sk, skb, list, root);
5502				if (!skb ||
5503				    skb == tail ||
5504				    !tcp_skb_can_collapse_rx(nskb, skb) ||
5505				    (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN)) ||
5506				    !skb_frags_readable(skb))
5507					goto end;
5508			}
5509		}
5510	}
5511end:
5512	skb_queue_walk_safe(&tmp, skb, n)
5513		tcp_rbtree_insert(root, skb);
5514}
5515
5516/* Collapse ofo queue. Algorithm: select contiguous sequence of skbs
5517 * and tcp_collapse() them until all the queue is collapsed.
5518 */
5519static void tcp_collapse_ofo_queue(struct sock *sk)
5520{
5521	struct tcp_sock *tp = tcp_sk(sk);
5522	u32 range_truesize, sum_tiny = 0;
5523	struct sk_buff *skb, *head;
5524	u32 start, end;
5525
5526	skb = skb_rb_first(&tp->out_of_order_queue);
5527new_range:
5528	if (!skb) {
5529		tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
5530		return;
5531	}
5532	start = TCP_SKB_CB(skb)->seq;
5533	end = TCP_SKB_CB(skb)->end_seq;
5534	range_truesize = skb->truesize;
5535
5536	for (head = skb;;) {
5537		skb = skb_rb_next(skb);
5538
5539		/* Range is terminated when we see a gap or when
5540		 * we are at the queue end.
5541		 */
5542		if (!skb ||
5543		    after(TCP_SKB_CB(skb)->seq, end) ||
5544		    before(TCP_SKB_CB(skb)->end_seq, start)) {
5545			/* Do not attempt collapsing tiny skbs */
5546			if (range_truesize != head->truesize ||
5547			    end - start >= SKB_WITH_OVERHEAD(PAGE_SIZE)) {
5548				tcp_collapse(sk, NULL, &tp->out_of_order_queue,
5549					     head, skb, start, end);
5550			} else {
5551				sum_tiny += range_truesize;
5552				if (sum_tiny > sk->sk_rcvbuf >> 3)
5553					return;
5554			}
5555			goto new_range;
5556		}
5557
5558		range_truesize += skb->truesize;
5559		if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
5560			start = TCP_SKB_CB(skb)->seq;
5561		if (after(TCP_SKB_CB(skb)->end_seq, end))
5562			end = TCP_SKB_CB(skb)->end_seq;
5563	}
5564}
5565
5566/*
5567 * Clean the out-of-order queue to make room.
5568 * We drop packets with high sequence numbers to:
5569 * 1) Leave a chance for holes to be filled.
5570 *    This means we do not drop packets from ooo queue if their sequence
5571 *    is before incoming packet sequence.
5572 * 2) not add too big latencies if thousands of packets sit there.
5573 *    (But if application shrinks SO_RCVBUF, we could still end up
5574 *     freeing whole queue here)
5575 * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks.
5576 *
5577 * Return true if queue has shrunk.
5578 */
5579static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb)
5580{
5581	struct tcp_sock *tp = tcp_sk(sk);
5582	struct rb_node *node, *prev;
5583	bool pruned = false;
5584	int goal;
5585
5586	if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
5587		return false;
5588
5589	goal = sk->sk_rcvbuf >> 3;
5590	node = &tp->ooo_last_skb->rbnode;
5591
5592	do {
5593		struct sk_buff *skb = rb_to_skb(node);
5594
5595		/* If incoming skb would land last in ofo queue, stop pruning. */
5596		if (after(TCP_SKB_CB(in_skb)->seq, TCP_SKB_CB(skb)->seq))
5597			break;
5598		pruned = true;
5599		prev = rb_prev(node);
5600		rb_erase(node, &tp->out_of_order_queue);
5601		goal -= skb->truesize;
5602		tcp_drop_reason(sk, skb, SKB_DROP_REASON_TCP_OFO_QUEUE_PRUNE);
5603		tp->ooo_last_skb = rb_to_skb(prev);
5604		if (!prev || goal <= 0) {
5605			if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
5606			    !tcp_under_memory_pressure(sk))
5607				break;
5608			goal = sk->sk_rcvbuf >> 3;
5609		}
5610		node = prev;
5611	} while (node);
5612
5613	if (pruned) {
5614		NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
5615		/* Reset SACK state.  A conforming SACK implementation will
5616		 * do the same at a timeout based retransmit.  When a connection
5617		 * is in a sad state like this, we care only about integrity
5618		 * of the connection not performance.
5619		 */
5620		if (tp->rx_opt.sack_ok)
5621			tcp_sack_reset(&tp->rx_opt);
5622	}
5623	return pruned;
5624}
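/* Editor's note: a worked example of the pruning goal above (assumed sizes).
 * With sk_rcvbuf at 1 MB the initial goal is 1 MB >> 3 = 128 kB of truesize
 * to free from the tail of the ofo queue; once that goal is met, pruning
 * continues only while the socket is still over sk_rcvbuf or the system
 * remains under TCP memory pressure.
 */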
5625
5626/* Reduce allocated memory if we can, trying to get
5627 * the socket within its memory limits again.
5628 *
5629 * Return less than zero if we should start dropping frames
5630 * until the socket owning process reads some of the data
5631 * to stabilize the situation.
5632 */
5633static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb)
5634{
5635	struct tcp_sock *tp = tcp_sk(sk);
5636
5637	NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED);
5638
5639	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
5640		tcp_clamp_window(sk);
5641	else if (tcp_under_memory_pressure(sk))
5642		tcp_adjust_rcv_ssthresh(sk);
5643
5644	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
5645		return 0;
5646
5647	tcp_collapse_ofo_queue(sk);
5648	if (!skb_queue_empty(&sk->sk_receive_queue))
5649		tcp_collapse(sk, &sk->sk_receive_queue, NULL,
5650			     skb_peek(&sk->sk_receive_queue),
5651			     NULL,
5652			     tp->copied_seq, tp->rcv_nxt);
5653
5654	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
5655		return 0;
5656
5657	/* Collapsing did not help, destructive actions follow.
5658	 * This must not ever occur. */
5659
5660	tcp_prune_ofo_queue(sk, in_skb);
5661
5662	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
5663		return 0;
5664
5665	/* If we are really being abused, tell the caller to silently
5666	 * drop receive data on the floor.  It will get retransmitted
5667	 * and hopefully then we'll have sufficient space.
5668	 */
5669	NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED);
5670
5671	/* Massive buffer overcommit. */
5672	tp->pred_flags = 0;
5673	return -1;
5674}
5675
5676static bool tcp_should_expand_sndbuf(struct sock *sk)
5677{
5678	const struct tcp_sock *tp = tcp_sk(sk);
5679
5680	/* If the user specified a specific send buffer setting, do
5681	 * not modify it.
5682	 */
5683	if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
5684		return false;
5685
5686	/* If we are under global TCP memory pressure, do not expand.  */
5687	if (tcp_under_memory_pressure(sk)) {
5688		int unused_mem = sk_unused_reserved_mem(sk);
5689
5690		/* Adjust sndbuf according to reserved mem. But make sure
5691		 * it never goes below SOCK_MIN_SNDBUF.
5692		 * See sk_stream_moderate_sndbuf() for more details.
5693		 */
5694		if (unused_mem > SOCK_MIN_SNDBUF)
5695			WRITE_ONCE(sk->sk_sndbuf, unused_mem);
5696
5697		return false;
5698	}
5699
5700	/* If we are under soft global TCP memory pressure, do not expand.  */
5701	if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
5702		return false;
5703
5704	/* If we filled the congestion window, do not expand.  */
5705	if (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp))
5706		return false;
5707
5708	return true;
5709}
5710
5711static void tcp_new_space(struct sock *sk)
5712{
5713	struct tcp_sock *tp = tcp_sk(sk);
5714
5715	if (tcp_should_expand_sndbuf(sk)) {
5716		tcp_sndbuf_expand(sk);
5717		tp->snd_cwnd_stamp = tcp_jiffies32;
5718	}
5719
5720	INDIRECT_CALL_1(sk->sk_write_space, sk_stream_write_space, sk);
5721}
5722
5723/* Caller made space either from:
5724 * 1) Freeing skbs in rtx queues (after tp->snd_una has advanced)
5725 * 2) Sent skbs from output queue (and thus advancing tp->snd_nxt)
5726 *
5727 * We might be able to generate EPOLLOUT to the application if:
5728 * 1) Space consumed in output/rtx queues is below sk->sk_sndbuf/2
5729 * 2) notsent amount (tp->write_seq - tp->snd_nxt) became
5730 *    small enough that tcp_stream_memory_free() decides it
5731 *    is time to generate EPOLLOUT.
5732 */
5733void tcp_check_space(struct sock *sk)
5734{
5735	/* pairs with tcp_poll() */
5736	smp_mb();
5737	if (sk->sk_socket &&
5738	    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
5739		tcp_new_space(sk);
5740		if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
5741			tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
5742	}
5743}
5744
5745static inline void tcp_data_snd_check(struct sock *sk)
5746{
5747	tcp_push_pending_frames(sk);
5748	tcp_check_space(sk);
5749}
5750
5751/*
5752 * Check if sending an ack is needed.
5753 */
5754static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
5755{
5756	struct tcp_sock *tp = tcp_sk(sk);
5757	unsigned long rtt, delay;
5758
5759	    /* More than one full frame received... */
5760	if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
5761	     /* ... and right edge of window advances far enough.
5762	      * (tcp_recvmsg() will send ACK otherwise).
5763	      * If application uses SO_RCVLOWAT, we want send ack now if
5764	      * we have not received enough bytes to satisfy the condition.
5765	      */
5766	    (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat ||
5767	     __tcp_select_window(sk) >= tp->rcv_wnd)) ||
5768	    /* We ACK each frame or... */
5769	    tcp_in_quickack_mode(sk) ||
5770	    /* Protocol state mandates a one-time immediate ACK */
5771	    inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOW) {
5772		/* If we are running from __release_sock() in user context,
5773		 * defer the ack until tcp_release_cb().
5774		 */
5775		if (sock_owned_by_user_nocheck(sk) &&
5776		    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_backlog_ack_defer)) {
5777			set_bit(TCP_ACK_DEFERRED, &sk->sk_tsq_flags);
5778			return;
5779		}
5780send_now:
5781		tcp_send_ack(sk);
5782		return;
5783	}
5784
5785	if (!ofo_possible || RB_EMPTY_ROOT(&tp->out_of_order_queue)) {
5786		tcp_send_delayed_ack(sk);
5787		return;
5788	}
5789
5790	if (!tcp_is_sack(tp) ||
5791	    tp->compressed_ack >= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr))
5792		goto send_now;
5793
5794	if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) {
5795		tp->compressed_ack_rcv_nxt = tp->rcv_nxt;
5796		tp->dup_ack_counter = 0;
5797	}
5798	if (tp->dup_ack_counter < TCP_FASTRETRANS_THRESH) {
5799		tp->dup_ack_counter++;
5800		goto send_now;
5801	}
5802	tp->compressed_ack++;
5803	if (hrtimer_is_queued(&tp->compressed_ack_timer))
5804		return;
5805
5806	/* Compressed ack timer: 5% of RTT, but no more than tcp_comp_sack_delay_ns */
5807
5808	rtt = tp->rcv_rtt_est.rtt_us;
5809	if (tp->srtt_us && tp->srtt_us < rtt)
5810		rtt = tp->srtt_us;
5811
5812	delay = min_t(unsigned long,
5813		      READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_delay_ns),
5814		      rtt * (NSEC_PER_USEC >> 3)/20);
5815	sock_hold(sk);
5816	hrtimer_start_range_ns(&tp->compressed_ack_timer, ns_to_ktime(delay),
5817			       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_slack_ns),
5818			       HRTIMER_MODE_REL_PINNED_SOFT);
5819}
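/* Worked example (illustrative only): both rcv_rtt_est.rtt_us and srtt_us
 * above are stored left-shifted by 3, i.e. they hold 8 * RTT in usec, so
 * rtt * (NSEC_PER_USEC >> 3) / 20 == (8 * RTT_us) * 125 / 20 == RTT_ns / 20,
 * i.e. 5% of the smoothed RTT.  For a 40 ms RTT (field value 320000) that is
 * 2 ms, which the sysctl cap (1 ms by default) then reduces to 1 ms.  A
 * standalone sketch of the same arithmetic, never built into the kernel:
 */
#if 0
static unsigned long compressed_ack_delay_ns(unsigned long rtt_shifted_us,
					     unsigned long cap_ns)
{
	unsigned long five_percent_ns = rtt_shifted_us * (1000UL >> 3) / 20;

	return five_percent_ns < cap_ns ? five_percent_ns : cap_ns;
}
#endif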
5820
5821static inline void tcp_ack_snd_check(struct sock *sk)
5822{
5823	if (!inet_csk_ack_scheduled(sk)) {
5824		/* We sent a data segment already. */
5825		return;
5826	}
5827	__tcp_ack_snd_check(sk, 1);
5828}
5829
5830/*
5831 *	This routine is only called when we have urgent data
5832 *	signaled. It's the 'slow' part of tcp_urg. It could be
5833 *	moved inline now as tcp_urg is only called from one
5834 *	place. We handle URGent data wrong; we have to, as
5835 *	BSD still doesn't use the correction from RFC 961.
5836 *	For 1003.1g we should support a new option TCP_STDURG to permit
5837 *	either form (or just set the sysctl tcp_stdurg).
5838 */
5839
5840static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
5841{
5842	struct tcp_sock *tp = tcp_sk(sk);
5843	u32 ptr = ntohs(th->urg_ptr);
5844
5845	if (ptr && !READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_stdurg))
5846		ptr--;
5847	ptr += ntohl(th->seq);
5848
5849	/* Ignore urgent data that we've already seen and read. */
5850	if (after(tp->copied_seq, ptr))
5851		return;
5852
5853	/* Do not replay urg ptr.
5854	 *
5855	 * NOTE: interesting situation not covered by specs.
5856	 * A misbehaving sender may send an urg ptr pointing into a segment
5857	 * which we already have in the ofo queue. We are not able to fetch
5858	 * such data and will stay in TCP_URG_NOTYET until it is eaten
5859	 * by recvmsg(). It seems we are not obliged to handle such wicked
5860	 * situations, but it is worth thinking about the possibility of a
5861	 * DoS using some hypothetical application level deadlock.
5862	 */
5863	if (before(ptr, tp->rcv_nxt))
5864		return;
5865
5866	/* Do we already have a newer (or duplicate) urgent pointer? */
5867	if (tp->urg_data && !after(ptr, tp->urg_seq))
5868		return;
5869
5870	/* Tell the world about our new urgent pointer. */
5871	sk_send_sigurg(sk);
5872
5873	/* We may be adding urgent data when the last byte read was
5874	 * urgent. To do this requires some care. We cannot just ignore
5875	 * tp->copied_seq since we would read the last urgent byte again
5876	 * as data, nor can we alter copied_seq until this data arrives
5877	 * or we break the semantics of SIOCATMARK (and thus sockatmark()).
5878	 *
5879	 * NOTE. Double Dutch. Rendered into plain English: the author of the
5880	 * comment above did something like send("A", MSG_OOB); send("B", MSG_OOB);
5881	 * and expected both A and B to disappear from the stream. This is _wrong_.
5882	 * Though this happens in BSD with high probability, it is only occasional;
5883	 * any application relying on it is buggy. Note also that the fix "works"
5884	 * only in this artificial test. Insert some normal data between A and B
5885	 * and we will again diverge from BSD. Verdict: it is better to remove it
5886	 * and trap the buggy users.
5887	 */
5888	if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
5889	    !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) {
5890		struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
5891		tp->copied_seq++;
5892		if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
5893			__skb_unlink(skb, &sk->sk_receive_queue);
5894			__kfree_skb(skb);
5895		}
5896	}
5897
5898	WRITE_ONCE(tp->urg_data, TCP_URG_NOTYET);
5899	WRITE_ONCE(tp->urg_seq, ptr);
5900
5901	/* Disable header prediction. */
5902	tp->pred_flags = 0;
5903}
5904
5905/* This is the 'fast' part of urgent handling. */
5906static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th)
5907{
5908	struct tcp_sock *tp = tcp_sk(sk);
5909
5910	/* Check if we get a new urgent pointer - normally not. */
5911	if (unlikely(th->urg))
5912		tcp_check_urg(sk, th);
5913
5914	/* Do we wait for any urgent data? - normally not... */
5915	if (unlikely(tp->urg_data == TCP_URG_NOTYET)) {
5916		u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
5917			  th->syn;
5918
5919		/* Is the urgent pointer pointing into this packet? */
5920		if (ptr < skb->len) {
5921			u8 tmp;
5922			if (skb_copy_bits(skb, ptr, &tmp, 1))
5923				BUG();
5924			WRITE_ONCE(tp->urg_data, TCP_URG_VALID | tmp);
5925			if (!sock_flag(sk, SOCK_DEAD))
5926				sk->sk_data_ready(sk);
5927		}
5928	}
5929}
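/* Illustrative user-space sketch (not kernel code, wrapped in #if 0): the
 * urgent-pointer machinery above is what backs MSG_OOB and SIOCATMARK /
 * sockatmark().  A receiver that drains in-band data up to the urgent mark
 * and then fetches the single out-of-band byte could look like this:
 */
#if 0
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int read_oob_byte(int fd, char *oob)
{
	char buf[512];
	int at_mark = 0;

	while (!at_mark) {
		if (ioctl(fd, SIOCATMARK, &at_mark) < 0)
			return -1;
		if (!at_mark && read(fd, buf, sizeof(buf)) <= 0)
			return -1;
	}
	/* tcp_urg()/tcp_check_urg() made this byte available as OOB data. */
	return recv(fd, oob, 1, MSG_OOB) == 1 ? 0 : -1;
}
#endif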
5930
5931/* Accept RST for rcv_nxt - 1 after a FIN.
5932 * When TCP connections are abruptly terminated from Mac OS X (via ^C), a
5933 * FIN is sent followed by an RST packet. The RST is sent with the same
5934 * sequence number as the FIN, and thus according to RFC 5961 a challenge
5935 * ACK should be sent. However, Mac OS X rate limits replies to challenge
5936 * ACKs on the closed socket. In addition, middleboxes can drop either the
5937 * challenge ACK or a subsequent RST.
5938 */
5939static bool tcp_reset_check(const struct sock *sk, const struct sk_buff *skb)
5940{
5941	const struct tcp_sock *tp = tcp_sk(sk);
5942
5943	return unlikely(TCP_SKB_CB(skb)->seq == (tp->rcv_nxt - 1) &&
5944			(1 << sk->sk_state) & (TCPF_CLOSE_WAIT | TCPF_LAST_ACK |
5945					       TCPF_CLOSING));
5946}
 
5947
5948/* Does PAWS and seqno based validation of an incoming segment; the TCP
5949 * flags play a significant role here.
5950 */
5951static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
5952				  const struct tcphdr *th, int syn_inerr)
5953{
 
5954	struct tcp_sock *tp = tcp_sk(sk);
5955	SKB_DR(reason);
5956
5957	/* RFC1323: H1. Apply PAWS check first. */
5958	if (tcp_fast_parse_options(sock_net(sk), skb, th, tp) &&
5959	    tp->rx_opt.saw_tstamp &&
5960	    tcp_paws_discard(sk, skb)) {
5961		if (!th->rst) {
5962			if (unlikely(th->syn))
5963				goto syn_challenge;
5964			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
5965			if (!tcp_oow_rate_limited(sock_net(sk), skb,
5966						  LINUX_MIB_TCPACKSKIPPEDPAWS,
5967						  &tp->last_oow_ack_time))
5968				tcp_send_dupack(sk, skb);
5969			SKB_DR_SET(reason, TCP_RFC7323_PAWS);
5970			goto discard;
5971		}
5972		/* Reset is accepted even if it did not pass PAWS. */
5973	}
5974
5975	/* Step 1: check sequence number */
5976	reason = tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
5977	if (reason) {
5978		/* RFC793, page 37: "In all states except SYN-SENT, all reset
5979		 * (RST) segments are validated by checking their SEQ-fields."
5980		 * And page 69: "If an incoming segment is not acceptable,
5981		 * an acknowledgment should be sent in reply (unless the RST
5982		 * bit is set, if so drop the segment and return)".
5983		 */
5984		if (!th->rst) {
5985			if (th->syn)
5986				goto syn_challenge;
5987			if (!tcp_oow_rate_limited(sock_net(sk), skb,
5988						  LINUX_MIB_TCPACKSKIPPEDSEQ,
5989						  &tp->last_oow_ack_time))
5990				tcp_send_dupack(sk, skb);
5991		} else if (tcp_reset_check(sk, skb)) {
5992			goto reset;
5993		}
5994		goto discard;
5995	}
5996
5997	/* Step 2: check RST bit */
5998	if (th->rst) {
5999		/* RFC 5961 3.2 (extend to match against (RCV.NXT - 1) after a
6000		 * FIN and SACK too if available):
6001		 * If seq num matches RCV.NXT or (RCV.NXT - 1) after a FIN, or
6002		 * the right-most SACK block,
6003		 * then
6004		 *     RESET the connection
6005		 * else
6006		 *     Send a challenge ACK
6007		 */
6008		if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt ||
6009		    tcp_reset_check(sk, skb))
6010			goto reset;
6011
6012		if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) {
6013			struct tcp_sack_block *sp = &tp->selective_acks[0];
6014			int max_sack = sp[0].end_seq;
6015			int this_sack;
6016
6017			for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;
6018			     ++this_sack) {
6019				max_sack = after(sp[this_sack].end_seq,
6020						 max_sack) ?
6021					sp[this_sack].end_seq : max_sack;
6022			}
6023
6024			if (TCP_SKB_CB(skb)->seq == max_sack)
6025				goto reset;
6026		}
6027
6028		/* Disable TFO if RST is out-of-order
6029		 * and no data has been received
6030		 * for current active TFO socket
6031		 */
6032		if (tp->syn_fastopen && !tp->data_segs_in &&
6033		    sk->sk_state == TCP_ESTABLISHED)
6034			tcp_fastopen_active_disable(sk);
6035		tcp_send_challenge_ack(sk);
6036		SKB_DR_SET(reason, TCP_RESET);
6037		goto discard;
6038	}
6039
6040	/* step 3: check security and precedence [ignored] */
6041
6042	/* step 4: Check for a SYN
6043	 * RFC 5961 4.2 : Send a challenge ack
6044	 */
6045	if (th->syn) {
6046		if (sk->sk_state == TCP_SYN_RECV && sk->sk_socket && th->ack &&
6047		    TCP_SKB_CB(skb)->seq + 1 == TCP_SKB_CB(skb)->end_seq &&
6048		    TCP_SKB_CB(skb)->seq + 1 == tp->rcv_nxt &&
6049		    TCP_SKB_CB(skb)->ack_seq == tp->snd_nxt)
6050			goto pass;
6051syn_challenge:
6052		if (syn_inerr)
6053			TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
6054		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
6055		tcp_send_challenge_ack(sk);
6056		SKB_DR_SET(reason, TCP_INVALID_SYN);
6057		goto discard;
6058	}
6059
6060pass:
6061	bpf_skops_parse_hdr(sk, skb);
6062
6063	return true;
6064
6065discard:
6066	tcp_drop_reason(sk, skb, reason);
6067	return false;
6068
6069reset:
6070	tcp_reset(sk, skb);
6071	__kfree_skb(skb);
6072	return false;
6073}
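/* Condensed sketch (illustrative only, never built) of the RST acceptance
 * rule that tcp_validate_incoming() implements above: RFC 5961 3.2 plus the
 * Linux extensions for "rcv_nxt - 1 after a FIN" and the right-most SACK
 * edge.  Anything else earns a challenge ACK instead of a reset.
 */
#if 0
enum rst_action { RST_ACCEPT, RST_CHALLENGE_ACK };

static enum rst_action classify_rst(u32 seq, u32 rcv_nxt,
				    u32 rightmost_sack_end,
				    bool fin_already_received)
{
	if (seq == rcv_nxt)
		return RST_ACCEPT;		/* exactly in sequence */
	if (fin_already_received && seq == rcv_nxt - 1)
		return RST_ACCEPT;		/* RST reusing the FIN's seq */
	if (rightmost_sack_end && seq == rightmost_sack_end)
		return RST_ACCEPT;		/* matches highest SACK edge */
	return RST_CHALLENGE_ACK;
}
#endif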
6074
6075/*
6076 *	TCP receive function for the ESTABLISHED state.
6077 *
6078 *	It is split into a fast path and a slow path. The fast path is
6079 * 	disabled when:
6080 *	- A zero window was announced from us - zero window probing
6081 *        is only handled properly in the slow path.
6082 *	- Out of order segments arrived.
6083 *	- Urgent data is expected.
6084 *	- There is no buffer space left
6085 *	- Unexpected TCP flags/window values/header lengths are received
6086 *	  (detected by checking the TCP header against pred_flags)
6087 *	- Data is sent in both directions. Fast path only supports pure senders
6088 *	  or pure receivers (this means either the sequence number or the ack
6089 *	  value must stay constant)
6090 *	- Unexpected TCP option.
6091 *
6092 *	When these conditions are not satisfied it drops into a standard
6093 *	receive procedure patterned after RFC793 to handle all cases.
6094 *	The first three cases are guaranteed by proper pred_flags setting,
6095 *	the rest is checked inline. Fast processing is turned on in
6096 *	tcp_data_queue when everything is OK.
6097 */
6098void tcp_rcv_established(struct sock *sk, struct sk_buff *skb)
 
6099{
6100	enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED;
6101	const struct tcphdr *th = (const struct tcphdr *)skb->data;
6102	struct tcp_sock *tp = tcp_sk(sk);
6103	unsigned int len = skb->len;
6104
6105	/* TCP congestion window tracking */
6106	trace_tcp_probe(sk, skb);
6107
6108	tcp_mstamp_refresh(tp);
6109	if (unlikely(!rcu_access_pointer(sk->sk_rx_dst)))
6110		inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
6111	/*
6112	 *	Header prediction.
6113	 *	The code loosely follows the one in the famous
6114	 *	"30 instruction TCP receive" Van Jacobson mail.
6115	 *
6116	 *	Van's trick is to deposit buffers into socket queue
6117	 *	on a device interrupt, to call tcp_recv function
6118	 *	on the receive process context and checksum and copy
6119	 *	the buffer to user space. smart...
6120	 *
6121	 *	Our current scheme is not silly either but we take the
6122	 *	extra cost of the net_bh soft interrupt processing...
6123	 *	We do checksum and copy also but from device to kernel.
6124	 */
6125
6126	tp->rx_opt.saw_tstamp = 0;
6127
6128	/*	pred_flags is 0xS?10 << 16 + snd_wnd
6129	 *	if header_prediction is to be made
6130	 *	'S' will always be tp->tcp_header_len >> 2
6131	 *	'?' will be 0 for the fast path, otherwise pred_flags is 0 to
6132	 *	turn it off (when there are holes in the receive
6133	 *	space for instance).
6134	 *	PSH flag is ignored.
6135	 */
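	/* Worked example (illustrative): with timestamps negotiated,
	 * tp->tcp_header_len is 32 bytes, so 'S' is 8 and the fast-path value
	 * is 0x8010 << 16 | tp->snd_wnd: data offset of 8 words, only the ACK
	 * flag set, window unchanged.  An incoming segment whose flag word
	 * differs in the data offset, the window, or any flag other than PSH
	 * (masked off by TCP_HP_BITS) fails the comparison below and takes
	 * the slow path.
	 */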
6136
6137	if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
6138	    TCP_SKB_CB(skb)->seq == tp->rcv_nxt &&
6139	    !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
6140		int tcp_header_len = tp->tcp_header_len;
6141
6142		/* Timestamp header prediction: tcp_header_len
6143		 * is automatically equal to th->doff*4 due to pred_flags
6144		 * match.
6145		 */
6146
6147		/* Check timestamp */
6148		if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
6149			/* No? Slow path! */
6150			if (!tcp_parse_aligned_timestamp(tp, th))
6151				goto slow_path;
6152
6153			/* If PAWS failed, check it more carefully in slow path */
6154			if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0)
6155				goto slow_path;
6156
6157			/* DO NOT update ts_recent here: if the checksum fails
6158			 * and the timestamp was the corrupted part, it would result
6159			 * in a hung connection since we would drop all
6160			 * future packets due to the PAWS test.
6161			 */
6162		}
6163
6164		if (len <= tcp_header_len) {
6165			/* Bulk data transfer: sender */
6166			if (len == tcp_header_len) {
6167				/* Predicted packet is in window by definition.
6168				 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
6169				 * Hence, check seq<=rcv_wup reduces to:
6170				 */
6171				if (tcp_header_len ==
6172				    (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
6173				    tp->rcv_nxt == tp->rcv_wup)
6174					tcp_store_ts_recent(tp);
6175
6176				/* We know that such packets are checksummed
6177				 * on entry.
6178				 */
6179				tcp_ack(sk, skb, 0);
6180				__kfree_skb(skb);
6181				tcp_data_snd_check(sk);
6182				/* When receiving pure ack in fast path, update
6183				 * last ts ecr directly instead of calling
6184				 * tcp_rcv_rtt_measure_ts()
6185				 */
6186				tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr;
6187				return;
6188			} else { /* Header too small */
6189				reason = SKB_DROP_REASON_PKT_TOO_SMALL;
6190				TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
6191				goto discard;
6192			}
6193		} else {
6194			int eaten = 0;
6195			bool fragstolen = false;
6196
6197			if (tcp_checksum_complete(skb))
6198				goto csum_error;
6199
6200			if ((int)skb->truesize > sk->sk_forward_alloc)
6201				goto step5;
6202
6203			/* Predicted packet is in window by definition.
6204			 * seq == rcv_nxt and rcv_wup <= rcv_nxt.
6205			 * Hence, check seq<=rcv_wup reduces to:
6206			 */
6207			if (tcp_header_len ==
6208			    (sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
6209			    tp->rcv_nxt == tp->rcv_wup)
6210				tcp_store_ts_recent(tp);
6211
6212			tcp_rcv_rtt_measure_ts(sk, skb);
6213
6214			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
6215
6216			/* Bulk data transfer: receiver */
6217			tcp_cleanup_skb(skb);
6218			__skb_pull(skb, tcp_header_len);
6219			eaten = tcp_queue_rcv(sk, skb, &fragstolen);
6220
6221			tcp_event_data_recv(sk, skb);
6222
6223			if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
6224				/* Well, only one small jumplet in fast path... */
6225				tcp_ack(sk, skb, FLAG_DATA);
6226				tcp_data_snd_check(sk);
6227				if (!inet_csk_ack_scheduled(sk))
6228					goto no_ack;
6229			} else {
6230				tcp_update_wl(tp, TCP_SKB_CB(skb)->seq);
6231			}
6232
6233			__tcp_ack_snd_check(sk, 0);
 
6234no_ack:
6235			if (eaten)
6236				kfree_skb_partial(skb, fragstolen);
6237			tcp_data_ready(sk);
6238			return;
 
6239		}
6240	}
6241
6242slow_path:
6243	if (len < (th->doff << 2) || tcp_checksum_complete(skb))
6244		goto csum_error;
6245
6246	if (!th->ack && !th->rst && !th->syn) {
6247		reason = SKB_DROP_REASON_TCP_FLAGS;
6248		goto discard;
6249	}
6250
6251	/*
6252	 *	Standard slow path.
6253	 */
6254
6255	if (!tcp_validate_incoming(sk, skb, th, 1))
6256		return;
 
6257
6258step5:
6259	reason = tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT);
6260	if ((int)reason < 0) {
6261		reason = -reason;
6262		goto discard;
6263	}
6264	tcp_rcv_rtt_measure_ts(sk, skb);
6265
6266	/* Process urgent data. */
6267	tcp_urg(sk, skb, th);
6268
6269	/* step 7: process the segment text */
6270	tcp_data_queue(sk, skb);
6271
6272	tcp_data_snd_check(sk);
6273	tcp_ack_snd_check(sk);
6274	return;
6275
6276csum_error:
6277	reason = SKB_DROP_REASON_TCP_CSUM;
6278	trace_tcp_bad_csum(skb);
6279	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
6280	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
6281
6282discard:
6283	tcp_drop_reason(sk, skb, reason);
 
6284}
6285EXPORT_SYMBOL(tcp_rcv_established);
6286
6287void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb)
6288{
6289	struct inet_connection_sock *icsk = inet_csk(sk);
6290	struct tcp_sock *tp = tcp_sk(sk);
6291
6292	tcp_mtup_init(sk);
6293	icsk->icsk_af_ops->rebuild_header(sk);
6294	tcp_init_metrics(sk);
6295
6296	/* Initialize the congestion window to start the transfer.
6297	 * Cut cwnd down to 1 per RFC 5681 if the SYN or SYN-ACK has been
6298	 * retransmitted. In light of RFC 6298's more aggressive 1 sec
6299	 * initial RTO, we only reset cwnd when more than one SYN/SYN-ACK
6300	 * retransmission has occurred.
6301	 */
6302	if (tp->total_retrans > 1 && tp->undo_marker)
6303		tcp_snd_cwnd_set(tp, 1);
6304	else
6305		tcp_snd_cwnd_set(tp, tcp_init_cwnd(tp, __sk_dst_get(sk)));
6306	tp->snd_cwnd_stamp = tcp_jiffies32;
6307
6308	bpf_skops_established(sk, bpf_op, skb);
6309	/* Initialize congestion control unless BPF initialized it already: */
6310	if (!icsk->icsk_ca_initialized)
6311		tcp_init_congestion_control(sk);
6312	tcp_init_buffer_space(sk);
6313}
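/* Illustrative sketch (not the kernel's tcp_init_cwnd(), never built): by
 * default the initial window chosen above is TCP_INIT_CWND (10 segments per
 * RFC 6928) unless a per-route "initcwnd" metric overrides it, and it
 * collapses to 1 segment after repeated SYN/SYN-ACK retransmissions as the
 * comment explains (the per-socket cwnd clamp is ignored here).
 */
#if 0
static unsigned int pick_initial_cwnd(unsigned int route_initcwnd,
				      unsigned int syn_retransmissions,
				      bool undo_marker_set)
{
	if (syn_retransmissions > 1 && undo_marker_set)
		return 1;	/* conservative reset per RFC 5681 */
	return route_initcwnd ? route_initcwnd : 10;	/* RFC 6928 IW10 */
}
#endif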
6314
6315void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
6316{
6317	struct tcp_sock *tp = tcp_sk(sk);
6318	struct inet_connection_sock *icsk = inet_csk(sk);
6319
6320	tcp_ao_finish_connect(sk, skb);
6321	tcp_set_state(sk, TCP_ESTABLISHED);
6322	icsk->icsk_ack.lrcvtime = tcp_jiffies32;
6323
6324	if (skb) {
6325		icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
6326		security_inet_conn_established(sk, skb);
6327		sk_mark_napi_id(sk, skb);
6328	}
6329
6330	tcp_init_transfer(sk, BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, skb);
6331
6332	/* Prevent spurious tcp_cwnd_restart() on first data
6333	 * packet.
6334	 */
6335	tp->lsndtime = tcp_jiffies32;
6336
6337	if (sock_flag(sk, SOCK_KEEPOPEN))
6338		inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
6339
6340	if (!tp->rx_opt.snd_wscale)
6341		__tcp_fast_path_on(tp, tp->snd_wnd);
6342	else
6343		tp->pred_flags = 0;
6344}
6345
6346static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
6347				    struct tcp_fastopen_cookie *cookie)
6348{
6349	struct tcp_sock *tp = tcp_sk(sk);
6350	struct sk_buff *data = tp->syn_data ? tcp_rtx_queue_head(sk) : NULL;
6351	u16 mss = tp->rx_opt.mss_clamp, try_exp = 0;
6352	bool syn_drop = false;
6353
6354	if (mss == tp->rx_opt.user_mss) {
6355		struct tcp_options_received opt;
6356
6357		/* Get original SYNACK MSS value if user MSS sets mss_clamp */
6358		tcp_clear_options(&opt);
6359		opt.user_mss = opt.mss_clamp = 0;
6360		tcp_parse_options(sock_net(sk), synack, &opt, 0, NULL);
6361		mss = opt.mss_clamp;
6362	}
6363
6364	if (!tp->syn_fastopen) {
6365		/* Ignore an unsolicited cookie */
6366		cookie->len = -1;
6367	} else if (tp->total_retrans) {
6368		/* SYN timed out and the SYN-ACK neither has a cookie nor
6369		 * acknowledges data. Presumably the remote received only
6370		 * the retransmitted (regular) SYNs: either the original
6371		 * SYN-data or the corresponding SYN-ACK was dropped.
6372		 */
6373		syn_drop = (cookie->len < 0 && data);
6374	} else if (cookie->len < 0 && !tp->syn_data) {
6375		/* We requested a cookie but didn't get it. If we did not use
6376		 * the (old) exp opt format then try it next time (try_exp=1).
6377		 * Otherwise we go back to using the RFC 7413 opt (try_exp=2).
6378		 */
6379		try_exp = tp->syn_fastopen_exp ? 2 : 1;
6380	}
6381
6382	tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp);
6383
6384	if (data) { /* Retransmit unacked data in SYN */
6385		if (tp->total_retrans)
6386			tp->fastopen_client_fail = TFO_SYN_RETRANSMITTED;
6387		else
6388			tp->fastopen_client_fail = TFO_DATA_NOT_ACKED;
6389		skb_rbtree_walk_from(data)
6390			 tcp_mark_skb_lost(sk, data);
6391		tcp_non_congestion_loss_retransmit(sk);
6392		NET_INC_STATS(sock_net(sk),
6393				LINUX_MIB_TCPFASTOPENACTIVEFAIL);
6394		return true;
6395	}
6396	tp->syn_data_acked = tp->syn_data;
6397	if (tp->syn_data_acked) {
6398		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
6399		/* SYN-data is counted as two separate packets in tcp_ack() */
6400		if (tp->delivered > 1)
6401			--tp->delivered;
6402	}
6403
6404	tcp_fastopen_add_skb(sk, synack);
6405
6406	return false;
6407}
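/* Illustrative user-space sketch (not kernel code, wrapped in #if 0,
 * assuming a libc that exposes MSG_FASTOPEN): the client-side Fast Open
 * path handled above is normally exercised by sending data on a not yet
 * connected TCP socket with MSG_FASTOPEN, or by setting the
 * TCP_FASTOPEN_CONNECT socket option before a regular connect().
 */
#if 0
#include <sys/socket.h>
#include <netinet/in.h>

static ssize_t tfo_connect_and_send(int fd, const struct sockaddr_in *dst,
				    const void *buf, size_t len)
{
	/* If a valid cookie is cached, the SYN carries @buf; otherwise the
	 * kernel falls back to a regular handshake and queues the data.
	 */
	return sendto(fd, buf, len, MSG_FASTOPEN,
		      (const struct sockaddr *)dst, sizeof(*dst));
}
#endif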
6408
6409static void smc_check_reset_syn(struct tcp_sock *tp)
6410{
6411#if IS_ENABLED(CONFIG_SMC)
6412	if (static_branch_unlikely(&tcp_have_smc)) {
6413		if (tp->syn_smc && !tp->rx_opt.smc_ok)
6414			tp->syn_smc = 0;
6415	}
6416#endif
6417}
6418
6419static void tcp_try_undo_spurious_syn(struct sock *sk)
6420{
6421	struct tcp_sock *tp = tcp_sk(sk);
6422	u32 syn_stamp;
6423
6424	/* undo_marker is set when SYN or SYNACK times out. The timeout is
6425	 * spurious if the ACK's timestamp option echo value matches the
6426	 * original SYN timestamp.
6427	 */
6428	syn_stamp = tp->retrans_stamp;
6429	if (tp->undo_marker && syn_stamp && tp->rx_opt.saw_tstamp &&
6430	    syn_stamp == tp->rx_opt.rcv_tsecr)
6431		tp->undo_marker = 0;
6432}
6433
6434static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
6435					 const struct tcphdr *th)
6436{
 
6437	struct inet_connection_sock *icsk = inet_csk(sk);
6438	struct tcp_sock *tp = tcp_sk(sk);
6439	struct tcp_fastopen_cookie foc = { .len = -1 };
6440	int saved_clamp = tp->rx_opt.mss_clamp;
6441	bool fastopen_fail;
6442	SKB_DR(reason);
6443
6444	tcp_parse_options(sock_net(sk), skb, &tp->rx_opt, 0, &foc);
6445	if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
6446		tp->rx_opt.rcv_tsecr -= tp->tsoffset;
6447
6448	if (th->ack) {
6449		/* rfc793:
6450		 * "If the state is SYN-SENT then
6451		 *    first check the ACK bit
6452		 *      If the ACK bit is set
6453		 *	  If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send
6454		 *        a reset (unless the RST bit is set, if so drop
6455		 *        the segment and return)"
6456		 */
6457		if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) ||
6458		    after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
6459			/* Previous FIN/ACK or RST/ACK might be ignored. */
6460			if (icsk->icsk_retransmits == 0)
6461				inet_csk_reset_xmit_timer(sk,
6462						ICSK_TIME_RETRANS,
6463						TCP_TIMEOUT_MIN, TCP_RTO_MAX);
6464			SKB_DR_SET(reason, TCP_INVALID_ACK_SEQUENCE);
6465			goto reset_and_undo;
6466		}
6467
6468		if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
6469		    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
6470			     tcp_time_stamp_ts(tp))) {
6471			NET_INC_STATS(sock_net(sk),
6472					LINUX_MIB_PAWSACTIVEREJECTED);
6473			SKB_DR_SET(reason, TCP_RFC7323_PAWS);
6474			goto reset_and_undo;
6475		}
6476
6477		/* Now ACK is acceptable.
6478		 *
6479		 * "If the RST bit is set
6480		 *    If the ACK was acceptable then signal the user "error:
6481		 *    connection reset", drop the segment, enter CLOSED state,
6482		 *    delete TCB, and return."
6483		 */
6484
6485		if (th->rst) {
6486			tcp_reset(sk, skb);
6487consume:
6488			__kfree_skb(skb);
6489			return 0;
6490		}
6491
6492		/* rfc793:
6493		 *   "fifth, if neither of the SYN or RST bits is set then
6494		 *    drop the segment and return."
6495		 *
6496		 *    See note below!
6497		 *                                        --ANK(990513)
6498		 */
6499		if (!th->syn) {
6500			SKB_DR_SET(reason, TCP_FLAGS);
6501			goto discard_and_undo;
6502		}
6503		/* rfc793:
6504		 *   "If the SYN bit is on ...
6505		 *    are acceptable then ...
6506		 *    (our SYN has been ACKed), change the connection
6507		 *    state to ESTABLISHED..."
6508		 */
6509
6510		tcp_ecn_rcv_synack(tp, th);
6511
6512		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
6513		tcp_try_undo_spurious_syn(sk);
6514		tcp_ack(sk, skb, FLAG_SLOWPATH);
6515
6516		/* Ok.. it's good. Set up sequence numbers and
6517		 * move to established.
6518		 */
6519		WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
6520		tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
6521
6522		/* RFC1323: The window in SYN & SYN/ACK segments is
6523		 * never scaled.
6524		 */
6525		tp->snd_wnd = ntohs(th->window);
 
6526
6527		if (!tp->rx_opt.wscale_ok) {
6528			tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
6529			WRITE_ONCE(tp->window_clamp,
6530				   min(tp->window_clamp, 65535U));
6531		}
6532
6533		if (tp->rx_opt.saw_tstamp) {
6534			tp->rx_opt.tstamp_ok	   = 1;
6535			tp->tcp_header_len =
6536				sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
6537			tp->advmss	    -= TCPOLEN_TSTAMP_ALIGNED;
6538			tcp_store_ts_recent(tp);
6539		} else {
6540			tp->tcp_header_len = sizeof(struct tcphdr);
6541		}
6542
6543		tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
6544		tcp_initialize_rcv_mss(sk);
6545
6546		/* Remember, tcp_poll() does not lock socket!
6547		 * Change state from SYN-SENT only after copied_seq
6548		 * is initialized. */
6549		WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
6550
6551		smc_check_reset_syn(tp);
6552
6553		smp_mb();
6554
6555		tcp_finish_connect(sk, skb);
 
6556
6557		fastopen_fail = (tp->syn_fastopen || tp->syn_data) &&
6558				tcp_rcv_fastopen_synack(sk, skb, &foc);
6559
6560		if (!sock_flag(sk, SOCK_DEAD)) {
6561			sk->sk_state_change(sk);
6562			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
6563		}
6564		if (fastopen_fail)
6565			return -1;
6566		if (sk->sk_write_pending ||
6567		    READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept) ||
6568		    inet_csk_in_pingpong_mode(sk)) {
6569			/* Save one ACK. Data will be ready after
6570			 * several ticks, if write_pending is set.
6571			 *
6572			 * It may be deleted, but with this feature tcpdumps
6573			 * look so _wonderfully_ clever, that I was not able
6574			 * to stand against the temptation 8)     --ANK
6575			 */
6576			inet_csk_schedule_ack(sk);
6577			tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
6578			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
6579						  TCP_DELACK_MAX, TCP_RTO_MAX);
6580			goto consume;
6581		}
6582		tcp_send_ack(sk);
6583		return -1;
6584	}
6585
6586	/* No ACK in the segment */
6587
6588	if (th->rst) {
6589		/* rfc793:
6590		 * "If the RST bit is set
6591		 *
6592		 *      Otherwise (no ACK) drop the segment and return."
6593		 */
6594		SKB_DR_SET(reason, TCP_RESET);
6595		goto discard_and_undo;
6596	}
6597
6598	/* PAWS check. */
6599	if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp &&
6600	    tcp_paws_reject(&tp->rx_opt, 0)) {
6601		SKB_DR_SET(reason, TCP_RFC7323_PAWS);
6602		goto discard_and_undo;
6603	}
6604	if (th->syn) {
6605		/* We see a SYN without an ACK. It is an attempt at a
6606		 * simultaneous connect with crossed SYNs.
6607		 * In particular, it can be a connect to self.
6608		 */
6609#ifdef CONFIG_TCP_AO
6610		struct tcp_ao_info *ao;
6611
6612		ao = rcu_dereference_protected(tp->ao_info,
6613					       lockdep_sock_is_held(sk));
6614		if (ao) {
6615			WRITE_ONCE(ao->risn, th->seq);
6616			ao->rcv_sne = 0;
6617		}
6618#endif
6619		tcp_set_state(sk, TCP_SYN_RECV);
6620
6621		if (tp->rx_opt.saw_tstamp) {
6622			tp->rx_opt.tstamp_ok = 1;
6623			tcp_store_ts_recent(tp);
6624			tp->tcp_header_len =
6625				sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
6626		} else {
6627			tp->tcp_header_len = sizeof(struct tcphdr);
6628		}
6629
6630		WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
6631		WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
6632		tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
6633
6634		/* RFC1323: The window in SYN & SYN/ACK segments is
6635		 * never scaled.
6636		 */
6637		tp->snd_wnd    = ntohs(th->window);
6638		tp->snd_wl1    = TCP_SKB_CB(skb)->seq;
6639		tp->max_window = tp->snd_wnd;
6640
6641		tcp_ecn_rcv_syn(tp, th);
6642
6643		tcp_mtup_init(sk);
6644		tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
6645		tcp_initialize_rcv_mss(sk);
6646
6647		tcp_send_synack(sk);
6648#if 0
6649		/* Note, we could accept data and URG from this segment.
6650		 * There are no obstacles to doing so (except that we must
6651		 * either change tcp_recvmsg() to prevent it from returning data
6652		 * before the 3WHS completes per RFC 793, or employ TCP Fast Open).
6653		 *
6654		 * However, if we ignore data in ACKless segments sometimes,
6655		 * we have no reason to accept it at other times.
6656		 * Also, it seems the code doing this in step6 of tcp_rcv_state_process
6657		 * is not flawless. So, discard the packet for sanity.
6658		 * Uncomment this return to process the data.
6659		 */
6660		return -1;
6661#else
6662		goto consume;
6663#endif
6664	}
6665	/* "fifth, if neither of the SYN or RST bits is set then
6666	 * drop the segment and return."
6667	 */
6668
6669discard_and_undo:
6670	tcp_clear_options(&tp->rx_opt);
6671	tp->rx_opt.mss_clamp = saved_clamp;
6672	tcp_drop_reason(sk, skb, reason);
6673	return 0;
6674
6675reset_and_undo:
6676	tcp_clear_options(&tp->rx_opt);
6677	tp->rx_opt.mss_clamp = saved_clamp;
6678	/* we can reuse/return @reason to its caller to handle the exception */
6679	return reason;
6680}
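/* Illustrative user-space sketch (not kernel code, wrapped in #if 0): the
 * sk_state_change()/sk_wake_async(SOCK_WAKE_IO, POLL_OUT) calls above are
 * what complete the classic non-blocking connect() pattern: connect()
 * returns -1/EINPROGRESS, the caller polls for writability, then reads
 * SO_ERROR to learn the outcome.
 */
#if 0
#include <sys/socket.h>
#include <poll.h>

static int wait_for_connect(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
	socklen_t len = sizeof(int);
	int err = 0;

	if (poll(&pfd, 1, -1) <= 0)
		return -1;
	/* POLLOUT fires once tcp_rcv_synsent_state_process() has moved the
	 * socket to ESTABLISHED, or once an error (e.g. RST) is recorded.
	 */
	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0)
		return -1;
	return err;
}
#endif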
6681
6682static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
6683{
6684	struct tcp_sock *tp = tcp_sk(sk);
6685	struct request_sock *req;
6686
6687	/* If we are still handling the SYNACK RTO, see if timestamp ECR allows
6688	 * undo. If peer SACKs triggered fast recovery, we can't undo here.
6689	 */
6690	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss && !tp->packets_out)
6691		tcp_try_undo_recovery(sk);
6692
6693	tcp_update_rto_time(tp);
6694	inet_csk(sk)->icsk_retransmits = 0;
6695	/* In tcp_fastopen_synack_timer() on the first SYNACK RTO we set
6696	 * retrans_stamp but don't enter CA_Loss, so in case that happened we
6697	 * need to zero retrans_stamp here to prevent spurious
6698	 * retransmits_timed_out(). However, if the ACK of our SYNACK caused us
6699	 * to enter CA_Recovery then we need to leave retrans_stamp as it was
6700	 * set entering CA_Recovery, for correct retransmits_timed_out() and
6701	 * undo behavior.
6702	 */
6703	tcp_retrans_stamp_cleanup(sk);
6704
6705	/* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1,
6706	 * we no longer need req so release it.
6707	 */
6708	req = rcu_dereference_protected(tp->fastopen_rsk,
6709					lockdep_sock_is_held(sk));
6710	reqsk_fastopen_remove(sk, req, false);
6711
6712	/* Re-arm the timer because data may have been sent out.
6713	 * This is similar to the regular data transmission case
6714	 * when new data has just been ack'ed.
6715	 *
6716	 * (TFO) - we could try to be more aggressive and
6717	 * retransmit any data sooner based on when it
6718	 * was sent out.
6719	 */
6720	tcp_rearm_rto(sk);
6721}
6722
6723/*
6724 *	This function implements the receiving procedure of RFC 793 for
6725 *	all states except ESTABLISHED and TIME_WAIT.
6726 *	It's called from both tcp_v4_rcv and tcp_v6_rcv and should be
6727 *	address independent.
6728 */
6729
6730enum skb_drop_reason
6731tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
6732{
6733	struct tcp_sock *tp = tcp_sk(sk);
6734	struct inet_connection_sock *icsk = inet_csk(sk);
6735	const struct tcphdr *th = tcp_hdr(skb);
6736	struct request_sock *req;
6737	int queued = 0;
6738	SKB_DR(reason);
 
 
6739
6740	switch (sk->sk_state) {
6741	case TCP_CLOSE:
6742		SKB_DR_SET(reason, TCP_CLOSE);
6743		goto discard;
6744
6745	case TCP_LISTEN:
6746		if (th->ack)
6747			return SKB_DROP_REASON_TCP_FLAGS;
6748
6749		if (th->rst) {
6750			SKB_DR_SET(reason, TCP_RESET);
6751			goto discard;
6752		}
6753		if (th->syn) {
6754			if (th->fin) {
6755				SKB_DR_SET(reason, TCP_FLAGS);
6756				goto discard;
6757			}
6758			/* It is possible that we process SYN packets from backlog,
6759			 * so we need to make sure to disable BH and hold the RCU read lock right there.
6760			 */
6761			rcu_read_lock();
6762			local_bh_disable();
6763			icsk->icsk_af_ops->conn_request(sk, skb);
6764			local_bh_enable();
6765			rcu_read_unlock();
6766
6767			consume_skb(skb);
6768			return 0;
6769		}
6770		SKB_DR_SET(reason, TCP_FLAGS);
6771		goto discard;
6772
6773	case TCP_SYN_SENT:
6774		tp->rx_opt.saw_tstamp = 0;
6775		tcp_mstamp_refresh(tp);
6776		queued = tcp_rcv_synsent_state_process(sk, skb, th);
6777		if (queued >= 0)
6778			return queued;
6779
6780		/* Do step6 onward by hand. */
6781		tcp_urg(sk, skb, th);
6782		__kfree_skb(skb);
6783		tcp_data_snd_check(sk);
6784		return 0;
6785	}
6786
6787	tcp_mstamp_refresh(tp);
6788	tp->rx_opt.saw_tstamp = 0;
6789	req = rcu_dereference_protected(tp->fastopen_rsk,
6790					lockdep_sock_is_held(sk));
6791	if (req) {
6792		bool req_stolen;
6793
6794		WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
6795		    sk->sk_state != TCP_FIN_WAIT1);
6796
6797		if (!tcp_check_req(sk, skb, req, true, &req_stolen)) {
6798			SKB_DR_SET(reason, TCP_FASTOPEN);
6799			goto discard;
6800		}
6801	}
6802
6803	if (!th->ack && !th->rst && !th->syn) {
6804		SKB_DR_SET(reason, TCP_FLAGS);
6805		goto discard;
6806	}
6807	if (!tcp_validate_incoming(sk, skb, th, 0))
6808		return 0;
6809
6810	/* step 5: check the ACK field */
6811	reason = tcp_ack(sk, skb, FLAG_SLOWPATH |
6812				  FLAG_UPDATE_TS_RECENT |
6813				  FLAG_NO_CHALLENGE_ACK);
6814
6815	if ((int)reason <= 0) {
6816		if (sk->sk_state == TCP_SYN_RECV) {
6817			/* send one RST */
6818			if (!reason)
6819				return SKB_DROP_REASON_TCP_OLD_ACK;
6820			return -reason;
6821		}
6822		/* accept old ack during closing */
6823		if ((int)reason < 0) {
6824			tcp_send_challenge_ack(sk);
6825			reason = -reason;
6826			goto discard;
6827		}
6828	}
6829	SKB_DR_SET(reason, NOT_SPECIFIED);
6830	switch (sk->sk_state) {
6831	case TCP_SYN_RECV:
6832		tp->delivered++; /* SYN-ACK delivery isn't tracked in tcp_ack */
6833		if (!tp->srtt_us)
6834			tcp_synack_rtt_meas(sk, req);
6835
6836		if (req) {
6837			tcp_rcv_synrecv_state_fastopen(sk);
6838		} else {
6839			tcp_try_undo_spurious_syn(sk);
6840			tp->retrans_stamp = 0;
6841			tcp_init_transfer(sk, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB,
6842					  skb);
6843			WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
6844		}
6845		tcp_ao_established(sk);
6846		smp_mb();
6847		tcp_set_state(sk, TCP_ESTABLISHED);
6848		sk->sk_state_change(sk);
6849
6850		/* Note that this wakeup is only for the marginal crossed-SYN case.
6851		 * Passively opened sockets are not woken up, because
6852		 * sk->sk_sleep == NULL and sk->sk_socket == NULL.
6853		 */
6854		if (sk->sk_socket)
6855			sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
6856
6857		tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
6858		tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
6859		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
 
6860
6861		if (tp->rx_opt.tstamp_ok)
6862			tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
6863
6864		if (!inet_csk(sk)->icsk_ca_ops->cong_control)
6865			tcp_update_pacing_rate(sk);
6866
6867		/* Prevent spurious tcp_cwnd_restart() on first data packet */
6868		tp->lsndtime = tcp_jiffies32;
 
 
6869
6870		tcp_initialize_rcv_mss(sk);
6871		tcp_fast_path_on(tp);
6872		if (sk->sk_shutdown & SEND_SHUTDOWN)
6873			tcp_shutdown(sk, SEND_SHUTDOWN);
6874		break;
6875
6876	case TCP_FIN_WAIT1: {
6877		int tmo;
6878
6879		if (req)
6880			tcp_rcv_synrecv_state_fastopen(sk);
6881
6882		if (tp->snd_una != tp->write_seq)
6883			break;
6884
6885		tcp_set_state(sk, TCP_FIN_WAIT2);
6886		WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | SEND_SHUTDOWN);
6887
6888		sk_dst_confirm(sk);
6889
6890		if (!sock_flag(sk, SOCK_DEAD)) {
6891			/* Wake up lingering close() */
6892			sk->sk_state_change(sk);
6893			break;
6894		}
6895
6896		if (READ_ONCE(tp->linger2) < 0) {
6897			tcp_done(sk);
6898			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
6899			return SKB_DROP_REASON_TCP_ABORT_ON_DATA;
6900		}
6901		if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
6902		    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
6903			/* Receive out of order FIN after close() */
6904			if (tp->syn_fastopen && th->fin)
6905				tcp_fastopen_active_disable(sk);
6906			tcp_done(sk);
6907			NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
6908			return SKB_DROP_REASON_TCP_ABORT_ON_DATA;
6909		}
6910
6911		tmo = tcp_fin_time(sk);
6912		if (tmo > TCP_TIMEWAIT_LEN) {
6913			inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
6914		} else if (th->fin || sock_owned_by_user(sk)) {
6915			/* Bad case. We could lose such a FIN otherwise.
6916			 * It is not a big problem, but it looks confusing
6917			 * and is not so rare an event. We can still lose it now,
6918			 * if it spins in bh_lock_sock(), but it is really a
6919			 * marginal case.
6920			 */
6921			inet_csk_reset_keepalive_timer(sk, tmo);
6922		} else {
6923			tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
6924			goto consume;
6925		}
6926		break;
6927	}
6928
6929	case TCP_CLOSING:
6930		if (tp->snd_una == tp->write_seq) {
6931			tcp_time_wait(sk, TCP_TIME_WAIT, 0);
6932			goto consume;
6933		}
6934		break;
6935
6936	case TCP_LAST_ACK:
6937		if (tp->snd_una == tp->write_seq) {
6938			tcp_update_metrics(sk);
6939			tcp_done(sk);
6940			goto consume;
6941		}
6942		break;
6943	}
6944
6945	/* step 6: check the URG bit */
6946	tcp_urg(sk, skb, th);
6947
6948	/* step 7: process the segment text */
6949	switch (sk->sk_state) {
6950	case TCP_CLOSE_WAIT:
6951	case TCP_CLOSING:
6952	case TCP_LAST_ACK:
6953		if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
6954			/* If a subflow has been reset, the packet should not
6955			 * continue to be processed, drop the packet.
6956			 */
6957			if (sk_is_mptcp(sk) && !mptcp_incoming_options(sk, skb))
6958				goto discard;
6959			break;
6960		}
6961		fallthrough;
6962	case TCP_FIN_WAIT1:
6963	case TCP_FIN_WAIT2:
6964		/* RFC 793 says to queue data in these states,
6965		 * RFC 1122 says we MUST send a reset.
6966		 * BSD 4.4 also does reset.
6967		 */
6968		if (sk->sk_shutdown & RCV_SHUTDOWN) {
6969			if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
6970			    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
6971				NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
6972				tcp_reset(sk, skb);
6973				return SKB_DROP_REASON_TCP_ABORT_ON_DATA;
6974			}
6975		}
6976		fallthrough;
6977	case TCP_ESTABLISHED:
6978		tcp_data_queue(sk, skb);
6979		queued = 1;
6980		break;
6981	}
6982
6983	/* tcp_data could move socket to TIME-WAIT */
6984	if (sk->sk_state != TCP_CLOSE) {
6985		tcp_data_snd_check(sk);
6986		tcp_ack_snd_check(sk);
6987	}
6988
6989	if (!queued) {
6990discard:
6991		tcp_drop_reason(sk, skb, reason);
6992	}
6993	return 0;
6994
6995consume:
6996	__kfree_skb(skb);
6997	return 0;
6998}
6999EXPORT_SYMBOL(tcp_rcv_state_process);
7000
7001static inline void pr_drop_req(struct request_sock *req, __u16 port, int family)
7002{
7003	struct inet_request_sock *ireq = inet_rsk(req);
7004
7005	if (family == AF_INET)
7006		net_dbg_ratelimited("drop open request from %pI4/%u\n",
7007				    &ireq->ir_rmt_addr, port);
7008#if IS_ENABLED(CONFIG_IPV6)
7009	else if (family == AF_INET6)
7010		net_dbg_ratelimited("drop open request from %pI6/%u\n",
7011				    &ireq->ir_v6_rmt_addr, port);
7012#endif
7013}
7014
7015/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set
7016 *
7017 * If we receive a SYN packet with these bits set, it means a
7018 * network is playing bad games with TOS bits. In order to
7019 * avoid possible false congestion notifications, we disable
7020 * TCP ECN negotiation.
7021 *
7022 * Exception: tcp_ca wants ECN. This is required for DCTCP
7023 * congestion control: Linux DCTCP asserts ECT on all packets,
7024 * including SYN, which is the most optimal solution; however,
7025 * others, such as FreeBSD, do not.
7026 *
7027 * Exception: At least one of the reserved bits of the TCP header (th->res1) is
7028 * set, indicating the use of a future TCP extension (such as AccECN). See
7029 * RFC8311 §4.3 which updates RFC3168 to allow the development of such
7030 * extensions.
7031 */
7032static void tcp_ecn_create_request(struct request_sock *req,
7033				   const struct sk_buff *skb,
7034				   const struct sock *listen_sk,
7035				   const struct dst_entry *dst)
7036{
7037	const struct tcphdr *th = tcp_hdr(skb);
7038	const struct net *net = sock_net(listen_sk);
7039	bool th_ecn = th->ece && th->cwr;
7040	bool ect, ecn_ok;
7041	u32 ecn_ok_dst;
7042
7043	if (!th_ecn)
7044		return;
7045
7046	ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield);
7047	ecn_ok_dst = dst_feature(dst, DST_FEATURE_ECN_MASK);
7048	ecn_ok = READ_ONCE(net->ipv4.sysctl_tcp_ecn) || ecn_ok_dst;
7049
7050	if (((!ect || th->res1) && ecn_ok) || tcp_ca_needs_ecn(listen_sk) ||
7051	    (ecn_ok_dst & DST_FEATURE_ECN_CA) ||
7052	    tcp_bpf_ca_needs_ecn((struct sock *)req))
7053		inet_rsk(req)->ecn_ok = 1;
7054}
7055
7056static void tcp_openreq_init(struct request_sock *req,
7057			     const struct tcp_options_received *rx_opt,
7058			     struct sk_buff *skb, const struct sock *sk)
7059{
7060	struct inet_request_sock *ireq = inet_rsk(req);
7061
7062	req->rsk_rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
7063	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
7064	tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
7065	tcp_rsk(req)->snt_synack = 0;
7066	tcp_rsk(req)->last_oow_ack_time = 0;
7067	req->mss = rx_opt->mss_clamp;
7068	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
7069	ireq->tstamp_ok = rx_opt->tstamp_ok;
7070	ireq->sack_ok = rx_opt->sack_ok;
7071	ireq->snd_wscale = rx_opt->snd_wscale;
7072	ireq->wscale_ok = rx_opt->wscale_ok;
7073	ireq->acked = 0;
7074	ireq->ecn_ok = 0;
7075	ireq->ir_rmt_port = tcp_hdr(skb)->source;
7076	ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
7077	ireq->ir_mark = inet_request_mark(sk, skb);
7078#if IS_ENABLED(CONFIG_SMC)
7079	ireq->smc_ok = rx_opt->smc_ok && !(tcp_sk(sk)->smc_hs_congested &&
7080			tcp_sk(sk)->smc_hs_congested(sk));
7081#endif
7082}
7083
7084/*
7085 * Return true if a syncookie should be sent
7086 */
7087static bool tcp_syn_flood_action(struct sock *sk, const char *proto)
7088{
7089	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
7090	const char *msg = "Dropping request";
7091	struct net *net = sock_net(sk);
7092	bool want_cookie = false;
7093	u8 syncookies;
7094
7095	syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies);
7096
7097#ifdef CONFIG_SYN_COOKIES
7098	if (syncookies) {
7099		msg = "Sending cookies";
7100		want_cookie = true;
7101		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
7102	} else
7103#endif
7104		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
7105
7106	if (!READ_ONCE(queue->synflood_warned) && syncookies != 2 &&
7107	    xchg(&queue->synflood_warned, 1) == 0) {
7108		if (IS_ENABLED(CONFIG_IPV6) && sk->sk_family == AF_INET6) {
7109			net_info_ratelimited("%s: Possible SYN flooding on port [%pI6c]:%u. %s.\n",
7110					proto, inet6_rcv_saddr(sk),
7111					sk->sk_num, msg);
7112		} else {
7113			net_info_ratelimited("%s: Possible SYN flooding on port %pI4:%u. %s.\n",
7114					proto, &sk->sk_rcv_saddr,
7115					sk->sk_num, msg);
7116		}
7117	}
7118
7119	return want_cookie;
7120}
7121
7122static void tcp_reqsk_record_syn(const struct sock *sk,
7123				 struct request_sock *req,
7124				 const struct sk_buff *skb)
7125{
7126	if (tcp_sk(sk)->save_syn) {
7127		u32 len = skb_network_header_len(skb) + tcp_hdrlen(skb);
7128		struct saved_syn *saved_syn;
7129		u32 mac_hdrlen;
7130		void *base;
7131
7132		if (tcp_sk(sk)->save_syn == 2) {  /* Save full header. */
7133			base = skb_mac_header(skb);
7134			mac_hdrlen = skb_mac_header_len(skb);
7135			len += mac_hdrlen;
7136		} else {
7137			base = skb_network_header(skb);
7138			mac_hdrlen = 0;
7139		}
7140
7141		saved_syn = kmalloc(struct_size(saved_syn, data, len),
7142				    GFP_ATOMIC);
7143		if (saved_syn) {
7144			saved_syn->mac_hdrlen = mac_hdrlen;
7145			saved_syn->network_hdrlen = skb_network_header_len(skb);
7146			saved_syn->tcp_hdrlen = tcp_hdrlen(skb);
7147			memcpy(saved_syn->data, base, len);
7148			req->saved_syn = saved_syn;
7149		}
7150	}
7151}
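/* Illustrative user-space sketch (not kernel code, wrapped in #if 0,
 * assuming headers that expose TCP_SAVE_SYN/TCP_SAVED_SYN): the headers
 * recorded above are what those two socket options expose.  A server
 * enables saving on the listener and then reads the stored SYN headers,
 * once, on each accepted socket.
 */
#if 0
#include <sys/socket.h>
#include <netinet/tcp.h>

static int enable_save_syn(int listen_fd)
{
	int one = 1;

	/* Must be enabled on the listener before the SYN arrives. */
	return setsockopt(listen_fd, IPPROTO_TCP, TCP_SAVE_SYN,
			  &one, sizeof(one));
}

static int read_saved_syn(int conn_fd, void *hdrs, socklen_t *len)
{
	/* Returns the saved network + TCP headers of this connection's SYN. */
	return getsockopt(conn_fd, IPPROTO_TCP, TCP_SAVED_SYN, hdrs, len);
}
#endif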
7152
7153/* If a SYN cookie is required and supported, returns a clamped MSS value to be
7154 * used for SYN cookie generation.
7155 */
7156u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
7157			  const struct tcp_request_sock_ops *af_ops,
7158			  struct sock *sk, struct tcphdr *th)
7159{
7160	struct tcp_sock *tp = tcp_sk(sk);
7161	u16 mss;
7162
7163	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) != 2 &&
7164	    !inet_csk_reqsk_queue_is_full(sk))
7165		return 0;
7166
7167	if (!tcp_syn_flood_action(sk, rsk_ops->slab_name))
7168		return 0;
7169
7170	if (sk_acceptq_is_full(sk)) {
7171		NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
7172		return 0;
7173	}
7174
7175	mss = tcp_parse_mss_option(th, tp->rx_opt.user_mss);
7176	if (!mss)
7177		mss = af_ops->mss_clamp;
7178
7179	return mss;
7180}
7181EXPORT_SYMBOL_GPL(tcp_get_syncookie_mss);
7182
7183int tcp_conn_request(struct request_sock_ops *rsk_ops,
7184		     const struct tcp_request_sock_ops *af_ops,
7185		     struct sock *sk, struct sk_buff *skb)
7186{
7187	struct tcp_fastopen_cookie foc = { .len = -1 };
7188	struct tcp_options_received tmp_opt;
7189	struct tcp_sock *tp = tcp_sk(sk);
7190	struct net *net = sock_net(sk);
7191	struct sock *fastopen_sk = NULL;
7192	struct request_sock *req;
7193	bool want_cookie = false;
7194	struct dst_entry *dst;
7195	struct flowi fl;
7196	u8 syncookies;
7197	u32 isn;
7198
7199#ifdef CONFIG_TCP_AO
7200	const struct tcp_ao_hdr *aoh;
7201#endif
7202
7203	isn = __this_cpu_read(tcp_tw_isn);
7204	if (isn) {
7205		/* TW buckets are converted to open requests without
7206		 * limitation: they conserve resources and the peer is
7207		 * evidently a real one.
7208		 */
7209		__this_cpu_write(tcp_tw_isn, 0);
7210	} else {
7211		syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies);
7212
7213		if (syncookies == 2 || inet_csk_reqsk_queue_is_full(sk)) {
7214			want_cookie = tcp_syn_flood_action(sk,
7215							   rsk_ops->slab_name);
7216			if (!want_cookie)
7217				goto drop;
7218		}
7219	}
7220
7221	if (sk_acceptq_is_full(sk)) {
7222		NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
7223		goto drop;
7224	}
7225
7226	req = inet_reqsk_alloc(rsk_ops, sk, !want_cookie);
7227	if (!req)
7228		goto drop;
7229
7230	req->syncookie = want_cookie;
7231	tcp_rsk(req)->af_specific = af_ops;
7232	tcp_rsk(req)->ts_off = 0;
7233	tcp_rsk(req)->req_usec_ts = false;
7234#if IS_ENABLED(CONFIG_MPTCP)
7235	tcp_rsk(req)->is_mptcp = 0;
7236#endif
7237
7238	tcp_clear_options(&tmp_opt);
7239	tmp_opt.mss_clamp = af_ops->mss_clamp;
7240	tmp_opt.user_mss  = tp->rx_opt.user_mss;
7241	tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0,
7242			  want_cookie ? NULL : &foc);
7243
7244	if (want_cookie && !tmp_opt.saw_tstamp)
7245		tcp_clear_options(&tmp_opt);
7246
7247	if (IS_ENABLED(CONFIG_SMC) && want_cookie)
7248		tmp_opt.smc_ok = 0;
7249
7250	tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
7251	tcp_openreq_init(req, &tmp_opt, skb, sk);
7252	inet_rsk(req)->no_srccheck = inet_test_bit(TRANSPARENT, sk);
7253
7254	/* Note: tcp_v6_init_req() might override ir_iif for link locals */
7255	inet_rsk(req)->ir_iif = inet_request_bound_dev_if(sk, skb);
7256
7257	dst = af_ops->route_req(sk, skb, &fl, req, isn);
7258	if (!dst)
7259		goto drop_and_free;
7260
7261	if (tmp_opt.tstamp_ok) {
7262		tcp_rsk(req)->req_usec_ts = dst_tcp_usec_ts(dst);
7263		tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb);
7264	}
7265	if (!want_cookie && !isn) {
7266		int max_syn_backlog = READ_ONCE(net->ipv4.sysctl_max_syn_backlog);
7267
7268		/* Kill the following clause, if you dislike this way. */
7269		if (!syncookies &&
7270		    (max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
7271		     (max_syn_backlog >> 2)) &&
7272		    !tcp_peer_is_proven(req, dst)) {
7273			/* Without syncookies, the last quarter of
7274			 * the backlog is reserved for destinations
7275			 * proven to be alive.
7276			 * It means that we continue to communicate
7277			 * with destinations already remembered
7278			 * at the moment of the synflood.
7279			 */
7280			pr_drop_req(req, ntohs(tcp_hdr(skb)->source),
7281				    rsk_ops->family);
7282			goto drop_and_release;
7283		}
7284
7285		isn = af_ops->init_seq(skb);
7286	}
7287
7288	tcp_ecn_create_request(req, skb, sk, dst);
7289
7290	if (want_cookie) {
7291		isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
7292		if (!tmp_opt.tstamp_ok)
7293			inet_rsk(req)->ecn_ok = 0;
7294	}
7295
7296#ifdef CONFIG_TCP_AO
7297	if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
7298		goto drop_and_release; /* Invalid TCP options */
7299	if (aoh) {
7300		tcp_rsk(req)->used_tcp_ao = true;
7301		tcp_rsk(req)->ao_rcv_next = aoh->keyid;
7302		tcp_rsk(req)->ao_keyid = aoh->rnext_keyid;
7303
7304	} else {
7305		tcp_rsk(req)->used_tcp_ao = false;
7306	}
7307#endif
7308	tcp_rsk(req)->snt_isn = isn;
7309	tcp_rsk(req)->txhash = net_tx_rndhash();
7310	tcp_rsk(req)->syn_tos = TCP_SKB_CB(skb)->ip_dsfield;
7311	tcp_openreq_init_rwin(req, sk, dst);
7312	sk_rx_queue_set(req_to_sk(req), skb);
7313	if (!want_cookie) {
7314		tcp_reqsk_record_syn(sk, req, skb);
7315		fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst);
7316	}
7317	if (fastopen_sk) {
7318		af_ops->send_synack(fastopen_sk, dst, &fl, req,
7319				    &foc, TCP_SYNACK_FASTOPEN, skb);
7320		/* Add the child socket directly into the accept queue */
7321		if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
7322			reqsk_fastopen_remove(fastopen_sk, req, false);
7323			bh_unlock_sock(fastopen_sk);
7324			sock_put(fastopen_sk);
7325			goto drop_and_free;
7326		}
7327		sk->sk_data_ready(sk);
7328		bh_unlock_sock(fastopen_sk);
7329		sock_put(fastopen_sk);
7330	} else {
7331		tcp_rsk(req)->tfo_listener = false;
7332		if (!want_cookie) {
7333			req->timeout = tcp_timeout_init((struct sock *)req);
7334			if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req,
7335								    req->timeout))) {
7336				reqsk_free(req);
7337				dst_release(dst);
7338				return 0;
7339			}
7340
7341		}
7342		af_ops->send_synack(sk, dst, &fl, req, &foc,
7343				    !want_cookie ? TCP_SYNACK_NORMAL :
7344						   TCP_SYNACK_COOKIE,
7345				    skb);
7346		if (want_cookie) {
7347			reqsk_free(req);
7348			return 0;
7349		}
7350	}
7351	reqsk_put(req);
7352	return 0;
7353
7354drop_and_release:
7355	dst_release(dst);
7356drop_and_free:
7357	__reqsk_free(req);
7358drop:
7359	tcp_listendrop(sk);
7360	return 0;
7361}
7362EXPORT_SYMBOL(tcp_conn_request);