// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <net/tcp.h>
#include <net/mptcp.h>

#include <linux/compiler.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/static_key.h>

#include <trace/events/tcp.h>

/* Refresh clocks of a TCP socket,
 * ensuring monotonically increasing values.
 */
void tcp_mstamp_refresh(struct tcp_sock *tp)
{
	u64 val = tcp_clock_ns();

	tp->tcp_clock_cache = val;
	tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC);
}

static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int prior_packets = tp->packets_out;

	WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);

	__skb_unlink(skb, &sk->sk_write_queue);
	tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);

	if (tp->highest_sack == NULL)
		tp->highest_sack = skb;

	tp->packets_out += tcp_skb_pcount(skb);
	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
		tcp_rearm_rto(sk);

	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
		      tcp_skb_pcount(skb));
}

/* Return SND.NXT if the window was not shrunk, or if the amount it shrunk
 * by is less than one window scaling factor (lost to precision).
 * If the window has been shrunk, what should we make? It is not clear at all.
 * Using SND.UNA we will fail to open the window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's do this for now:
 */
static inline __u32 tcp_acceptable_seq(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!before(tcp_wnd_end(tp), tp->snd_nxt) ||
	    (tp->rx_opt.wscale_ok &&
	     ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))
		return tp->snd_nxt;
	else
		return tcp_wnd_end(tp);
}

/* Calculate the mss to advertise in the SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from the maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not implement 3; we advertise an MSS calculated from the first
 *    hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst) {
		unsigned int metric = dst_metric_advmss(dst);

		if (metric < mss) {
			mss = metric;
			tp->advmss = mss;
		}
	}

	return (__u16)mss;
}
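
/* Worked example (editorial illustration; the numbers below are assumed,
 * not taken from this file): on a plain IPv4 path over standard 1500-byte
 * Ethernet with no IP options, the route metric would typically yield
 *
 *	advmss = 1500 - 20 (IPv4 header) - 20 (TCP header) = 1460
 *
 * so tcp_advertise_mss() returns 1460 for the SYN. Per point 4 above, TCP
 * option bytes (e.g. timestamps) are not subtracted from this value.
 */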

/* RFC2861. Reset CWND after an idle period longer than RTO to the
 * "restart window". This is the first part of the cwnd validation mechanism.
 */
void tcp_cwnd_restart(struct sock *sk, s32 delta)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_jiffies32;
	tp->snd_cwnd_used = 0;
}
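
/* Worked example (editorial illustration, with assumed values): suppose
 * cwnd = 32, tcp_init_cwnd() gives restart_cwnd = 10, and the connection
 * was idle for delta = 3 * RTO.  The loop halves cwnd once per elapsed RTO
 * while delta remains positive: 32 -> 16 -> 8 (the third subtraction makes
 * delta zero, stopping the loop), and snd_cwnd = max(8, 10) = 10.
 */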

/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
				struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_jiffies32;

	if (tcp_packets_in_flight(tp) == 0)
		tcp_ca_event(sk, CA_EVENT_TX_START);

	/* If this is the first data packet sent in response to the
	 * previously received data, and it is sent within the ACK
	 * timeout (ato) of the last received packet, increase the
	 * pingpong count.
	 */
	if (before(tp->lsndtime, icsk->icsk_ack.lrcvtime) &&
	    (u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		inet_csk_inc_pingpong_cnt(sk);

	tp->lsndtime = now;
}

/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
				      u32 rcv_nxt)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (unlikely(tp->compressed_ack)) {
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
			      tp->compressed_ack);
		tp->compressed_ack = 0;
		if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
			__sock_put(sk);
	}

	if (unlikely(rcv_nxt != tp->rcv_nxt))
		return;  /* Special ACK sent by DCTCP to reflect ECN */
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation the initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp is set, set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (U16_MAX << TCP_MAX_WSCALE);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = rounddown(space, mss);

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. If the admin tells us
	 * it is likely we could be speaking with such a buggy stack
	 * we will truncate our initial window offering to 32K-1
	 * unless the remote has sent us a window scaling option,
	 * which we interpret as a sign the remote TCP is not
	 * misinterpreting the window field as a signed quantity.
	 */
	if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	else
		(*rcv_wnd) = min_t(u32, space, U16_MAX);

	if (init_rcv_wnd)
		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);

	*rcv_wscale = 0;
	if (wscale_ok) {
		/* Set window scaling on the max possible window */
		space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
		space = max_t(u32, space, sysctl_rmem_max);
		space = min_t(u32, space, *window_clamp);
		*rcv_wscale = clamp_t(int, ilog2(space) - 15,
				      0, TCP_MAX_WSCALE);
	}
	/* Set the clamp no higher than the max representable value */
	(*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
}
EXPORT_SYMBOL(tcp_select_initial_window);
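
/* Worked example (editorial illustration; the sysctl values are assumed
 * defaults, not read from this file): with wscale_ok and
 * sysctl_tcp_rmem[2] = 6291456 (6 MB), the scaling step computes
 *
 *	rcv_wscale = clamp(ilog2(6291456) - 15, 0, TCP_MAX_WSCALE)
 *		   = clamp(22 - 15, 0, 14) = 7
 *
 * i.e. the peer must left-shift our 16-bit window field by 7 bits
 * (multiply by 128) to recover the offered window in bytes.
 */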

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return the result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 old_win = tp->rcv_wnd;
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		if (new_win == 0)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPWANTZEROWINDOWADV);
		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale &&
	    sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise a zero window, disable the fast path. */
	if (new_win == 0) {
		tp->pred_flags = 0;
		if (old_win)
			NET_INC_STATS(sock_net(sk),
				      LINUX_MIB_TCPTOZEROWINDOWADV);
	} else if (old_win == 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
	}

	return new_win;
}
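
/* Worked example (editorial illustration): if __tcp_select_window() chose
 * new_win = 262144 bytes and rcv_wscale = 7, the value placed on the wire
 * is 262144 >> 7 = 2048.  A receiver applying RFC1323 scaling reconstructs
 * 2048 << 7 = 262144 exactly, since the window was a multiple of 1 << 7.
 */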

/* Packet ECN state for a SYN-ACK */
static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
	if (!(tp->ecn_flags & TCP_ECN_OK))
		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
	else if (tcp_ca_needs_ecn(sk) ||
		 tcp_bpf_ca_needs_ecn(sk))
		INET_ECN_xmit(sk);
}

/* Packet ECN state for a SYN.  */
static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
	bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
		tcp_ca_needs_ecn(sk) || bpf_needs_ecn;

	if (!use_ecn) {
		const struct dst_entry *dst = __sk_dst_get(sk);

		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
			use_ecn = true;
	}

	tp->ecn_flags = 0;

	if (use_ecn) {
		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
		tp->ecn_flags = TCP_ECN_OK;
		if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
			INET_ECN_xmit(sk);
	}
}

static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
{
	if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
		/* tp->ecn_flags are cleared at a later point in time when
		 * the SYN ACK is ultimately being received.
		 */
		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
}

static void
tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
{
	if (inet_rsk(req)->ecn_ok)
		th->ece = 1;
}

/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 * be sent.
 */
static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
			 struct tcphdr *th, int tcp_header_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tp->ecn_flags & TCP_ECN_OK) {
		/* Not-retransmitted data segment: set ECT and inject CWR. */
		if (skb->len != tcp_header_len &&
		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
			INET_ECN_xmit(sk);
			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
				th->cwr = 1;
				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
			}
		} else if (!tcp_ca_needs_ecn(sk)) {
			/* ACK or retransmitted segment: clear ECT|CE */
			INET_ECN_dontxmit(sk);
		}
		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
			th->ece = 1;
	}
}

/* Constructs common control bits of a non-data skb. If SYN/FIN is present,
 * auto-increment the end seqno.
 */
static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
{
	skb->ip_summed = CHECKSUM_PARTIAL;

	TCP_SKB_CB(skb)->tcp_flags = flags;
	TCP_SKB_CB(skb)->sacked = 0;

	tcp_skb_pcount_set(skb, 1);

	TCP_SKB_CB(skb)->seq = seq;
	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
		seq++;
	TCP_SKB_CB(skb)->end_seq = seq;
}
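
/* Worked example (editorial illustration): for a SYN built with seq = 100,
 * tcp_init_nondata_skb() sets seq..end_seq to 100..101, since SYN (like FIN)
 * consumes one sequence number.  A pure ACK gets end_seq == seq and thus
 * occupies no sequence space.
 */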

static inline bool tcp_urg_mode(const struct tcp_sock *tp)
{
	return tp->snd_una != tp->snd_up;
}

#define OPTION_SACK_ADVERTISE	(1 << 0)
#define OPTION_TS		(1 << 1)
#define OPTION_MD5		(1 << 2)
#define OPTION_WSCALE		(1 << 3)
#define OPTION_FAST_OPEN_COOKIE	(1 << 8)
#define OPTION_SMC		(1 << 9)
#define OPTION_MPTCP		(1 << 10)

static void smc_options_write(__be32 *ptr, u16 *options)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (unlikely(OPTION_SMC & *options)) {
			*ptr++ = htonl((TCPOPT_NOP  << 24) |
				       (TCPOPT_NOP  << 16) |
				       (TCPOPT_EXP <<  8) |
				       (TCPOLEN_EXP_SMC_BASE));
			*ptr++ = htonl(TCPOPT_SMC_MAGIC);
		}
	}
#endif
}

struct tcp_out_options {
	u16 options;		/* bit field of OPTION_* */
	u16 mss;		/* 0 to disable */
	u8 ws;			/* window scale, 0 to disable */
	u8 num_sack_blocks;	/* number of SACK blocks to include */
	u8 hash_size;		/* bytes in hash_location */
	u8 bpf_opt_len;		/* length of BPF hdr option */
	__u8 *hash_location;	/* temporary pointer, overloaded */
	__u32 tsval, tsecr;	/* need to include OPTION_TS */
	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
	struct mptcp_out_options mptcp;
};

static void mptcp_options_write(__be32 *ptr, const struct tcp_sock *tp,
				struct tcp_out_options *opts)
{
#if IS_ENABLED(CONFIG_MPTCP)
	if (unlikely(OPTION_MPTCP & opts->options))
		mptcp_write_options(ptr, tp, &opts->mptcp);
#endif
}

#ifdef CONFIG_CGROUP_BPF
static int bpf_skops_write_hdr_opt_arg0(struct sk_buff *skb,
					enum tcp_synack_type synack_type)
{
	if (unlikely(!skb))
		return BPF_WRITE_HDR_TCP_CURRENT_MSS;

	if (unlikely(synack_type == TCP_SYNACK_COOKIE))
		return BPF_WRITE_HDR_TCP_SYNACK_COOKIE;

	return 0;
}

/* req, syn_skb and synack_type are used when writing synack */
static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct sk_buff *syn_skb,
				  enum tcp_synack_type synack_type,
				  struct tcp_out_options *opts,
				  unsigned int *remaining)
{
	struct bpf_sock_ops_kern sock_ops;
	int err;

	if (likely(!BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
					   BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG)) ||
	    !*remaining)
		return;

	/* *remaining has already been aligned to 4 bytes, so *remaining >= 4 */

	/* init sock_ops */
	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));

	sock_ops.op = BPF_SOCK_OPS_HDR_OPT_LEN_CB;

	if (req) {
		/* The listen "sk" cannot be passed here because
		 * it is not locked.  It would also not make much
		 * sense to do bpf_setsockopt(listen_sk) based
		 * on an individual connection request.
		 *
		 * Thus, "req" is passed here and the cgroup-bpf-progs
		 * of the listen "sk" will be run.
		 *
		 * "req" is also used here for fastopen even though the "sk"
		 * here is a fullsock "child" sk.  It is to keep the behavior
		 * consistent between fastopen and non-fastopen on
		 * the bpf programming side.
		 */
		sock_ops.sk = (struct sock *)req;
		sock_ops.syn_skb = syn_skb;
	} else {
		sock_owned_by_me(sk);

		sock_ops.is_fullsock = 1;
		sock_ops.sk = sk;
	}

	sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
	sock_ops.remaining_opt_len = *remaining;
	/* tcp_current_mss() does not pass a skb */
	if (skb)
		bpf_skops_init_skb(&sock_ops, skb, 0);

	err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);

	if (err || sock_ops.remaining_opt_len == *remaining)
		return;

	opts->bpf_opt_len = *remaining - sock_ops.remaining_opt_len;
	/* round up to 4 bytes */
	opts->bpf_opt_len = (opts->bpf_opt_len + 3) & ~3;

	*remaining -= opts->bpf_opt_len;
}

static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req,
				    struct sk_buff *syn_skb,
				    enum tcp_synack_type synack_type,
				    struct tcp_out_options *opts)
{
	u8 first_opt_off, nr_written, max_opt_len = opts->bpf_opt_len;
	struct bpf_sock_ops_kern sock_ops;
	int err;

	if (likely(!max_opt_len))
		return;

	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));

	sock_ops.op = BPF_SOCK_OPS_WRITE_HDR_OPT_CB;

	if (req) {
		sock_ops.sk = (struct sock *)req;
		sock_ops.syn_skb = syn_skb;
	} else {
		sock_owned_by_me(sk);

		sock_ops.is_fullsock = 1;
		sock_ops.sk = sk;
	}

	sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
	sock_ops.remaining_opt_len = max_opt_len;
	first_opt_off = tcp_hdrlen(skb) - max_opt_len;
	bpf_skops_init_skb(&sock_ops, skb, first_opt_off);

	err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);

	if (err)
		nr_written = 0;
	else
		nr_written = max_opt_len - sock_ops.remaining_opt_len;

	if (nr_written < max_opt_len)
		memset(skb->data + first_opt_off + nr_written, TCPOPT_NOP,
		       max_opt_len - nr_written);
}
#else
static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct sk_buff *syn_skb,
				  enum tcp_synack_type synack_type,
				  struct tcp_out_options *opts,
				  unsigned int *remaining)
{
}

static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
				    struct request_sock *req,
				    struct sk_buff *syn_skb,
				    enum tcp_synack_type synack_type,
				    struct tcp_out_options *opts)
{
}
#endif

/* Write previously computed TCP options to the packet.
 *
 * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options, and we learned this the hard way, so be careful here.
 * Luckily we can at least blame others for their non-compliance, but from
 * an interoperability perspective it seems that we're somewhat stuck with
 * the ordering which we have been using if we want to keep working with
 * those broken things (not that it currently hurts anybody as there isn't
 * any particular reason why the ordering would need to be changed).
 *
 * At least SACK_PERM as the first option is known to lead to a disaster
 * (but it may well be that other scenarios fail similarly).
 */
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      struct tcp_out_options *opts)
{
	u16 options = opts->options;	/* mungable copy */

	if (unlikely(OPTION_MD5 & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* overload cookie hash location */
		opts->hash_location = (__u8 *)ptr;
		ptr += 4;
	}

	if (unlikely(opts->mss)) {
		*ptr++ = htonl((TCPOPT_MSS << 24) |
			       (TCPOLEN_MSS << 16) |
			       opts->mss);
	}

	if (likely(OPTION_TS & options)) {
		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
				       (TCPOLEN_SACK_PERM << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
			options &= ~OPTION_SACK_ADVERTISE;
		} else {
			*ptr++ = htonl((TCPOPT_NOP << 24) |
				       (TCPOPT_NOP << 16) |
				       (TCPOPT_TIMESTAMP << 8) |
				       TCPOLEN_TIMESTAMP);
		}
		*ptr++ = htonl(opts->tsval);
		*ptr++ = htonl(opts->tsecr);
	}

	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_NOP << 16) |
			       (TCPOPT_SACK_PERM << 8) |
			       TCPOLEN_SACK_PERM);
	}

	if (unlikely(OPTION_WSCALE & options)) {
		*ptr++ = htonl((TCPOPT_NOP << 24) |
			       (TCPOPT_WINDOW << 16) |
			       (TCPOLEN_WINDOW << 8) |
			       opts->ws);
	}

	if (unlikely(opts->num_sack_blocks)) {
		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
			tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = htonl((TCPOPT_NOP  << 24) |
			       (TCPOPT_NOP  << 16) |
			       (TCPOPT_SACK <<  8) |
			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
						     TCPOLEN_SACK_PERBLOCK)));

		for (this_sack = 0; this_sack < opts->num_sack_blocks;
		     ++this_sack) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}

		tp->rx_opt.dsack = 0;
	}

	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
		u8 *p = (u8 *)ptr;
		u32 len; /* Fast Open option length */

		if (foc->exp) {
			len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
			*ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
				     TCPOPT_FASTOPEN_MAGIC);
			p += TCPOLEN_EXP_FASTOPEN_BASE;
		} else {
			len = TCPOLEN_FASTOPEN_BASE + foc->len;
			*p++ = TCPOPT_FASTOPEN;
			*p++ = len;
		}

		memcpy(p, foc->val, foc->len);
		if ((len & 3) == 2) {
			p[foc->len] = TCPOPT_NOP;
			p[foc->len + 1] = TCPOPT_NOP;
		}
		ptr += (len + 3) >> 2;
	}

	smc_options_write(ptr, &options);

	mptcp_options_write(ptr, tp, opts);
}
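
/* Worked example (editorial illustration): on an established connection with
 * timestamps and neither MD5 nor SACK blocks pending, the code above emits a
 * single aligned 12-byte option block:
 *
 *	01 01 08 0a <4-byte tsval> <4-byte tsecr>
 *
 * i.e. NOP, NOP, TIMESTAMP (kind 8, length 10), padded to a 4-byte boundary,
 * giving a 20 + 12 = 32 byte TCP header.
 */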

static void smc_set_option(const struct tcp_sock *tp,
			   struct tcp_out_options *opts,
			   unsigned int *remaining)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (tp->syn_smc) {
			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
				opts->options |= OPTION_SMC;
				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
			}
		}
	}
#endif
}

static void smc_set_option_cond(const struct tcp_sock *tp,
				const struct inet_request_sock *ireq,
				struct tcp_out_options *opts,
				unsigned int *remaining)
{
#if IS_ENABLED(CONFIG_SMC)
	if (static_branch_unlikely(&tcp_have_smc)) {
		if (tp->syn_smc && ireq->smc_ok) {
			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
				opts->options |= OPTION_SMC;
				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
			}
		}
	}
#endif
}

static void mptcp_set_option_cond(const struct request_sock *req,
				  struct tcp_out_options *opts,
				  unsigned int *remaining)
{
	if (rsk_is_mptcp(req)) {
		unsigned int size;

		if (mptcp_synack_options(req, &size, &opts->mptcp)) {
			if (*remaining >= size) {
				opts->options |= OPTION_MPTCP;
				*remaining -= size;
			}
		}
	}
}

/* Compute TCP options for SYN packets. This is not the final
 * network wire format yet.
 */
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
				struct tcp_out_options *opts,
				struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	struct tcp_fastopen_request *fastopen = tp->fastopen_req;

	*md5 = NULL;
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed) &&
	    rcu_access_pointer(tp->md5sig_info)) {
		*md5 = tp->af_specific->md5_lookup(sk, sk);
		if (*md5) {
			opts->options |= OPTION_MD5;
			remaining -= TCPOLEN_MD5SIG_ALIGNED;
		}
	}
#endif

	/* We always get an MSS option.  The option bytes which will be seen in
	 * normal data packets (should timestamps be used) must be in the MSS
	 * advertised.  But we subtract them from tp->mss_cache so that
	 * calculations in tcp_sendmsg are simpler etc.  So account for this
	 * fact here if necessary.  If we don't do this correctly, as a
	 * receiver we won't recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK rules correctly.
	 * SACKs don't matter, we never delay an ACK when we have any of those
	 * going out.  */
	opts->mss = tcp_advertise_mss(sk);
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
		opts->tsecr = tp->rx_opt.ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
		opts->ws = tp->rx_opt.rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!(OPTION_TS & opts->options)))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}

	if (fastopen && fastopen->cookie.len >= 0) {
		u32 need = fastopen->cookie.len;

		need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
					       TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = &fastopen->cookie;
			remaining -= need;
			tp->syn_fastopen = 1;
			tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
		}
	}

	smc_set_option(tp, opts, &remaining);

	if (sk_is_mptcp(sk)) {
		unsigned int size;

		if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) {
			opts->options |= OPTION_MPTCP;
			remaining -= size;
		}
	}

	bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);

	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Set up TCP options for SYN-ACKs. */
static unsigned int tcp_synack_options(const struct sock *sk,
				       struct request_sock *req,
				       unsigned int mss, struct sk_buff *skb,
				       struct tcp_out_options *opts,
				       const struct tcp_md5sig_key *md5,
				       struct tcp_fastopen_cookie *foc,
				       enum tcp_synack_type synack_type,
				       struct sk_buff *syn_skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	unsigned int remaining = MAX_TCP_OPTION_SPACE;

#ifdef CONFIG_TCP_MD5SIG
	if (md5) {
		opts->options |= OPTION_MD5;
		remaining -= TCPOLEN_MD5SIG_ALIGNED;

		/* We can't fit any SACK blocks in a packet with MD5 + TS
		 * options. There was discussion about disabling SACK
		 * rather than TS in order to fit in better with old,
		 * buggy kernels, but that was deemed to be unnecessary.
		 */
		if (synack_type != TCP_SYNACK_COOKIE)
			ireq->tstamp_ok &= !ireq->sack_ok;
	}
#endif

	/* We always send an MSS option. */
	opts->mss = mss;
	remaining -= TCPOLEN_MSS_ALIGNED;

	if (likely(ireq->wscale_ok)) {
		opts->ws = ireq->rcv_wscale;
		opts->options |= OPTION_WSCALE;
		remaining -= TCPOLEN_WSCALE_ALIGNED;
	}
	if (likely(ireq->tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off;
		opts->tsecr = req->ts_recent;
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	}
	if (likely(ireq->sack_ok)) {
		opts->options |= OPTION_SACK_ADVERTISE;
		if (unlikely(!ireq->tstamp_ok))
			remaining -= TCPOLEN_SACKPERM_ALIGNED;
	}
	if (foc != NULL && foc->len >= 0) {
		u32 need = foc->len;

		need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
				   TCPOLEN_FASTOPEN_BASE;
		need = (need + 3) & ~3U;  /* Align to 32 bits */
		if (remaining >= need) {
			opts->options |= OPTION_FAST_OPEN_COOKIE;
			opts->fastopen_cookie = foc;
			remaining -= need;
		}
	}

	mptcp_set_option_cond(req, opts, &remaining);

	smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);

	bpf_skops_hdr_opt_len((struct sock *)sk, skb, req, syn_skb,
			      synack_type, opts, &remaining);

	return MAX_TCP_OPTION_SPACE - remaining;
}

/* Compute TCP options for ESTABLISHED sockets. This is not the
 * final wire format yet.
 */
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
					struct tcp_out_options *opts,
					struct tcp_md5sig_key **md5)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int size = 0;
	unsigned int eff_sacks;

	opts->options = 0;

	*md5 = NULL;
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed) &&
	    rcu_access_pointer(tp->md5sig_info)) {
		*md5 = tp->af_specific->md5_lookup(sk, sk);
		if (*md5) {
			opts->options |= OPTION_MD5;
			size += TCPOLEN_MD5SIG_ALIGNED;
		}
	}
#endif

	if (likely(tp->rx_opt.tstamp_ok)) {
		opts->options |= OPTION_TS;
		opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
		opts->tsecr = tp->rx_opt.ts_recent;
		size += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* MPTCP options have precedence over SACK for the limited TCP
	 * option space because an MPTCP connection would be forced to
	 * fall back to regular TCP if a required multipath option is
	 * missing. SACK still gets a chance to use whatever space is
	 * left.
	 */
	if (sk_is_mptcp(sk)) {
		unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
		unsigned int opt_size = 0;

		if (mptcp_established_options(sk, skb, &opt_size, remaining,
					      &opts->mptcp)) {
			opts->options |= OPTION_MPTCP;
			size += opt_size;
		}
	}

	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
	if (unlikely(eff_sacks)) {
		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
		if (unlikely(remaining < TCPOLEN_SACK_BASE_ALIGNED +
					 TCPOLEN_SACK_PERBLOCK))
			return size;

		opts->num_sack_blocks =
			min_t(unsigned int, eff_sacks,
			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
			      TCPOLEN_SACK_PERBLOCK);

		size += TCPOLEN_SACK_BASE_ALIGNED +
			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
	}

	if (unlikely(BPF_SOCK_OPS_TEST_FLAG(tp,
					    BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG))) {
		unsigned int remaining = MAX_TCP_OPTION_SPACE - size;

		bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);

		size = MAX_TCP_OPTION_SPACE - remaining;
	}

	return size;
}


/* TCP SMALL QUEUES (TSQ)
 *
 * TSQ's goal is to keep a small number of skbs per tcp flow in the tx
 * queues (qdisc + dev) to reduce RTT and bufferbloat.
 * We do this using a special skb destructor (tcp_wfree).
 *
 * It's important that tcp_wfree() can be replaced by sock_wfree() in the
 * event the skb needs to be reallocated in a driver.
 * The invariant is that skb->truesize is subtracted from sk->sk_wmem_alloc.
 *
 * Since transmit from the skb destructor is forbidden, we use a tasklet
 * to process all sockets that eventually need to send more skbs.
 * We use one tasklet per cpu, with its own queue of sockets.
 */
struct tsq_tasklet {
	struct tasklet_struct	tasklet;
	struct list_head	head; /* queue of tcp sockets */
};
static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);

static void tcp_tsq_write(struct sock *sk)
{
	if ((1 << sk->sk_state) &
	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (tp->lost_out > tp->retrans_out &&
		    tp->snd_cwnd > tcp_packets_in_flight(tp)) {
			tcp_mstamp_refresh(tp);
			tcp_xmit_retransmit_queue(sk);
		}

		tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
			       0, GFP_ATOMIC);
	}
}

static void tcp_tsq_handler(struct sock *sk)
{
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		tcp_tsq_write(sk);
	else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
		sock_hold(sk);
	bh_unlock_sock(sk);
}
/*
 * One tasklet per cpu tries to send more skbs.
 * We run in tasklet context but need to disable irqs when
 * transferring tsq->head, because tcp_wfree() might
 * interrupt us (non NAPI drivers)
 */
static void tcp_tasklet_func(struct tasklet_struct *t)
{
	struct tsq_tasklet *tsq = from_tasklet(tsq,  t, tasklet);
	LIST_HEAD(list);
	unsigned long flags;
	struct list_head *q, *n;
	struct tcp_sock *tp;
	struct sock *sk;

	local_irq_save(flags);
	list_splice_init(&tsq->head, &list);
	local_irq_restore(flags);

	list_for_each_safe(q, n, &list) {
		tp = list_entry(q, struct tcp_sock, tsq_node);
		list_del(&tp->tsq_node);

		sk = (struct sock *)tp;
		smp_mb__before_atomic();
		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);

		tcp_tsq_handler(sk);
		sk_free(sk);
	}
}

#define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
			  TCPF_WRITE_TIMER_DEFERRED |	\
			  TCPF_DELACK_TIMER_DEFERRED |	\
			  TCPF_MTU_REDUCED_DEFERRED)
/**
 * tcp_release_cb - tcp release_sock() callback
 * @sk: socket
 *
 * called from release_sock() to perform protocol dependent
 * actions before socket release.
 */
void tcp_release_cb(struct sock *sk)
{
	unsigned long flags, nflags;

	/* perform an atomic operation only if at least one flag is set */
	do {
		flags = sk->sk_tsq_flags;
		if (!(flags & TCP_DEFERRED_ALL))
			return;
		nflags = flags & ~TCP_DEFERRED_ALL;
	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);

	if (flags & TCPF_TSQ_DEFERRED) {
		tcp_tsq_write(sk);
		__sock_put(sk);
	}
	/* Here begins the tricky part :
	 * We are called from release_sock() with :
	 * 1) BH disabled
	 * 2) sk_lock.slock spinlock held
	 * 3) socket owned by us (sk->sk_lock.owned == 1)
	 *
	 * But the following code is meant to be called from BH handlers,
	 * so we should keep BH disabled, but release socket ownership early.
	 */
	sock_release_ownership(sk);

	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
		tcp_write_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
		tcp_delack_timer_handler(sk);
		__sock_put(sk);
	}
	if (flags & TCPF_MTU_REDUCED_DEFERRED) {
		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
		__sock_put(sk);
	}
}
EXPORT_SYMBOL(tcp_release_cb);

void __init tcp_tasklet_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);

		INIT_LIST_HEAD(&tsq->head);
		tasklet_setup(&tsq->tasklet, tcp_tasklet_func);
	}
}

/*
 * Write buffer destructor automatically called from kfree_skb.
 * We can't xmit new skbs from this context, as we might already
 * hold the qdisc lock.
 */
void tcp_wfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long flags, nval, oval;

	/* Keep one reference on sk_wmem_alloc.
	 * Will be released by sk_free() from here or tcp_tasklet_func()
	 */
	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));

	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
	 * Wait until our queues (qdisc + devices) are drained.
	 * This gives :
	 * - less callbacks to tcp_write_xmit(), reducing stress (batches)
	 * - chance for incoming ACK (processed by another cpu maybe)
	 *   to migrate this flow (skb->ooo_okay will be eventually set)
	 */
	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
		goto out;

	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
		struct tsq_tasklet *tsq;
		bool empty;

		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
			goto out;

		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED;
		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
		if (nval != oval)
			continue;

		/* queue this socket to tasklet queue */
		local_irq_save(flags);
		tsq = this_cpu_ptr(&tsq_tasklet);
		empty = list_empty(&tsq->head);
		list_add(&tp->tsq_node, &tsq->head);
		if (empty)
			tasklet_schedule(&tsq->tasklet);
		local_irq_restore(flags);
		return;
	}
out:
	sk_free(sk);
}

/* Note: Called under soft irq.
 * We can call TCP stack right away, unless socket is owned by user.
 */
enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
{
	struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer);
	struct sock *sk = (struct sock *)tp;

	tcp_tsq_handler(sk);
	sock_put(sk);

	return HRTIMER_NORESTART;
}

static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
				      u64 prior_wstamp)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sk->sk_pacing_status != SK_PACING_NONE) {
		unsigned long rate = sk->sk_pacing_rate;

		/* Original sch_fq does not pace the first 10 MSS.
		 * Note that tp->data_segs_out overflows after 2^32 packets,
		 * this is a minor annoyance.
		 */
		if (rate != ~0UL && rate && tp->data_segs_out >= 10) {
			u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate);
			u64 credit = tp->tcp_wstamp_ns - prior_wstamp;

			/* take into account OS jitter */
			len_ns -= min_t(u64, len_ns / 2, credit);
			tp->tcp_wstamp_ns += len_ns;
		}
	}
	list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
}
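
/* Worked example (editorial illustration, with assumed values): at
 * sk_pacing_rate = 12500000 B/s (100 Mbit/s), a 1500-byte skb costs
 *
 *	len_ns = 1500 * NSEC_PER_SEC / 12500000 = 120000 ns (120 us).
 *
 * If 60000 ns have already elapsed since the previous send (the credit),
 * the code forgives min(len_ns / 2, credit) = 60000 ns of that cost, so
 * tcp_wstamp_ns only advances by 60000 ns.
 */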

INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb));

/* This routine actually transmits TCP packets queued in by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKBs seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
			      int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet;
	struct tcp_sock *tp;
	struct tcp_skb_cb *tcb;
	struct tcp_out_options opts;
	unsigned int tcp_options_size, tcp_header_size;
	struct sk_buff *oskb = NULL;
	struct tcp_md5sig_key *md5;
	struct tcphdr *th;
	u64 prior_wstamp;
	int err;

	BUG_ON(!skb || !tcp_skb_pcount(skb));
	tp = tcp_sk(sk);
	prior_wstamp = tp->tcp_wstamp_ns;
	tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
	skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
	if (clone_it) {
		TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
			- tp->snd_una;
		oskb = skb;

		tcp_skb_tsorted_save(oskb) {
			if (unlikely(skb_cloned(oskb)))
				skb = pskb_copy(oskb, gfp_mask);
			else
				skb = skb_clone(oskb, gfp_mask);
		} tcp_skb_tsorted_restore(oskb);

		if (unlikely(!skb))
			return -ENOBUFS;
		/* retransmit skbs might have a non zero value in skb->dev
		 * because skb->dev is aliased with skb->rbnode.rb_left
		 */
		skb->dev = NULL;
	}

	inet = inet_sk(sk);
	tcb = TCP_SKB_CB(skb);
	memset(&opts, 0, sizeof(opts));

	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
	} else {
		tcp_options_size = tcp_established_options(sk, skb, &opts,
							   &md5);
		/* Force a PSH flag on all (GSO) packets to expedite GRO flush
		 * at the receiver: this slightly improves GRO performance.
		 * Note that we do not force the PSH flag for non-GSO packets,
		 * because they might be sent under high congestion events,
		 * and in this case it is better to delay the delivery of 1-MSS
		 * packets and thus the corresponding ACK packet that would
		 * release the following packet.
		 */
		if (tcp_skb_pcount(skb) > 1)
			tcb->tcp_flags |= TCPHDR_PSH;
	}
	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);

	/* if no packet is in qdisc/device queue, then allow XPS to select
	 * another queue. We can be called from tcp_tsq_handler()
	 * which holds one reference to sk.
	 *
	 * TODO: Ideally, in-flight pure ACK packets should not matter here.
	 * One way to get this would be to set skb->truesize = 2 on them.
	 */
	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);

	/* If we had to use memory reserve to allocate this skb,
	 * this might cause drops if packet is looped back :
	 * Other socket might not have SOCK_MEMALLOC.
	 * Packets not looped back do not care about pfmemalloc.
	 */
	skb->pfmemalloc = 0;

	skb_push(skb, tcp_header_size);
	skb_reset_transport_header(skb);

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
	refcount_add(skb->truesize, &sk->sk_wmem_alloc);

	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);

	/* Build TCP header and checksum it. */
	th = (struct tcphdr *)skb->data;
	th->source		= inet->inet_sport;
	th->dest		= inet->inet_dport;
	th->seq			= htonl(tcb->seq);
	th->ack_seq		= htonl(rcv_nxt);
	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
					tcb->tcp_flags);

	th->check		= 0;
	th->urg_ptr		= 0;

	/* The urg_mode check is necessary during a below-snd_una win probe */
	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
		if (before(tp->snd_up, tcb->seq + 0x10000)) {
			th->urg_ptr = htons(tp->snd_up - tcb->seq);
			th->urg = 1;
		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
			th->urg_ptr = htons(0xFFFF);
			th->urg = 1;
		}
	}

	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
	if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
		th->window      = htons(tcp_select_window(sk));
		tcp_ecn_send(sk, skb, th, tcp_header_size);
	} else {
		/* RFC1323: The window in SYN & SYN/ACK segments
		 * is never scaled.
		 */
		th->window	= htons(min(tp->rcv_wnd, 65535U));
	}

	tcp_options_write((__be32 *)(th + 1), tp, &opts);

#ifdef CONFIG_TCP_MD5SIG
	/* Calculate the MD5 hash, as we have all we need now */
	if (md5) {
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		tp->af_specific->calc_md5_hash(opts.hash_location,
					       md5, sk, skb);
	}
#endif

	/* BPF prog is the last one writing header option */
	bpf_skops_write_hdr_opt(sk, skb, NULL, NULL, 0, &opts);

	INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check,
			   tcp_v6_send_check, tcp_v4_send_check,
			   sk, skb);

	if (likely(tcb->tcp_flags & TCPHDR_ACK))
		tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);

	if (skb->len != tcp_header_size) {
		tcp_event_data_sent(tp, sk);
		tp->data_segs_out += tcp_skb_pcount(skb);
		tp->bytes_sent += skb->len - tcp_header_size;
	}

	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
			      tcp_skb_pcount(skb));

	tp->segs_out += tcp_skb_pcount(skb);
	skb_set_hash_from_sk(skb, sk);
	/* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */
	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);

	/* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */

	/* Cleanup our debris for IP stacks */
	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
			       sizeof(struct inet6_skb_parm)));

	tcp_add_tx_delay(skb, tp);

	err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit,
				 inet6_csk_xmit, ip_queue_xmit,
				 sk, skb, &inet->cork.fl);

	if (unlikely(err > 0)) {
		tcp_enter_cwr(sk);
		err = net_xmit_eval(err);
	}
	if (!err && oskb) {
		tcp_update_skb_after_send(sk, oskb, prior_wstamp);
		tcp_rate_skb_sent(sk, oskb);
	}
	return err;
}

static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
			    gfp_t gfp_mask)
{
	return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
				  tcp_sk(sk)->rcv_nxt);
}

/* This routine just queues the buffer for sending.
 *
 * NOTE: the probe0 timer is not checked, do not forget tcp_push_pending_frames,
 * otherwise the socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);
	__skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk_wmem_queued_add(sk, skb->truesize);
	sk_mem_charge(sk, skb->truesize);
}

/* Initialize TSO segments for a packet. */
static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
{
	if (skb->len <= mss_now) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		tcp_skb_pcount_set(skb, 1);
		TCP_SKB_CB(skb)->tcp_gso_size = 0;
	} else {
		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
		TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
	}
}
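
/* Worked example (editorial illustration): with mss_now = 1460, an skb of
 * len = 4380 gets pcount = DIV_ROUND_UP(4380, 1460) = 3 and
 * tcp_gso_size = 1460, while an skb of len = 1000 takes the cheap path:
 * pcount = 1 and tcp_gso_size = 0 (not a TSO packet).
 */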

/* Pcount in the middle of the write queue got changed, so we need to do
 * various tweaks to fix counters
 */
static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tp->packets_out -= decr;

	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
		tp->sacked_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
		tp->retrans_out -= decr;
	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
		tp->lost_out -= decr;

	/* Reno case is special. Sigh... */
	if (tcp_is_reno(tp) && decr > 0)
		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);

	if (tp->lost_skb_hint &&
	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
		tp->lost_cnt_hint -= decr;

	tcp_verify_left_out(tp);
}

static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->txstamp_ack ||
		(skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
}

static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (unlikely(tcp_has_tx_tstamp(skb)) &&
	    !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
		struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
		u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;

		shinfo->tx_flags &= ~tsflags;
		shinfo2->tx_flags |= tsflags;
		swap(shinfo->tskey, shinfo2->tskey);
		TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
		TCP_SKB_CB(skb)->txstamp_ack = 0;
	}
}

static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
{
	TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
	TCP_SKB_CB(skb)->eor = 0;
}

/* Insert buff after skb on the write or rtx queue of sk.  */
static void tcp_insert_write_queue_after(struct sk_buff *skb,
					 struct sk_buff *buff,
					 struct sock *sk,
					 enum tcp_queue tcp_queue)
{
	if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE)
		__skb_queue_after(&sk->sk_write_queue, skb, buff);
	else
		tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
}

/* Function to create two new TCP segments.  Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list.  This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
		 struct sk_buff *skb, u32 len,
		 unsigned int mss_now, gfp_t gfp)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int nsize, old_factor;
	long limit;
	int nlen;
	u8 flags;

	if (WARN_ON(len > skb->len))
		return -EINVAL;

	nsize = skb_headlen(skb) - len;
	if (nsize < 0)
		nsize = 0;

	/* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
	 * We need some allowance to not penalize applications setting small
	 * SO_SNDBUF values.
	 * Also allow first and last skb in retransmit queue to be split.
	 */
	limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
	if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
		     tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
		     skb != tcp_rtx_queue_head(sk) &&
		     skb != tcp_rtx_queue_tail(sk))) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
		return -ENOMEM;
	}

	if (skb_unclone(skb, gfp))
		return -ENOMEM;

	/* Get a new skb... force flag on. */
	buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
	if (!buff)
		return -ENOMEM; /* We'll just try again later. */
	skb_copy_decrypted(buff, skb);
	mptcp_skb_ext_copy(buff, skb);

	sk_wmem_queued_add(sk, buff->truesize);
	sk_mem_charge(sk, buff->truesize);
	nlen = skb->len - len - nsize;
	buff->truesize += nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->tcp_flags;
	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
	TCP_SKB_CB(buff)->tcp_flags = flags;
	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
	tcp_skb_fragment_eor(skb, buff);

	skb_split(skb, buff, len);

	buff->ip_summed = CHECKSUM_PARTIAL;

	buff->tstamp = skb->tstamp;
	tcp_fragment_tstamp(skb, buff);

	old_factor = tcp_skb_pcount(skb);

	/* Fix up tso_factor for both original and new SKB.  */
	tcp_set_skb_tso_segs(skb, mss_now);
	tcp_set_skb_tso_segs(buff, mss_now);

	/* Update delivered info for the new segment */
	TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;

	/* If this packet has been sent out already, we must
	 * adjust the various packet counters.
	 */
	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			tcp_skb_pcount(buff);

		if (diff)
			tcp_adjust_pcount(sk, skb, diff);
	}

	/* Link BUFF into the send queue. */
	__skb_header_release(buff);
	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
	if (tcp_queue == TCP_FRAG_IN_RTX_QUEUE)
		list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor);

	return 0;
}
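
/* Worked example (editorial illustration): fragmenting an skb covering
 * seq 1000..4000 at len = 1000 leaves the original with seq 1000..2000 and
 * gives buff seq 2000..4000.  If FIN or PSH was set on the original, it
 * migrates to buff, the second (later) segment, as sequenced above.
 */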
1628
1629/* This is similar to __pskb_pull_tail(). The difference is that pulled
1630 * data is not copied, but immediately discarded.
 
1631 */
1632static int __pskb_trim_head(struct sk_buff *skb, int len)
1633{
1634	struct skb_shared_info *shinfo;
1635	int i, k, eat;
1636
1637	eat = min_t(int, len, skb_headlen(skb));
1638	if (eat) {
1639		__skb_pull(skb, eat);
1640		len -= eat;
1641		if (!len)
1642			return 0;
1643	}
1644	eat = len;
1645	k = 0;
1646	shinfo = skb_shinfo(skb);
1647	for (i = 0; i < shinfo->nr_frags; i++) {
1648		int size = skb_frag_size(&shinfo->frags[i]);
1649
1650		if (size <= eat) {
1651			skb_frag_unref(skb, i);
1652			eat -= size;
1653		} else {
1654			shinfo->frags[k] = shinfo->frags[i];
1655			if (eat) {
1656				skb_frag_off_add(&shinfo->frags[k], eat);
1657				skb_frag_size_sub(&shinfo->frags[k], eat);
1658				eat = 0;
1659			}
1660			k++;
1661		}
1662	}
1663	shinfo->nr_frags = k;
1664
 
1665	skb->data_len -= len;
1666	skb->len = skb->data_len;
1667	return len;
1668}
1669
1670/* Remove acked data from a packet in the transmit queue. */
1671int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
1672{
1673	u32 delta_truesize;
1674
1675	if (skb_unclone(skb, GFP_ATOMIC))
1676		return -ENOMEM;
1677
1678	delta_truesize = __pskb_trim_head(skb, len);
1679
1680	TCP_SKB_CB(skb)->seq += len;
1681	skb->ip_summed = CHECKSUM_PARTIAL;
1682
1683	if (delta_truesize) {
1684		skb->truesize	   -= delta_truesize;
1685		sk_wmem_queued_add(sk, -delta_truesize);
1686		sk_mem_uncharge(sk, delta_truesize);
1687	}
1688
1689	/* Any change of skb->len requires recalculation of tso factor. */
1690	if (tcp_skb_pcount(skb) > 1)
1691		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
1692
1693	return 0;
1694}
1695
1696/* Calculate MSS not accounting any TCP options.  */
1697static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
1698{
1699	const struct tcp_sock *tp = tcp_sk(sk);
1700	const struct inet_connection_sock *icsk = inet_csk(sk);
1701	int mss_now;
1702
1703	/* Calculate base mss without TCP options:
1704	   It is MMS_S - sizeof(tcphdr) of rfc1122
1705	 */
1706	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
1707
1708	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
1709	if (icsk->icsk_af_ops->net_frag_header_len) {
1710		const struct dst_entry *dst = __sk_dst_get(sk);
1711
1712		if (dst && dst_allfrag(dst))
1713			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
1714	}
1715
1716	/* Clamp it (mss_clamp does not include tcp options) */
1717	if (mss_now > tp->rx_opt.mss_clamp)
1718		mss_now = tp->rx_opt.mss_clamp;
1719
1720	/* Now subtract optional transport overhead */
1721	mss_now -= icsk->icsk_ext_hdr_len;
1722
1723	/* Then reserve room for full set of TCP options and 8 bytes of data */
1724	mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss);
1725	return mss_now;
1726}
1727
1728/* Calculate MSS. Not accounting for SACKs here.  */
1729int tcp_mtu_to_mss(struct sock *sk, int pmtu)
1730{
1731	/* Subtract TCP options size, not including SACKs */
1732	return __tcp_mtu_to_mss(sk, pmtu) -
1733	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
1734}
1735EXPORT_SYMBOL(tcp_mtu_to_mss);
1736
1737/* Inverse of above */
1738int tcp_mss_to_mtu(struct sock *sk, int mss)
1739{
1740	const struct tcp_sock *tp = tcp_sk(sk);
1741	const struct inet_connection_sock *icsk = inet_csk(sk);
1742	int mtu;
1743
1744	mtu = mss +
1745	      tp->tcp_header_len +
1746	      icsk->icsk_ext_hdr_len +
1747	      icsk->icsk_af_ops->net_header_len;
1748
1749	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
1750	if (icsk->icsk_af_ops->net_frag_header_len) {
1751		const struct dst_entry *dst = __sk_dst_get(sk);
1752
1753		if (dst && dst_allfrag(dst))
1754			mtu += icsk->icsk_af_ops->net_frag_header_len;
1755	}
1756	return mtu;
1757}
1758EXPORT_SYMBOL(tcp_mss_to_mtu);
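
/* A minimal standalone sketch of the MTU <-> MSS arithmetic above,
 * assuming plain IPv4 (20 byte network header), no IP options, no
 * extension headers and no TCP options, so only the bare 20 byte TCP
 * header is subtracted. Illustrative userspace C, not kernel code.
 */
#include <assert.h>
#include <stdio.h>

#define IP4_HDR_LEN	20	/* net_header_len for plain IPv4 */
#define TCP_HDR_LEN	20	/* bare TCP header, no options */

static int sketch_mtu_to_mss(int pmtu)
{
	return pmtu - IP4_HDR_LEN - TCP_HDR_LEN;
}

static int sketch_mss_to_mtu(int mss)
{
	return mss + IP4_HDR_LEN + TCP_HDR_LEN;
}

int main(void)
{
	int mss = sketch_mtu_to_mss(1500);

	printf("MTU 1500 -> MSS %d\n", mss);	/* 1460 */
	assert(sketch_mss_to_mtu(mss) == 1500);	/* the two are inverses */
	return 0;
}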
1759
1760/* MTU probing init per socket */
1761void tcp_mtup_init(struct sock *sk)
1762{
1763	struct tcp_sock *tp = tcp_sk(sk);
1764	struct inet_connection_sock *icsk = inet_csk(sk);
1765	struct net *net = sock_net(sk);
1766
1767	icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
1768	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
1769			       icsk->icsk_af_ops->net_header_len;
1770	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
1771	icsk->icsk_mtup.probe_size = 0;
1772	if (icsk->icsk_mtup.enabled)
1773		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
1774}
1775EXPORT_SYMBOL(tcp_mtup_init);
1776
1777/* This function synchronizes snd mss to current pmtu/exthdr set.
1778
1779   tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT account
1780   for TCP options, but includes only the bare TCP header.
1781
1782   tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1783   It is minimum of user_mss and mss received with SYN.
1784   It also does not include TCP options.
1785
1786   inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
1787
1788   tp->mss_cache is current effective sending mss, including
1789   all tcp options except for SACKs. It is evaluated,
1790   taking into account current pmtu, but never exceeds
1791   tp->rx_opt.mss_clamp.
1792
1793   NOTE1. rfc1122 clearly states that advertised MSS
1794   DOES NOT include either tcp or ip options.
1795
1796   NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1797   are READ ONLY outside this function.		--ANK (980731)
1798 */
1799unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
1800{
1801	struct tcp_sock *tp = tcp_sk(sk);
1802	struct inet_connection_sock *icsk = inet_csk(sk);
1803	int mss_now;
1804
1805	if (icsk->icsk_mtup.search_high > pmtu)
1806		icsk->icsk_mtup.search_high = pmtu;
1807
1808	mss_now = tcp_mtu_to_mss(sk, pmtu);
1809	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
1810
1811	/* And store cached results */
1812	icsk->icsk_pmtu_cookie = pmtu;
1813	if (icsk->icsk_mtup.enabled)
1814		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1815	tp->mss_cache = mss_now;
1816
1817	return mss_now;
1818}
1819EXPORT_SYMBOL(tcp_sync_mss);
1820
1821/* Compute the current effective MSS, taking SACKs and IP options,
1822 * and even PMTU discovery events into account.
1823 */
1824unsigned int tcp_current_mss(struct sock *sk)
1825{
1826	const struct tcp_sock *tp = tcp_sk(sk);
1827	const struct dst_entry *dst = __sk_dst_get(sk);
1828	u32 mss_now;
1829	unsigned int header_len;
1830	struct tcp_out_options opts;
1831	struct tcp_md5sig_key *md5;
1832
1833	mss_now = tp->mss_cache;
1834
1835	if (dst) {
1836		u32 mtu = dst_mtu(dst);
1837		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
1838			mss_now = tcp_sync_mss(sk, mtu);
1839	}
1840
1841	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
1842		     sizeof(struct tcphdr);
1843	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
1844	 * some common options. If this is an odd packet (because we have SACK
1845	 * blocks etc) then our calculated header_len will be different, and
1846	 * we have to adjust mss_now correspondingly */
1847	if (header_len != tp->tcp_header_len) {
1848		int delta = (int) header_len - tp->tcp_header_len;
1849		mss_now -= delta;
1850	}
1851
1852	return mss_now;
1853}
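
/* A worked standalone example of the header-delta fixup above: with
 * timestamps, tcp_header_len is 20 + 12 = 32 and mss_cache is sized
 * for that. A packet that also carries one SACK block needs 4 bytes
 * of aligned SACK option base plus 8 bytes for the block, so its
 * effective mss shrinks by 12. Hypothetical numbers, illustrative
 * userspace C only.
 */
#include <stdio.h>

int main(void)
{
	unsigned int mss_cache = 1448;		/* 1460 - 12 (timestamps) */
	unsigned int tcp_header_len = 32;	/* 20 + aligned timestamps */
	unsigned int header_len = 32 + 4 + 8;	/* + one SACK block */

	printf("mss_now = %u\n", mss_cache - (header_len - tcp_header_len));
	/* prints 1436 */
	return 0;
}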
1854
1855/* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
1856 * As additional protections, we do not touch cwnd in retransmission phases,
1857 * and if application hit its sndbuf limit recently.
1858 */
1859static void tcp_cwnd_application_limited(struct sock *sk)
1860{
1861	struct tcp_sock *tp = tcp_sk(sk);
1862
1863	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
1864	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1865		/* Limited by application or receiver window. */
1866		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
1867		u32 win_used = max(tp->snd_cwnd_used, init_win);
1868		if (win_used < tp->snd_cwnd) {
1869			tp->snd_ssthresh = tcp_current_ssthresh(sk);
1870			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
1871		}
1872		tp->snd_cwnd_used = 0;
1873	}
1874	tp->snd_cwnd_stamp = tcp_jiffies32;
1875}
1876
1877static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1878{
1879	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1880	struct tcp_sock *tp = tcp_sk(sk);
1881
1882	/* Track the maximum number of outstanding packets in each
1883	 * window, and remember whether we were cwnd-limited then.
1884	 */
1885	if (!before(tp->snd_una, tp->max_packets_seq) ||
1886	    tp->packets_out > tp->max_packets_out ||
1887	    is_cwnd_limited) {
1888		tp->max_packets_out = tp->packets_out;
1889		tp->max_packets_seq = tp->snd_nxt;
1890		tp->is_cwnd_limited = is_cwnd_limited;
1891	}
1892
1893	if (tcp_is_cwnd_limited(sk)) {
1894		/* Network is fully fed. */
1895		tp->snd_cwnd_used = 0;
1896		tp->snd_cwnd_stamp = tcp_jiffies32;
1897	} else {
1898		/* Network starves. */
1899		if (tp->packets_out > tp->snd_cwnd_used)
1900			tp->snd_cwnd_used = tp->packets_out;
1901
1902		if (sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle &&
1903		    (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
1904		    !ca_ops->cong_control)
1905			tcp_cwnd_application_limited(sk);
1906
1907		/* The following conditions together indicate the starvation
1908		 * is caused by insufficient sender buffer:
1909		 * 1) just sent some data (see tcp_write_xmit)
1910		 * 2) not cwnd limited (this else condition)
1911		 * 3) no more data to send (tcp_write_queue_empty())
1912		 * 4) application is hitting buffer limit (SOCK_NOSPACE)
1913		 */
1914		if (tcp_write_queue_empty(sk) && sk->sk_socket &&
1915		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
1916		    (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
1917			tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
1918	}
1919}
1920
1921/* Minshall's variant of the Nagle send check. */
1922static bool tcp_minshall_check(const struct tcp_sock *tp)
1923{
1924	return after(tp->snd_sml, tp->snd_una) &&
1925		!after(tp->snd_sml, tp->snd_nxt);
1926}
1927
1928/* Update snd_sml if this skb is under mss
1929 * Note that a TSO packet might end with a sub-mss segment
1930 * The test is really :
1931 * if ((skb->len % mss) != 0)
1932 *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1933 * But we can avoid doing the divide again given we already have
1934 *  skb_pcount = skb->len / mss_now
1935 */
1936static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1937				const struct sk_buff *skb)
1938{
1939	if (skb->len < tcp_skb_pcount(skb) * mss_now)
1940		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1941}
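
/* A quick standalone check of the divide-avoidance trick above: with
 * pcount = DIV_ROUND_UP(len, mss), "len < pcount * mss" holds exactly
 * when "len % mss != 0". Illustrative userspace C only.
 */
#include <assert.h>

static unsigned int div_round_up(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	unsigned int mss = 1460, len;

	for (len = 1; len < 10 * mss; len++) {
		unsigned int pcount = div_round_up(len, mss);

		assert((len < pcount * mss) == (len % mss != 0));
	}
	return 0;
}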
1942
1943/* Return false if the packet can be sent now without violating Nagle's rules:
1944 * 1. It is full sized. (provided by caller in %partial bool)
1945 * 2. Or it contains FIN. (already checked by caller)
1946 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1947 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1948 *    With Minshall's modification: all sent small packets are ACKed.
1949 */
1950static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1951			    int nonagle)
1952{
1953	return partial &&
1954		((nonagle & TCP_NAGLE_CORK) ||
1955		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1956}
1957
1958/* Return how many segs we'd like on a TSO packet,
1959 * to send one TSO packet per ms
1960 */
1961static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
1962			    int min_tso_segs)
1963{
1964	u32 bytes, segs;
1965
1966	bytes = min_t(unsigned long,
1967		      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
1968		      sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
1969
1970	/* Goal is to send at least one packet per ms,
1971	 * not one big TSO packet every 100 ms.
1972	 * This preserves ACK clocking and is consistent
1973	 * with tcp_tso_should_defer() heuristic.
1974	 */
1975	segs = max_t(u32, bytes / mss_now, min_tso_segs);
1976
1977	return segs;
1978}
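
/* A worked standalone example of the autosizing above: at a pacing
 * rate of 125 MB/s (~1 Gbit/s) with the default sk_pacing_shift of
 * 10, rate >> 10 is roughly one millisecond worth of bytes, and the
 * goal becomes bytes / mss, floored by min_tso_segs. The numbers are
 * hypothetical; illustrative userspace C only.
 */
#include <stdio.h>

int main(void)
{
	unsigned long pacing_rate = 125UL * 1000 * 1000;	/* bytes/sec */
	unsigned long bytes = pacing_rate >> 10;		/* ~1 ms of data */
	unsigned int mss = 1460, min_tso_segs = 2;
	unsigned int segs = bytes / mss;

	if (segs < min_tso_segs)
		segs = min_tso_segs;
	printf("~%lu bytes/ms -> %u segs per TSO packet\n", bytes, segs);
	return 0;
}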
1979
1980/* Return the number of segments we want in the skb we are transmitting.
1981 * See if congestion control module wants to decide; otherwise, autosize.
1982 */
1983static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
1984{
1985	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1986	u32 min_tso, tso_segs;
1987
1988	min_tso = ca_ops->min_tso_segs ?
1989			ca_ops->min_tso_segs(sk) :
1990			sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
1991
1992	tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
1993	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
1994}
1995
1996/* Returns the portion of skb which can be sent right away */
1997static unsigned int tcp_mss_split_point(const struct sock *sk,
1998					const struct sk_buff *skb,
1999					unsigned int mss_now,
2000					unsigned int max_segs,
2001					int nonagle)
2002{
2003	const struct tcp_sock *tp = tcp_sk(sk);
2004	u32 partial, needed, window, max_len;
2005
2006	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
2007	max_len = mss_now * max_segs;
2008
2009	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
2010		return max_len;
2011
2012	needed = min(skb->len, window);
2013
2014	if (max_len <= needed)
2015		return max_len;
2016
2017	partial = needed % mss_now;
2018	/* If last segment is not a full MSS, check if Nagle rules allow us
2019	 * to include this last segment in this skb.
2020	 * Otherwise, we'll split the skb at last MSS boundary
2021	 */
2022	if (tcp_nagle_check(partial != 0, tp, nonagle))
2023		return needed - partial;
2024
2025	return needed;
2026}
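
/* A standalone sketch of the split-point rule above: send at most
 * min(window, max_len) bytes, and when the tail segment would be
 * sub-mss and Nagle applies, trim back to an mss boundary. The
 * "nagle_holds" flag stands in for tcp_nagle_check(); illustrative
 * userspace C only.
 */
#include <stdio.h>

static unsigned int split_point(unsigned int skb_len, unsigned int window,
				unsigned int mss, unsigned int max_segs,
				int nagle_holds)
{
	unsigned int max_len = mss * max_segs;
	unsigned int needed = skb_len < window ? skb_len : window;
	unsigned int partial;

	if (max_len <= needed)
		return max_len;
	partial = needed % mss;
	if (partial && nagle_holds)
		return needed - partial;	/* defer the sub-mss tail */
	return needed;
}

int main(void)
{
	/* 5000 bytes queued, 10000 byte window, mss 1460: the 620 byte
	 * tail is held back under Nagle (3 * 1460 = 4380), sent otherwise.
	 */
	printf("%u\n", split_point(5000, 10000, 1460, 40, 1));	/* 4380 */
	printf("%u\n", split_point(5000, 10000, 1460, 40, 0));	/* 5000 */
	return 0;
}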
2027
2028/* Can at least one segment of SKB be sent right now, according to the
2029 * congestion window rules?  If so, return how many segments are allowed.
2030 */
2031static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
2032					 const struct sk_buff *skb)
2033{
2034	u32 in_flight, cwnd, halfcwnd;
2035
2036	/* Don't be strict about the congestion window for the final FIN.  */
2037	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
2038	    tcp_skb_pcount(skb) == 1)
2039		return 1;
2040
2041	in_flight = tcp_packets_in_flight(tp);
2042	cwnd = tp->snd_cwnd;
2043	if (in_flight >= cwnd)
2044		return 0;
2045
2046	/* For better scheduling, ensure we have at least
2047	 * 2 GSO packets in flight.
2048	 */
2049	halfcwnd = max(cwnd >> 1, 1U);
2050	return min(halfcwnd, cwnd - in_flight);
2051}
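
/* A worked standalone example of the quota above: the sender may use
 * the unused part of the congestion window, but never more than half
 * the window at once, so at least 2 GSO packets stay in flight.
 * Illustrative userspace C only.
 */
#include <stdio.h>

static unsigned int cwnd_quota(unsigned int cwnd, unsigned int in_flight)
{
	unsigned int halfcwnd;

	if (in_flight >= cwnd)
		return 0;
	halfcwnd = cwnd / 2 ? cwnd / 2 : 1;	/* max(cwnd >> 1, 1U) */
	return halfcwnd < cwnd - in_flight ? halfcwnd : cwnd - in_flight;
}

int main(void)
{
	printf("%u\n", cwnd_quota(10, 7));	/* 3: limited by free window */
	printf("%u\n", cwnd_quota(10, 2));	/* 5: capped at half the cwnd */
	return 0;
}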
2052
2053/* Initialize TSO state of a skb.
2054 * This must be invoked the first time we consider transmitting
2055 * SKB onto the wire.
2056 */
2057static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
2058{
2059	int tso_segs = tcp_skb_pcount(skb);
2060
2061	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
2062		tcp_set_skb_tso_segs(skb, mss_now);
2063		tso_segs = tcp_skb_pcount(skb);
2064	}
2065	return tso_segs;
2066}
2067
2068
2069/* Return true if the Nagle test allows this packet to be
2070 * sent now.
2071 */
2072static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
2073				  unsigned int cur_mss, int nonagle)
2074{
2075	/* Nagle rule does not apply to frames that sit in the middle of the
2076	 * write_queue (they have no chance to get new data).
2077	 *
2078	 * This is implemented in the callers, where they modify the 'nonagle'
2079	 * argument based upon the location of SKB in the send queue.
2080	 */
2081	if (nonagle & TCP_NAGLE_PUSH)
2082		return true;
2083
2084	/* Don't use the nagle rule for urgent data (or for the final FIN). */
2085	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
2086		return true;
2087
2088	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
2089		return true;
2090
2091	return false;
2092}
2093
2094/* Does at least the first segment of SKB fit into the send window? */
2095static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
2096			     const struct sk_buff *skb,
2097			     unsigned int cur_mss)
2098{
2099	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
2100
2101	if (skb->len > cur_mss)
2102		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
2103
2104	return !after(end_seq, tcp_wnd_end(tp));
2105}
2106
2107/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
2108 * which is put after SKB on the list.  It is very much like
2109 * tcp_fragment() except that it may make several kinds of assumptions
2110 * in order to speed up the splitting operation.  In particular, we
2111 * know that all the data is in scatter-gather pages, and that the
2112 * packet has never been sent out before (and thus is not cloned).
2113 */
2114static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
2115			unsigned int mss_now, gfp_t gfp)
2116{
2117	int nlen = skb->len - len;
2118	struct sk_buff *buff;
2119	u8 flags;
2120
2121	/* All of a TSO frame must be composed of paged data.  */
2122	if (skb->len != skb->data_len)
2123		return tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
2124				    skb, len, mss_now, gfp);
2125
2126	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
2127	if (unlikely(!buff))
2128		return -ENOMEM;
2129	skb_copy_decrypted(buff, skb);
2130	mptcp_skb_ext_copy(buff, skb);
2131
2132	sk_wmem_queued_add(sk, buff->truesize);
2133	sk_mem_charge(sk, buff->truesize);
2134	buff->truesize += nlen;
2135	skb->truesize -= nlen;
2136
2137	/* Correct the sequence numbers. */
2138	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
2139	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
2140	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
2141
2142	/* PSH and FIN should only be set in the second packet. */
2143	flags = TCP_SKB_CB(skb)->tcp_flags;
2144	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
2145	TCP_SKB_CB(buff)->tcp_flags = flags;
2146
2147	/* This packet was never sent out yet, so no SACK bits. */
2148	TCP_SKB_CB(buff)->sacked = 0;
2149
2150	tcp_skb_fragment_eor(skb, buff);
2151
2152	buff->ip_summed = CHECKSUM_PARTIAL;
2153	skb_split(skb, buff, len);
2154	tcp_fragment_tstamp(skb, buff);
2155
2156	/* Fix up tso_factor for both original and new SKB.  */
2157	tcp_set_skb_tso_segs(skb, mss_now);
2158	tcp_set_skb_tso_segs(buff, mss_now);
2159
2160	/* Link BUFF into the send queue. */
2161	__skb_header_release(buff);
2162	tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE);
2163
2164	return 0;
2165}
2166
2167/* Try to defer sending, if possible, in order to minimize the amount
2168 * of TSO splitting we do.  View it as a kind of TSO Nagle test.
2169 *
2170 * This algorithm is from John Heffner.
2171 */
2172static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
2173				 bool *is_cwnd_limited,
2174				 bool *is_rwnd_limited,
2175				 u32 max_segs)
2176{
2177	const struct inet_connection_sock *icsk = inet_csk(sk);
2178	u32 send_win, cong_win, limit, in_flight;
2179	struct tcp_sock *tp = tcp_sk(sk);
2180	struct sk_buff *head;
2181	int win_divisor;
2182	s64 delta;
2183
2184	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
2185		goto send_now;
2186
2187	/* Avoid bursty behavior by allowing defer
2188	 * only if the last write was recent (1 ms).
2189	 * Note that tp->tcp_wstamp_ns can be in the future if we have
2190	 * packets waiting in a qdisc or device for EDT delivery.
2191	 */
2192	delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC;
2193	if (delta > 0)
2194		goto send_now;
2195
2196	in_flight = tcp_packets_in_flight(tp);
2197
2198	BUG_ON(tcp_skb_pcount(skb) <= 1);
2199	BUG_ON(tp->snd_cwnd <= in_flight);
2200
2201	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
2202
2203	/* From in_flight test above, we know that cwnd > in_flight.  */
2204	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
2205
2206	limit = min(send_win, cong_win);
2207
2208	/* If a full-sized TSO skb can be sent, do it. */
2209	if (limit >= max_segs * tp->mss_cache)
2210		goto send_now;
2211
2212	/* An skb in the middle of the queue won't get more data; fully sendable already? */
2213	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
2214		goto send_now;
2215
2216	win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
2217	if (win_divisor) {
2218		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
2219
2220		/* If at least some fraction of a window is available,
2221		 * just use it.
2222		 */
2223		chunk /= win_divisor;
2224		if (limit >= chunk)
2225			goto send_now;
2226	} else {
2227		/* Different approach, try not to defer past a single
2228		 * ACK.  Receiver should ACK every other full sized
2229		 * frame, so if we have space for more than 3 frames
2230		 * then send now.
2231		 */
2232		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
2233			goto send_now;
2234	}
2235
2236	/* TODO : use tsorted_sent_queue ? */
2237	head = tcp_rtx_queue_head(sk);
2238	if (!head)
2239		goto send_now;
2240	delta = tp->tcp_clock_cache - head->tstamp;
2241	/* If next ACK is likely to come too late (half srtt), do not defer */
2242	if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
2243		goto send_now;
2244
2245	/* Ok, it looks like it is advisable to defer.
2246	 * Three cases are tracked :
2247	 * 1) We are cwnd-limited
2248	 * 2) We are rwnd-limited
2249	 * 3) We are application limited.
2250	 */
2251	if (cong_win < send_win) {
2252		if (cong_win <= skb->len) {
2253			*is_cwnd_limited = true;
2254			return true;
2255		}
2256	} else {
2257		if (send_win <= skb->len) {
2258			*is_rwnd_limited = true;
2259			return true;
2260		}
2261	}
2262
2263	/* If this packet won't get more data, do not wait. */
2264	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) ||
2265	    TCP_SKB_CB(skb)->eor)
2266		goto send_now;
2267
2268	return true;
2269
2270send_now:
2271	return false;
2272}
2273
2274static inline void tcp_mtu_check_reprobe(struct sock *sk)
2275{
2276	struct inet_connection_sock *icsk = inet_csk(sk);
2277	struct tcp_sock *tp = tcp_sk(sk);
2278	struct net *net = sock_net(sk);
2279	u32 interval;
2280	s32 delta;
2281
2282	interval = net->ipv4.sysctl_tcp_probe_interval;
2283	delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
2284	if (unlikely(delta >= interval * HZ)) {
2285		int mss = tcp_current_mss(sk);
2286
2287		/* Update current search range */
2288		icsk->icsk_mtup.probe_size = 0;
2289		icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
2290			sizeof(struct tcphdr) +
2291			icsk->icsk_af_ops->net_header_len;
2292		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
2293
2294		/* Update probe time stamp */
2295		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
2296	}
2297}
2298
2299static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
2300{
2301	struct sk_buff *skb, *next;
2302
2303	skb = tcp_send_head(sk);
2304	tcp_for_write_queue_from_safe(skb, next, sk) {
2305		if (len <= skb->len)
2306			break;
2307
2308		if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
2309			return false;
2310
2311		len -= skb->len;
2312	}
2313
2314	return true;
2315}
2316
2317/* Create a new MTU probe if we are ready.
2318 * MTU probing regularly attempts to increase the path MTU by
2319 * deliberately sending larger packets.  This discovers routing
2320 * changes resulting in larger path MTUs.
2321 *
2322 * Returns 0 if we should wait to probe (no cwnd available),
2323 *         1 if a probe was sent,
2324 *         -1 otherwise
2325 */
2326static int tcp_mtu_probe(struct sock *sk)
2327{
2328	struct inet_connection_sock *icsk = inet_csk(sk);
2329	struct tcp_sock *tp = tcp_sk(sk);
2330	struct sk_buff *skb, *nskb, *next;
2331	struct net *net = sock_net(sk);
2332	int probe_size;
2333	int size_needed;
2334	int copy, len;
2335	int mss_now;
2336	int interval;
2337
2338	/* Not currently probing/verifying,
2339	 * not in recovery,
2340	 * have enough cwnd, and
2341	 * not SACKing (the variable headers throw things off)
2342	 */
2343	if (likely(!icsk->icsk_mtup.enabled ||
2344		   icsk->icsk_mtup.probe_size ||
2345		   inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
2346		   tp->snd_cwnd < 11 ||
2347		   tp->rx_opt.num_sacks || tp->rx_opt.dsack))
2348		return -1;
2349
2350	/* Use binary search for probe_size between tcp_base_mss
2351	 * and the current mss_clamp. If (search_high - search_low) is
2352	 * smaller than a threshold, back off from probing.
2353	 */
2354	mss_now = tcp_current_mss(sk);
2355	probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
2356				    icsk->icsk_mtup.search_low) >> 1);
2357	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
2358	interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
2359	/* When misfortune happens and we are actively reprobing,
2360	 * the reprobe timer may have expired. We stick with the current
2361	 * probing process by not resetting the search range to its original value.
2362	 */
2363	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
2364		interval < net->ipv4.sysctl_tcp_probe_threshold) {
2365		/* Check whether enough time has elapsed for
2366		 * another round of probing.
2367		 */
2368		tcp_mtu_check_reprobe(sk);
2369		return -1;
2370	}
2371
2372	/* Have enough data in the send queue to probe? */
2373	if (tp->write_seq - tp->snd_nxt < size_needed)
2374		return -1;
2375
2376	if (tp->snd_wnd < size_needed)
2377		return -1;
2378	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
2379		return 0;
2380
2381	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
2382	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
2383		if (!tcp_packets_in_flight(tp))
2384			return -1;
2385		else
2386			return 0;
2387	}
2388
2389	if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
2390		return -1;
2391
2392	/* We're allowed to probe.  Build it now. */
2393	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
2394	if (!nskb)
2395		return -1;
2396	sk_wmem_queued_add(sk, nskb->truesize);
2397	sk_mem_charge(sk, nskb->truesize);
2398
2399	skb = tcp_send_head(sk);
2400	skb_copy_decrypted(nskb, skb);
2401	mptcp_skb_ext_copy(nskb, skb);
2402
2403	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
2404	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
2405	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
2406	TCP_SKB_CB(nskb)->sacked = 0;
2407	nskb->csum = 0;
2408	nskb->ip_summed = CHECKSUM_PARTIAL;
2409
2410	tcp_insert_write_queue_before(nskb, skb, sk);
2411	tcp_highest_sack_replace(sk, skb, nskb);
2412
2413	len = 0;
2414	tcp_for_write_queue_from_safe(skb, next, sk) {
2415		copy = min_t(int, skb->len, probe_size - len);
2416		skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
2417
2418		if (skb->len <= copy) {
2419			/* We've eaten all the data from this skb.
2420			 * Throw it away. */
2421			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
2422			/* If this is the last SKB we copy and eor is set
2423			 * we need to propagate it to the new skb.
2424			 */
2425			TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
2426			tcp_skb_collapse_tstamp(nskb, skb);
2427			tcp_unlink_write_queue(skb, sk);
2428			sk_wmem_free_skb(sk, skb);
2429		} else {
2430			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
2431						   ~(TCPHDR_FIN|TCPHDR_PSH);
2432			if (!skb_shinfo(skb)->nr_frags) {
2433				skb_pull(skb, copy);
2434			} else {
2435				__pskb_trim_head(skb, copy);
2436				tcp_set_skb_tso_segs(skb, mss_now);
2437			}
2438			TCP_SKB_CB(skb)->seq += copy;
2439		}
2440
2441		len += copy;
2442
2443		if (len >= probe_size)
2444			break;
2445	}
2446	tcp_init_tso_segs(nskb, nskb->len);
2447
2448	/* We're ready to send.  If this fails, the probe will
2449	 * be resegmented into mss-sized pieces by tcp_write_xmit().
2450	 */
2451	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
2452		/* Decrement cwnd here because we are sending
2453		 * effectively two packets. */
2454		tp->snd_cwnd--;
2455		tcp_event_new_data_sent(sk, nskb);
2456
2457		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
2458		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
2459		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
2460
2461		return 1;
2462	}
2463
2464	return -1;
2465}
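
/* A standalone sketch of the probe sizing above: each probe tries the
 * MSS derived from the midpoint of [search_low, search_high], and
 * probing backs off once the interval drops below the reprobe
 * threshold (the default sysctl_tcp_probe_threshold is 8). The loop
 * below assumes every probe succeeds, which is a simplification;
 * illustrative userspace C only.
 */
#include <stdio.h>

int main(void)
{
	int search_low = 1064;	/* tcp_base_mss 1024 + 40 bytes of headers */
	int search_high = 1500;	/* mss_clamp 1460 + 40 bytes of headers */
	int threshold = 8;

	while (search_high - search_low >= threshold) {
		int probe_mtu = (search_high + search_low) / 2;

		printf("probe MTU %d\n", probe_mtu);
		search_low = probe_mtu;	/* pretend the probe got through */
	}
	return 0;
}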
2466
2467static bool tcp_pacing_check(struct sock *sk)
2468{
2469	struct tcp_sock *tp = tcp_sk(sk);
2470
2471	if (!tcp_needs_internal_pacing(sk))
2472		return false;
2473
2474	if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache)
2475		return false;
2476
2477	if (!hrtimer_is_queued(&tp->pacing_timer)) {
2478		hrtimer_start(&tp->pacing_timer,
2479			      ns_to_ktime(tp->tcp_wstamp_ns),
2480			      HRTIMER_MODE_ABS_PINNED_SOFT);
2481		sock_hold(sk);
2482	}
2483	return true;
2484}
2485
2486/* TCP Small Queues :
2487 * Control the number of packets in qdisc/devices to two packets or ~1 ms worth.
2488 * (These limits are doubled for retransmits)
2489 * This allows for :
2490 *  - better RTT estimation and ACK scheduling
2491 *  - faster recovery
2492 *  - high rates
2493 * Alas, some drivers / subsystems require a fair amount
2494 * of queued bytes to ensure line rate.
2495 * One example is wifi aggregation (802.11 AMPDU)
2496 */
2497static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2498				  unsigned int factor)
2499{
2500	unsigned long limit;
2501
2502	limit = max_t(unsigned long,
2503		      2 * skb->truesize,
2504		      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
2505	if (sk->sk_pacing_status == SK_PACING_NONE)
2506		limit = min_t(unsigned long, limit,
2507			      sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
2508	limit <<= factor;
2509
2510	if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
2511	    tcp_sk(sk)->tcp_tx_delay) {
2512		u64 extra_bytes = (u64)sk->sk_pacing_rate * tcp_sk(sk)->tcp_tx_delay;
2513
2514		/* TSQ is based on skb truesize sum (sk_wmem_alloc), so we
2515		 * approximate our needs assuming an ~100% skb->truesize overhead.
2516		 * USEC_PER_SEC is approximated by 2^20.
2517		 * do_div(extra_bytes, USEC_PER_SEC/2) is replaced by a right shift.
2518		 */
2519		extra_bytes >>= (20 - 1);
2520		limit += extra_bytes;
2521	}
2522	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
2523		/* Always send skb if rtx queue is empty.
2524		 * No need to wait for TX completion to call us back,
2525		 * after softirq/tasklet schedule.
2526		 * This helps when TX completions are delayed too much.
2527		 */
2528		if (tcp_rtx_queue_empty(sk))
2529			return false;
2530
2531		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2532		/* It is possible TX completion already happened
2533		 * before we set TSQ_THROTTLED, so we must
2534		 * test again the condition.
2535		 */
2536		smp_mb__after_atomic();
2537		if (refcount_read(&sk->sk_wmem_alloc) > limit)
2538			return true;
2539	}
2540	return false;
2541}
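
/* A worked standalone example of the TSQ budget above: the limit is
 * max(2 * skb->truesize, ~1 ms worth of pacing), then doubled per
 * "factor" (retransmits pass factor 1). With a big TSO skb the
 * truesize term dominates. Hypothetical numbers, illustrative
 * userspace C only.
 */
#include <stdio.h>

int main(void)
{
	unsigned long truesize = 66000;			/* ~64KB TSO skb */
	unsigned long pacing_rate = 12500000;		/* 100 Mbit/s in bytes/sec */
	unsigned long limit = pacing_rate >> 10;	/* default pacing shift */

	if (limit < 2 * truesize)
		limit = 2 * truesize;
	printf("factor 0: %lu bytes\n", limit);		/* 132000 */
	printf("factor 1: %lu bytes\n", limit << 1);	/* 264000, rtx case */
	return 0;
}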
2542
2543static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
2544{
2545	const u32 now = tcp_jiffies32;
2546	enum tcp_chrono old = tp->chrono_type;
2547
2548	if (old > TCP_CHRONO_UNSPEC)
2549		tp->chrono_stat[old - 1] += now - tp->chrono_start;
2550	tp->chrono_start = now;
2551	tp->chrono_type = new;
2552}
2553
2554void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
2555{
2556	struct tcp_sock *tp = tcp_sk(sk);
2557
2558	/* If there are multiple conditions worthy of tracking in a
2559	 * chronograph then the highest priority enum takes precedence
2560	 * over the other conditions. So if something "more interesting"
2561	 * starts happening, stop the previous chrono and start a new one.
2562	 */
2563	if (type > tp->chrono_type)
2564		tcp_chrono_set(tp, type);
2565}
2566
2567void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
2568{
2569	struct tcp_sock *tp = tcp_sk(sk);
2570
2571
2572	/* There are multiple conditions worthy of tracking in a
2573	 * chronograph, so that the highest priority enum takes
2574	 * precedence over the other conditions (see tcp_chrono_start).
2575	 * If a condition stops, we only stop chrono tracking if
2576	 * it's the "most interesting" chrono we are currently
2577	 * tracking, and we start the busy chrono if we have pending data.
2578	 */
2579	if (tcp_rtx_and_write_queues_empty(sk))
2580		tcp_chrono_set(tp, TCP_CHRONO_UNSPEC);
2581	else if (type == tp->chrono_type)
2582		tcp_chrono_set(tp, TCP_CHRONO_BUSY);
2583}
2584
2585/* This routine writes packets to the network.  It advances the
2586 * send_head.  This happens as incoming acks open up the remote
2587 * window for us.
2588 *
2589 * LARGESEND note: !tcp_urg_mode is overkill, only frames between
2590 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2591 * account rare use of URG, this is not a big flaw.
2592 *
2593 * Send at most one packet when push_one > 0. Temporarily ignore
2594 * cwnd limit to force at most one packet out when push_one == 2.
2595 *
2596 * Returns true if no segments are in flight and we have queued segments,
2597 * but cannot send anything now because of SWS or another problem.
2598 */
2599static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2600			   int push_one, gfp_t gfp)
2601{
2602	struct tcp_sock *tp = tcp_sk(sk);
2603	struct sk_buff *skb;
2604	unsigned int tso_segs, sent_pkts;
2605	int cwnd_quota;
2606	int result;
2607	bool is_cwnd_limited = false, is_rwnd_limited = false;
2608	u32 max_segs;
2609
2610	sent_pkts = 0;
2611
2612	tcp_mstamp_refresh(tp);
2613	if (!push_one) {
2614		/* Do MTU probing. */
2615		result = tcp_mtu_probe(sk);
2616		if (!result) {
2617			return false;
2618		} else if (result > 0) {
2619			sent_pkts = 1;
2620		}
2621	}
2622
2623	max_segs = tcp_tso_segs(sk, mss_now);
2624	while ((skb = tcp_send_head(sk))) {
2625		unsigned int limit;
2626
2627		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
2628			/* "skb_mstamp_ns" is used as a start point for the retransmit timer */
2629			skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
2630			list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
2631			tcp_init_tso_segs(skb, mss_now);
2632			goto repair; /* Skip network transmission */
2633		}
2634
2635		if (tcp_pacing_check(sk))
2636			break;
2637
2638		tso_segs = tcp_init_tso_segs(skb, mss_now);
2639		BUG_ON(!tso_segs);
2640
2641		cwnd_quota = tcp_cwnd_test(tp, skb);
2642		if (!cwnd_quota) {
2643			if (push_one == 2)
2644				/* Force out a loss probe pkt. */
2645				cwnd_quota = 1;
2646			else
2647				break;
2648		}
2649
2650		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
2651			is_rwnd_limited = true;
2652			break;
2653		}
2654
2655		if (tso_segs == 1) {
2656			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2657						     (tcp_skb_is_last(sk, skb) ?
2658						      nonagle : TCP_NAGLE_PUSH))))
2659				break;
2660		} else {
2661			if (!push_one &&
2662			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
2663						 &is_rwnd_limited, max_segs))
2664				break;
2665		}
2666
2667		limit = mss_now;
2668		if (tso_segs > 1 && !tcp_urg_mode(tp))
2669			limit = tcp_mss_split_point(sk, skb, mss_now,
2670						    min_t(unsigned int,
2671							  cwnd_quota,
2672							  max_segs),
2673						    nonagle);
2674
2675		if (skb->len > limit &&
2676		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
2677			break;
2678
2679		if (tcp_small_queue_check(sk, skb, 0))
2680			break;
2681
2682		/* Argh, we hit an empty skb(), presumably a thread
2683		 * is sleeping in sendmsg()/sk_stream_wait_memory().
2684		 * We do not want to send a pure-ack packet and have
2685		 * a strange looking rtx queue with empty packet(s).
2686		 */
2687		if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq)
2688			break;
2689
2690		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
2691			break;
2692
2693repair:
2694		/* Advance the send_head.  This one is sent out.
2695		 * This call will increment packets_out.
2696		 */
2697		tcp_event_new_data_sent(sk, skb);
2698
2699		tcp_minshall_update(tp, mss_now, skb);
2700		sent_pkts += tcp_skb_pcount(skb);
2701
2702		if (push_one)
2703			break;
2704	}
2705
2706	if (is_rwnd_limited)
2707		tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
2708	else
2709		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
2710
2711	is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
2712	if (likely(sent_pkts || is_cwnd_limited))
2713		tcp_cwnd_validate(sk, is_cwnd_limited);
2714
2715	if (likely(sent_pkts)) {
2716		if (tcp_in_cwnd_reduction(sk))
2717			tp->prr_out += sent_pkts;
2718
2719		/* Send one loss probe per tail loss episode. */
2720		if (push_one != 2)
2721			tcp_schedule_loss_probe(sk, false);
2722		return false;
2723	}
2724	return !tp->packets_out && !tcp_write_queue_empty(sk);
2725}
2726
2727bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
2728{
2729	struct inet_connection_sock *icsk = inet_csk(sk);
2730	struct tcp_sock *tp = tcp_sk(sk);
2731	u32 timeout, rto_delta_us;
2732	int early_retrans;
2733
2734	/* Don't do any loss probe on a Fast Open connection before 3WHS
2735	 * finishes.
2736	 */
2737	if (rcu_access_pointer(tp->fastopen_rsk))
2738		return false;
2739
2740	early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
2741	/* Schedule a loss probe in 2*RTT for SACK capable connections
2742	 * not in loss recovery, that are either limited by cwnd or application.
2743	 */
2744	if ((early_retrans != 3 && early_retrans != 4) ||
2745	    !tp->packets_out || !tcp_is_sack(tp) ||
2746	    (icsk->icsk_ca_state != TCP_CA_Open &&
2747	     icsk->icsk_ca_state != TCP_CA_CWR))
2748		return false;
2749
2750	/* Probe timeout is 2*rtt. Add minimum RTO to account
2751	 * for delayed ack when there's one outstanding packet. If no RTT
2752	 * sample is available then probe after TCP_TIMEOUT_INIT.
2753	 */
2754	if (tp->srtt_us) {
2755		timeout = usecs_to_jiffies(tp->srtt_us >> 2);
2756		if (tp->packets_out == 1)
2757			timeout += TCP_RTO_MIN;
2758		else
2759			timeout += TCP_TIMEOUT_MIN;
2760	} else {
2761		timeout = TCP_TIMEOUT_INIT;
2762	}
2763
2764	/* If the RTO formula yields an earlier time, then use that time. */
2765	rto_delta_us = advancing_rto ?
2766			jiffies_to_usecs(inet_csk(sk)->icsk_rto) :
2767			tcp_rto_delta_us(sk);  /* How far in future is RTO? */
2768	if (rto_delta_us > 0)
2769		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
2770
2771	tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, TCP_RTO_MAX);
2772	return true;
2773}
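
/* A worked standalone example of the PTO above: tp->srtt_us stores
 * 8 * srtt in microseconds, so (srtt_us >> 2) is 2 * srtt. With a
 * 50 ms srtt and exactly one packet outstanding, the probe fires
 * after 100 ms + TCP_RTO_MIN (200 ms at HZ=1000). Jiffies are
 * simplified to 1 ms; illustrative userspace C only.
 */
#include <stdio.h>

int main(void)
{
	unsigned int srtt_us = 8 * 50000;	/* stored as 8 * srtt */
	unsigned int rto_min_ms = 200;		/* TCP_RTO_MIN, HZ=1000 */
	unsigned int timeout_ms = (srtt_us >> 2) / 1000;	/* 2 * srtt */

	timeout_ms += rto_min_ms;		/* one outstanding packet */
	printf("PTO = %u ms\n", timeout_ms);	/* 300 */
	return 0;
}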
2774
2775/* Thanks to skb fast clones, we can detect if a prior transmit of
2776 * a packet is still in a qdisc or driver queue.
2777 * In this case, there is very little point doing a retransmit !
2778 */
2779static bool skb_still_in_host_queue(struct sock *sk,
2780				    const struct sk_buff *skb)
2781{
2782	if (unlikely(skb_fclone_busy(sk, skb))) {
2783		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2784		smp_mb__after_atomic();
2785		if (skb_fclone_busy(sk, skb)) {
2786			NET_INC_STATS(sock_net(sk),
2787				      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
2788			return true;
2789		}
2790	}
2791	return false;
2792}
2793
2794/* When probe timeout (PTO) fires, try send a new segment if possible, else
2795 * retransmit the last segment.
2796 */
2797void tcp_send_loss_probe(struct sock *sk)
2798{
2799	struct tcp_sock *tp = tcp_sk(sk);
2800	struct sk_buff *skb;
2801	int pcount;
2802	int mss = tcp_current_mss(sk);
2803
2804	/* At most one outstanding TLP */
2805	if (tp->tlp_high_seq)
2806		goto rearm_timer;
2807
2808	tp->tlp_retrans = 0;
2809	skb = tcp_send_head(sk);
2810	if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
2811		pcount = tp->packets_out;
2812		tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2813		if (tp->packets_out > pcount)
2814			goto probe_sent;
2815		goto rearm_timer;
2816	}
2817	skb = skb_rb_last(&sk->tcp_rtx_queue);
2818	if (unlikely(!skb)) {
2819		WARN_ONCE(tp->packets_out,
2820			  "invalid inflight: %u state %u cwnd %u mss %d\n",
2821			  tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
2822		inet_csk(sk)->icsk_pending = 0;
2823		return;
2824	}
2825
2826	if (skb_still_in_host_queue(sk, skb))
2827		goto rearm_timer;
2828
2829	pcount = tcp_skb_pcount(skb);
2830	if (WARN_ON(!pcount))
2831		goto rearm_timer;
2832
2833	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
2834		if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
2835					  (pcount - 1) * mss, mss,
2836					  GFP_ATOMIC)))
2837			goto rearm_timer;
2838		skb = skb_rb_next(skb);
2839	}
2840
2841	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
2842		goto rearm_timer;
2843
2844	if (__tcp_retransmit_skb(sk, skb, 1))
2845		goto rearm_timer;
2846
2847	tp->tlp_retrans = 1;
2848
2849probe_sent:
2850	/* Record snd_nxt for loss detection. */
2851	tp->tlp_high_seq = tp->snd_nxt;
2852
2853	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
2854	/* Reset s.t. tcp_rearm_rto will restart timer from now */
2855	inet_csk(sk)->icsk_pending = 0;
2856rearm_timer:
2857	tcp_rearm_rto(sk);
2858}
2859
2860/* Push out any pending frames which were held back due to
2861 * TCP_CORK or attempt at coalescing tiny packets.
2862 * The socket must be locked by the caller.
2863 */
2864void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
2865			       int nonagle)
2866{
2867	/* If we are closed, the bytes will have to remain here.
2868	 * In time closedown will finish, we empty the write queue and
2869	 * all will be happy.
2870	 */
2871	if (unlikely(sk->sk_state == TCP_CLOSE))
2872		return;
2873
2874	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
2875			   sk_gfp_mask(sk, GFP_ATOMIC)))
2876		tcp_check_probe_timer(sk);
2877}
2878
2879/* Send _single_ skb sitting at the send head. This function requires
2880 * true push pending frames to setup probe timer etc.
2881 */
2882void tcp_push_one(struct sock *sk, unsigned int mss_now)
2883{
2884	struct sk_buff *skb = tcp_send_head(sk);
2885
2886	BUG_ON(!skb || skb->len < mss_now);
2887
2888	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2889}
2890
2891/* This function returns the amount that we can raise the
2892 * usable window based on the following constraints
2893 *
2894 * 1. The window can never be shrunk once it is offered (RFC 793)
2895 * 2. We limit memory per socket
2896 *
2897 * RFC 1122:
2898 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
2899 *  RECV.NEXT + RCV.WIN fixed until:
2900 *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
2901 *
2902 * i.e. don't raise the right edge of the window until you can raise
2903 * it at least MSS bytes.
2904 *
2905 * Unfortunately, the recommended algorithm breaks header prediction,
2906 * since header prediction assumes th->window stays fixed.
2907 *
2908 * Strictly speaking, keeping th->window fixed violates the receiver
2909 * side SWS prevention criteria. The problem is that under this rule
2910 * a stream of single byte packets will cause the right side of the
2911 * window to always advance by a single byte.
2912 *
2913 * Of course, if the sender implements sender side SWS prevention
2914 * then this will not be a problem.
2915 *
2916 * BSD seems to make the following compromise:
2917 *
2918 *	If the free space is less than the 1/4 of the maximum
2919 *	space available and the free space is less than 1/2 mss,
2920 *	then set the window to 0.
2921 *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
2922 *	Otherwise, just prevent the window from shrinking
2923 *	and from being larger than the largest representable value.
2924 *
2925 * This prevents incremental opening of the window in the regime
2926 * where TCP is limited by the speed of the reader side taking
2927 * data out of the TCP receive queue. It does nothing about
2928 * those cases where the window is constrained on the sender side
2929 * because the pipeline is full.
2930 *
2931 * BSD also seems to "accidentally" limit itself to windows that are a
2932 * multiple of MSS, at least until the free space gets quite small.
2933 * This would appear to be a side effect of the mbuf implementation.
2934 * Combining these two algorithms results in the observed behavior
2935 * of having a fixed window size at almost all times.
2936 *
2937 * Below we obtain similar behavior by forcing the offered window to
2938 * a multiple of the mss when it is feasible to do so.
2939 *
2940 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
2941 * Regular options like TIMESTAMP are taken into account.
2942 */
2943u32 __tcp_select_window(struct sock *sk)
2944{
2945	struct inet_connection_sock *icsk = inet_csk(sk);
2946	struct tcp_sock *tp = tcp_sk(sk);
2947	/* MSS for the peer's data.  Previous versions used mss_clamp
2948	 * here.  I don't know if the value based on our guesses
2949	 * of peer's MSS is better for the performance.  It's more correct
2950	 * but may be worse for the performance because of rcv_mss
2951	 * fluctuations.  --SAW  1998/11/1
2952	 */
2953	int mss = icsk->icsk_ack.rcv_mss;
2954	int free_space = tcp_space(sk);
2955	int allowed_space = tcp_full_space(sk);
2956	int full_space, window;
2957
2958	if (sk_is_mptcp(sk))
2959		mptcp_space(sk, &free_space, &allowed_space);
2960
2961	full_space = min_t(int, tp->window_clamp, allowed_space);
2962
2963	if (unlikely(mss > full_space)) {
2964		mss = full_space;
2965		if (mss <= 0)
2966			return 0;
2967	}
2968	if (free_space < (full_space >> 1)) {
2969		icsk->icsk_ack.quick = 0;
2970
2971		if (tcp_under_memory_pressure(sk))
2972			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2973					       4U * tp->advmss);
2974
2975		/* free_space might become our new window, make sure we don't
2976		 * increase it due to wscale.
2977		 */
2978		free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
2979
2980		/* if free space is less than mss estimate, or is below 1/16th
2981		 * of the maximum allowed, try to move to zero-window, else
2982		 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
2983		 * new incoming data is dropped due to memory limits.
2984		 * With large window, mss test triggers way too late in order
2985		 * to announce zero window in time before rmem limit kicks in.
2986		 */
2987		if (free_space < (allowed_space >> 4) || free_space < mss)
2988			return 0;
2989	}
2990
2991	if (free_space > tp->rcv_ssthresh)
2992		free_space = tp->rcv_ssthresh;
2993
2994	/* Don't do rounding if we are using window scaling, since the
2995	 * scaled window will not line up with the MSS boundary anyway.
2996	 */
2997	if (tp->rx_opt.rcv_wscale) {
2998		window = free_space;
2999
3000		/* Advertise enough space so that it won't get scaled away.
3001		 * Important case: prevent zero window announcement if
3002		 * 1<<rcv_wscale > mss.
3003		 */
3004		window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale));
3005	} else {
3006		window = tp->rcv_wnd;
3007		/* Get the largest window that is a nice multiple of mss.
3008		 * Window clamp already applied above.
3009		 * If our current window offering is within 1 mss of the
3010		 * free space we just keep it. This prevents the divide
3011		 * and multiply from happening most of the time.
3012		 * We also don't do any window rounding when the free space
3013		 * is too small.
3014		 */
3015		if (window <= free_space - mss || window > free_space)
3016			window = rounddown(free_space, mss);
3017		else if (mss == full_space &&
3018			 free_space > window + (full_space >> 1))
3019			window = free_space;
3020	}
3021
3022	return window;
3023}
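
/* A standalone sketch of the no-wscale rounding above: keep the
 * currently offered window when it is within one mss of free space,
 * otherwise round free space down to an mss multiple. Assumes
 * free_space >= mss; illustrative userspace C only.
 */
#include <stdio.h>

static unsigned int pick_window(unsigned int cur, unsigned int free_space,
				unsigned int mss)
{
	if (cur <= free_space - mss || cur > free_space)
		return free_space - (free_space % mss);	/* rounddown() */
	return cur;
}

int main(void)
{
	/* 20000 bytes free, mss 1460: a stale 12000 byte offer is
	 * re-rounded to 18980, while an offer of 19500 is simply kept.
	 */
	printf("%u\n", pick_window(12000, 20000, 1460));	/* 18980 */
	printf("%u\n", pick_window(19500, 20000, 1460));	/* 19500 */
	return 0;
}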
3024
3025void tcp_skb_collapse_tstamp(struct sk_buff *skb,
3026			     const struct sk_buff *next_skb)
3027{
3028	if (unlikely(tcp_has_tx_tstamp(next_skb))) {
3029		const struct skb_shared_info *next_shinfo =
3030			skb_shinfo(next_skb);
3031		struct skb_shared_info *shinfo = skb_shinfo(skb);
3032
3033		shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
3034		shinfo->tskey = next_shinfo->tskey;
3035		TCP_SKB_CB(skb)->txstamp_ack |=
3036			TCP_SKB_CB(next_skb)->txstamp_ack;
3037	}
3038}
3039
3040/* Collapses two adjacent SKB's during retransmission. */
3041static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
3042{
3043	struct tcp_sock *tp = tcp_sk(sk);
3044	struct sk_buff *next_skb = skb_rb_next(skb);
3045	int next_skb_size;
3046
3047	next_skb_size = next_skb->len;
3048
3049	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
3050
3051	if (next_skb_size) {
3052		if (next_skb_size <= skb_availroom(skb))
3053			skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size),
3054				      next_skb_size);
3055		else if (!tcp_skb_shift(skb, next_skb, 1, next_skb_size))
3056			return false;
3057	}
3058	tcp_highest_sack_replace(sk, next_skb, skb);
3059
3060	/* Update sequence range on original skb. */
3061	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
3062
3063	/* Merge over control information. This moves PSH/FIN etc. over */
3064	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
3065
3066	/* All done, get rid of second SKB and account for it so
3067	 * packet counting does not break.
3068	 */
3069	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
3070	TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
3071
3072	/* changed transmit queue under us so clear hints */
3073	tcp_clear_retrans_hints_partial(tp);
3074	if (next_skb == tp->retransmit_skb_hint)
3075		tp->retransmit_skb_hint = skb;
3076
3077	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
3078
3079	tcp_skb_collapse_tstamp(skb, next_skb);
3080
3081	tcp_rtx_queue_unlink_and_free(next_skb, sk);
3082	return true;
3083}
3084
3085/* Check if coalescing SKBs is legal. */
3086static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
3087{
3088	if (tcp_skb_pcount(skb) > 1)
3089		return false;
3090	if (skb_cloned(skb))
3091		return false;
3092	/* Some heuristics for collapsing over SACK'd could be invented */
3093	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
3094		return false;
3095
3096	return true;
3097}
3098
3099/* Collapse packets in the retransmit queue to create
3100 * fewer packets on the wire. This is only done on retransmission.
3101 */
3102static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
3103				     int space)
3104{
3105	struct tcp_sock *tp = tcp_sk(sk);
3106	struct sk_buff *skb = to, *tmp;
3107	bool first = true;
3108
3109	if (!sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)
3110		return;
3111	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
3112		return;
3113
3114	skb_rbtree_walk_from_safe(skb, tmp) {
3115		if (!tcp_can_collapse(sk, skb))
3116			break;
3117
3118		if (!tcp_skb_can_collapse(to, skb))
3119			break;
3120
3121		space -= skb->len;
3122
3123		if (first) {
3124			first = false;
3125			continue;
3126		}
3127
3128		if (space < 0)
3129			break;
3130
3131		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
3132			break;
3133
3134		if (!tcp_collapse_retrans(sk, to))
3135			break;
3136	}
3137}
3138
3139/* This retransmits one SKB.  Policy decisions and retransmit queue
3140 * state updates are done by the caller.  Returns non-zero if an
3141 * error occurred which prevented the send.
3142 */
3143int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
3144{
3145	struct inet_connection_sock *icsk = inet_csk(sk);
3146	struct tcp_sock *tp = tcp_sk(sk);
3147	unsigned int cur_mss;
3148	int diff, len, err;
3149
3150
3151	/* Inconclusive MTU probe */
3152	if (icsk->icsk_mtup.probe_size)
3153		icsk->icsk_mtup.probe_size = 0;
3154
3155	if (skb_still_in_host_queue(sk, skb))
3156		return -EBUSY;
3157
3158	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
3159		if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
3160			WARN_ON_ONCE(1);
3161			return -EINVAL;
3162		}
3163		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
3164			return -ENOMEM;
3165	}
3166
3167	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
3168		return -EHOSTUNREACH; /* Routing failure or similar. */
3169
3170	cur_mss = tcp_current_mss(sk);
3171
3172	/* If receiver has shrunk his window, and skb is out of
3173	 * new window, do not retransmit it. The exception is the
3174	 * case, when window is shrunk to zero. In this case
3175	 * our retransmit serves as a zero window probe.
3176	 */
3177	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
3178	    TCP_SKB_CB(skb)->seq != tp->snd_una)
3179		return -EAGAIN;
3180
3181	len = cur_mss * segs;
3182	if (skb->len > len) {
3183		if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
3184				 cur_mss, GFP_ATOMIC))
3185			return -ENOMEM; /* We'll try again later. */
3186	} else {
3187		if (skb_unclone(skb, GFP_ATOMIC))
3188			return -ENOMEM;
3189
3190		diff = tcp_skb_pcount(skb);
3191		tcp_set_skb_tso_segs(skb, cur_mss);
3192		diff -= tcp_skb_pcount(skb);
3193		if (diff)
3194			tcp_adjust_pcount(sk, skb, diff);
3195		if (skb->len < cur_mss)
3196			tcp_retrans_try_collapse(sk, skb, cur_mss);
3197	}
3198
3199	/* RFC3168, section 6.1.1.1. ECN fallback */
3200	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
3201		tcp_ecn_clear_syn(sk, skb);
3202
3203	/* Update global and local TCP statistics. */
3204	segs = tcp_skb_pcount(skb);
3205	TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
3206	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
3207		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
3208	tp->total_retrans += segs;
3209	tp->bytes_retrans += skb->len;
3210
3211	/* make sure skb->data is aligned on arches that require it
3212	 * and check if ack-trimming & collapsing extended the headroom
3213	 * beyond what csum_start can cover.
3214	 */
3215	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
3216		     skb_headroom(skb) >= 0xFFFF)) {
3217		struct sk_buff *nskb;
3218
3219		tcp_skb_tsorted_save(skb) {
3220			nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
3221			if (nskb) {
3222				nskb->dev = NULL;
3223				err = tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC);
3224			} else {
3225				err = -ENOBUFS;
3226			}
3227		} tcp_skb_tsorted_restore(skb);
3228
3229		if (!err) {
3230			tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns);
3231			tcp_rate_skb_sent(sk, skb);
3232		}
3233	} else {
3234		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3235	}
3236
3237	/* To avoid taking spuriously low RTT samples based on a timestamp
3238	 * for a transmit that never happened, always mark EVER_RETRANS
3239	 */
3240	TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
3241
3242	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG))
3243		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
3244				  TCP_SKB_CB(skb)->seq, segs, err);
3245
3246	if (likely(!err)) {
3247		trace_tcp_retransmit_skb(sk, skb);
3248	} else if (err != -EBUSY) {
3249		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
3250	}
3251	return err;
3252}
3253
3254int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
3255{
3256	struct tcp_sock *tp = tcp_sk(sk);
3257	int err = __tcp_retransmit_skb(sk, skb, segs);
3258
3259	if (err == 0) {
3260#if FASTRETRANS_DEBUG > 0
3261		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
3262			net_dbg_ratelimited("retrans_out leaked\n");
3263		}
3264#endif
3265		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
3266		tp->retrans_out += tcp_skb_pcount(skb);
3267	}
3268
3269	/* Save stamp of the first (attempted) retransmit. */
3270	if (!tp->retrans_stamp)
3271		tp->retrans_stamp = tcp_skb_timestamp(skb);
3272
3273	if (tp->undo_retrans < 0)
3274		tp->undo_retrans = 0;
3275	tp->undo_retrans += tcp_skb_pcount(skb);
3276	return err;
3277}
3278
3279/* This gets called after a retransmit timeout, and the initially
3280 * retransmitted data is acknowledged.  It tries to continue
3281 * resending the rest of the retransmit queue, until either
3282 * we've sent it all or the congestion window limit is reached.
3283 */
3284void tcp_xmit_retransmit_queue(struct sock *sk)
3285{
3286	const struct inet_connection_sock *icsk = inet_csk(sk);
3287	struct sk_buff *skb, *rtx_head, *hole = NULL;
3288	struct tcp_sock *tp = tcp_sk(sk);
3289	bool rearm_timer = false;
3290	u32 max_segs;
3291	int mib_idx;
3292
3293	if (!tp->packets_out)
3294		return;
3295
3296	rtx_head = tcp_rtx_queue_head(sk);
3297	skb = tp->retransmit_skb_hint ?: rtx_head;
3298	max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
3299	skb_rbtree_walk_from(skb) {
3300		__u8 sacked;
3301		int segs;
3302
3303		if (tcp_pacing_check(sk))
3304			break;
3305
3306		/* we could do better than to assign each time */
3307		if (!hole)
3308			tp->retransmit_skb_hint = skb;
3309
3310		segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
3311		if (segs <= 0)
3312			break;
3313		sacked = TCP_SKB_CB(skb)->sacked;
3314		/* In case tcp_shift_skb_data() has aggregated large skbs,
3315		 * we need to make sure we are not sending overly big TSO packets
3316		 */
3317		segs = min_t(int, segs, max_segs);
3318
3319		if (tp->retrans_out >= tp->lost_out) {
3320			break;
3321		} else if (!(sacked & TCPCB_LOST)) {
3322			if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
3323				hole = skb;
3324			continue;
3325
3326		} else {
3327			if (icsk->icsk_ca_state != TCP_CA_Loss)
3328				mib_idx = LINUX_MIB_TCPFASTRETRANS;
3329			else
3330				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
3331		}
3332
3333		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
3334			continue;
3335
3336		if (tcp_small_queue_check(sk, skb, 1))
3337			break;
3338
3339		if (tcp_retransmit_skb(sk, skb, segs))
3340			break;
3341
3342		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
3343
3344		if (tcp_in_cwnd_reduction(sk))
3345			tp->prr_out += tcp_skb_pcount(skb);
3346
3347		if (skb == rtx_head &&
3348		    icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
3349			rearm_timer = true;
3350
3351	}
3352	if (rearm_timer)
3353		tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
3354				     inet_csk(sk)->icsk_rto,
3355				     TCP_RTO_MAX);
3356}
3357
3358/* We allow FIN packets to exceed memory limits to expedite
3359 * connection teardown and (memory) recovery.
3360 * Otherwise tcp_send_fin() could be tempted to either delay FIN
3361 * or even be forced to close flow without any FIN.
3362 * In general, we want to allow one skb per socket to avoid hangs
3363 * with edge trigger epoll()
3364 */
3365void sk_forced_mem_schedule(struct sock *sk, int size)
3366{
3367	int amt;
3368
3369	if (size <= sk->sk_forward_alloc)
3370		return;
3371	amt = sk_mem_pages(size);
3372	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
3373	sk_memory_allocated_add(sk, amt);
3374
3375	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3376		mem_cgroup_charge_skmem(sk->sk_memcg, amt);
3377}
3378
3379/* Send a FIN. The caller locks the socket for us.
3380 * We should try to send a FIN packet really hard, but eventually give up.
3381 */
3382void tcp_send_fin(struct sock *sk)
3383{
3384	struct sk_buff *skb, *tskb, *tail = tcp_write_queue_tail(sk);
3385	struct tcp_sock *tp = tcp_sk(sk);
3386
3387	/* Optimization: tack the FIN onto the tail skb if the write queue
3388	 * holds an skb that was not yet sent, or if we are under memory
3389	 * pressure.  Note: in the latter case, the FIN packet will be sent
3390	 * after a timeout, as the TCP stack thinks it has already been sent.
3391	 */
3392	tskb = tail;
3393	if (!tskb && tcp_under_memory_pressure(sk))
3394		tskb = skb_rb_last(&sk->tcp_rtx_queue);
3395
3396	if (tskb) {
3397		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
3398		TCP_SKB_CB(tskb)->end_seq++;
3399		tp->write_seq++;
3400		if (!tail) {
3401			/* This means tskb was already sent.
3402		 * Pretend we included the FIN on the previous transmit.
3403		 * We need to set tp->snd_nxt to the value it would have
3404		 * if the FIN had been sent, because the retransmit path
3405		 * does not change tp->snd_nxt.
3406			 */
3407			WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1);
3408			return;
3409		}
3410	} else {
3411		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
3412		if (unlikely(!skb))
3413			return;
3414
3415		INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
3416		skb_reserve(skb, MAX_TCP_HEADER);
3417		sk_forced_mem_schedule(sk, skb->truesize);
3418		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
3419		tcp_init_nondata_skb(skb, tp->write_seq,
3420				     TCPHDR_ACK | TCPHDR_FIN);
3421		tcp_queue_skb(sk, skb);
3422	}
3423	__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
3424}
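/* Two paths above: either the FIN is piggybacked on an existing tail
 * skb (a FIN consumes one sequence number, hence the end_seq and
 * write_seq increments), or a fresh skb is force-charged, queued and
 * pushed like ordinary data.
 */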
3425
3426/* We get here when a process closes a file descriptor (either due to
3427 * an explicit close() or as a byproduct of exit()'ing) and there
3428 * was unread data in the receive queue.  This behavior is recommended
3429 * by RFC 2525, section 2.17.  -DaveM
3430 */
3431void tcp_send_active_reset(struct sock *sk, gfp_t priority)
3432{
3433	struct sk_buff *skb;
3434
3435	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
3436
3437	/* NOTE: No TCP options attached and we never retransmit this. */
3438	skb = alloc_skb(MAX_TCP_HEADER, priority);
3439	if (!skb) {
3440		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3441		return;
3442	}
3443
3444	/* Reserve space for headers and prepare control bits. */
3445	skb_reserve(skb, MAX_TCP_HEADER);
3446	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
3447			     TCPHDR_ACK | TCPHDR_RST);
3448	tcp_mstamp_refresh(tcp_sk(sk));
3449	/* Send it off. */
3450	if (tcp_transmit_skb(sk, skb, 0, priority))
3451		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3452
3453	/* The skb argument of trace_tcp_send_reset() carries the skb that
3454	 * caused the RST; the skb built here is not that one, so pass NULL.
3455	 */
3456	trace_tcp_send_reset(sk, NULL);
3457}
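/* The RST above is stamped with tcp_acceptable_seq(), i.e. a sequence
 * the peer should accept, and it is never queued or retransmitted; an
 * allocation or transmit failure merely bumps LINUX_MIB_TCPABORTFAILED.
 */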
3458
3459/* Send a crossed SYN-ACK during socket establishment.
3460 * WARNING: This routine must only be called when we have already sent
3461 * a SYN packet that crossed the incoming SYN that caused this routine
3462 * to get called. If this assumption fails then the initial rcv_wnd
3463 * and rcv_wscale values will not be correct.
3464 */
3465int tcp_send_synack(struct sock *sk)
3466{
3467	struct sk_buff *skb;
3468
3469	skb = tcp_rtx_queue_head(sk);
3470	if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
3471		pr_err("%s: wrong queue state\n", __func__);
3472		return -EFAULT;
3473	}
3474	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
3475		if (skb_cloned(skb)) {
3476			struct sk_buff *nskb;
3477
3478			tcp_skb_tsorted_save(skb) {
3479				nskb = skb_copy(skb, GFP_ATOMIC);
3480			} tcp_skb_tsorted_restore(skb);
3481			if (!nskb)
3482				return -ENOMEM;
3483			INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
3484			tcp_highest_sack_replace(sk, skb, nskb);
3485			tcp_rtx_queue_unlink_and_free(skb, sk);
3486			__skb_header_release(nskb);
3487			tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
3488			sk_wmem_queued_add(sk, nskb->truesize);
3489			sk_mem_charge(sk, nskb->truesize);
3490			skb = nskb;
3491		}
3492
3493		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
3494		tcp_ecn_send_synack(sk, skb);
3495	}
3496	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3497}
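/* The skb_copy() dance above un-clones a shared SYN so its flags can
 * be rewritten with the ACK bit: tcp_skb_tsorted_save() and
 * tcp_highest_sack_replace() keep rtx-queue bookkeeping consistent
 * while the old skb is unlinked and the fresh copy inserted.
 */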
3498
3499/**
3500 * tcp_make_synack - Allocate one skb and build a SYNACK packet.
3501 * @sk: listener socket
3502 * @dst: dst entry attached to the SYNACK. It is consumed and caller
3503 *       should not use it again.
3504 * @req: request_sock pointer
3505 * @foc: cookie for tcp fast open
3506 * @synack_type: Type of synack to prepare
3507 * @syn_skb: SYN packet just received.  It could be NULL for rtx case.
3508 */
3509struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
3510				struct request_sock *req,
3511				struct tcp_fastopen_cookie *foc,
3512				enum tcp_synack_type synack_type,
3513				struct sk_buff *syn_skb)
3514{
3515	struct inet_request_sock *ireq = inet_rsk(req);
3516	const struct tcp_sock *tp = tcp_sk(sk);
3517	struct tcp_md5sig_key *md5 = NULL;
3518	struct tcp_out_options opts;
3519	struct sk_buff *skb;
3520	int tcp_header_size;
3521	struct tcphdr *th;
3522	int mss;
3523	u64 now;
3524
3525	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
3526	if (unlikely(!skb)) {
3527		dst_release(dst);
3528		return NULL;
3529	}
3530	/* Reserve space for headers. */
3531	skb_reserve(skb, MAX_TCP_HEADER);
3532
3533	switch (synack_type) {
3534	case TCP_SYNACK_NORMAL:
3535		skb_set_owner_w(skb, req_to_sk(req));
3536		break;
3537	case TCP_SYNACK_COOKIE:
3538		/* Under synflood, we do not attach skb to a socket,
3539		 * to avoid false sharing.
3540		 */
3541		break;
3542	case TCP_SYNACK_FASTOPEN:
3543		/* sk is a const pointer, to express that multiple
3544		 * CPUs might call us concurrently.
3545		 * sk->sk_wmem_alloc is an atomic, so casting to rw is safe.
3546		 */
3547		skb_set_owner_w(skb, (struct sock *)sk);
3548		break;
3549	}
3550	skb_dst_set(skb, dst);
3551
3552	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3553
3554	memset(&opts, 0, sizeof(opts));
3555	now = tcp_clock_ns();
3556#ifdef CONFIG_SYN_COOKIES
3557	if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok))
3558		skb->skb_mstamp_ns = cookie_init_timestamp(req, now);
3559	else
3560#endif
3561	{
3562		skb->skb_mstamp_ns = now;
3563		if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */
3564			tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
3565	}
3566
3567#ifdef CONFIG_TCP_MD5SIG
3568	rcu_read_lock();
3569	md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
3570#endif
3571	skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
3572	/* bpf program will be interested in the tcp_flags */
3573	TCP_SKB_CB(skb)->tcp_flags = TCPHDR_SYN | TCPHDR_ACK;
3574	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
3575					     foc, synack_type,
3576					     syn_skb) + sizeof(*th);
3577
3578	skb_push(skb, tcp_header_size);
3579	skb_reset_transport_header(skb);
3580
3581	th = (struct tcphdr *)skb->data;
3582	memset(th, 0, sizeof(struct tcphdr));
3583	th->syn = 1;
3584	th->ack = 1;
3585	tcp_ecn_make_synack(req, th);
3586	th->source = htons(ireq->ir_num);
3587	th->dest = ireq->ir_rmt_port;
3588	skb->mark = ireq->ir_mark;
3589	skb->ip_summed = CHECKSUM_PARTIAL;
3590	th->seq = htonl(tcp_rsk(req)->snt_isn);
3591	/* XXX data is queued and acked as is. No buffer/window check */
3592	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
3593
3594	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
3595	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
3596	tcp_options_write((__be32 *)(th + 1), NULL, &opts);
3597	th->doff = (tcp_header_size >> 2);
3598	__TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
3599
3600#ifdef CONFIG_TCP_MD5SIG
3601	/* Okay, we have all we need - do the md5 hash if needed */
3602	if (md5)
3603		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
3604					       md5, req_to_sk(req), skb);
3605	rcu_read_unlock();
3606#endif
3607
3608	bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb,
3609				synack_type, &opts);
3610
3611	skb->skb_mstamp_ns = now;
3612	tcp_add_tx_delay(skb, tp);
3613
3614	return skb;
3615}
3616EXPORT_SYMBOL(tcp_make_synack);
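/* Build order in tcp_make_synack() matters: option sizes are computed
 * first, the header is pushed and filled, and the MD5 hash is written
 * last since it must cover the finished header.  Per RFC 1323 the
 * window in a SYN-ACK is never scaled, hence the 65535 clamp above.
 */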
3617
3618static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
3619{
3620	struct inet_connection_sock *icsk = inet_csk(sk);
3621	const struct tcp_congestion_ops *ca;
3622	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
3623
3624	if (ca_key == TCP_CA_UNSPEC)
3625		return;
3626
3627	rcu_read_lock();
3628	ca = tcp_ca_find_key(ca_key);
3629	if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
3630		bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner);
3631		icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
3632		icsk->icsk_ca_ops = ca;
3633	}
3634	rcu_read_unlock();
3635}
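/* A route can pin a congestion control algorithm through the
 * RTAX_CC_ALGO metric; the swap above runs under rcu_read_lock() and
 * only drops the old ops once a reference on the new module is held.
 */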
3636
3637/* Do all connect socket setups that can be done AF independent. */
3638static void tcp_connect_init(struct sock *sk)
3639{
3640	const struct dst_entry *dst = __sk_dst_get(sk);
3641	struct tcp_sock *tp = tcp_sk(sk);
3642	__u8 rcv_wscale;
3643	u32 rcv_wnd;
3644
3645	/* We'll fix this up when we get a response from the other end.
3646	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
3647	 */
3648	tp->tcp_header_len = sizeof(struct tcphdr);
3649	if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
3650		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
3651
3652#ifdef CONFIG_TCP_MD5SIG
3653	if (tp->af_specific->md5_lookup(sk, sk))
3654		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
3655#endif
3656
3657	/* If the user set TCP_MAXSEG, record it as the MSS clamp */
3658	if (tp->rx_opt.user_mss)
3659		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
3660	tp->max_window = 0;
3661	tcp_mtup_init(sk);
3662	tcp_sync_mss(sk, dst_mtu(dst));
3663
3664	tcp_ca_dst_init(sk, dst);
3665
3666	if (!tp->window_clamp)
3667		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
3668	tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3669
3670	tcp_initialize_rcv_mss(sk);
3671
3672	/* Limit the window selection if the user enforces a smaller rx buffer */
3673	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
3674	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
3675		tp->window_clamp = tcp_full_space(sk);
3676
3677	rcv_wnd = tcp_rwnd_init_bpf(sk);
3678	if (rcv_wnd == 0)
3679		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
3680
3681	tcp_select_initial_window(sk, tcp_full_space(sk),
3682				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
3683				  &tp->rcv_wnd,
3684				  &tp->window_clamp,
3685				  sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
3686				  &rcv_wscale,
3687				  rcv_wnd);
3688
3689	tp->rx_opt.rcv_wscale = rcv_wscale;
3690	tp->rcv_ssthresh = tp->rcv_wnd;
3691
3692	sk->sk_err = 0;
3693	sock_reset_flag(sk, SOCK_DONE);
3694	tp->snd_wnd = 0;
3695	tcp_init_wl(tp, 0);
3696	tcp_write_queue_purge(sk);
3697	tp->snd_una = tp->write_seq;
3698	tp->snd_sml = tp->write_seq;
3699	tp->snd_up = tp->write_seq;
3700	WRITE_ONCE(tp->snd_nxt, tp->write_seq);
3701
3702	if (likely(!tp->repair))
3703		tp->rcv_nxt = 0;
3704	else
3705		tp->rcv_tstamp = tcp_jiffies32;
3706	tp->rcv_wup = tp->rcv_nxt;
3707	WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
3708
3709	inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
3710	inet_csk(sk)->icsk_retransmits = 0;
3711	tcp_clear_retrans(tp);
3712}
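/* Sketch of the window-selection inputs above: when a timestamp was
 * seen (ts_recent_stamp), the mss passed to tcp_select_initial_window()
 * is advmss minus the timestamp option bytes
 * (tcp_header_len - sizeof(struct tcphdr)), so the offered window is
 * quantized to the usable payload per segment.
 */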
3713
3714static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
3715{
3716	struct tcp_sock *tp = tcp_sk(sk);
3717	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
3718
3719	tcb->end_seq += skb->len;
3720	__skb_header_release(skb);
3721	sk_wmem_queued_add(sk, skb->truesize);
3722	sk_mem_charge(sk, skb->truesize);
3723	WRITE_ONCE(tp->write_seq, tcb->end_seq);
3724	tp->packets_out += tcp_skb_pcount(skb);
3725}
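/* The helper above accounts the queued skb (truesize against wmem and
 * forward_alloc) and advances write_seq and packets_out for the SYN.
 */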
3726
3727/* Build and send a SYN with data and (cached) Fast Open cookie. However,
3728 * queue a data-only packet after the regular SYN, such that regular SYNs
3729 * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
3730 * only the SYN sequence, the data are retransmitted in the first ACK.
3731 * If the cookie is not cached, or another error occurs, fall back to
3732 * sending a regular SYN with a Fast Open cookie request option.
3733 */
3734static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3735{
3736	struct tcp_sock *tp = tcp_sk(sk);
3737	struct tcp_fastopen_request *fo = tp->fastopen_req;
3738	int space, err = 0;
3739	struct sk_buff *syn_data;
3740
3741	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
3742	if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
3743		goto fallback;
3744
3745	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
3746	 * user-MSS. Reserve maximum option space for middleboxes that add
3747	 * private TCP options. The cost is reduced data space in SYN :(
3748	 */
3749	tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
3750
3751	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
3752		MAX_TCP_OPTION_SPACE;
3753
3754	space = min_t(size_t, space, fo->size);
3755
3756	/* limit to order-0 allocations */
3757	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
3758
3759	syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false);
3760	if (!syn_data)
3761		goto fallback;
3762	syn_data->ip_summed = CHECKSUM_PARTIAL;
3763	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
3764	if (space) {
3765		int copied = copy_from_iter(skb_put(syn_data, space), space,
3766					    &fo->data->msg_iter);
3767		if (unlikely(!copied)) {
3768			tcp_skb_tsorted_anchor_cleanup(syn_data);
3769			kfree_skb(syn_data);
3770			goto fallback;
3771		}
3772		if (copied != space) {
3773			skb_trim(syn_data, copied);
3774			space = copied;
3775		}
3776		skb_zcopy_set(syn_data, fo->uarg, NULL);
3777	}
3778	/* No more data pending in inet_wait_for_connect() */
3779	if (space == fo->size)
3780		fo->data = NULL;
3781	fo->copied = space;
3782
3783	tcp_connect_queue_skb(sk, syn_data);
3784	if (syn_data->len)
3785		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
3786
3787	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
3788
3789	syn->skb_mstamp_ns = syn_data->skb_mstamp_ns;
3790
3791	/* Now that the full SYN+DATA has been cloned and sent (or not),
3792	 * remove the SYN from the original skb (syn_data)
3793	 * we keep in the write queue in case of a retransmit, as we
3794	 * also have the SYN packet (with no data) in the same queue.
3795	 */
3796	TCP_SKB_CB(syn_data)->seq++;
3797	TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
3798	if (!err) {
3799		tp->syn_data = (fo->copied > 0);
3800		tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data);
3801		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
3802		goto done;
3803	}
3804
3805	/* data was not sent, put it in write_queue */
3806	__skb_queue_tail(&sk->sk_write_queue, syn_data);
3807	tp->packets_out -= tcp_skb_pcount(syn_data);
3808
3809fallback:
3810	/* Send a regular SYN with Fast Open cookie request option */
3811	if (fo->cookie.len > 0)
3812		fo->cookie.len = 0;
3813	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
3814	if (err)
3815		tp->syn_fastopen = 0;
3816done:
3817	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
3818	return err;
3819}
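/* Payload budget sketch for the SYN-data above, assuming plain IPv4
 * with a 1500 byte path MTU: __tcp_mtu_to_mss() yields roughly 1460,
 * and reserving MAX_TCP_OPTION_SPACE (40 bytes) leaves about 1420
 * bytes, further clamped to fo->size and an order-0 allocation.
 */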
3820
3821/* Build a SYN and send it off. */
3822int tcp_connect(struct sock *sk)
3823{
3824	struct tcp_sock *tp = tcp_sk(sk);
3825	struct sk_buff *buff;
3826	int err;
3827
3828	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL);
3829
3830	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
3831		return -EHOSTUNREACH; /* Routing failure or similar. */
3832
3833	tcp_connect_init(sk);
3834
3835	if (unlikely(tp->repair)) {
3836		tcp_finish_connect(sk, NULL);
3837		return 0;
3838	}
3839
3840	buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
3841	if (unlikely(!buff))
3842		return -ENOBUFS;
3843
3844	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
3845	tcp_mstamp_refresh(tp);
3846	tp->retrans_stamp = tcp_time_stamp(tp);
3847	tcp_connect_queue_skb(sk, buff);
3848	tcp_ecn_send_syn(sk, buff);
3849	tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
3850
3851	/* Send off SYN; include data in Fast Open. */
3852	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
3853	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
3854	if (err == -ECONNREFUSED)
3855		return err;
3856
3857	/* We change tp->snd_nxt after the tcp_transmit_skb() call
3858	 * so that this packet gets counted in tcpOutSegs.
3859	 */
3860	WRITE_ONCE(tp->snd_nxt, tp->write_seq);
3861	tp->pushed_seq = tp->write_seq;
3862	buff = tcp_send_head(sk);
3863	if (unlikely(buff)) {
3864		WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);
3865		tp->pushed_seq	= TCP_SKB_CB(buff)->seq;
3866	}
3867	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
3868
3869	/* Timer for repeating the SYN until an answer. */
3870	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
3871				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
3872	return 0;
3873}
3874EXPORT_SYMBOL(tcp_connect);
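/* Active-open summary: tcp_connect() builds the SYN (with data when
 * Fast Open is requested), transmits it, and arms the retransmit
 * timer with the initial RTO so the SYN is retried until answered.
 */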
3875
3876/* Send out a delayed ACK; the caller does the policy checking
3877 * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
3878 * for details.
3879 */
3880void tcp_send_delayed_ack(struct sock *sk)
3881{
3882	struct inet_connection_sock *icsk = inet_csk(sk);
3883	int ato = icsk->icsk_ack.ato;
3884	unsigned long timeout;
3885
3886	if (ato > TCP_DELACK_MIN) {
3887		const struct tcp_sock *tp = tcp_sk(sk);
3888		int max_ato = HZ / 2;
3889
3890		if (inet_csk_in_pingpong_mode(sk) ||
3891		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
3892			max_ato = TCP_DELACK_MAX;
3893
3894		/* Slow path, intersegment interval is "high". */
3895
3896		/* If some RTT estimate is known, use it to bound the delayed ACK.
3897		 * Do not use inet_csk(sk)->icsk_rto here; use the results of RTT
3898		 * measurements directly.
3899		 */
3900		if (tp->srtt_us) {
3901			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
3902					TCP_DELACK_MIN);
3903
3904			if (rtt < max_ato)
3905				max_ato = rtt;
3906		}
3907
3908		ato = min(ato, max_ato);
3909	}
3910
3911	ato = min_t(u32, ato, inet_csk(sk)->icsk_delack_max);
3912
3913	/* Stay within the limit we were given */
3914	timeout = jiffies + ato;
3915
3916	/* Use the new timeout only if there wasn't an older one already. */
3917	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
3918		/* If delack timer is about to expire, send ACK now. */
3919		if (time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
3920			tcp_send_ack(sk);
3921			return;
3922		}
3923
3924		if (!time_before(timeout, icsk->icsk_ack.timeout))
3925			timeout = icsk->icsk_ack.timeout;
3926	}
3927	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3928	icsk->icsk_ack.timeout = timeout;
3929	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
3930}
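/* Worked example for the bounding above (illustrative numbers):
 * tp->srtt_us stores the smoothed RTT << 3, so srtt_us = 320000
 * means a 40 ms RTT; max_ato then drops to ~40 ms if that is below
 * the current bound, and ato is finally clamped by icsk_delack_max.
 */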
3931
3932/* This routine sends an ack and also updates the window. */
3933void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
3934{
3935	struct sk_buff *buff;
3936
3937	/* If we have been reset, we may not send again. */
3938	if (sk->sk_state == TCP_CLOSE)
3939		return;
3940
3941	/* We are not putting this on the write queue, so
3942	 * tcp_transmit_skb() will set the ownership to this
3943	 * sock.
3944	 */
3945	buff = alloc_skb(MAX_TCP_HEADER,
3946			 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
3947	if (unlikely(!buff)) {
3948		struct inet_connection_sock *icsk = inet_csk(sk);
3949		unsigned long delay;
3950
3951		delay = TCP_DELACK_MAX << icsk->icsk_ack.retry;
3952		if (delay < TCP_RTO_MAX)
3953			icsk->icsk_ack.retry++;
3954		inet_csk_schedule_ack(sk);
3955		icsk->icsk_ack.ato = TCP_ATO_MIN;
3956		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, delay, TCP_RTO_MAX);
3957		return;
3958	}
3959
3960	/* Reserve space for headers and prepare control bits. */
3961	skb_reserve(buff, MAX_TCP_HEADER);
3962	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
3963
3964	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
3965	 * too much.
3966	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
3967	 */
3968	skb_set_tcp_pure_ack(buff);
3969
3970	/* Send it off, this clears delayed acks for us. */
3971	__tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
3972}
3973EXPORT_SYMBOL_GPL(__tcp_send_ack);
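/* If the pure ACK above cannot be allocated, the delack timer is
 * re-armed instead with a delay of TCP_DELACK_MAX << retry, capped at
 * TCP_RTO_MAX, so the ACK is retried later under memory pressure.
 */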
3974
3975void tcp_send_ack(struct sock *sk)
3976{
3977	__tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
3978}
3979
3980/* This routine sends a packet with an out-of-date sequence
3981 * number. It assumes the other end will try to ack it.
3982 *
3983 * Question: what should we do in urgent mode?
3984 * 4.4BSD forces sending a single byte of data. We cannot send
3985 * out-of-window data, because we have SND.NXT==SND.MAX...
3986 *
3987 * Current solution: send TWO zero-length segments in urgent mode:
3988 * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, and another,
3989 * out-of-date one with SND.UNA-1 to probe the window.
3990 */
3991static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
3992{
3993	struct tcp_sock *tp = tcp_sk(sk);
3994	struct sk_buff *skb;
3995
3996	/* We don't queue it, tcp_transmit_skb() sets ownership. */
3997	skb = alloc_skb(MAX_TCP_HEADER,
3998			sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
3999	if (!skb)
4000		return -1;
4001
4002	/* Reserve space for headers and set control bits. */
4003	skb_reserve(skb, MAX_TCP_HEADER);
4004	/* Use a previous sequence.  This should cause the other
4005	 * end to send an ack.  Don't queue or clone SKB, just
4006	 * send it.
4007	 */
4008	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
4009	NET_INC_STATS(sock_net(sk), mib);
4010	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
4011}
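/* The probe above deliberately uses an already-acknowledged sequence:
 * snd_una - 1 for a plain window probe, snd_una for the urgent-mode
 * variant.  The peer cannot accept it as data and is expected to
 * answer with an ACK carrying its current window.
 */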
4012
4013/* Called from setsockopt( ... TCP_REPAIR ) */
4014void tcp_send_window_probe(struct sock *sk)
4015{
4016	if (sk->sk_state == TCP_ESTABLISHED) {
4017		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
4018		tcp_mstamp_refresh(tcp_sk(sk));
4019		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
4020	}
4021}
4022
4023/* Initiate keepalive or window probe from timer. */
4024int tcp_write_wakeup(struct sock *sk, int mib)
4025{
4026	struct tcp_sock *tp = tcp_sk(sk);
4027	struct sk_buff *skb;
4028
4029	if (sk->sk_state == TCP_CLOSE)
4030		return -1;
4031
4032	skb = tcp_send_head(sk);
4033	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
4034		int err;
4035		unsigned int mss = tcp_current_mss(sk);
4036		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
4037
4038		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
4039			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
4040
4041		/* We are probing the opening of a window
4042		 * but the window size is != 0;
4043		 * this must be a result of sender-side SWS avoidance.
4044		 */
4045		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
4046		    skb->len > mss) {
4047			seg_size = min(seg_size, mss);
4048			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
4049			if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
4050					 skb, seg_size, mss, GFP_ATOMIC))
4051				return -1;
4052		} else if (!tcp_skb_pcount(skb))
4053			tcp_set_skb_tso_segs(skb, mss);
4054
4055		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
4056		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
4057		if (!err)
4058			tcp_event_new_data_sent(sk, skb);
4059		return err;
4060	} else {
4061		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
4062			tcp_xmit_probe_skb(sk, 1, mib);
4063		return tcp_xmit_probe_skb(sk, 0, mib);
4064	}
4065}
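/* When only part of the head skb fits the peer's window, the code
 * above first fragments it down to seg_size (at most one MSS) so the
 * transmitted probe never overruns the advertised window.
 */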
4066
4067/* A window probe timeout has occurred.  If the window is not closed,
4068 * send a partial packet; otherwise send a zero-window probe.
4069 */
4070void tcp_send_probe0(struct sock *sk)
4071{
4072	struct inet_connection_sock *icsk = inet_csk(sk);
4073	struct tcp_sock *tp = tcp_sk(sk);
4074	struct net *net = sock_net(sk);
4075	unsigned long timeout;
4076	int err;
4077
4078	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
4079
4080	if (tp->packets_out || tcp_write_queue_empty(sk)) {
4081		/* Cancel probe timer, if it is not required. */
4082		icsk->icsk_probes_out = 0;
4083		icsk->icsk_backoff = 0;
4084		icsk->icsk_probes_tstamp = 0;
4085		return;
4086	}
4087
4088	icsk->icsk_probes_out++;
4089	if (err <= 0) {
4090		if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
4091			icsk->icsk_backoff++;
4092		timeout = tcp_probe0_when(sk, TCP_RTO_MAX);
4093	} else {
4094		/* If the packet was not sent due to local congestion,
4095		 * let senders fight for local resources conservatively.
4096		 */
4097		timeout = TCP_RESOURCE_PROBE_INTERVAL;
4098	}
4099
4100	timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout);
4101	tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX);
4102}
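/* Backoff sketch (assuming tcp_probe0_when() scales the base timeout
 * by << icsk_backoff): each failed probe roughly doubles the wait,
 * capped at TCP_RTO_MAX and then clamped to any user timeout by
 * tcp_clamp_probe0_to_user_timeout().
 */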
4103
4104int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
4105{
4106	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
4107	struct flowi fl;
4108	int res;
4109
4110	tcp_rsk(req)->txhash = net_tx_rndhash();
4111	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL,
4112				  NULL);
4113	if (!res) {
4114		__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
4115		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
4116		if (unlikely(tcp_passive_fastopen(sk)))
4117			tcp_sk(sk)->total_retrans++;
4118		trace_tcp_retransmit_synack(sk, req);
4119	}
4120	return res;
4121}
4122EXPORT_SYMBOL(tcp_rtx_synack);
v4.10.11
 
   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Implementation of the Transmission Control Protocol(TCP).
   7 *
   8 * Authors:	Ross Biro
   9 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  10 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
  11 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
  12 *		Florian La Roche, <flla@stud.uni-sb.de>
  13 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
  14 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
  15 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  16 *		Matthew Dillon, <dillon@apollo.west.oic.com>
  17 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  18 *		Jorge Cwik, <jorge@laser.satlink.net>
  19 */
  20
  21/*
  22 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
  23 *				:	Fragmentation on mtu decrease
  24 *				:	Segment collapse on retransmit
  25 *				:	AF independence
  26 *
  27 *		Linus Torvalds	:	send_delayed_ack
  28 *		David S. Miller	:	Charge memory using the right skb
  29 *					during syn/ack processing.
  30 *		David S. Miller :	Output engine completely rewritten.
  31 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
  32 *		Cacophonix Gaul :	draft-minshall-nagle-01
  33 *		J Hadi Salim	:	ECN support
  34 *
  35 */
  36
  37#define pr_fmt(fmt) "TCP: " fmt
  38
  39#include <net/tcp.h>
 
  40
  41#include <linux/compiler.h>
  42#include <linux/gfp.h>
  43#include <linux/module.h>
 
  44
  45/* People can turn this off for buggy TCP's found in printers etc. */
  46int sysctl_tcp_retrans_collapse __read_mostly = 1;
  47
  48/* People can turn this on to work with those rare, broken TCPs that
  49 * interpret the window field as a signed quantity.
  50 */
  51int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
 
 
  52
  53/* Default TSQ limit of four TSO segments */
  54int sysctl_tcp_limit_output_bytes __read_mostly = 262144;
  55
  56/* This limits the percentage of the congestion window which we
  57 * will allow a single TSO frame to consume.  Building TSO frames
  58 * which are too large can cause TCP streams to be bursty.
  59 */
  60int sysctl_tcp_tso_win_divisor __read_mostly = 3;
  61
  62/* By default, RFC2861 behavior.  */
  63int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
  64
  65static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
  66			   int push_one, gfp_t gfp);
  67
  68/* Account for new data that has been sent to the network. */
  69static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
  70{
  71	struct inet_connection_sock *icsk = inet_csk(sk);
  72	struct tcp_sock *tp = tcp_sk(sk);
  73	unsigned int prior_packets = tp->packets_out;
  74
  75	tcp_advance_send_head(sk, skb);
  76	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
 
 
 
 
 
  77
  78	tp->packets_out += tcp_skb_pcount(skb);
  79	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
  80	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
  81		tcp_rearm_rto(sk);
  82	}
  83
  84	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
  85		      tcp_skb_pcount(skb));
  86}
  87
  88/* SND.NXT, if window was not shrunk.
 
  89 * If window has been shrunk, what should we make? It is not clear at all.
  90 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
  91 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
  92 * invalid. OK, let's make this for now:
  93 */
  94static inline __u32 tcp_acceptable_seq(const struct sock *sk)
  95{
  96	const struct tcp_sock *tp = tcp_sk(sk);
  97
  98	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
 
 
  99		return tp->snd_nxt;
 100	else
 101		return tcp_wnd_end(tp);
 102}
 103
 104/* Calculate mss to advertise in SYN segment.
 105 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 106 *
 107 * 1. It is independent of path mtu.
 108 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 109 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 110 *    attached devices, because some buggy hosts are confused by
 111 *    large MSS.
 112 * 4. We do not make 3, we advertise MSS, calculated from first
 113 *    hop device mtu, but allow to raise it to ip_rt_min_advmss.
 114 *    This may be overridden via information stored in routing table.
 115 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 116 *    probably even Jumbo".
 117 */
 118static __u16 tcp_advertise_mss(struct sock *sk)
 119{
 120	struct tcp_sock *tp = tcp_sk(sk);
 121	const struct dst_entry *dst = __sk_dst_get(sk);
 122	int mss = tp->advmss;
 123
 124	if (dst) {
 125		unsigned int metric = dst_metric_advmss(dst);
 126
 127		if (metric < mss) {
 128			mss = metric;
 129			tp->advmss = mss;
 130		}
 131	}
 132
 133	return (__u16)mss;
 134}
 135
 136/* RFC2861. Reset CWND after idle period longer RTO to "restart window".
 137 * This is the first part of cwnd validation mechanism.
 138 */
 139void tcp_cwnd_restart(struct sock *sk, s32 delta)
 140{
 141	struct tcp_sock *tp = tcp_sk(sk);
 142	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
 143	u32 cwnd = tp->snd_cwnd;
 144
 145	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
 146
 147	tp->snd_ssthresh = tcp_current_ssthresh(sk);
 148	restart_cwnd = min(restart_cwnd, cwnd);
 149
 150	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
 151		cwnd >>= 1;
 152	tp->snd_cwnd = max(cwnd, restart_cwnd);
 153	tp->snd_cwnd_stamp = tcp_time_stamp;
 154	tp->snd_cwnd_used = 0;
 155}
 156
 157/* Congestion state accounting after a packet has been sent. */
 158static void tcp_event_data_sent(struct tcp_sock *tp,
 159				struct sock *sk)
 160{
 161	struct inet_connection_sock *icsk = inet_csk(sk);
 162	const u32 now = tcp_time_stamp;
 163
 164	if (tcp_packets_in_flight(tp) == 0)
 165		tcp_ca_event(sk, CA_EVENT_TX_START);
 166
 
 
 
 
 
 
 
 
 
 167	tp->lsndtime = now;
 168
 169	/* If it is a reply for ato after last received
 170	 * packet, enter pingpong mode.
 171	 */
 172	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
 173		icsk->icsk_ack.pingpong = 1;
 174}
 175
 176/* Account for an ACK we sent. */
 177static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
 
 178{
 
 
 
 
 
 
 
 
 
 
 
 
 179	tcp_dec_quickack_mode(sk, pkts);
 180	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 181}
 182
 183
 184u32 tcp_default_init_rwnd(u32 mss)
 185{
 186	/* Initial receive window should be twice of TCP_INIT_CWND to
 187	 * enable proper sending of new unsent data during fast recovery
 188	 * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a
 189	 * limit when mss is larger than 1460.
 190	 */
 191	u32 init_rwnd = TCP_INIT_CWND * 2;
 192
 193	if (mss > 1460)
 194		init_rwnd = max((1460 * init_rwnd) / mss, 2U);
 195	return init_rwnd;
 196}
 197
 198/* Determine a window scaling and initial window to offer.
 199 * Based on the assumption that the given amount of space
 200 * will be offered. Store the results in the tp structure.
 201 * NOTE: for smooth operation initial space offering should
 202 * be a multiple of mss if possible. We assume here that mss >= 1.
 203 * This MUST be enforced by all callers.
 204 */
 205void tcp_select_initial_window(int __space, __u32 mss,
 206			       __u32 *rcv_wnd, __u32 *window_clamp,
 207			       int wscale_ok, __u8 *rcv_wscale,
 208			       __u32 init_rcv_wnd)
 209{
 210	unsigned int space = (__space < 0 ? 0 : __space);
 211
 212	/* If no clamp set the clamp to the max possible scaled window */
 213	if (*window_clamp == 0)
 214		(*window_clamp) = (65535 << 14);
 215	space = min(*window_clamp, space);
 216
 217	/* Quantize space offering to a multiple of mss if possible. */
 218	if (space > mss)
 219		space = (space / mss) * mss;
 220
 221	/* NOTE: offering an initial window larger than 32767
 222	 * will break some buggy TCP stacks. If the admin tells us
 223	 * it is likely we could be speaking with such a buggy stack
 224	 * we will truncate our initial window offering to 32K-1
 225	 * unless the remote has sent us a window scaling option,
 226	 * which we interpret as a sign the remote TCP is not
 227	 * misinterpreting the window field as a signed quantity.
 228	 */
 229	if (sysctl_tcp_workaround_signed_windows)
 230		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
 231	else
 232		(*rcv_wnd) = space;
 
 
 
 233
 234	(*rcv_wscale) = 0;
 235	if (wscale_ok) {
 236		/* Set window scaling on max possible window
 237		 * See RFC1323 for an explanation of the limit to 14
 238		 */
 239		space = max_t(u32, space, sysctl_tcp_rmem[2]);
 240		space = max_t(u32, space, sysctl_rmem_max);
 241		space = min_t(u32, space, *window_clamp);
 242		while (space > 65535 && (*rcv_wscale) < 14) {
 243			space >>= 1;
 244			(*rcv_wscale)++;
 245		}
 246	}
 247
 248	if (mss > (1 << *rcv_wscale)) {
 249		if (!init_rcv_wnd) /* Use default unless specified otherwise */
 250			init_rcv_wnd = tcp_default_init_rwnd(mss);
 251		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
 252	}
 253
 254	/* Set the clamp no higher than max representable value */
 255	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
 256}
 257EXPORT_SYMBOL(tcp_select_initial_window);
 258
 259/* Chose a new window to advertise, update state in tcp_sock for the
 260 * socket, and return result with RFC1323 scaling applied.  The return
 261 * value can be stuffed directly into th->window for an outgoing
 262 * frame.
 263 */
 264static u16 tcp_select_window(struct sock *sk)
 265{
 266	struct tcp_sock *tp = tcp_sk(sk);
 267	u32 old_win = tp->rcv_wnd;
 268	u32 cur_win = tcp_receive_window(tp);
 269	u32 new_win = __tcp_select_window(sk);
 270
 271	/* Never shrink the offered window */
 272	if (new_win < cur_win) {
 273		/* Danger Will Robinson!
 274		 * Don't update rcv_wup/rcv_wnd here or else
 275		 * we will not be able to advertise a zero
 276		 * window in time.  --DaveM
 277		 *
 278		 * Relax Will Robinson.
 279		 */
 280		if (new_win == 0)
 281			NET_INC_STATS(sock_net(sk),
 282				      LINUX_MIB_TCPWANTZEROWINDOWADV);
 283		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
 284	}
 285	tp->rcv_wnd = new_win;
 286	tp->rcv_wup = tp->rcv_nxt;
 287
 288	/* Make sure we do not exceed the maximum possible
 289	 * scaled window.
 290	 */
 291	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
 
 292		new_win = min(new_win, MAX_TCP_WINDOW);
 293	else
 294		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
 295
 296	/* RFC1323 scaling applied */
 297	new_win >>= tp->rx_opt.rcv_wscale;
 298
 299	/* If we advertise zero window, disable fast path. */
 300	if (new_win == 0) {
 301		tp->pred_flags = 0;
 302		if (old_win)
 303			NET_INC_STATS(sock_net(sk),
 304				      LINUX_MIB_TCPTOZEROWINDOWADV);
 305	} else if (old_win == 0) {
 306		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
 307	}
 308
 309	return new_win;
 310}
 311
 312/* Packet ECN state for a SYN-ACK */
 313static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
 314{
 315	const struct tcp_sock *tp = tcp_sk(sk);
 316
 317	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
 318	if (!(tp->ecn_flags & TCP_ECN_OK))
 319		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
 320	else if (tcp_ca_needs_ecn(sk))
 
 321		INET_ECN_xmit(sk);
 322}
 323
 324/* Packet ECN state for a SYN.  */
 325static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
 326{
 327	struct tcp_sock *tp = tcp_sk(sk);
 
 328	bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
 329		       tcp_ca_needs_ecn(sk);
 330
 331	if (!use_ecn) {
 332		const struct dst_entry *dst = __sk_dst_get(sk);
 333
 334		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
 335			use_ecn = true;
 336	}
 337
 338	tp->ecn_flags = 0;
 339
 340	if (use_ecn) {
 341		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
 342		tp->ecn_flags = TCP_ECN_OK;
 343		if (tcp_ca_needs_ecn(sk))
 344			INET_ECN_xmit(sk);
 345	}
 346}
 347
 348static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
 349{
 350	if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
 351		/* tp->ecn_flags are cleared at a later point in time when
 352		 * SYN ACK is ultimatively being received.
 353		 */
 354		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
 355}
 356
 357static void
 358tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
 359{
 360	if (inet_rsk(req)->ecn_ok)
 361		th->ece = 1;
 362}
 363
 364/* Set up ECN state for a packet on a ESTABLISHED socket that is about to
 365 * be sent.
 366 */
 367static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
 368			 struct tcphdr *th, int tcp_header_len)
 369{
 370	struct tcp_sock *tp = tcp_sk(sk);
 371
 372	if (tp->ecn_flags & TCP_ECN_OK) {
 373		/* Not-retransmitted data segment: set ECT and inject CWR. */
 374		if (skb->len != tcp_header_len &&
 375		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
 376			INET_ECN_xmit(sk);
 377			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
 378				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
 379				th->cwr = 1;
 380				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 381			}
 382		} else if (!tcp_ca_needs_ecn(sk)) {
 383			/* ACK or retransmitted segment: clear ECT|CE */
 384			INET_ECN_dontxmit(sk);
 385		}
 386		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
 387			th->ece = 1;
 388	}
 389}
 390
 391/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 392 * auto increment end seqno.
 393 */
 394static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
 395{
 396	skb->ip_summed = CHECKSUM_PARTIAL;
 397	skb->csum = 0;
 398
 399	TCP_SKB_CB(skb)->tcp_flags = flags;
 400	TCP_SKB_CB(skb)->sacked = 0;
 401
 402	tcp_skb_pcount_set(skb, 1);
 403
 404	TCP_SKB_CB(skb)->seq = seq;
 405	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
 406		seq++;
 407	TCP_SKB_CB(skb)->end_seq = seq;
 408}
 409
 410static inline bool tcp_urg_mode(const struct tcp_sock *tp)
 411{
 412	return tp->snd_una != tp->snd_up;
 413}
 414
 415#define OPTION_SACK_ADVERTISE	(1 << 0)
 416#define OPTION_TS		(1 << 1)
 417#define OPTION_MD5		(1 << 2)
 418#define OPTION_WSCALE		(1 << 3)
 419#define OPTION_FAST_OPEN_COOKIE	(1 << 8)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 420
 421struct tcp_out_options {
 422	u16 options;		/* bit field of OPTION_* */
 423	u16 mss;		/* 0 to disable */
 424	u8 ws;			/* window scale, 0 to disable */
 425	u8 num_sack_blocks;	/* number of SACK blocks to include */
 426	u8 hash_size;		/* bytes in hash_location */
 
 427	__u8 *hash_location;	/* temporary pointer, overloaded */
 428	__u32 tsval, tsecr;	/* need to include OPTION_TS */
 429	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
 
 430};
 431
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 432/* Write previously computed TCP options to the packet.
 433 *
 434 * Beware: Something in the Internet is very sensitive to the ordering of
 435 * TCP options, we learned this through the hard way, so be careful here.
 436 * Luckily we can at least blame others for their non-compliance but from
 437 * inter-operability perspective it seems that we're somewhat stuck with
 438 * the ordering which we have been using if we want to keep working with
 439 * those broken things (not that it currently hurts anybody as there isn't
 440 * particular reason why the ordering would need to be changed).
 441 *
 442 * At least SACK_PERM as the first option is known to lead to a disaster
 443 * (but it may well be that other scenarios fail similarly).
 444 */
 445static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
 446			      struct tcp_out_options *opts)
 447{
 448	u16 options = opts->options;	/* mungable copy */
 449
 450	if (unlikely(OPTION_MD5 & options)) {
 451		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 452			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
 453		/* overload cookie hash location */
 454		opts->hash_location = (__u8 *)ptr;
 455		ptr += 4;
 456	}
 457
 458	if (unlikely(opts->mss)) {
 459		*ptr++ = htonl((TCPOPT_MSS << 24) |
 460			       (TCPOLEN_MSS << 16) |
 461			       opts->mss);
 462	}
 463
 464	if (likely(OPTION_TS & options)) {
 465		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
 466			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
 467				       (TCPOLEN_SACK_PERM << 16) |
 468				       (TCPOPT_TIMESTAMP << 8) |
 469				       TCPOLEN_TIMESTAMP);
 470			options &= ~OPTION_SACK_ADVERTISE;
 471		} else {
 472			*ptr++ = htonl((TCPOPT_NOP << 24) |
 473				       (TCPOPT_NOP << 16) |
 474				       (TCPOPT_TIMESTAMP << 8) |
 475				       TCPOLEN_TIMESTAMP);
 476		}
 477		*ptr++ = htonl(opts->tsval);
 478		*ptr++ = htonl(opts->tsecr);
 479	}
 480
 481	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
 482		*ptr++ = htonl((TCPOPT_NOP << 24) |
 483			       (TCPOPT_NOP << 16) |
 484			       (TCPOPT_SACK_PERM << 8) |
 485			       TCPOLEN_SACK_PERM);
 486	}
 487
 488	if (unlikely(OPTION_WSCALE & options)) {
 489		*ptr++ = htonl((TCPOPT_NOP << 24) |
 490			       (TCPOPT_WINDOW << 16) |
 491			       (TCPOLEN_WINDOW << 8) |
 492			       opts->ws);
 493	}
 494
 495	if (unlikely(opts->num_sack_blocks)) {
 496		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
 497			tp->duplicate_sack : tp->selective_acks;
 498		int this_sack;
 499
 500		*ptr++ = htonl((TCPOPT_NOP  << 24) |
 501			       (TCPOPT_NOP  << 16) |
 502			       (TCPOPT_SACK <<  8) |
 503			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
 504						     TCPOLEN_SACK_PERBLOCK)));
 505
 506		for (this_sack = 0; this_sack < opts->num_sack_blocks;
 507		     ++this_sack) {
 508			*ptr++ = htonl(sp[this_sack].start_seq);
 509			*ptr++ = htonl(sp[this_sack].end_seq);
 510		}
 511
 512		tp->rx_opt.dsack = 0;
 513	}
 514
 515	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
 516		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
 517		u8 *p = (u8 *)ptr;
 518		u32 len; /* Fast Open option length */
 519
 520		if (foc->exp) {
 521			len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
 522			*ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
 523				     TCPOPT_FASTOPEN_MAGIC);
 524			p += TCPOLEN_EXP_FASTOPEN_BASE;
 525		} else {
 526			len = TCPOLEN_FASTOPEN_BASE + foc->len;
 527			*p++ = TCPOPT_FASTOPEN;
 528			*p++ = len;
 529		}
 530
 531		memcpy(p, foc->val, foc->len);
 532		if ((len & 3) == 2) {
 533			p[foc->len] = TCPOPT_NOP;
 534			p[foc->len + 1] = TCPOPT_NOP;
 535		}
 536		ptr += (len + 3) >> 2;
 537	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 538}
 539
 540/* Compute TCP options for SYN packets. This is not the final
 541 * network wire format yet.
 542 */
 543static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 544				struct tcp_out_options *opts,
 545				struct tcp_md5sig_key **md5)
 546{
 547	struct tcp_sock *tp = tcp_sk(sk);
 548	unsigned int remaining = MAX_TCP_OPTION_SPACE;
 549	struct tcp_fastopen_request *fastopen = tp->fastopen_req;
 550
 
 551#ifdef CONFIG_TCP_MD5SIG
 552	*md5 = tp->af_specific->md5_lookup(sk, sk);
 553	if (*md5) {
 554		opts->options |= OPTION_MD5;
 555		remaining -= TCPOLEN_MD5SIG_ALIGNED;
 
 
 
 556	}
 557#else
 558	*md5 = NULL;
 559#endif
 560
 561	/* We always get an MSS option.  The option bytes which will be seen in
 562	 * normal data packets should timestamps be used, must be in the MSS
 563	 * advertised.  But we subtract them from tp->mss_cache so that
 564	 * calculations in tcp_sendmsg are simpler etc.  So account for this
 565	 * fact here if necessary.  If we don't do this correctly, as a
 566	 * receiver we won't recognize data packets as being full sized when we
 567	 * should, and thus we won't abide by the delayed ACK rules correctly.
 568	 * SACKs don't matter, we never delay an ACK when we have any of those
 569	 * going out.  */
 570	opts->mss = tcp_advertise_mss(sk);
 571	remaining -= TCPOLEN_MSS_ALIGNED;
 572
 573	if (likely(sysctl_tcp_timestamps && !*md5)) {
 574		opts->options |= OPTION_TS;
 575		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
 576		opts->tsecr = tp->rx_opt.ts_recent;
 577		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 578	}
 579	if (likely(sysctl_tcp_window_scaling)) {
 580		opts->ws = tp->rx_opt.rcv_wscale;
 581		opts->options |= OPTION_WSCALE;
 582		remaining -= TCPOLEN_WSCALE_ALIGNED;
 583	}
 584	if (likely(sysctl_tcp_sack)) {
 585		opts->options |= OPTION_SACK_ADVERTISE;
 586		if (unlikely(!(OPTION_TS & opts->options)))
 587			remaining -= TCPOLEN_SACKPERM_ALIGNED;
 588	}
 589
 590	if (fastopen && fastopen->cookie.len >= 0) {
 591		u32 need = fastopen->cookie.len;
 592
 593		need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
 594					       TCPOLEN_FASTOPEN_BASE;
 595		need = (need + 3) & ~3U;  /* Align to 32 bits */
 596		if (remaining >= need) {
 597			opts->options |= OPTION_FAST_OPEN_COOKIE;
 598			opts->fastopen_cookie = &fastopen->cookie;
 599			remaining -= need;
 600			tp->syn_fastopen = 1;
 601			tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
 602		}
 603	}
 604
 
 
 
 
 
 
 
 
 
 
 
 
 
 605	return MAX_TCP_OPTION_SPACE - remaining;
 606}
 607
 608/* Set up TCP options for SYN-ACKs. */
 609static unsigned int tcp_synack_options(struct request_sock *req,
 
 610				       unsigned int mss, struct sk_buff *skb,
 611				       struct tcp_out_options *opts,
 612				       const struct tcp_md5sig_key *md5,
 613				       struct tcp_fastopen_cookie *foc)
 
 
 614{
 615	struct inet_request_sock *ireq = inet_rsk(req);
 616	unsigned int remaining = MAX_TCP_OPTION_SPACE;
 617
 618#ifdef CONFIG_TCP_MD5SIG
 619	if (md5) {
 620		opts->options |= OPTION_MD5;
 621		remaining -= TCPOLEN_MD5SIG_ALIGNED;
 622
 623		/* We can't fit any SACK blocks in a packet with MD5 + TS
 624		 * options. There was discussion about disabling SACK
 625		 * rather than TS in order to fit in better with old,
 626		 * buggy kernels, but that was deemed to be unnecessary.
 627		 */
 628		ireq->tstamp_ok &= !ireq->sack_ok;
 
 629	}
 630#endif
 631
 632	/* We always send an MSS option. */
 633	opts->mss = mss;
 634	remaining -= TCPOLEN_MSS_ALIGNED;
 635
 636	if (likely(ireq->wscale_ok)) {
 637		opts->ws = ireq->rcv_wscale;
 638		opts->options |= OPTION_WSCALE;
 639		remaining -= TCPOLEN_WSCALE_ALIGNED;
 640	}
 641	if (likely(ireq->tstamp_ok)) {
 642		opts->options |= OPTION_TS;
 643		opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off;
 644		opts->tsecr = req->ts_recent;
 645		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 646	}
 647	if (likely(ireq->sack_ok)) {
 648		opts->options |= OPTION_SACK_ADVERTISE;
 649		if (unlikely(!ireq->tstamp_ok))
 650			remaining -= TCPOLEN_SACKPERM_ALIGNED;
 651	}
 652	if (foc != NULL && foc->len >= 0) {
 653		u32 need = foc->len;
 654
 655		need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
 656				   TCPOLEN_FASTOPEN_BASE;
 657		need = (need + 3) & ~3U;  /* Align to 32 bits */
 658		if (remaining >= need) {
 659			opts->options |= OPTION_FAST_OPEN_COOKIE;
 660			opts->fastopen_cookie = foc;
 661			remaining -= need;
 662		}
 663	}
 664
 
 
 
 
 
 
 
 665	return MAX_TCP_OPTION_SPACE - remaining;
 666}
 667
 668/* Compute TCP options for ESTABLISHED sockets. This is not the
 669 * final wire format yet.
 670 */
 671static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
 672					struct tcp_out_options *opts,
 673					struct tcp_md5sig_key **md5)
 674{
 675	struct tcp_sock *tp = tcp_sk(sk);
 676	unsigned int size = 0;
 677	unsigned int eff_sacks;
 678
 679	opts->options = 0;
 680
 
 681#ifdef CONFIG_TCP_MD5SIG
 682	*md5 = tp->af_specific->md5_lookup(sk, sk);
 683	if (unlikely(*md5)) {
 684		opts->options |= OPTION_MD5;
 685		size += TCPOLEN_MD5SIG_ALIGNED;
 
 
 
 686	}
 687#else
 688	*md5 = NULL;
 689#endif
 690
 691	if (likely(tp->rx_opt.tstamp_ok)) {
 692		opts->options |= OPTION_TS;
 693		opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
 694		opts->tsecr = tp->rx_opt.ts_recent;
 695		size += TCPOLEN_TSTAMP_ALIGNED;
 696	}
 697
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 698	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
 699	if (unlikely(eff_sacks)) {
 700		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
 
 
 
 
 701		opts->num_sack_blocks =
 702			min_t(unsigned int, eff_sacks,
 703			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
 704			      TCPOLEN_SACK_PERBLOCK);
 
 705		size += TCPOLEN_SACK_BASE_ALIGNED +
 706			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
 707	}
 708
 
 
 
 
 
 
 
 
 
 709	return size;
 710}
 711
 712
 713/* TCP SMALL QUEUES (TSQ)
 714 *
 715 * TSQ goal is to keep small amount of skbs per tcp flow in tx queues (qdisc+dev)
 716 * to reduce RTT and bufferbloat.
 717 * We do this using a special skb destructor (tcp_wfree).
 718 *
 719 * Its important tcp_wfree() can be replaced by sock_wfree() in the event skb
 720 * needs to be reallocated in a driver.
 721 * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc
 722 *
 723 * Since transmit from skb destructor is forbidden, we use a tasklet
 724 * to process all sockets that eventually need to send more skbs.
 725 * We use one tasklet per cpu, with its own queue of sockets.
 726 */
 727struct tsq_tasklet {
 728	struct tasklet_struct	tasklet;
 729	struct list_head	head; /* queue of tcp sockets */
 730};
 731static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
 732
 733static void tcp_tsq_handler(struct sock *sk)
 734{
 735	if ((1 << sk->sk_state) &
 736	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
 737	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK)) {
 738		struct tcp_sock *tp = tcp_sk(sk);
 739
 740		if (tp->lost_out > tp->retrans_out &&
 741		    tp->snd_cwnd > tcp_packets_in_flight(tp))
 
 742			tcp_xmit_retransmit_queue(sk);
 
 743
 744		tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
 745			       0, GFP_ATOMIC);
 746	}
 747}
 
 
 
 
 
 
 
 
 
 
 748/*
 749 * One tasklet per cpu tries to send more skbs.
 750 * We run in tasklet context but need to disable irqs when
 751 * transferring tsq->head because tcp_wfree() might
 752 * interrupt us (non NAPI drivers)
 753 */
 754static void tcp_tasklet_func(unsigned long data)
 755{
 756	struct tsq_tasklet *tsq = (struct tsq_tasklet *)data;
 757	LIST_HEAD(list);
 758	unsigned long flags;
 759	struct list_head *q, *n;
 760	struct tcp_sock *tp;
 761	struct sock *sk;
 762
 763	local_irq_save(flags);
 764	list_splice_init(&tsq->head, &list);
 765	local_irq_restore(flags);
 766
 767	list_for_each_safe(q, n, &list) {
 768		tp = list_entry(q, struct tcp_sock, tsq_node);
 769		list_del(&tp->tsq_node);
 770
 771		sk = (struct sock *)tp;
 772		smp_mb__before_atomic();
 773		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);
 774
 775		if (!sk->sk_lock.owned &&
 776		    test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) {
 777			bh_lock_sock(sk);
 778			if (!sock_owned_by_user(sk)) {
 779				clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags);
 780				tcp_tsq_handler(sk);
 781			}
 782			bh_unlock_sock(sk);
 783		}
 784
 785		sk_free(sk);
 786	}
 787}
 788
 789#define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
 790			  TCPF_WRITE_TIMER_DEFERRED |	\
 791			  TCPF_DELACK_TIMER_DEFERRED |	\
 792			  TCPF_MTU_REDUCED_DEFERRED)
 793/**
 794 * tcp_release_cb - tcp release_sock() callback
 795 * @sk: socket
 796 *
 797 * called from release_sock() to perform protocol dependent
 798 * actions before socket release.
 799 */
 800void tcp_release_cb(struct sock *sk)
 801{
 802	unsigned long flags, nflags;
 803
 804	/* perform an atomic operation only if at least one flag is set */
 805	do {
 806		flags = sk->sk_tsq_flags;
 807		if (!(flags & TCP_DEFERRED_ALL))
 808			return;
 809		nflags = flags & ~TCP_DEFERRED_ALL;
 810	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);
 811
 812	if (flags & TCPF_TSQ_DEFERRED)
 813		tcp_tsq_handler(sk);
 814
 
 815	/* Here begins the tricky part :
 816	 * We are called from release_sock() with :
 817	 * 1) BH disabled
 818	 * 2) sk_lock.slock spinlock held
 819	 * 3) socket owned by us (sk->sk_lock.owned == 1)
 820	 *
 821	 * But following code is meant to be called from BH handlers,
 822	 * so we should keep BH disabled, but early release socket ownership
 823	 */
 824	sock_release_ownership(sk);
 825
 826	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
 827		tcp_write_timer_handler(sk);
 828		__sock_put(sk);
 829	}
 830	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
 831		tcp_delack_timer_handler(sk);
 832		__sock_put(sk);
 833	}
 834	if (flags & TCPF_MTU_REDUCED_DEFERRED) {
 835		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
 836		__sock_put(sk);
 837	}
 838}
 839EXPORT_SYMBOL(tcp_release_cb);
 840
 841void __init tcp_tasklet_init(void)
 842{
 843	int i;
 844
 845	for_each_possible_cpu(i) {
 846		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);
 847
 848		INIT_LIST_HEAD(&tsq->head);
 849		tasklet_init(&tsq->tasklet,
 850			     tcp_tasklet_func,
 851			     (unsigned long)tsq);
 852	}
 853}
 854
 855/*
 856 * Write buffer destructor automatically called from kfree_skb.
 857 * We can't xmit new skbs from this context, as we might already
 858 * hold qdisc lock.
 859 */
 860void tcp_wfree(struct sk_buff *skb)
 861{
 862	struct sock *sk = skb->sk;
 863	struct tcp_sock *tp = tcp_sk(sk);
 864	unsigned long flags, nval, oval;
 865	int wmem;
 866
 867	/* Keep one reference on sk_wmem_alloc.
 868	 * Will be released by sk_free() from here or tcp_tasklet_func()
 869	 */
 870	wmem = atomic_sub_return(skb->truesize - 1, &sk->sk_wmem_alloc);
 871
 872	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
 873	 * Wait until our queues (qdisc + devices) are drained.
 874	 * This gives :
 875	 * - less callbacks to tcp_write_xmit(), reducing stress (batches)
 876	 * - chance for incoming ACK (processed by another cpu maybe)
 877	 *   to migrate this flow (skb->ooo_okay will be eventually set)
 878	 */
 879	if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
 880		goto out;
 881
 882	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
 883		struct tsq_tasklet *tsq;
 884		bool empty;
 885
 886		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
 887			goto out;
 888
 889		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED;
 890		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
 891		if (nval != oval)
 892			continue;
 893
 894		/* queue this socket to tasklet queue */
 895		local_irq_save(flags);
 896		tsq = this_cpu_ptr(&tsq_tasklet);
 897		empty = list_empty(&tsq->head);
 898		list_add(&tp->tsq_node, &tsq->head);
 899		if (empty)
 900			tasklet_schedule(&tsq->tasklet);
 901		local_irq_restore(flags);
 902		return;
 903	}
 904out:
 905	sk_free(sk);
 906}
 907
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 908/* This routine actually transmits TCP packets queued in by
 909 * tcp_do_sendmsg().  This is used by both the initial
 910 * transmission and possible later retransmissions.
 911 * All SKB's seen here are completely headerless.  It is our
 912 * job to build the TCP header, and pass the packet down to
 913 * IP so it can do the same plus pass the packet off to the
 914 * device.
 915 *
 916 * We are working here with either a clone of the original
 917 * SKB, or a fresh unique copy made by the retransmit engine.
 918 */
 919static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 920			    gfp_t gfp_mask)
 921{
 922	const struct inet_connection_sock *icsk = inet_csk(sk);
 923	struct inet_sock *inet;
 924	struct tcp_sock *tp;
 925	struct tcp_skb_cb *tcb;
 926	struct tcp_out_options opts;
 927	unsigned int tcp_options_size, tcp_header_size;
 
 928	struct tcp_md5sig_key *md5;
 929	struct tcphdr *th;
 930	int err;
 931
 932	BUG_ON(!skb || !tcp_skb_pcount(skb));
 933	tp = tcp_sk(sk);
 934
 935	if (clone_it) {
 936		skb_mstamp_get(&skb->skb_mstamp);
 937		TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
 938			- tp->snd_una;
 939		tcp_rate_skb_sent(sk, skb);
 940
 941		if (unlikely(skb_cloned(skb)))
 942			skb = pskb_copy(skb, gfp_mask);
 943		else
 944			skb = skb_clone(skb, gfp_mask);
 945		if (unlikely(!skb))
 946			return -ENOBUFS;
 947	}
 948
 949	inet = inet_sk(sk);
 950	tcb = TCP_SKB_CB(skb);
 951	memset(&opts, 0, sizeof(opts));
 952
 953	if (unlikely(tcb->tcp_flags & TCPHDR_SYN))
 954		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
 955	else
 956		tcp_options_size = tcp_established_options(sk, skb, &opts,
 957							   &md5);
 958	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
 959
 960	/* If no packet is in the qdisc/device queue, then allow XPS to select
 961	 * another queue. We can be called from tcp_tsq_handler()
 962	 * which holds one reference to sk_wmem_alloc.
 963	 *
 964	 * TODO: Ideally, in-flight pure ACK packets should not matter here.
 965	 * One way to get this would be to set skb->truesize = 2 on them.
 966	 */
 967	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);
 968
 969	skb_push(skb, tcp_header_size);
 970	skb_reset_transport_header(skb);
 971
 972	skb_orphan(skb);
 973	skb->sk = sk;
 974	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
 975	skb_set_hash_from_sk(skb, sk);
 976	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
 977
 978	/* Build TCP header and checksum it. */
 979	th = (struct tcphdr *)skb->data;
 980	th->source		= inet->inet_sport;
 981	th->dest		= inet->inet_dport;
 982	th->seq			= htonl(tcb->seq);
 983	th->ack_seq		= htonl(tp->rcv_nxt);
 984	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
 985					tcb->tcp_flags);
 986
 987	th->check		= 0;
 988	th->urg_ptr		= 0;
 989
 990	/* The urg_mode check is necessary during a window probe below snd_una */
 991	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
 992		if (before(tp->snd_up, tcb->seq + 0x10000)) {
 993			th->urg_ptr = htons(tp->snd_up - tcb->seq);
 994			th->urg = 1;
 995		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
 996			th->urg_ptr = htons(0xFFFF);
 997			th->urg = 1;
 998		}
 999	}
1000
1001	tcp_options_write((__be32 *)(th + 1), tp, &opts);
1002	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
1003	if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
1004		th->window      = htons(tcp_select_window(sk));
1005		tcp_ecn_send(sk, skb, th, tcp_header_size);
1006	} else {
1007		/* RFC1323: The window in SYN & SYN/ACK segments
1008		 * is never scaled.
1009		 */
1010		th->window	= htons(min(tp->rcv_wnd, 65535U));
1011	}
1012#ifdef CONFIG_TCP_MD5SIG
1013	/* Calculate the MD5 hash, as we have all we need now */
1014	if (md5) {
1015		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1016		tp->af_specific->calc_md5_hash(opts.hash_location,
1017					       md5, sk, skb);
1018	}
1019#endif
1020
1021	icsk->icsk_af_ops->send_check(sk, skb);
1022
1023	if (likely(tcb->tcp_flags & TCPHDR_ACK))
1024		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
1025
1026	if (skb->len != tcp_header_size) {
1027		tcp_event_data_sent(tp, sk);
1028		tp->data_segs_out += tcp_skb_pcount(skb);
1029	}
1030
1031	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
1032		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
1033			      tcp_skb_pcount(skb));
1034
1035	tp->segs_out += tcp_skb_pcount(skb);
 1036	/* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */
1037	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
1038	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
1039
1040	/* Our usage of tstamp should remain private */
1041	skb->tstamp = 0;
1042
1043	/* Cleanup our debris for IP stacks */
1044	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
1045			       sizeof(struct inet6_skb_parm)));
1046
1047	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
1048
1049	if (likely(err <= 0))
1050		return err;
1051
1052	tcp_enter_cwr(sk);
1053
1054	return net_xmit_eval(err);
1055}
1056
1057/* This routine just queues the buffer for sending.
1058 *
 1059 * NOTE: the probe0 timer is not checked; do not forget tcp_push_pending_frames,
 1060 * otherwise the socket can stall.
1061 */
1062static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
1063{
1064	struct tcp_sock *tp = tcp_sk(sk);
1065
1066	/* Advance write_seq and place onto the write_queue. */
1067	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
1068	__skb_header_release(skb);
1069	tcp_add_write_queue_tail(sk, skb);
1070	sk->sk_wmem_queued += skb->truesize;
1071	sk_mem_charge(sk, skb->truesize);
1072}
1073
1074/* Initialize TSO segments for a packet. */
1075static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1076{
1077	if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
1078		/* Avoid the costly divide in the normal
1079		 * non-TSO case.
1080		 */
1081		tcp_skb_pcount_set(skb, 1);
1082		TCP_SKB_CB(skb)->tcp_gso_size = 0;
1083	} else {
1084		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
1085		TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
1086	}
1087}
1088
 1089/* When a modification to fackets_out becomes necessary, we need to check
 1090 * whether skb is counted in fackets_out or not.
1091 */
1092static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb,
1093				   int decr)
1094{
1095	struct tcp_sock *tp = tcp_sk(sk);
1096
1097	if (!tp->sacked_out || tcp_is_reno(tp))
1098		return;
1099
1100	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
1101		tp->fackets_out -= decr;
1102}
1103
 1104/* Pcount in the middle of the write queue got changed; we need to make various
 1105 * tweaks to fix the counters.
1106 */
1107static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1108{
1109	struct tcp_sock *tp = tcp_sk(sk);
1110
1111	tp->packets_out -= decr;
1112
1113	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1114		tp->sacked_out -= decr;
1115	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1116		tp->retrans_out -= decr;
1117	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1118		tp->lost_out -= decr;
1119
1120	/* Reno case is special. Sigh... */
1121	if (tcp_is_reno(tp) && decr > 0)
1122		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1123
1124	tcp_adjust_fackets_out(sk, skb, decr);
1125
1126	if (tp->lost_skb_hint &&
1127	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
1128	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
1129		tp->lost_cnt_hint -= decr;
1130
1131	tcp_verify_left_out(tp);
1132}
1133
1134static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
1135{
1136	return TCP_SKB_CB(skb)->txstamp_ack ||
1137		(skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
1138}
1139
1140static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
1141{
1142	struct skb_shared_info *shinfo = skb_shinfo(skb);
1143
1144	if (unlikely(tcp_has_tx_tstamp(skb)) &&
1145	    !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1146		struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1147		u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1148
1149		shinfo->tx_flags &= ~tsflags;
1150		shinfo2->tx_flags |= tsflags;
1151		swap(shinfo->tskey, shinfo2->tskey);
1152		TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
1153		TCP_SKB_CB(skb)->txstamp_ack = 0;
1154	}
1155}
1156
1157static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
1158{
1159	TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
1160	TCP_SKB_CB(skb)->eor = 0;
1161}
1162
1163/* Function to create two new TCP segments.  Shrinks the given segment
1164 * to the specified size and appends a new segment with the rest of the
1165 * packet to the list.  This won't be called frequently, I hope.
1166 * Remember, these are still headerless SKBs at this point.
1167 */
1168int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1169		 unsigned int mss_now, gfp_t gfp)
1170{
1171	struct tcp_sock *tp = tcp_sk(sk);
1172	struct sk_buff *buff;
1173	int nsize, old_factor;
1174	int nlen;
1175	u8 flags;
1176
1177	if (WARN_ON(len > skb->len))
1178		return -EINVAL;
1179
1180	nsize = skb_headlen(skb) - len;
1181	if (nsize < 0)
1182		nsize = 0;
1183
1184	if (skb_unclone(skb, gfp))
1185		return -ENOMEM;
1186
1187	/* Get a new skb... force flag on. */
1188	buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
1189	if (!buff)
1190		return -ENOMEM; /* We'll just try again later. */
1191
1192	sk->sk_wmem_queued += buff->truesize;
1193	sk_mem_charge(sk, buff->truesize);
1194	nlen = skb->len - len - nsize;
1195	buff->truesize += nlen;
1196	skb->truesize -= nlen;
1197
1198	/* Correct the sequence numbers. */
1199	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1200	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1201	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1202
1203	/* PSH and FIN should only be set in the second packet. */
1204	flags = TCP_SKB_CB(skb)->tcp_flags;
1205	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
1206	TCP_SKB_CB(buff)->tcp_flags = flags;
1207	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1208	tcp_skb_fragment_eor(skb, buff);
1209
1210	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
1211		/* Copy and checksum data tail into the new buffer. */
1212		buff->csum = csum_partial_copy_nocheck(skb->data + len,
1213						       skb_put(buff, nsize),
1214						       nsize, 0);
1215
1216		skb_trim(skb, len);
1217
1218		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
1219	} else {
1220		skb->ip_summed = CHECKSUM_PARTIAL;
1221		skb_split(skb, buff, len);
1222	}
1223
1224	buff->ip_summed = skb->ip_summed;
1225
1226	buff->tstamp = skb->tstamp;
1227	tcp_fragment_tstamp(skb, buff);
1228
1229	old_factor = tcp_skb_pcount(skb);
1230
1231	/* Fix up tso_factor for both original and new SKB.  */
1232	tcp_set_skb_tso_segs(skb, mss_now);
1233	tcp_set_skb_tso_segs(buff, mss_now);
1234
1235	/* Update delivered info for the new segment */
1236	TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
1237
1238	/* If this packet has been sent out already, we must
1239	 * adjust the various packet counters.
1240	 */
1241	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
1242		int diff = old_factor - tcp_skb_pcount(skb) -
1243			tcp_skb_pcount(buff);
1244
1245		if (diff)
1246			tcp_adjust_pcount(sk, skb, diff);
1247	}
1248
1249	/* Link BUFF into the send queue. */
1250	__skb_header_release(buff);
1251	tcp_insert_write_queue_after(skb, buff, sk);
1252
1253	return 0;
1254}
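
/*
 * Worked example of the split above (illustrative values): an skb
 * covering seq 1000..4000 (3000 bytes, pcount 3 at mss_now = 1000)
 * fragmented with len = 1000 leaves skb at seq 1000..2000 (pcount 1)
 * and buff at seq 2000..4000 (pcount 2); old_factor (3) minus the two
 * new pcounts is 0, so no pcount adjustment is needed in that case.
 */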
1255
1256/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 1257 * eventually). The difference is that the pulled data is not copied,
 1258 * but immediately discarded.
1259 */
1260static void __pskb_trim_head(struct sk_buff *skb, int len)
1261{
1262	struct skb_shared_info *shinfo;
1263	int i, k, eat;
1264
1265	eat = min_t(int, len, skb_headlen(skb));
1266	if (eat) {
1267		__skb_pull(skb, eat);
1268		len -= eat;
1269		if (!len)
1270			return;
1271	}
1272	eat = len;
1273	k = 0;
1274	shinfo = skb_shinfo(skb);
1275	for (i = 0; i < shinfo->nr_frags; i++) {
1276		int size = skb_frag_size(&shinfo->frags[i]);
1277
1278		if (size <= eat) {
1279			skb_frag_unref(skb, i);
1280			eat -= size;
1281		} else {
1282			shinfo->frags[k] = shinfo->frags[i];
1283			if (eat) {
1284				shinfo->frags[k].page_offset += eat;
1285				skb_frag_size_sub(&shinfo->frags[k], eat);
1286				eat = 0;
1287			}
1288			k++;
1289		}
1290	}
1291	shinfo->nr_frags = k;
1292
1293	skb_reset_tail_pointer(skb);
1294	skb->data_len -= len;
1295	skb->len = skb->data_len;
1296}
1297
1298/* Remove acked data from a packet in the transmit queue. */
1299int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
1300{
1301	if (skb_unclone(skb, GFP_ATOMIC))
1302		return -ENOMEM;
1303
1304	__pskb_trim_head(skb, len);
1305
1306	TCP_SKB_CB(skb)->seq += len;
1307	skb->ip_summed = CHECKSUM_PARTIAL;
1308
1309	skb->truesize	     -= len;
1310	sk->sk_wmem_queued   -= len;
1311	sk_mem_uncharge(sk, len);
1312	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
1313
1314	/* Any change of skb->len requires recalculation of tso factor. */
1315	if (tcp_skb_pcount(skb) > 1)
1316		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
1317
1318	return 0;
1319}
1320
 1321/* Calculate MSS, not accounting for any TCP options.  */
1322static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
1323{
1324	const struct tcp_sock *tp = tcp_sk(sk);
1325	const struct inet_connection_sock *icsk = inet_csk(sk);
1326	int mss_now;
1327
1328	/* Calculate base mss without TCP options:
 1329	   It is MMS_S - sizeof(tcphdr) from RFC 1122.
1330	 */
1331	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
1332
1333	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
1334	if (icsk->icsk_af_ops->net_frag_header_len) {
1335		const struct dst_entry *dst = __sk_dst_get(sk);
1336
1337		if (dst && dst_allfrag(dst))
1338			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
1339	}
1340
1341	/* Clamp it (mss_clamp does not include tcp options) */
1342	if (mss_now > tp->rx_opt.mss_clamp)
1343		mss_now = tp->rx_opt.mss_clamp;
1344
1345	/* Now subtract optional transport overhead */
1346	mss_now -= icsk->icsk_ext_hdr_len;
1347
1348	/* Then reserve room for full set of TCP options and 8 bytes of data */
1349	if (mss_now < 48)
1350		mss_now = 48;
1351	return mss_now;
1352}
1353
1354/* Calculate MSS. Not accounting for SACKs here.  */
1355int tcp_mtu_to_mss(struct sock *sk, int pmtu)
1356{
1357	/* Subtract TCP options size, not including SACKs */
1358	return __tcp_mtu_to_mss(sk, pmtu) -
1359	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
1360}
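
/*
 * Worked example (illustrative, plain IPv4, no extension headers and no
 * ALLFRAG): with pmtu = 1500, __tcp_mtu_to_mss() yields 1500 - 20 (net
 * header) - 20 (tcphdr) = 1460. With timestamps in use, tcp_header_len
 * is sizeof(tcphdr) + 12, so tcp_mtu_to_mss() returns 1460 - 12 = 1448.
 */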
1361
1362/* Inverse of above */
1363int tcp_mss_to_mtu(struct sock *sk, int mss)
1364{
1365	const struct tcp_sock *tp = tcp_sk(sk);
1366	const struct inet_connection_sock *icsk = inet_csk(sk);
1367	int mtu;
1368
1369	mtu = mss +
1370	      tp->tcp_header_len +
1371	      icsk->icsk_ext_hdr_len +
1372	      icsk->icsk_af_ops->net_header_len;
1373
1374	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
1375	if (icsk->icsk_af_ops->net_frag_header_len) {
1376		const struct dst_entry *dst = __sk_dst_get(sk);
1377
1378		if (dst && dst_allfrag(dst))
1379			mtu += icsk->icsk_af_ops->net_frag_header_len;
1380	}
1381	return mtu;
1382}
1383EXPORT_SYMBOL(tcp_mss_to_mtu);
1384
1385/* MTU probing init per socket */
1386void tcp_mtup_init(struct sock *sk)
1387{
1388	struct tcp_sock *tp = tcp_sk(sk);
1389	struct inet_connection_sock *icsk = inet_csk(sk);
1390	struct net *net = sock_net(sk);
1391
1392	icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
1393	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
1394			       icsk->icsk_af_ops->net_header_len;
1395	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
1396	icsk->icsk_mtup.probe_size = 0;
1397	if (icsk->icsk_mtup.enabled)
1398		icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
1399}
1400EXPORT_SYMBOL(tcp_mtup_init);
1401
 1402/* This function synchronizes snd mss to the current pmtu/exthdr set.
1403
 1404   tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
 1405   account for TCP options; it includes only the bare TCP header.
1406
1407   tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1408   It is minimum of user_mss and mss received with SYN.
1409   It also does not include TCP options.
1410
 1411   inet_csk(sk)->icsk_pmtu_cookie is the last pmtu seen by this function.
1412
1413   tp->mss_cache is current effective sending mss, including
1414   all tcp options except for SACKs. It is evaluated,
1415   taking into account current pmtu, but never exceeds
1416   tp->rx_opt.mss_clamp.
1417
1418   NOTE1. rfc1122 clearly states that advertised MSS
1419   DOES NOT include either tcp or ip options.
1420
1421   NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1422   are READ ONLY outside this function.		--ANK (980731)
1423 */
1424unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
1425{
1426	struct tcp_sock *tp = tcp_sk(sk);
1427	struct inet_connection_sock *icsk = inet_csk(sk);
1428	int mss_now;
1429
1430	if (icsk->icsk_mtup.search_high > pmtu)
1431		icsk->icsk_mtup.search_high = pmtu;
1432
1433	mss_now = tcp_mtu_to_mss(sk, pmtu);
1434	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
1435
1436	/* And store cached results */
1437	icsk->icsk_pmtu_cookie = pmtu;
1438	if (icsk->icsk_mtup.enabled)
1439		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1440	tp->mss_cache = mss_now;
1441
1442	return mss_now;
1443}
1444EXPORT_SYMBOL(tcp_sync_mss);
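
/*
 * Illustrative trace of the clamping above (made-up values): with an
 * IPv4 socket, pmtu = 1500, mss_clamp = 1400 and timestamps enabled,
 * tcp_mtu_to_mss() computes 1460, clamps it to 1400 and subtracts the
 * 12 option bytes, so tp->mss_cache becomes 1388 (unless further bound
 * to half the window or by the MTU-probing search_low).
 */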
1445
1446/* Compute the current effective MSS, taking SACKs and IP options,
1447 * and even PMTU discovery events into account.
1448 */
1449unsigned int tcp_current_mss(struct sock *sk)
1450{
1451	const struct tcp_sock *tp = tcp_sk(sk);
1452	const struct dst_entry *dst = __sk_dst_get(sk);
1453	u32 mss_now;
1454	unsigned int header_len;
1455	struct tcp_out_options opts;
1456	struct tcp_md5sig_key *md5;
1457
1458	mss_now = tp->mss_cache;
1459
1460	if (dst) {
1461		u32 mtu = dst_mtu(dst);
1462		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
1463			mss_now = tcp_sync_mss(sk, mtu);
1464	}
1465
1466	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
1467		     sizeof(struct tcphdr);
1468	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
1469	 * some common options. If this is an odd packet (because we have SACK
 1470	 * blocks etc.) then our calculated header_len will be different, and
 1471	 * we have to adjust mss_now correspondingly. */
1472	if (header_len != tp->tcp_header_len) {
1473		int delta = (int) header_len - tp->tcp_header_len;
1474		mss_now -= delta;
1475	}
1476
1477	return mss_now;
1478}
1479
 1480/* RFC2861, slow part. Adjust cwnd after it was not full during one rto.
 1481 * As additional protection, we do not touch cwnd in retransmission phases,
 1482 * or if the application hit its sndbuf limit recently.
1483 */
1484static void tcp_cwnd_application_limited(struct sock *sk)
1485{
1486	struct tcp_sock *tp = tcp_sk(sk);
1487
1488	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
1489	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1490		/* Limited by application or receiver window. */
1491		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
1492		u32 win_used = max(tp->snd_cwnd_used, init_win);
1493		if (win_used < tp->snd_cwnd) {
1494			tp->snd_ssthresh = tcp_current_ssthresh(sk);
1495			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
1496		}
1497		tp->snd_cwnd_used = 0;
1498	}
1499	tp->snd_cwnd_stamp = tcp_time_stamp;
1500}
1501
1502static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1503{
1504	struct tcp_sock *tp = tcp_sk(sk);
1505
1506	/* Track the maximum number of outstanding packets in each
1507	 * window, and remember whether we were cwnd-limited then.
1508	 */
1509	if (!before(tp->snd_una, tp->max_packets_seq) ||
1510	    tp->packets_out > tp->max_packets_out) {
1511		tp->max_packets_out = tp->packets_out;
1512		tp->max_packets_seq = tp->snd_nxt;
1513		tp->is_cwnd_limited = is_cwnd_limited;
1514	}
1515
1516	if (tcp_is_cwnd_limited(sk)) {
 1517		/* Network is fully fed. */
1518		tp->snd_cwnd_used = 0;
1519		tp->snd_cwnd_stamp = tcp_time_stamp;
1520	} else {
1521		/* Network starves. */
1522		if (tp->packets_out > tp->snd_cwnd_used)
1523			tp->snd_cwnd_used = tp->packets_out;
1524
1525		if (sysctl_tcp_slow_start_after_idle &&
1526		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
1527			tcp_cwnd_application_limited(sk);
1528
1529		/* The following conditions together indicate the starvation
1530		 * is caused by insufficient sender buffer:
1531		 * 1) just sent some data (see tcp_write_xmit)
1532		 * 2) not cwnd limited (this else condition)
 1533		 * 3) no more data to send (tcp_send_head() is NULL)
1534		 * 4) application is hitting buffer limit (SOCK_NOSPACE)
1535		 */
1536		if (!tcp_send_head(sk) && sk->sk_socket &&
1537		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
1538		    (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
1539			tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
1540	}
1541}
1542
1543/* Minshall's variant of the Nagle send check. */
1544static bool tcp_minshall_check(const struct tcp_sock *tp)
1545{
1546	return after(tp->snd_sml, tp->snd_una) &&
1547		!after(tp->snd_sml, tp->snd_nxt);
1548}
1549
1550/* Update snd_sml if this skb is under mss
1551 * Note that a TSO packet might end with a sub-mss segment
 1552 * The test is really:
1553 * if ((skb->len % mss) != 0)
1554 *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1555 * But we can avoid doing the divide again given we already have
1556 *  skb_pcount = skb->len / mss_now
1557 */
1558static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1559				const struct sk_buff *skb)
1560{
1561	if (skb->len < tcp_skb_pcount(skb) * mss_now)
1562		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1563}
1564
 1565/* Return false if the packet can be sent now without violating Nagle's rules:
1566 * 1. It is full sized. (provided by caller in %partial bool)
1567 * 2. Or it contains FIN. (already checked by caller)
1568 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1569 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1570 *    With Minshall's modification: all sent small packets are ACKed.
1571 */
1572static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1573			    int nonagle)
1574{
1575	return partial &&
1576		((nonagle & TCP_NAGLE_CORK) ||
1577		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1578}
1579
1580/* Return how many segs we'd like on a TSO packet,
1581 * to send one TSO packet per ms
1582 */
1583u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
1584		     int min_tso_segs)
1585{
1586	u32 bytes, segs;
1587
1588	bytes = min(sk->sk_pacing_rate >> 10,
1589		    sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
1590
1591	/* Goal is to send at least one packet per ms,
1592	 * not one big TSO packet every 100 ms.
1593	 * This preserves ACK clocking and is consistent
1594	 * with tcp_tso_should_defer() heuristic.
1595	 */
1596	segs = max_t(u32, bytes / mss_now, min_tso_segs);
1597
1598	return min_t(u32, segs, sk->sk_gso_max_segs);
1599}
1600EXPORT_SYMBOL(tcp_tso_autosize);
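
/*
 * Worked example (illustrative rate): at sk_pacing_rate = 12500000
 * bytes/sec (~100 Mbit/s), bytes = 12500000 >> 10 = 12207, i.e. roughly
 * one millisecond worth of data, well under the sk_gso_max_size cap.
 * With mss_now = 1448 that is 12207 / 1448 = 8 segments, subject to the
 * min_tso_segs floor and the sk_gso_max_segs cap.
 */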
1601
1602/* Return the number of segments we want in the skb we are transmitting.
1603 * See if congestion control module wants to decide; otherwise, autosize.
1604 */
1605static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
1606{
1607	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1608	u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0;
1609
1610	return tso_segs ? :
1611		tcp_tso_autosize(sk, mss_now, sysctl_tcp_min_tso_segs);
1612}
1613
1614/* Returns the portion of skb which can be sent right away */
1615static unsigned int tcp_mss_split_point(const struct sock *sk,
1616					const struct sk_buff *skb,
1617					unsigned int mss_now,
1618					unsigned int max_segs,
1619					int nonagle)
1620{
1621	const struct tcp_sock *tp = tcp_sk(sk);
1622	u32 partial, needed, window, max_len;
1623
1624	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1625	max_len = mss_now * max_segs;
1626
1627	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
1628		return max_len;
1629
1630	needed = min(skb->len, window);
1631
1632	if (max_len <= needed)
1633		return max_len;
1634
1635	partial = needed % mss_now;
1636	/* If last segment is not a full MSS, check if Nagle rules allow us
1637	 * to include this last segment in this skb.
1638	 * Otherwise, we'll split the skb at last MSS boundary
1639	 */
1640	if (tcp_nagle_check(partial != 0, tp, nonagle))
1641		return needed - partial;
1642
1643	return needed;
1644}
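
/*
 * Worked example (illustrative values): with mss_now = 1000 and
 * max_segs = 4, max_len = 4000. For a 2500-byte skb at the tail of the
 * queue with a 6000-byte window, needed = 2500 and partial = 500; if
 * the Nagle check forbids the odd tail we return 2000, otherwise the
 * full 2500.
 */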
1645
1646/* Can at least one segment of SKB be sent right now, according to the
1647 * congestion window rules?  If so, return how many segments are allowed.
1648 */
1649static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
1650					 const struct sk_buff *skb)
1651{
1652	u32 in_flight, cwnd, halfcwnd;
1653
1654	/* Don't be strict about the congestion window for the final FIN.  */
1655	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
1656	    tcp_skb_pcount(skb) == 1)
1657		return 1;
1658
1659	in_flight = tcp_packets_in_flight(tp);
1660	cwnd = tp->snd_cwnd;
1661	if (in_flight >= cwnd)
1662		return 0;
1663
1664	/* For better scheduling, ensure we have at least
1665	 * 2 GSO packets in flight.
1666	 */
1667	halfcwnd = max(cwnd >> 1, 1U);
1668	return min(halfcwnd, cwnd - in_flight);
1669}
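
/*
 * Worked example (illustrative values): with snd_cwnd = 10 and 7
 * packets in flight, halfcwnd = 5 and cwnd - in_flight = 3, so at
 * most min(5, 3) = 3 segments may be sent right now.
 */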
1670
1671/* Initialize TSO state of a skb.
1672 * This must be invoked the first time we consider transmitting
1673 * SKB onto the wire.
1674 */
1675static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1676{
1677	int tso_segs = tcp_skb_pcount(skb);
1678
1679	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
1680		tcp_set_skb_tso_segs(skb, mss_now);
1681		tso_segs = tcp_skb_pcount(skb);
1682	}
1683	return tso_segs;
1684}
1685
1686
1687/* Return true if the Nagle test allows this packet to be
1688 * sent now.
1689 */
1690static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
1691				  unsigned int cur_mss, int nonagle)
1692{
 1693	/* The Nagle rule does not apply to frames that sit in the middle of the
 1694	 * write_queue (they have no chance to get new data).
1695	 *
1696	 * This is implemented in the callers, where they modify the 'nonagle'
1697	 * argument based upon the location of SKB in the send queue.
1698	 */
1699	if (nonagle & TCP_NAGLE_PUSH)
1700		return true;
1701
1702	/* Don't use the nagle rule for urgent data (or for the final FIN). */
1703	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1704		return true;
1705
1706	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
1707		return true;
1708
1709	return false;
1710}
1711
1712/* Does at least the first segment of SKB fit into the send window? */
1713static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
1714			     const struct sk_buff *skb,
1715			     unsigned int cur_mss)
1716{
1717	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1718
1719	if (skb->len > cur_mss)
1720		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1721
1722	return !after(end_seq, tcp_wnd_end(tp));
1723}
1724
 1725/* This checks if the data-bearing packet SKB (usually tcp_send_head(sk))
1726 * should be put on the wire right now.  If so, it returns the number of
1727 * packets allowed by the congestion window.
1728 */
1729static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
1730				 unsigned int cur_mss, int nonagle)
1731{
1732	const struct tcp_sock *tp = tcp_sk(sk);
1733	unsigned int cwnd_quota;
1734
1735	tcp_init_tso_segs(skb, cur_mss);
1736
1737	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
1738		return 0;
1739
1740	cwnd_quota = tcp_cwnd_test(tp, skb);
1741	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
1742		cwnd_quota = 0;
1743
1744	return cwnd_quota;
1745}
1746
1747/* Test if sending is allowed right now. */
1748bool tcp_may_send_now(struct sock *sk)
1749{
1750	const struct tcp_sock *tp = tcp_sk(sk);
1751	struct sk_buff *skb = tcp_send_head(sk);
1752
1753	return skb &&
1754		tcp_snd_test(sk, skb, tcp_current_mss(sk),
1755			     (tcp_skb_is_last(sk, skb) ?
1756			      tp->nonagle : TCP_NAGLE_PUSH));
1757}
1758
1759/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1760 * which is put after SKB on the list.  It is very much like
1761 * tcp_fragment() except that it may make several kinds of assumptions
1762 * in order to speed up the splitting operation.  In particular, we
1763 * know that all the data is in scatter-gather pages, and that the
1764 * packet has never been sent out before (and thus is not cloned).
1765 */
1766static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1767			unsigned int mss_now, gfp_t gfp)
1768{
1769	struct sk_buff *buff;
1770	int nlen = skb->len - len;
1771	u8 flags;
1772
1773	/* All of a TSO frame must be composed of paged data.  */
1774	if (skb->len != skb->data_len)
1775		return tcp_fragment(sk, skb, len, mss_now, gfp);
1776
1777	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
1778	if (unlikely(!buff))
1779		return -ENOMEM;
1780
1781	sk->sk_wmem_queued += buff->truesize;
1782	sk_mem_charge(sk, buff->truesize);
1783	buff->truesize += nlen;
1784	skb->truesize -= nlen;
1785
1786	/* Correct the sequence numbers. */
1787	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1788	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1789	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1790
1791	/* PSH and FIN should only be set in the second packet. */
1792	flags = TCP_SKB_CB(skb)->tcp_flags;
1793	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
1794	TCP_SKB_CB(buff)->tcp_flags = flags;
1795
1796	/* This packet was never sent out yet, so no SACK bits. */
1797	TCP_SKB_CB(buff)->sacked = 0;
1798
1799	tcp_skb_fragment_eor(skb, buff);
1800
1801	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1802	skb_split(skb, buff, len);
1803	tcp_fragment_tstamp(skb, buff);
1804
1805	/* Fix up tso_factor for both original and new SKB.  */
1806	tcp_set_skb_tso_segs(skb, mss_now);
1807	tcp_set_skb_tso_segs(buff, mss_now);
1808
1809	/* Link BUFF into the send queue. */
1810	__skb_header_release(buff);
1811	tcp_insert_write_queue_after(skb, buff, sk);
1812
1813	return 0;
1814}
1815
1816/* Try to defer sending, if possible, in order to minimize the amount
1817 * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1818 *
1819 * This algorithm is from John Heffner.
1820 */
1821static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1822				 bool *is_cwnd_limited, u32 max_segs)
1823{
1824	const struct inet_connection_sock *icsk = inet_csk(sk);
1825	u32 age, send_win, cong_win, limit, in_flight;
1826	struct tcp_sock *tp = tcp_sk(sk);
1827	struct skb_mstamp now;
1828	struct sk_buff *head;
1829	int win_divisor;
1830
1831	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1832		goto send_now;
1833
1834	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
1835		goto send_now;
1836
1837	/* Avoid bursty behavior by allowing defer
1838	 * only if the last write was recent.
1839	 */
1840	if ((s32)(tcp_time_stamp - tp->lsndtime) > 0)
1841		goto send_now;
1842
1843	in_flight = tcp_packets_in_flight(tp);
1844
1845	BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
1846
1847	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1848
1849	/* From in_flight test above, we know that cwnd > in_flight.  */
1850	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1851
1852	limit = min(send_win, cong_win);
1853
1854	/* If a full-sized TSO skb can be sent, do it. */
1855	if (limit >= max_segs * tp->mss_cache)
1856		goto send_now;
1857
 1858	/* A packet in the middle of the queue won't get more data; is it fully sendable already? */
1859	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
1860		goto send_now;
1861
1862	win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
1863	if (win_divisor) {
1864		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1865
1866		/* If at least some fraction of a window is available,
1867		 * just use it.
1868		 */
1869		chunk /= win_divisor;
1870		if (limit >= chunk)
1871			goto send_now;
1872	} else {
1873		/* Different approach, try not to defer past a single
1874		 * ACK.  Receiver should ACK every other full sized
1875		 * frame, so if we have space for more than 3 frames
1876		 * then send now.
1877		 */
1878		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
1879			goto send_now;
1880	}
1881
1882	head = tcp_write_queue_head(sk);
1883	skb_mstamp_get(&now);
1884	age = skb_mstamp_us_delta(&now, &head->skb_mstamp);
1885	/* If next ACK is likely to come too late (half srtt), do not defer */
1886	if (age < (tp->srtt_us >> 4))
1887		goto send_now;
1888
1889	/* Ok, it looks like it is advisable to defer. */
1890
1891	if (cong_win < send_win && cong_win <= skb->len)
1892		*is_cwnd_limited = true;
1893
1894	return true;
1895
1896send_now:
1897	return false;
1898}
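
/*
 * Worked example of the win_divisor branch (illustrative values): with
 * tcp_tso_win_divisor = 3, snd_wnd = 60000 and snd_cwnd * mss_cache =
 * 45000, chunk = min(60000, 45000) / 3 = 15000, so a sendable limit of
 * at least 15000 bytes stops the deferral and we transmit immediately.
 */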
1899
1900static inline void tcp_mtu_check_reprobe(struct sock *sk)
1901{
1902	struct inet_connection_sock *icsk = inet_csk(sk);
1903	struct tcp_sock *tp = tcp_sk(sk);
1904	struct net *net = sock_net(sk);
1905	u32 interval;
1906	s32 delta;
1907
1908	interval = net->ipv4.sysctl_tcp_probe_interval;
1909	delta = tcp_time_stamp - icsk->icsk_mtup.probe_timestamp;
1910	if (unlikely(delta >= interval * HZ)) {
1911		int mss = tcp_current_mss(sk);
1912
1913		/* Update current search range */
1914		icsk->icsk_mtup.probe_size = 0;
1915		icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
1916			sizeof(struct tcphdr) +
1917			icsk->icsk_af_ops->net_header_len;
1918		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
1919
1920		/* Update probe time stamp */
1921		icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
1922	}
1923}
1924
1925/* Create a new MTU probe if we are ready.
 1926 * MTU probing regularly attempts to increase the path MTU by
 1927 * deliberately sending larger packets.  This discovers routing
1928 * changes resulting in larger path MTUs.
1929 *
1930 * Returns 0 if we should wait to probe (no cwnd available),
1931 *         1 if a probe was sent,
1932 *         -1 otherwise
1933 */
1934static int tcp_mtu_probe(struct sock *sk)
1935{
1936	struct inet_connection_sock *icsk = inet_csk(sk);
1937	struct tcp_sock *tp = tcp_sk(sk);
1938	struct sk_buff *skb, *nskb, *next;
1939	struct net *net = sock_net(sk);
1940	int probe_size;
1941	int size_needed;
1942	int copy, len;
1943	int mss_now;
1944	int interval;
1945
1946	/* Not currently probing/verifying,
1947	 * not in recovery,
1948	 * have enough cwnd, and
1949	 * not SACKing (the variable headers throw things off)
1950	 */
1951	if (likely(!icsk->icsk_mtup.enabled ||
1952		   icsk->icsk_mtup.probe_size ||
1953		   inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
1954		   tp->snd_cwnd < 11 ||
1955		   tp->rx_opt.num_sacks || tp->rx_opt.dsack))
1956		return -1;
1957
 1958	/* Use binary search for probe_size between tcp_base_mss
 1959	 * and the current mss_clamp. If (search_high - search_low) is
 1960	 * smaller than a threshold, back off from probing.
1961	 */
1962	mss_now = tcp_current_mss(sk);
1963	probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
1964				    icsk->icsk_mtup.search_low) >> 1);
1965	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
1966	interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
 1967	/* When misfortune happens, we are reprobing actively, and
 1968	 * the reprobe timer has expired; we stick with the current
 1969	 * probing process by not resetting the search range to its original.
1970	 */
1971	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
1972		interval < net->ipv4.sysctl_tcp_probe_threshold) {
 1973		/* Check whether enough time has elapsed for
1974		 * another round of probing.
1975		 */
1976		tcp_mtu_check_reprobe(sk);
1977		return -1;
1978	}
1979
1980	/* Have enough data in the send queue to probe? */
1981	if (tp->write_seq - tp->snd_nxt < size_needed)
1982		return -1;
1983
1984	if (tp->snd_wnd < size_needed)
1985		return -1;
1986	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
1987		return 0;
1988
1989	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
1990	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
1991		if (!tcp_packets_in_flight(tp))
1992			return -1;
1993		else
1994			return 0;
1995	}
1996
1997	/* We're allowed to probe.  Build it now. */
1998	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
1999	if (!nskb)
2000		return -1;
2001	sk->sk_wmem_queued += nskb->truesize;
2002	sk_mem_charge(sk, nskb->truesize);
2003
2004	skb = tcp_send_head(sk);
2005
2006	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
2007	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
2008	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
2009	TCP_SKB_CB(nskb)->sacked = 0;
2010	nskb->csum = 0;
2011	nskb->ip_summed = skb->ip_summed;
2012
2013	tcp_insert_write_queue_before(nskb, skb, sk);
2014
2015	len = 0;
2016	tcp_for_write_queue_from_safe(skb, next, sk) {
2017		copy = min_t(int, skb->len, probe_size - len);
2018		if (nskb->ip_summed) {
2019			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
2020		} else {
2021			__wsum csum = skb_copy_and_csum_bits(skb, 0,
2022							     skb_put(nskb, copy),
2023							     copy, 0);
2024			nskb->csum = csum_block_add(nskb->csum, csum, len);
2025		}
2026
2027		if (skb->len <= copy) {
2028			/* We've eaten all the data from this skb.
2029			 * Throw it away. */
2030			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
2031			tcp_unlink_write_queue(skb, sk);
2032			sk_wmem_free_skb(sk, skb);
2033		} else {
2034			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
2035						   ~(TCPHDR_FIN|TCPHDR_PSH);
2036			if (!skb_shinfo(skb)->nr_frags) {
2037				skb_pull(skb, copy);
2038				if (skb->ip_summed != CHECKSUM_PARTIAL)
2039					skb->csum = csum_partial(skb->data,
2040								 skb->len, 0);
2041			} else {
2042				__pskb_trim_head(skb, copy);
2043				tcp_set_skb_tso_segs(skb, mss_now);
2044			}
2045			TCP_SKB_CB(skb)->seq += copy;
2046		}
2047
2048		len += copy;
2049
2050		if (len >= probe_size)
2051			break;
2052	}
2053	tcp_init_tso_segs(nskb, nskb->len);
2054
2055	/* We're ready to send.  If this fails, the probe will
2056	 * be resegmented into mss-sized pieces by tcp_write_xmit().
2057	 */
2058	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
2059		/* Decrement cwnd here because we are sending
2060		 * effectively two packets. */
2061		tp->snd_cwnd--;
2062		tcp_event_new_data_sent(sk, nskb);
2063
2064		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
2065		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
2066		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
2067
2068		return 1;
2069	}
2070
2071	return -1;
2072}
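
/*
 * Worked example (illustrative values): with search_low = 1024 and
 * search_high = 1500, the probe targets an MTU of (1024 + 1500) / 2 =
 * 1262, i.e. probe_size = tcp_mtu_to_mss(sk, 1262). Once search_high -
 * search_low drops below sysctl_tcp_probe_threshold, probing backs off
 * via tcp_mtu_check_reprobe().
 */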
2073
 2074/* TCP Small Queues:
 2075 * Control the number of packets in qdisc/device queues to two packets, or ~1 ms worth.
 2076 * (These limits are doubled for retransmits.)
 2077 * This allows for:
2078 *  - better RTT estimation and ACK scheduling
2079 *  - faster recovery
2080 *  - high rates
2081 * Alas, some drivers / subsystems require a fair amount
2082 * of queued bytes to ensure line rate.
2083 * One example is wifi aggregation (802.11 AMPDU)
2084 */
2085static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2086				  unsigned int factor)
2087{
2088	unsigned int limit;
2089
2090	limit = max(2 * skb->truesize, sk->sk_pacing_rate >> 10);
2091	limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes);
2092	limit <<= factor;
2093
2094	if (atomic_read(&sk->sk_wmem_alloc) > limit) {
2095		/* Always send the 1st or 2nd skb in write queue.
2096		 * No need to wait for TX completion to call us back,
2097		 * after softirq/tasklet schedule.
2098		 * This helps when TX completions are delayed too much.
2099		 */
2100		if (skb == sk->sk_write_queue.next ||
2101		    skb->prev == sk->sk_write_queue.next)
2102			return false;
2103
2104		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2105		/* It is possible TX completion already happened
2106		 * before we set TSQ_THROTTLED, so we must
 2107		 * test the condition again.
2108		 */
2109		smp_mb__after_atomic();
2110		if (atomic_read(&sk->sk_wmem_alloc) > limit)
2111			return true;
2112	}
2113	return false;
2114}
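
/*
 * Worked example (illustrative values): at sk_pacing_rate = 12500000
 * bytes/sec, limit = max(2 * skb->truesize, 12207) bytes, clamped by
 * tcp_limit_output_bytes and shifted left by 'factor' (so doubled on
 * the retransmit path). Once sk_wmem_alloc exceeds this limit the flow
 * is throttled until TX completions free enough queued bytes.
 */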
2115
2116static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
2117{
2118	const u32 now = tcp_time_stamp;
2119
2120	if (tp->chrono_type > TCP_CHRONO_UNSPEC)
2121		tp->chrono_stat[tp->chrono_type - 1] += now - tp->chrono_start;
2122	tp->chrono_start = now;
2123	tp->chrono_type = new;
2124}
2125
2126void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
2127{
2128	struct tcp_sock *tp = tcp_sk(sk);
2129
2130	/* If there are multiple conditions worthy of tracking in a
2131	 * chronograph then the highest priority enum takes precedence
2132	 * over the other conditions. So that if something "more interesting"
2133	 * starts happening, stop the previous chrono and start a new one.
2134	 */
2135	if (type > tp->chrono_type)
2136		tcp_chrono_set(tp, type);
2137}
2138
2139void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
2140{
2141	struct tcp_sock *tp = tcp_sk(sk);
2142
2143
2144	/* There are multiple conditions worthy of tracking in a
2145	 * chronograph, so that the highest priority enum takes
2146	 * precedence over the other conditions (see tcp_chrono_start).
 2147	 * If a condition stops, we only stop chrono tracking if
 2148	 * it's the "most interesting" or current chrono we are
 2149	 * tracking, and we start the busy chrono if we have pending data.
2150	 */
2151	if (tcp_write_queue_empty(sk))
2152		tcp_chrono_set(tp, TCP_CHRONO_UNSPEC);
2153	else if (type == tp->chrono_type)
2154		tcp_chrono_set(tp, TCP_CHRONO_BUSY);
2155}
2156
2157/* This routine writes packets to the network.  It advances the
2158 * send_head.  This happens as incoming acks open up the remote
2159 * window for us.
2160 *
2161 * LARGESEND note: !tcp_urg_mode is overkill, only frames between
2162 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2163 * account rare use of URG, this is not a big flaw.
2164 *
2165 * Send at most one packet when push_one > 0. Temporarily ignore
2166 * cwnd limit to force at most one packet out when push_one == 2.
 2167 *
 2168 * Returns true if no segments are in flight and we have queued segments,
2169 * but cannot send anything now because of SWS or another problem.
2170 */
2171static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2172			   int push_one, gfp_t gfp)
2173{
2174	struct tcp_sock *tp = tcp_sk(sk);
2175	struct sk_buff *skb;
2176	unsigned int tso_segs, sent_pkts;
2177	int cwnd_quota;
2178	int result;
2179	bool is_cwnd_limited = false, is_rwnd_limited = false;
2180	u32 max_segs;
2181
2182	sent_pkts = 0;
2183
2184	if (!push_one) {
2185		/* Do MTU probing. */
2186		result = tcp_mtu_probe(sk);
2187		if (!result) {
2188			return false;
2189		} else if (result > 0) {
2190			sent_pkts = 1;
2191		}
2192	}
2193
2194	max_segs = tcp_tso_segs(sk, mss_now);
2195	while ((skb = tcp_send_head(sk))) {
2196		unsigned int limit;
2197
2198		tso_segs = tcp_init_tso_segs(skb, mss_now);
2199		BUG_ON(!tso_segs);
2200
2201		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
 2202			/* "skb_mstamp" is used as the starting point for the retransmit timer */
2203			skb_mstamp_get(&skb->skb_mstamp);
2204			goto repair; /* Skip network transmission */
2205		}
2206
2207		cwnd_quota = tcp_cwnd_test(tp, skb);
2208		if (!cwnd_quota) {
2209			if (push_one == 2)
2210				/* Force out a loss probe pkt. */
2211				cwnd_quota = 1;
2212			else
2213				break;
2214		}
2215
2216		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
2217			is_rwnd_limited = true;
2218			break;
2219		}
2220
2221		if (tso_segs == 1) {
2222			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2223						     (tcp_skb_is_last(sk, skb) ?
2224						      nonagle : TCP_NAGLE_PUSH))))
2225				break;
2226		} else {
2227			if (!push_one &&
2228			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
2229						 max_segs))
2230				break;
2231		}
2232
2233		limit = mss_now;
2234		if (tso_segs > 1 && !tcp_urg_mode(tp))
2235			limit = tcp_mss_split_point(sk, skb, mss_now,
2236						    min_t(unsigned int,
2237							  cwnd_quota,
2238							  max_segs),
2239						    nonagle);
2240
2241		if (skb->len > limit &&
2242		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
2243			break;
2244
2245		if (test_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
2246			clear_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags);
2247		if (tcp_small_queue_check(sk, skb, 0))
2248			break;
2249
2250		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
2251			break;
2252
2253repair:
2254		/* Advance the send_head.  This one is sent out.
2255		 * This call will increment packets_out.
2256		 */
2257		tcp_event_new_data_sent(sk, skb);
2258
2259		tcp_minshall_update(tp, mss_now, skb);
2260		sent_pkts += tcp_skb_pcount(skb);
2261
2262		if (push_one)
2263			break;
2264	}
2265
2266	if (is_rwnd_limited)
2267		tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
2268	else
2269		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
2270
2271	if (likely(sent_pkts)) {
2272		if (tcp_in_cwnd_reduction(sk))
2273			tp->prr_out += sent_pkts;
2274
2275		/* Send one loss probe per tail loss episode. */
2276		if (push_one != 2)
2277			tcp_schedule_loss_probe(sk);
2278		is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
2279		tcp_cwnd_validate(sk, is_cwnd_limited);
2280		return false;
2281	}
2282	return !tp->packets_out && tcp_send_head(sk);
2283}
2284
2285bool tcp_schedule_loss_probe(struct sock *sk)
2286{
2287	struct inet_connection_sock *icsk = inet_csk(sk);
2288	struct tcp_sock *tp = tcp_sk(sk);
2289	u32 timeout, tlp_time_stamp, rto_time_stamp;
2290	u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3);
2291
2292	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
2293		return false;
2294	/* No consecutive loss probes. */
2295	if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
2296		tcp_rearm_rto(sk);
2297		return false;
2298	}
2299	/* Don't do any loss probe on a Fast Open connection before 3WHS
2300	 * finishes.
2301	 */
2302	if (tp->fastopen_rsk)
2303		return false;
2304
2305	/* TLP is only scheduled when next timer event is RTO. */
2306	if (icsk->icsk_pending != ICSK_TIME_RETRANS)
2307		return false;
2308
 2309	/* Schedule a loss probe in 2*RTT for SACK-capable connections
 2310	 * in Open state that are limited by either cwnd or the application.
2311	 */
2312	if (sysctl_tcp_early_retrans < 3 || !tp->packets_out ||
2313	    !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
2314		return false;
2315
2316	if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
2317	     tcp_send_head(sk))
2318		return false;
2319
2320	/* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account
2321	 * for delayed ack when there's one outstanding packet. If no RTT
2322	 * sample is available then probe after TCP_TIMEOUT_INIT.
2323	 */
2324	timeout = rtt << 1 ? : TCP_TIMEOUT_INIT;
2325	if (tp->packets_out == 1)
2326		timeout = max_t(u32, timeout,
2327				(rtt + (rtt >> 1) + TCP_DELACK_MAX));
2328	timeout = max_t(u32, timeout, msecs_to_jiffies(10));
2329
2330	/* If RTO is shorter, just schedule TLP in its place. */
2331	tlp_time_stamp = tcp_time_stamp + timeout;
2332	rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
2333	if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
2334		s32 delta = rto_time_stamp - tcp_time_stamp;
2335		if (delta > 0)
2336			timeout = delta;
2337	}
2338
2339	inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
2340				  TCP_RTO_MAX);
2341	return true;
2342}
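
/*
 * Worked example of the timeout above (illustrative values, HZ = 1000):
 * srtt_us = 40000 gives rtt = usecs_to_jiffies(5000) = 5 jiffies, so
 * timeout = 2 * rtt = 10 ms. With exactly one packet outstanding it is
 * raised to rtt + rtt/2 + TCP_DELACK_MAX to leave room for a delayed
 * ACK, and the probe is never scheduled past the pending RTO.
 */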
2343
2344/* Thanks to skb fast clones, we can detect if a prior transmit of
2345 * a packet is still in a qdisc or driver queue.
 2346 * In this case, there is very little point in doing a retransmit!
2347 */
2348static bool skb_still_in_host_queue(const struct sock *sk,
2349				    const struct sk_buff *skb)
2350{
2351	if (unlikely(skb_fclone_busy(sk, skb))) {
2352		NET_INC_STATS(sock_net(sk),
2353			      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
2354		return true;
2355	}
2356	return false;
2357}
2358
 2359/* When the probe timeout (PTO) fires, try to send a new segment if possible, else
2360 * retransmit the last segment.
2361 */
2362void tcp_send_loss_probe(struct sock *sk)
2363{
2364	struct tcp_sock *tp = tcp_sk(sk);
2365	struct sk_buff *skb;
2366	int pcount;
2367	int mss = tcp_current_mss(sk);
2368
2369	skb = tcp_send_head(sk);
2370	if (skb) {
2371		if (tcp_snd_wnd_test(tp, skb, mss)) {
2372			pcount = tp->packets_out;
2373			tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2374			if (tp->packets_out > pcount)
2375				goto probe_sent;
2376			goto rearm_timer;
2377		}
2378		skb = tcp_write_queue_prev(sk, skb);
2379	} else {
2380		skb = tcp_write_queue_tail(sk);
2381	}
2382
2383	/* At most one outstanding TLP retransmission. */
2384	if (tp->tlp_high_seq)
2385		goto rearm_timer;
2386
2387	/* Retransmit last segment. */
2388	if (WARN_ON(!skb))
2389		goto rearm_timer;
2390
2391	if (skb_still_in_host_queue(sk, skb))
2392		goto rearm_timer;
2393
2394	pcount = tcp_skb_pcount(skb);
2395	if (WARN_ON(!pcount))
2396		goto rearm_timer;
2397
2398	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
2399		if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss,
2400					  GFP_ATOMIC)))
2401			goto rearm_timer;
2402		skb = tcp_write_queue_next(sk, skb);
2403	}
2404
2405	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
2406		goto rearm_timer;
2407
2408	if (__tcp_retransmit_skb(sk, skb, 1))
2409		goto rearm_timer;
2410
2411	/* Record snd_nxt for loss detection. */
2412	tp->tlp_high_seq = tp->snd_nxt;
2413
2414probe_sent:
2415	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
2416	/* Reset s.t. tcp_rearm_rto will restart timer from now */
2417	inet_csk(sk)->icsk_pending = 0;
2418rearm_timer:
2419	tcp_rearm_rto(sk);
2420}
2421
2422/* Push out any pending frames which were held back due to
 2423 * TCP_CORK or an attempt at coalescing tiny packets.
2424 * The socket must be locked by the caller.
2425 */
2426void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
2427			       int nonagle)
2428{
2429	/* If we are closed, the bytes will have to remain here.
2430	 * In time closedown will finish, we empty the write queue and
2431	 * all will be happy.
2432	 */
2433	if (unlikely(sk->sk_state == TCP_CLOSE))
2434		return;
2435
2436	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
2437			   sk_gfp_mask(sk, GFP_ATOMIC)))
2438		tcp_check_probe_timer(sk);
2439}
2440
 2441/* Send the _single_ skb sitting at the send head. This function requires a
 2442 * true push of pending frames to set up the probe timer etc.
2443 */
2444void tcp_push_one(struct sock *sk, unsigned int mss_now)
2445{
2446	struct sk_buff *skb = tcp_send_head(sk);
2447
2448	BUG_ON(!skb || skb->len < mss_now);
2449
2450	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2451}
2452
2453/* This function returns the amount that we can raise the
2454 * usable window based on the following constraints
2455 *
2456 * 1. The window can never be shrunk once it is offered (RFC 793)
2457 * 2. We limit memory per socket
2458 *
2459 * RFC 1122:
2460 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
 2461 *  RCV.NXT + RCV.WND fixed until:
2462 *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
2463 *
2464 * i.e. don't raise the right edge of the window until you can raise
2465 * it at least MSS bytes.
2466 *
2467 * Unfortunately, the recommended algorithm breaks header prediction,
2468 * since header prediction assumes th->window stays fixed.
2469 *
2470 * Strictly speaking, keeping th->window fixed violates the receiver
2471 * side SWS prevention criteria. The problem is that under this rule
2472 * a stream of single byte packets will cause the right side of the
2473 * window to always advance by a single byte.
2474 *
2475 * Of course, if the sender implements sender side SWS prevention
2476 * then this will not be a problem.
2477 *
2478 * BSD seems to make the following compromise:
2479 *
2480 *	If the free space is less than the 1/4 of the maximum
2481 *	space available and the free space is less than 1/2 mss,
2482 *	then set the window to 0.
2483 *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
2484 *	Otherwise, just prevent the window from shrinking
2485 *	and from being larger than the largest representable value.
2486 *
2487 * This prevents incremental opening of the window in the regime
2488 * where TCP is limited by the speed of the reader side taking
2489 * data out of the TCP receive queue. It does nothing about
2490 * those cases where the window is constrained on the sender side
2491 * because the pipeline is full.
2492 *
2493 * BSD also seems to "accidentally" limit itself to windows that are a
2494 * multiple of MSS, at least until the free space gets quite small.
2495 * This would appear to be a side effect of the mbuf implementation.
2496 * Combining these two algorithms results in the observed behavior
2497 * of having a fixed window size at almost all times.
2498 *
2499 * Below we obtain similar behavior by forcing the offered window to
2500 * a multiple of the mss when it is feasible to do so.
2501 *
2502 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
2503 * Regular options like TIMESTAMP are taken into account.
2504 */
2505u32 __tcp_select_window(struct sock *sk)
2506{
2507	struct inet_connection_sock *icsk = inet_csk(sk);
2508	struct tcp_sock *tp = tcp_sk(sk);
2509	/* MSS for the peer's data.  Previous versions used mss_clamp
2510	 * here.  I don't know if the value based on our guesses
2511	 * of peer's MSS is better for the performance.  It's more correct
2512	 * but may be worse for the performance because of rcv_mss
2513	 * fluctuations.  --SAW  1998/11/1
2514	 */
2515	int mss = icsk->icsk_ack.rcv_mss;
2516	int free_space = tcp_space(sk);
2517	int allowed_space = tcp_full_space(sk);
2518	int full_space = min_t(int, tp->window_clamp, allowed_space);
2519	int window;
2520
2521	if (unlikely(mss > full_space)) {
2522		mss = full_space;
2523		if (mss <= 0)
2524			return 0;
2525	}
2526	if (free_space < (full_space >> 1)) {
2527		icsk->icsk_ack.quick = 0;
2528
2529		if (tcp_under_memory_pressure(sk))
2530			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2531					       4U * tp->advmss);
2532
2533		/* free_space might become our new window, make sure we don't
2534		 * increase it due to wscale.
2535		 */
2536		free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
2537
2538		/* if free space is less than mss estimate, or is below 1/16th
2539		 * of the maximum allowed, try to move to zero-window, else
2540		 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
2541		 * new incoming data is dropped due to memory limits.
2542		 * With large window, mss test triggers way too late in order
2543		 * to announce zero window in time before rmem limit kicks in.
2544		 */
2545		if (free_space < (allowed_space >> 4) || free_space < mss)
2546			return 0;
2547	}
2548
2549	if (free_space > tp->rcv_ssthresh)
2550		free_space = tp->rcv_ssthresh;
2551
2552	/* Don't do rounding if we are using window scaling, since the
2553	 * scaled window will not line up with the MSS boundary anyway.
2554	 */
2555	window = tp->rcv_wnd;
2556	if (tp->rx_opt.rcv_wscale) {
2557		window = free_space;
2558
2559		/* Advertise enough space so that it won't get scaled away.
 2560		 * Important case: prevent zero window announcement if
2561		 * 1<<rcv_wscale > mss.
2562		 */
2563		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
2564			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
2565				  << tp->rx_opt.rcv_wscale);
2566	} else {
2567		/* Get the largest window that is a nice multiple of mss.
2568		 * Window clamp already applied above.
2569		 * If our current window offering is within 1 mss of the
2570		 * free space we just keep it. This prevents the divide
2571		 * and multiply from happening most of the time.
2572		 * We also don't do any window rounding when the free space
2573		 * is too small.
2574		 */
2575		if (window <= free_space - mss || window > free_space)
2576			window = (free_space / mss) * mss;
2577		else if (mss == full_space &&
2578			 free_space > window + (full_space >> 1))
2579			window = free_space;
2580	}
2581
2582	return window;
2583}
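
/*
 * Worked example of the rounding branch (illustrative values, no window
 * scaling): with mss = 1460 and free_space = 10000, a previously
 * advertised window of 9000 is within one mss of free_space and is kept
 * as-is, while a stale window of 4000 is recomputed as (10000 / 1460) *
 * 1460 = 8760.
 */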
2584
2585void tcp_skb_collapse_tstamp(struct sk_buff *skb,
2586			     const struct sk_buff *next_skb)
2587{
2588	if (unlikely(tcp_has_tx_tstamp(next_skb))) {
2589		const struct skb_shared_info *next_shinfo =
2590			skb_shinfo(next_skb);
2591		struct skb_shared_info *shinfo = skb_shinfo(skb);
2592
2593		shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
2594		shinfo->tskey = next_shinfo->tskey;
2595		TCP_SKB_CB(skb)->txstamp_ack |=
2596			TCP_SKB_CB(next_skb)->txstamp_ack;
2597	}
2598}
2599
2600/* Collapses two adjacent SKB's during retransmission. */
2601static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
2602{
2603	struct tcp_sock *tp = tcp_sk(sk);
2604	struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
2605	int skb_size, next_skb_size;
2606
2607	skb_size = skb->len;
2608	next_skb_size = next_skb->len;
2609
2610	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
2611
2612	if (next_skb_size) {
2613		if (next_skb_size <= skb_availroom(skb))
2614			skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size),
2615				      next_skb_size);
2616		else if (!skb_shift(skb, next_skb, next_skb_size))
2617			return false;
2618	}
2619	tcp_highest_sack_combine(sk, next_skb, skb);
2620
2621	tcp_unlink_write_queue(next_skb, sk);
2622
2623	if (next_skb->ip_summed == CHECKSUM_PARTIAL)
2624		skb->ip_summed = CHECKSUM_PARTIAL;
2625
2626	if (skb->ip_summed != CHECKSUM_PARTIAL)
2627		skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
2628
2629	/* Update sequence range on original skb. */
2630	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
2631
2632	/* Merge over control information. This moves PSH/FIN etc. over */
2633	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
2634
2635	/* All done, get rid of second SKB and account for it so
2636	 * packet counting does not break.
2637	 */
2638	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
2639	TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
2640
2641	/* changed transmit queue under us so clear hints */
2642	tcp_clear_retrans_hints_partial(tp);
2643	if (next_skb == tp->retransmit_skb_hint)
2644		tp->retransmit_skb_hint = skb;
2645
2646	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2647
2648	tcp_skb_collapse_tstamp(skb, next_skb);
2649
2650	sk_wmem_free_skb(sk, next_skb);
2651	return true;
2652}
2653
2654/* Check if coalescing SKBs is legal. */
2655static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
2656{
2657	if (tcp_skb_pcount(skb) > 1)
2658		return false;
2659	if (skb_cloned(skb))
2660		return false;
2661	if (skb == tcp_send_head(sk))
2662		return false;
2663	/* Some heuristics for collapsing over SACK'd could be invented */
2664	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2665		return false;
2666
2667	return true;
2668}
2669
2670/* Collapse packets in the retransmit queue to create fewer
2671 * packets on the wire. This is only done on retransmission.
2672 */
2673static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
2674				     int space)
2675{
2676	struct tcp_sock *tp = tcp_sk(sk);
2677	struct sk_buff *skb = to, *tmp;
2678	bool first = true;
2679
2680	if (!sysctl_tcp_retrans_collapse)
2681		return;
2682	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
2683		return;
2684
2685	tcp_for_write_queue_from_safe(skb, tmp, sk) {
2686		if (!tcp_can_collapse(sk, skb))
2687			break;
2688
2689		if (!tcp_skb_can_collapse_to(to))
2690			break;
2691
2692		space -= skb->len;
2693
2694		if (first) {
2695			first = false;
2696			continue;
2697		}
2698
2699		if (space < 0)
2700			break;
2701
2702		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
2703			break;
2704
2705		if (!tcp_collapse_retrans(sk, to))
2706			break;
2707	}
2708}
2709
2710/* This retransmits one SKB.  Policy decisions and retransmit queue
2711 * state updates are done by the caller.  Returns non-zero if an
2712 * error occurred which prevented the send.
2713 */
2714int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
2715{
2716	struct inet_connection_sock *icsk = inet_csk(sk);
2717	struct tcp_sock *tp = tcp_sk(sk);
2718	unsigned int cur_mss;
2719	int diff, len, err;
2720
2721
2722	/* Inconclusive MTU probe */
2723	if (icsk->icsk_mtup.probe_size)
2724		icsk->icsk_mtup.probe_size = 0;
2725
2726	/* Do not send more than we queued. 1/4 is reserved for possible
2727	 * copying overhead: fragmentation, tunneling, mangling etc.
2728	 */
2729	if (atomic_read(&sk->sk_wmem_alloc) >
2730	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
2731		  sk->sk_sndbuf))
2732		return -EAGAIN;
2733
2734	if (skb_still_in_host_queue(sk, skb))
2735		return -EBUSY;
2736
2737	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
2738		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
2739			BUG();
2740		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
2741			return -ENOMEM;
2742	}
2743
2744	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
2745		return -EHOSTUNREACH; /* Routing failure or similar. */
2746
2747	cur_mss = tcp_current_mss(sk);
2748
2749	/* If the receiver has shrunk its window, and skb is out of
2750	 * the new window, do not retransmit it. The exception is the
2751	 * case when the window is shrunk to zero, where
2752	 * our retransmit serves as a zero window probe.
2753	 */
2754	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
2755	    TCP_SKB_CB(skb)->seq != tp->snd_una)
2756		return -EAGAIN;
2757
2758	len = cur_mss * segs;
2759	if (skb->len > len) {
2760		if (tcp_fragment(sk, skb, len, cur_mss, GFP_ATOMIC))
2761			return -ENOMEM; /* We'll try again later. */
2762	} else {
2763		if (skb_unclone(skb, GFP_ATOMIC))
2764			return -ENOMEM;
2765
2766		diff = tcp_skb_pcount(skb);
2767		tcp_set_skb_tso_segs(skb, cur_mss);
2768		diff -= tcp_skb_pcount(skb);
2769		if (diff)
2770			tcp_adjust_pcount(sk, skb, diff);
2771		if (skb->len < cur_mss)
2772			tcp_retrans_try_collapse(sk, skb, cur_mss);
2773	}
2774
2775	/* RFC3168, section 6.1.1.1. ECN fallback */
2776	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
2777		tcp_ecn_clear_syn(sk, skb);
2778
2779	/* make sure skb->data is aligned on arches that require it
2780	 * and check if ack-trimming & collapsing extended the headroom
2781	 * beyond what csum_start can cover.
2782	 */
2783	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
2784		     skb_headroom(skb) >= 0xFFFF)) {
2785		struct sk_buff *nskb;
2786
2787		skb_mstamp_get(&skb->skb_mstamp);
2788		nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
2789		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
2790			     -ENOBUFS;
2791	} else {
2792		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2793	}
2794
2795	if (likely(!err)) {
2796		segs = tcp_skb_pcount(skb);
2797
2798		TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
2799		/* Update global TCP statistics. */
2800		TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
2801		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
2802			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
2803		tp->total_retrans += segs;
2804	}
2805	return err;
2806}
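/* Illustrative sketch (not part of the kernel source): the write-memory
 * guard near the top of __tcp_retransmit_skb().  A retransmit is refused
 * once in-flight allocations exceed the queued bytes plus a 25% copying
 * reserve, capped by the send buffer.  Hypothetical names, userspace C.
 */
#if 0
static int retransmit_wmem_ok(unsigned int wmem_alloc,
			      unsigned int wmem_queued,
			      unsigned int sndbuf)
{
	unsigned int limit = wmem_queued + (wmem_queued >> 2);	/* +25% */

	if (limit > sndbuf)
		limit = sndbuf;
	return wmem_alloc <= limit;	/* false maps to -EAGAIN above */
}
#endif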
2807
2808int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
2809{
2810	struct tcp_sock *tp = tcp_sk(sk);
2811	int err = __tcp_retransmit_skb(sk, skb, segs);
2812
2813	if (err == 0) {
2814#if FASTRETRANS_DEBUG > 0
2815		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2816			net_dbg_ratelimited("retrans_out leaked\n");
2817		}
2818#endif
2819		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
2820		tp->retrans_out += tcp_skb_pcount(skb);
2821
2822		/* Save stamp of the first retransmit. */
2823		if (!tp->retrans_stamp)
2824			tp->retrans_stamp = tcp_skb_timestamp(skb);
2825
2826	} else if (err != -EBUSY) {
2827		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
2828	}
2829
2830	if (tp->undo_retrans < 0)
2831		tp->undo_retrans = 0;
2832	tp->undo_retrans += tcp_skb_pcount(skb);
2833	return err;
2834}
2835
2836/* Check if forward retransmits are possible in the current
2837 * window/congestion state.
2838 */
2839static bool tcp_can_forward_retransmit(struct sock *sk)
2840{
2841	const struct inet_connection_sock *icsk = inet_csk(sk);
2842	const struct tcp_sock *tp = tcp_sk(sk);
2843
2844	/* Forward retransmissions are possible only during Recovery. */
2845	if (icsk->icsk_ca_state != TCP_CA_Recovery)
2846		return false;
2847
2848	/* No forward retransmissions in Reno are possible. */
2849	if (tcp_is_reno(tp))
2850		return false;
2851
2852	/* Yeah, we have to make a difficult choice between forward transmission
2853	 * and retransmission... Both ways have their merits...
2854	 *
2855	 * For now we do not retransmit anything, while we have some new
2856	 * segments to send. In the other cases, follow rule 3 for
2857	 * NextSeg() specified in RFC3517.
2858	 */
2859
2860	if (tcp_may_send_now(sk))
2861		return false;
2862
2863	return true;
2864}
2865
2866/* This gets called after a retransmit timeout, and the initially
2867 * retransmitted data is acknowledged.  It tries to continue
2868 * resending the rest of the retransmit queue, until either
2869 * we've sent it all or the congestion window limit is reached.
2870 * If doing SACK, the first ACK which comes back for a timeout
2871 * based retransmit packet might feed us FACK information again.
2872 * If so, we use it to avoid unnecessary retransmissions.
2873 */
2874void tcp_xmit_retransmit_queue(struct sock *sk)
2875{
2876	const struct inet_connection_sock *icsk = inet_csk(sk);
2877	struct tcp_sock *tp = tcp_sk(sk);
2878	struct sk_buff *skb;
2879	struct sk_buff *hole = NULL;
2880	u32 max_segs, last_lost;
2881	int mib_idx;
2882	int fwd_rexmitting = 0;
2883
2884	if (!tp->packets_out)
2885		return;
2886
2887	if (!tp->lost_out)
2888		tp->retransmit_high = tp->snd_una;
2889
2890	if (tp->retransmit_skb_hint) {
2891		skb = tp->retransmit_skb_hint;
2892		last_lost = TCP_SKB_CB(skb)->end_seq;
2893		if (after(last_lost, tp->retransmit_high))
2894			last_lost = tp->retransmit_high;
2895	} else {
2896		skb = tcp_write_queue_head(sk);
2897		last_lost = tp->snd_una;
2898	}
2899
2900	max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
2901	tcp_for_write_queue_from(skb, sk) {
2902		__u8 sacked;
2903		int segs;
2904
2905		if (skb == tcp_send_head(sk))
2906			break;
2907		/* we could do better than to assign each time */
2908		if (!hole)
2909			tp->retransmit_skb_hint = skb;
2910
2911		segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
2912		if (segs <= 0)
2913			return;
2914		sacked = TCP_SKB_CB(skb)->sacked;
2915		/* In case tcp_shift_skb_data() has aggregated large skbs,
2916		 * we need to make sure we do not send overly big TSO packets
2917		 */
2918		segs = min_t(int, segs, max_segs);
2919
2920		if (fwd_rexmitting) {
2921begin_fwd:
2922			if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2923				break;
2924			mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
2925
2926		} else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
2927			tp->retransmit_high = last_lost;
2928			if (!tcp_can_forward_retransmit(sk))
2929				break;
2930			/* Backtrack if necessary to non-L'ed skb */
2931			if (hole) {
2932				skb = hole;
2933				hole = NULL;
2934			}
2935			fwd_rexmitting = 1;
2936			goto begin_fwd;
2937
2938		} else if (!(sacked & TCPCB_LOST)) {
2939			if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
2940				hole = skb;
2941			continue;
2942
2943		} else {
2944			last_lost = TCP_SKB_CB(skb)->end_seq;
2945			if (icsk->icsk_ca_state != TCP_CA_Loss)
2946				mib_idx = LINUX_MIB_TCPFASTRETRANS;
2947			else
2948				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
2949		}
2950
2951		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
2952			continue;
2953
2954		if (tcp_small_queue_check(sk, skb, 1))
2955			return;
2956
2957		if (tcp_retransmit_skb(sk, skb, segs))
2958			return;
2959
2960		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
2961
2962		if (tcp_in_cwnd_reduction(sk))
2963			tp->prr_out += tcp_skb_pcount(skb);
2964
2965		if (skb == tcp_write_queue_head(sk))
2966			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2967						  inet_csk(sk)->icsk_rto,
2968						  TCP_RTO_MAX);
2969	}
2970}
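/* Illustrative sketch (not part of the kernel source): the per-skb
 * retransmit budget used in the loop above -- the congestion window
 * minus what is already in flight, clamped by the TSO sizing limit
 * max_segs.  Hypothetical names, userspace C.
 */
#if 0
static int rexmit_seg_budget(int snd_cwnd, int in_flight, int max_segs)
{
	int segs = snd_cwnd - in_flight;

	if (segs <= 0)
		return 0;	/* cwnd exhausted: stop retransmitting */
	return segs < max_segs ? segs : max_segs;
}
#endif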
2971
2972/* We allow FIN packets to exceed the memory limits, to expedite
2973 * connection tear down and (memory) recovery.
2974 * Otherwise tcp_send_fin() could be tempted to either delay the FIN
2975 * or even be forced to close the flow without any FIN.
2976 * In general, we want to allow one skb per socket to avoid hangs
2977 * with edge-triggered epoll().
2978 */
2979void sk_forced_mem_schedule(struct sock *sk, int size)
2980{
2981	int amt;
2982
2983	if (size <= sk->sk_forward_alloc)
2984		return;
2985	amt = sk_mem_pages(size);
2986	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
2987	sk_memory_allocated_add(sk, amt);
2988
2989	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
2990		mem_cgroup_charge_skmem(sk->sk_memcg, amt);
2991}
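/* Illustrative sketch (not part of the kernel source): sk_mem_pages()
 * rounds the request up to whole SK_MEM_QUANTUM units, so even a tiny
 * FIN skb charges one full quantum.  The 4096-byte quantum below is an
 * assumption (SK_MEM_QUANTUM is typically PAGE_SIZE).
 */
#if 0
#define QUANTUM 4096	/* assumed SK_MEM_QUANTUM */

static int mem_pages(int size)
{
	return (size + QUANTUM - 1) / QUANTUM;	/* round-up division */
}

/* mem_pages(100) == 1 and mem_pages(4097) == 2; sk_forward_alloc then
 * grows by mem_pages(size) * QUANTUM, as sk_forced_mem_schedule() does.
 */
#endif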
2992
2993/* Send a FIN. The caller locks the socket for us.
2994 * We should try to send a FIN packet really hard, but eventually give up.
2995 */
2996void tcp_send_fin(struct sock *sk)
2997{
2998	struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
2999	struct tcp_sock *tp = tcp_sk(sk);
3000
3001	/* Optimization, tack on the FIN if we have one skb in write queue and
3002	 * this skb was not yet sent, or we are under memory pressure.
3003	 * Note: in the latter case, the FIN packet will be sent after a timeout,
3004	 * as the TCP stack thinks it has already been transmitted.
3005	 */
3006	if (tskb && (tcp_send_head(sk) || tcp_under_memory_pressure(sk))) {
3007coalesce:
3008		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
3009		TCP_SKB_CB(tskb)->end_seq++;
3010		tp->write_seq++;
3011		if (!tcp_send_head(sk)) {
3012			/* This means tskb was already sent.
3013			 * Pretend we included the FIN on the previous transmit.
3014			 * We need to set tp->snd_nxt to the value it would have
3015			 * if the FIN had been sent, because the retransmit path
3016			 * does not change tp->snd_nxt.
3017			 */
3018			tp->snd_nxt++;
3019			return;
3020		}
3021	} else {
3022		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
3023		if (unlikely(!skb)) {
3024			if (tskb)
3025				goto coalesce;
3026			return;
3027		}
3028		skb_reserve(skb, MAX_TCP_HEADER);
3029		sk_forced_mem_schedule(sk, skb->truesize);
3030		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
3031		tcp_init_nondata_skb(skb, tp->write_seq,
3032				     TCPHDR_ACK | TCPHDR_FIN);
3033		tcp_queue_skb(sk, skb);
3034	}
3035	__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
3036}
3037
3038/* We get here when a process closes a file descriptor (either due to
3039 * an explicit close() or as a byproduct of exit()'ing) and there
3040 * was unread data in the receive queue.  This behavior is recommended
3041 * by RFC 2525, section 2.17.  -DaveM
3042 */
3043void tcp_send_active_reset(struct sock *sk, gfp_t priority)
3044{
3045	struct sk_buff *skb;
3046
3047	/* NOTE: No TCP options attached and we never retransmit this. */
3048	skb = alloc_skb(MAX_TCP_HEADER, priority);
3049	if (!skb) {
3050		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3051		return;
3052	}
3053
3054	/* Reserve space for headers and prepare control bits. */
3055	skb_reserve(skb, MAX_TCP_HEADER);
3056	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
3057			     TCPHDR_ACK | TCPHDR_RST);
3058	skb_mstamp_get(&skb->skb_mstamp);
3059	/* Send it off. */
3060	if (tcp_transmit_skb(sk, skb, 0, priority))
3061		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3062
3063	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
3064}
3065
3066/* Send a crossed SYN-ACK during socket establishment.
3067 * WARNING: This routine must only be called when we have already sent
3068 * a SYN packet that crossed the incoming SYN that caused this routine
3069 * to get called. If this assumption fails then the initial rcv_wnd
3070 * and rcv_wscale values will not be correct.
3071 */
3072int tcp_send_synack(struct sock *sk)
3073{
3074	struct sk_buff *skb;
3075
3076	skb = tcp_write_queue_head(sk);
3077	if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
3078		pr_debug("%s: wrong queue state\n", __func__);
3079		return -EFAULT;
3080	}
3081	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
3082		if (skb_cloned(skb)) {
3083			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
3084			if (!nskb)
3085				return -ENOMEM;
3086			tcp_unlink_write_queue(skb, sk);
3087			__skb_header_release(nskb);
3088			__tcp_add_write_queue_head(sk, nskb);
3089			sk_wmem_free_skb(sk, skb);
3090			sk->sk_wmem_queued += nskb->truesize;
3091			sk_mem_charge(sk, nskb->truesize);
3092			skb = nskb;
3093		}
3094
3095		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
3096		tcp_ecn_send_synack(sk, skb);
3097	}
3098	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3099}
3100
3101/**
3102 * tcp_make_synack - Prepare a SYN-ACK.
3103 * @sk: listener socket
3104 * @dst: dst entry attached to the SYNACK
3105 * @req: request_sock pointer
3106 *
3107 * Allocate one skb and build a SYNACK packet.
3108 * @dst is consumed : Caller should not use it again.
3109 */
3110struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
3111				struct request_sock *req,
3112				struct tcp_fastopen_cookie *foc,
3113				enum tcp_synack_type synack_type)
3114{
3115	struct inet_request_sock *ireq = inet_rsk(req);
3116	const struct tcp_sock *tp = tcp_sk(sk);
3117	struct tcp_md5sig_key *md5 = NULL;
3118	struct tcp_out_options opts;
3119	struct sk_buff *skb;
3120	int tcp_header_size;
3121	struct tcphdr *th;
3122	u16 user_mss;
3123	int mss;
3124
3125	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
3126	if (unlikely(!skb)) {
3127		dst_release(dst);
3128		return NULL;
3129	}
3130	/* Reserve space for headers. */
3131	skb_reserve(skb, MAX_TCP_HEADER);
3132
3133	switch (synack_type) {
3134	case TCP_SYNACK_NORMAL:
3135		skb_set_owner_w(skb, req_to_sk(req));
3136		break;
3137	case TCP_SYNACK_COOKIE:
3138		/* Under synflood, we do not attach skb to a socket,
3139		 * to avoid false sharing.
3140		 */
3141		break;
3142	case TCP_SYNACK_FASTOPEN:
3143		/* sk is a const pointer, because multiple cpus
3144		 * might call us concurrently.
3145		 * sk->sk_wmem_alloc is an atomic, so it is safe to promote it to rw.
3146		 */
3147		skb_set_owner_w(skb, (struct sock *)sk);
3148		break;
3149	}
3150	skb_dst_set(skb, dst);
3151
3152	mss = dst_metric_advmss(dst);
3153	user_mss = READ_ONCE(tp->rx_opt.user_mss);
3154	if (user_mss && user_mss < mss)
3155		mss = user_mss;
3156
3157	memset(&opts, 0, sizeof(opts));
3158#ifdef CONFIG_SYN_COOKIES
3159	if (unlikely(req->cookie_ts))
3160		skb->skb_mstamp.stamp_jiffies = cookie_init_timestamp(req);
3161	else
3162#endif
3163	skb_mstamp_get(&skb->skb_mstamp);
3164
3165#ifdef CONFIG_TCP_MD5SIG
3166	rcu_read_lock();
3167	md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
3168#endif
3169	skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
3170	tcp_header_size = tcp_synack_options(req, mss, skb, &opts, md5, foc) +
3171			  sizeof(*th);
3172
3173	skb_push(skb, tcp_header_size);
3174	skb_reset_transport_header(skb);
3175
3176	th = (struct tcphdr *)skb->data;
3177	memset(th, 0, sizeof(struct tcphdr));
3178	th->syn = 1;
3179	th->ack = 1;
3180	tcp_ecn_make_synack(req, th);
3181	th->source = htons(ireq->ir_num);
3182	th->dest = ireq->ir_rmt_port;
3183	/* Setting of flags is superfluous here for callers (and ECE is
3184	 * not even correctly set)
3185	 */
3186	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
3187			     TCPHDR_SYN | TCPHDR_ACK);
3188
3189	th->seq = htonl(TCP_SKB_CB(skb)->seq);
3190	/* XXX data is queued and acked as is. No buffer/window check */
3191	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
3192
3193	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
3194	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
3195	tcp_options_write((__be32 *)(th + 1), NULL, &opts);
3196	th->doff = (tcp_header_size >> 2);
3197	__TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
3198
3199#ifdef CONFIG_TCP_MD5SIG
3200	/* Okay, we have all we need - do the md5 hash if needed */
3201	if (md5)
3202		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
3203					       md5, req_to_sk(req), skb);
3204	rcu_read_unlock();
3205#endif
3206
3207	/* Do not fool tcpdump (if any), clean our debris */
3208	skb->tstamp = 0;
3209	return skb;
3210}
3211EXPORT_SYMBOL(tcp_make_synack);
3212
3213static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
3214{
3215	struct inet_connection_sock *icsk = inet_csk(sk);
3216	const struct tcp_congestion_ops *ca;
3217	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
3218
3219	if (ca_key == TCP_CA_UNSPEC)
3220		return;
3221
3222	rcu_read_lock();
3223	ca = tcp_ca_find_key(ca_key);
3224	if (likely(ca && try_module_get(ca->owner))) {
3225		module_put(icsk->icsk_ca_ops->owner);
3226		icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
3227		icsk->icsk_ca_ops = ca;
3228	}
3229	rcu_read_unlock();
3230}
3231
3232/* Do all connect socket setups that can be done AF independent. */
3233static void tcp_connect_init(struct sock *sk)
3234{
3235	const struct dst_entry *dst = __sk_dst_get(sk);
3236	struct tcp_sock *tp = tcp_sk(sk);
3237	__u8 rcv_wscale;
3238
3239	/* We'll fix this up when we get a response from the other end.
3240	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
3241	 */
3242	tp->tcp_header_len = sizeof(struct tcphdr) +
3243		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
3244
3245#ifdef CONFIG_TCP_MD5SIG
3246	if (tp->af_specific->md5_lookup(sk, sk))
3247		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
3248#endif
3249
3250	/* If the user gave us TCP_MAXSEG, record it to clamp the MSS */
3251	if (tp->rx_opt.user_mss)
3252		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
3253	tp->max_window = 0;
3254	tcp_mtup_init(sk);
3255	tcp_sync_mss(sk, dst_mtu(dst));
3256
3257	tcp_ca_dst_init(sk, dst);
3258
3259	if (!tp->window_clamp)
3260		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
3261	tp->advmss = dst_metric_advmss(dst);
3262	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
3263		tp->advmss = tp->rx_opt.user_mss;
3264
3265	tcp_initialize_rcv_mss(sk);
3266
3267	/* limit the window selection if the user enforces a smaller rx buffer */
3268	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
3269	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
3270		tp->window_clamp = tcp_full_space(sk);
3271
3272	tcp_select_initial_window(tcp_full_space(sk),
3273				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
3274				  &tp->rcv_wnd,
3275				  &tp->window_clamp,
3276				  sysctl_tcp_window_scaling,
3277				  &rcv_wscale,
3278				  dst_metric(dst, RTAX_INITRWND));
3279
3280	tp->rx_opt.rcv_wscale = rcv_wscale;
3281	tp->rcv_ssthresh = tp->rcv_wnd;
3282
3283	sk->sk_err = 0;
3284	sock_reset_flag(sk, SOCK_DONE);
3285	tp->snd_wnd = 0;
3286	tcp_init_wl(tp, 0);
3287	tp->snd_una = tp->write_seq;
3288	tp->snd_sml = tp->write_seq;
3289	tp->snd_up = tp->write_seq;
3290	tp->snd_nxt = tp->write_seq;
3291
3292	if (likely(!tp->repair))
3293		tp->rcv_nxt = 0;
3294	else
3295		tp->rcv_tstamp = tcp_time_stamp;
3296	tp->rcv_wup = tp->rcv_nxt;
3297	tp->copied_seq = tp->rcv_nxt;
3298
3299	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
3300	inet_csk(sk)->icsk_retransmits = 0;
3301	tcp_clear_retrans(tp);
3302}
3303
3304static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
3305{
3306	struct tcp_sock *tp = tcp_sk(sk);
3307	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
3308
3309	tcb->end_seq += skb->len;
3310	__skb_header_release(skb);
3311	__tcp_add_write_queue_tail(sk, skb);
3312	sk->sk_wmem_queued += skb->truesize;
3313	sk_mem_charge(sk, skb->truesize);
3314	tp->write_seq = tcb->end_seq;
3315	tp->packets_out += tcp_skb_pcount(skb);
3316}
3317
3318/* Build and send a SYN with data and (cached) Fast Open cookie. However,
3319 * queue a data-only packet after the regular SYN, such that regular SYNs
3320 * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
3321 * only the SYN sequence, the data are retransmitted in the first ACK.
3322 * If the cookie is not cached or another error occurs, fall back to sending a
3323 * regular SYN with the Fast Open cookie request option.
3324 */
3325static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3326{
3327	struct tcp_sock *tp = tcp_sk(sk);
3328	struct tcp_fastopen_request *fo = tp->fastopen_req;
3329	int syn_loss = 0, space, err = 0;
3330	unsigned long last_syn_loss = 0;
3331	struct sk_buff *syn_data;
3332
3333	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
3334	tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
3335			       &syn_loss, &last_syn_loss);
3336	/* Recurring FO SYN losses: revert to regular handshake temporarily */
3337	if (syn_loss > 1 &&
3338	    time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
3339		fo->cookie.len = -1;
3340		goto fallback;
3341	}
3342
3343	if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE)
3344		fo->cookie.len = -1;
3345	else if (fo->cookie.len <= 0)
3346		goto fallback;
3347
3348	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
3349	 * user-MSS. Reserve maximum option space for middleboxes that add
3350	 * private TCP options. The cost is reduced data space in SYN :(
3351	 */
3352	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->rx_opt.mss_clamp)
3353		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
3354	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
3355		MAX_TCP_OPTION_SPACE;
3356
3357	space = min_t(size_t, space, fo->size);
3358
3359	/* limit to order-0 allocations */
3360	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
3361
3362	syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false);
3363	if (!syn_data)
3364		goto fallback;
3365	syn_data->ip_summed = CHECKSUM_PARTIAL;
3366	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
3367	if (space) {
3368		int copied = copy_from_iter(skb_put(syn_data, space), space,
3369					    &fo->data->msg_iter);
3370		if (unlikely(!copied)) {
3371			kfree_skb(syn_data);
3372			goto fallback;
3373		}
3374		if (copied != space) {
3375			skb_trim(syn_data, copied);
3376			space = copied;
3377		}
3378	}
3379	/* No more data pending in inet_wait_for_connect() */
3380	if (space == fo->size)
3381		fo->data = NULL;
3382	fo->copied = space;
3383
3384	tcp_connect_queue_skb(sk, syn_data);
3385	if (syn_data->len)
3386		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
3387
3388	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
3389
3390	syn->skb_mstamp = syn_data->skb_mstamp;
3391
3392	/* Now that the full SYN+DATA has been cloned and sent (or not),
3393	 * remove the SYN from the original skb (syn_data)
3394	 * that we keep in the write queue in case of a retransmit, as we
3395	 * also have the SYN packet (with no data) in the same queue.
3396	 */
3397	TCP_SKB_CB(syn_data)->seq++;
3398	TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
3399	if (!err) {
3400		tp->syn_data = (fo->copied > 0);
3401		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
3402		goto done;
3403	}
3404
3405fallback:
3406	/* Send a regular SYN with Fast Open cookie request option */
3407	if (fo->cookie.len > 0)
3408		fo->cookie.len = 0;
3409	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
3410	if (err)
3411		tp->syn_fastopen = 0;
3412done:
3413	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
3414	return err;
3415}
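/* Illustrative sketch (not part of the kernel source): the Fast Open
 * fallback window checked above doubles with each consecutive SYN loss;
 * after @syn_loss losses, SYN-data is suppressed for 60s << syn_loss
 * from the last loss.  Hypothetical name, userspace C.
 */
#if 0
/* Seconds during which a plain (non Fast Open) handshake is preferred. */
static unsigned long tfo_blackout_secs(int syn_loss)
{
	return 60UL << syn_loss;	/* 2 losses -> 240s, 3 -> 480s, ... */
}
#endif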
3416
3417/* Build a SYN and send it off. */
3418int tcp_connect(struct sock *sk)
3419{
3420	struct tcp_sock *tp = tcp_sk(sk);
3421	struct sk_buff *buff;
3422	int err;
3423
3424	tcp_connect_init(sk);
3425
3426	if (unlikely(tp->repair)) {
3427		tcp_finish_connect(sk, NULL);
3428		return 0;
3429	}
3430
3431	buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
3432	if (unlikely(!buff))
3433		return -ENOBUFS;
3434
3435	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
3436	tp->retrans_stamp = tcp_time_stamp;
3437	tcp_connect_queue_skb(sk, buff);
3438	tcp_ecn_send_syn(sk, buff);
3439
3440	/* Send off SYN; include data in Fast Open. */
3441	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
3442	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
3443	if (err == -ECONNREFUSED)
3444		return err;
3445
3446	/* We change tp->snd_nxt after the tcp_transmit_skb() call
3447	 * in order to make this packet get counted in tcpOutSegs.
3448	 */
3449	tp->snd_nxt = tp->write_seq;
3450	tp->pushed_seq = tp->write_seq;
3451	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
3452
3453	/* Timer for repeating the SYN until an answer. */
3454	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
3455				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
3456	return 0;
3457}
3458EXPORT_SYMBOL(tcp_connect);
3459
3460/* Send out a delayed ack, the caller does the policy checking
3461 * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
3462 * for details.
3463 */
3464void tcp_send_delayed_ack(struct sock *sk)
3465{
3466	struct inet_connection_sock *icsk = inet_csk(sk);
3467	int ato = icsk->icsk_ack.ato;
3468	unsigned long timeout;
3469
3470	tcp_ca_event(sk, CA_EVENT_DELAYED_ACK);
3471
3472	if (ato > TCP_DELACK_MIN) {
3473		const struct tcp_sock *tp = tcp_sk(sk);
3474		int max_ato = HZ / 2;
3475
3476		if (icsk->icsk_ack.pingpong ||
3477		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
3478			max_ato = TCP_DELACK_MAX;
3479
3480		/* Slow path, intersegment interval is "high". */
3481
3482		/* If some rtt estimate is known, use it to bound delayed ack.
3483		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
3484		 * directly.
3485		 */
3486		if (tp->srtt_us) {
3487			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
3488					TCP_DELACK_MIN);
3489
3490			if (rtt < max_ato)
3491				max_ato = rtt;
3492		}
3493
3494		ato = min(ato, max_ato);
3495	}
3496
3497	/* Stay within the limit we were given */
3498	timeout = jiffies + ato;
3499
3500	/* Use the new timeout only if there wasn't an older one already. */
3501	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
3502		/* If delack timer was blocked or is about to expire,
3503		 * send ACK now.
3504		 */
3505		if (icsk->icsk_ack.blocked ||
3506		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
3507			tcp_send_ack(sk);
3508			return;
3509		}
3510
3511		if (!time_before(timeout, icsk->icsk_ack.timeout))
3512			timeout = icsk->icsk_ack.timeout;
3513	}
3514	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3515	icsk->icsk_ack.timeout = timeout;
3516	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
3517}
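/* Illustrative sketch (not part of the kernel source): the ato clamp
 * above.  tp->srtt_us stores the smoothed RTT left-shifted by 3, so
 * (srtt_us >> 3) recovers the RTT estimate, which then bounds the
 * delayed-ACK timeout from above.  Units are simplified to microseconds
 * here; the real code converts to jiffies.  Hypothetical names.
 */
#if 0
static int clamp_ato(int ato, int max_ato, int srtt_us_shifted,
		     int delack_min)
{
	int rtt = srtt_us_shifted >> 3;	/* smoothed RTT estimate */

	if (rtt < delack_min)
		rtt = delack_min;
	if (rtt < max_ato)
		max_ato = rtt;
	return ato < max_ato ? ato : max_ato;
}
#endif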
3518
3519/* This routine sends an ack and also updates the window. */
3520void tcp_send_ack(struct sock *sk)
3521{
3522	struct sk_buff *buff;
3523
3524	/* If we have been reset, we may not send again. */
3525	if (sk->sk_state == TCP_CLOSE)
3526		return;
3527
3528	tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK);
3529
3530	/* We are not putting this on the write queue, so
3531	 * tcp_transmit_skb() will set the ownership to this
3532	 * sock.
3533	 */
3534	buff = alloc_skb(MAX_TCP_HEADER,
3535			 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
3536	if (unlikely(!buff)) {
3537		inet_csk_schedule_ack(sk);
3538		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
3539		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
3540					  TCP_DELACK_MAX, TCP_RTO_MAX);
3541		return;
3542	}
3543
3544	/* Reserve space for headers and prepare control bits. */
3545	skb_reserve(buff, MAX_TCP_HEADER);
3546	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
3547
3548	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
3549	 * too much.
3550	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
3551	 */
3552	skb_set_tcp_pure_ack(buff);
3553
3554	/* Send it off, this clears delayed acks for us. */
3555	skb_mstamp_get(&buff->skb_mstamp);
3556	tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
3557}
3558EXPORT_SYMBOL_GPL(tcp_send_ack);
3559
3560/* This routine sends a packet with an out of date sequence
3561 * number. It assumes the other end will try to ack it.
3562 *
3563 * Question: what should we do in urgent mode?
3564 * 4.4BSD forces sending single byte of data. We cannot send
3565 * out of window data, because we have SND.NXT==SND.MAX...
3566 *
3567 * Current solution: to send TWO zero-length segments in urgent mode:
3568 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
3569 * out-of-date with SND.UNA-1 to probe window.
3570 */
3571static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
3572{
3573	struct tcp_sock *tp = tcp_sk(sk);
3574	struct sk_buff *skb;
3575
3576	/* We don't queue it, tcp_transmit_skb() sets ownership. */
3577	skb = alloc_skb(MAX_TCP_HEADER,
3578			sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
3579	if (!skb)
3580		return -1;
3581
3582	/* Reserve space for headers and set control bits. */
3583	skb_reserve(skb, MAX_TCP_HEADER);
3584	/* Use a previous sequence.  This should cause the other
3585	 * end to send an ack.  Don't queue or clone SKB, just
3586	 * send it.
3587	 */
3588	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
3589	skb_mstamp_get(&skb->skb_mstamp);
3590	NET_INC_STATS(sock_net(sk), mib);
3591	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
3592}
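/* Illustrative sketch (not part of the kernel source): the probe
 * sequence choice above.  A plain window probe uses SND.UNA-1, which is
 * already acked and therefore forces a duplicate ACK; in urgent mode
 * SND.UNA itself is used so the urgent pointer is delivered.
 */
#if 0
static unsigned int probe_seq(unsigned int snd_una, int urgent)
{
	return snd_una - !urgent;	/* urgent ? SND.UNA : SND.UNA - 1 */
}
#endif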
3593
3594void tcp_send_window_probe(struct sock *sk)
3595{
3596	if (sk->sk_state == TCP_ESTABLISHED) {
3597		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
3598		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
3599	}
3600}
3601
3602/* Initiate keepalive or window probe from timer. */
3603int tcp_write_wakeup(struct sock *sk, int mib)
3604{
3605	struct tcp_sock *tp = tcp_sk(sk);
3606	struct sk_buff *skb;
3607
3608	if (sk->sk_state == TCP_CLOSE)
3609		return -1;
3610
3611	skb = tcp_send_head(sk);
3612	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
3613		int err;
3614		unsigned int mss = tcp_current_mss(sk);
3615		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
3616
3617		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
3618			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
3619
3620		/* We are probing the opening of a window
3621		 * but the window size is != 0;
3622		 * this must have been a result of SWS avoidance (sender).
3623		 */
3624		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
3625		    skb->len > mss) {
3626			seg_size = min(seg_size, mss);
3627			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3628			if (tcp_fragment(sk, skb, seg_size, mss, GFP_ATOMIC))
3629				return -1;
3630		} else if (!tcp_skb_pcount(skb))
3631			tcp_set_skb_tso_segs(skb, mss);
3632
3633		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
3634		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3635		if (!err)
3636			tcp_event_new_data_sent(sk, skb);
3637		return err;
3638	} else {
3639		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
3640			tcp_xmit_probe_skb(sk, 1, mib);
3641		return tcp_xmit_probe_skb(sk, 0, mib);
3642	}
3643}
3644
3645/* A window probe timeout has occurred.  If the window is not closed, send
3646 * a partial packet, else a zero probe.
3647 */
3648void tcp_send_probe0(struct sock *sk)
3649{
3650	struct inet_connection_sock *icsk = inet_csk(sk);
3651	struct tcp_sock *tp = tcp_sk(sk);
3652	struct net *net = sock_net(sk);
3653	unsigned long probe_max;
3654	int err;
3655
3656	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
3657
3658	if (tp->packets_out || !tcp_send_head(sk)) {
3659		/* Cancel probe timer, if it is not required. */
3660		icsk->icsk_probes_out = 0;
3661		icsk->icsk_backoff = 0;
3662		return;
3663	}
3664
3665	if (err <= 0) {
3666		if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
3667			icsk->icsk_backoff++;
3668		icsk->icsk_probes_out++;
3669		probe_max = TCP_RTO_MAX;
3670	} else {
3671		/* If the packet was not sent due to local congestion,
3672		 * do not back off and do not remember icsk_probes_out.
3673		 * Let local senders fight for local resources.
3674		 *
3675		 * Still use the accumulated backoff, though.
3676		 */
3677		if (!icsk->icsk_probes_out)
3678			icsk->icsk_probes_out = 1;
3679		probe_max = TCP_RESOURCE_PROBE_INTERVAL;
3680	}
3681	inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
3682				  tcp_probe0_when(sk, probe_max),
3683				  TCP_RTO_MAX);
3684}
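/* Illustrative sketch (not part of the kernel source): the probe timer
 * backs off exponentially.  Assuming tcp_probe0_when() computes roughly
 * min(base_rto << icsk_backoff, probe_max), successive failed probes
 * space out as base, 2*base, 4*base, ... up to the cap.  Hypothetical
 * names, userspace C.
 */
#if 0
static unsigned long probe0_when(unsigned long base_rto, int backoff,
				 unsigned long probe_max)
{
	unsigned long when = base_rto << backoff;

	return when < probe_max ? when : probe_max;
}
#endif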
3685
3686int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
3687{
3688	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
3689	struct flowi fl;
3690	int res;
3691
3692	tcp_rsk(req)->txhash = net_tx_rndhash();
3693	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
3694	if (!res) {
3695		__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
3696		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
3697		if (unlikely(tcp_passive_fastopen(sk)))
3698			tcp_sk(sk)->total_retrans++;
3699	}
3700	return res;
3701}
3702EXPORT_SYMBOL(tcp_rtx_synack);