   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		Implementation of the Transmission Control Protocol(TCP).
   8 *
   9 * Authors:	Ross Biro
  10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  11 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
  12 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
  13 *		Florian La Roche, <flla@stud.uni-sb.de>
  14 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
  15 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
  16 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  17 *		Matthew Dillon, <dillon@apollo.west.oic.com>
  18 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  19 *		Jorge Cwik, <jorge@laser.satlink.net>
  20 */
  21
  22/*
  23 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
  24 *				:	Fragmentation on mtu decrease
  25 *				:	Segment collapse on retransmit
  26 *				:	AF independence
  27 *
  28 *		Linus Torvalds	:	send_delayed_ack
  29 *		David S. Miller	:	Charge memory using the right skb
  30 *					during syn/ack processing.
  31 *		David S. Miller :	Output engine completely rewritten.
  32 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
  33 *		Cacophonix Gaul :	draft-minshall-nagle-01
  34 *		J Hadi Salim	:	ECN support
  35 *
  36 */
  37
  38#define pr_fmt(fmt) "TCP: " fmt
  39
  40#include <net/tcp.h>
  41#include <net/mptcp.h>
  42
  43#include <linux/compiler.h>
  44#include <linux/gfp.h>
  45#include <linux/module.h>
  46#include <linux/static_key.h>
  47
  48#include <trace/events/tcp.h>
  49
  50/* Refresh clocks of a TCP socket,
   51 * ensuring monotonically increasing values.
  52 */
  53void tcp_mstamp_refresh(struct tcp_sock *tp)
  54{
  55	u64 val = tcp_clock_ns();
  56
  57	tp->tcp_clock_cache = val;
  58	tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC);
  59}
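/* Illustrative note (not part of the kernel file): both stamps above come
 * from a single clock read; tcp_mstamp is just the cached nanosecond value
 * divided down to microseconds, e.g. 1234567890 ns -> 1234567 us.
 */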
  60
  61static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
  62			   int push_one, gfp_t gfp);
  63
  64/* Account for new data that has been sent to the network. */
  65static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
  66{
  67	struct inet_connection_sock *icsk = inet_csk(sk);
  68	struct tcp_sock *tp = tcp_sk(sk);
  69	unsigned int prior_packets = tp->packets_out;
  70
  71	WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);
  72
  73	__skb_unlink(skb, &sk->sk_write_queue);
  74	tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);
  75
  76	if (tp->highest_sack == NULL)
  77		tp->highest_sack = skb;
  78
  79	tp->packets_out += tcp_skb_pcount(skb);
  80	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
  81		tcp_rearm_rto(sk);
  82
  83	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
  84		      tcp_skb_pcount(skb));
  85}
  86
   87/* SND.NXT, if window was not shrunk or the amount shrunk was less than one
   88 * window scaling factor due to loss of precision.
   89 * If window has been shrunk, what should we do? It is not clear at all.
   90 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
   91 * Anything in between SND.UNA...SND.UNA+SND.WND can also already be
   92 * invalid. OK, let's do this for now:
  93 */
  94static inline __u32 tcp_acceptable_seq(const struct sock *sk)
  95{
  96	const struct tcp_sock *tp = tcp_sk(sk);
  97
  98	if (!before(tcp_wnd_end(tp), tp->snd_nxt) ||
  99	    (tp->rx_opt.wscale_ok &&
 100	     ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))
 101		return tp->snd_nxt;
 102	else
 103		return tcp_wnd_end(tp);
 104}
 105
 106/* Calculate mss to advertise in SYN segment.
 107 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 108 *
 109 * 1. It is independent of path mtu.
  110 * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
 111 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 112 *    attached devices, because some buggy hosts are confused by
 113 *    large MSS.
  114 * 4. We do not do 3; we advertise an MSS calculated from the first
  115 *    hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 116 *    This may be overridden via information stored in routing table.
 117 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 118 *    probably even Jumbo".
 119 */
 120static __u16 tcp_advertise_mss(struct sock *sk)
 121{
 122	struct tcp_sock *tp = tcp_sk(sk);
 123	const struct dst_entry *dst = __sk_dst_get(sk);
 124	int mss = tp->advmss;
 125
 126	if (dst) {
 127		unsigned int metric = dst_metric_advmss(dst);
 128
 129		if (metric < mss) {
 130			mss = metric;
 131			tp->advmss = mss;
 132		}
 133	}
 134
 135	return (__u16)mss;
 136}
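/* Illustrative userspace sketch (not from the kernel tree): the advertised
 * MSS is min(tp->advmss, dst_metric_advmss(dst)) -- a route metric can only
 * lower it.  advertise_mss() below is a hypothetical stand-in with made-up
 * values; a metric of 0 stands for "no metric set" in this sketch.
 */
#include <stdio.h>

static unsigned short advertise_mss(int advmss, int route_metric_advmss)
{
	if (route_metric_advmss && route_metric_advmss < advmss)
		advmss = route_metric_advmss;
	return (unsigned short)advmss;
}

int main(void)
{
	printf("%hu\n", advertise_mss(1460, 0));    /* 1460: no route clamp   */
	printf("%hu\n", advertise_mss(1460, 1220)); /* 1220: route clamps MSS */
	return 0;
}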
 137
  138/* RFC2861. Reset CWND after an idle period longer than RTO to "restart window".
 139 * This is the first part of cwnd validation mechanism.
 140 */
 141void tcp_cwnd_restart(struct sock *sk, s32 delta)
 142{
 143	struct tcp_sock *tp = tcp_sk(sk);
 144	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
 145	u32 cwnd = tp->snd_cwnd;
 146
 147	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
 148
 149	tp->snd_ssthresh = tcp_current_ssthresh(sk);
 150	restart_cwnd = min(restart_cwnd, cwnd);
 151
 152	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
 153		cwnd >>= 1;
 154	tp->snd_cwnd = max(cwnd, restart_cwnd);
 155	tp->snd_cwnd_stamp = tcp_jiffies32;
 156	tp->snd_cwnd_used = 0;
 157}
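/* Illustrative userspace sketch (not from the kernel tree): the RFC2861
 * restart logic above halves cwnd once per full RTO of idle time, never
 * going below the restart window.  Numbers are made up.
 */
#include <stdio.h>

static unsigned int cwnd_after_idle(unsigned int cwnd, unsigned int restart_cwnd,
				    int delta, int rto)
{
	if (restart_cwnd > cwnd)
		restart_cwnd = cwnd;
	while ((delta -= rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	return cwnd > restart_cwnd ? cwnd : restart_cwnd;
}

int main(void)
{
	/* cwnd 40, restart window 10, idle for 3.5 RTOs: 40 -> 20 -> 10 */
	printf("%u\n", cwnd_after_idle(40, 10, 7, 2));
	return 0;
}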
 158
 159/* Congestion state accounting after a packet has been sent. */
 160static void tcp_event_data_sent(struct tcp_sock *tp,
 161				struct sock *sk)
 162{
 163	struct inet_connection_sock *icsk = inet_csk(sk);
 164	const u32 now = tcp_jiffies32;
 165
 166	if (tcp_packets_in_flight(tp) == 0)
 167		tcp_ca_event(sk, CA_EVENT_TX_START);
 168
  169	/* If this is the first data packet sent in response to the
  170	 * previously received data,
  171	 * and it is sent within ato of the last received packet,
  172	 * increase the pingpong count.
  173	 */
 174	if (before(tp->lsndtime, icsk->icsk_ack.lrcvtime) &&
 175	    (u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
 176		inet_csk_inc_pingpong_cnt(sk);
 177
 178	tp->lsndtime = now;
 179}
 180
 181/* Account for an ACK we sent. */
 182static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
 183				      u32 rcv_nxt)
 184{
 185	struct tcp_sock *tp = tcp_sk(sk);
 186
 187	if (unlikely(tp->compressed_ack)) {
 188		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
 189			      tp->compressed_ack);
 190		tp->compressed_ack = 0;
 191		if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
 192			__sock_put(sk);
 193	}
 194
 195	if (unlikely(rcv_nxt != tp->rcv_nxt))
 196		return;  /* Special ACK sent by DCTCP to reflect ECN */
 197	tcp_dec_quickack_mode(sk, pkts);
 198	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 199}
 200
 201/* Determine a window scaling and initial window to offer.
 202 * Based on the assumption that the given amount of space
 203 * will be offered. Store the results in the tp structure.
 204 * NOTE: for smooth operation initial space offering should
 205 * be a multiple of mss if possible. We assume here that mss >= 1.
 206 * This MUST be enforced by all callers.
 207 */
 208void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
 209			       __u32 *rcv_wnd, __u32 *window_clamp,
 210			       int wscale_ok, __u8 *rcv_wscale,
 211			       __u32 init_rcv_wnd)
 212{
 213	unsigned int space = (__space < 0 ? 0 : __space);
 214
  215	/* If no clamp is set, set the clamp to the max possible scaled window */
 216	if (*window_clamp == 0)
 217		(*window_clamp) = (U16_MAX << TCP_MAX_WSCALE);
 218	space = min(*window_clamp, space);
 219
 220	/* Quantize space offering to a multiple of mss if possible. */
 221	if (space > mss)
 222		space = rounddown(space, mss);
 223
 224	/* NOTE: offering an initial window larger than 32767
 225	 * will break some buggy TCP stacks. If the admin tells us
 226	 * it is likely we could be speaking with such a buggy stack
 227	 * we will truncate our initial window offering to 32K-1
 228	 * unless the remote has sent us a window scaling option,
 229	 * which we interpret as a sign the remote TCP is not
 230	 * misinterpreting the window field as a signed quantity.
 231	 */
 232	if (sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
 233		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
 234	else
 235		(*rcv_wnd) = min_t(u32, space, U16_MAX);
 236
 237	if (init_rcv_wnd)
 238		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
 239
 240	*rcv_wscale = 0;
 241	if (wscale_ok) {
 242		/* Set window scaling on max possible window */
 243		space = max_t(u32, space, sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
 244		space = max_t(u32, space, sysctl_rmem_max);
 245		space = min_t(u32, space, *window_clamp);
 246		*rcv_wscale = clamp_t(int, ilog2(space) - 15,
 247				      0, TCP_MAX_WSCALE);
 248	}
 249	/* Set the clamp no higher than max representable value */
 250	(*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
 251}
 252EXPORT_SYMBOL(tcp_select_initial_window);
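/* Illustrative userspace sketch (not from the kernel tree): the window
 * scale chosen above is max(0, ilog2(space) - 15), capped at TCP_MAX_WSCALE
 * (14), so the largest buffer still fits the 16-bit window field once
 * shifted.  ilog2_u32() is a hypothetical stand-in for the kernel's ilog2().
 */
#include <stdio.h>

static int ilog2_u32(unsigned int v)
{
	int r = -1;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

static int pick_rcv_wscale(unsigned int space)
{
	int ws = ilog2_u32(space) - 15;

	return ws < 0 ? 0 : (ws > 14 ? 14 : ws);
}

int main(void)
{
	printf("%d\n", pick_rcv_wscale(65535));    /* 0: fits unscaled */
	printf("%d\n", pick_rcv_wscale(4 << 20));  /* 7: 4 MB buffer   */
	printf("%d\n", pick_rcv_wscale(1U << 30)); /* 14: capped       */
	return 0;
}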
 253
  254/* Choose a new window to advertise, update state in tcp_sock for the
 255 * socket, and return result with RFC1323 scaling applied.  The return
 256 * value can be stuffed directly into th->window for an outgoing
 257 * frame.
 258 */
 259static u16 tcp_select_window(struct sock *sk)
 260{
 261	struct tcp_sock *tp = tcp_sk(sk);
 262	u32 old_win = tp->rcv_wnd;
 263	u32 cur_win = tcp_receive_window(tp);
 264	u32 new_win = __tcp_select_window(sk);
 265
 266	/* Never shrink the offered window */
 267	if (new_win < cur_win) {
 268		/* Danger Will Robinson!
 269		 * Don't update rcv_wup/rcv_wnd here or else
 270		 * we will not be able to advertise a zero
 271		 * window in time.  --DaveM
 272		 *
 273		 * Relax Will Robinson.
 274		 */
 275		if (new_win == 0)
 276			NET_INC_STATS(sock_net(sk),
 277				      LINUX_MIB_TCPWANTZEROWINDOWADV);
 278		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
 279	}
 280	tp->rcv_wnd = new_win;
 281	tp->rcv_wup = tp->rcv_nxt;
 282
 283	/* Make sure we do not exceed the maximum possible
 284	 * scaled window.
 285	 */
 286	if (!tp->rx_opt.rcv_wscale &&
 287	    sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows)
 288		new_win = min(new_win, MAX_TCP_WINDOW);
 289	else
 290		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
 291
 292	/* RFC1323 scaling applied */
 293	new_win >>= tp->rx_opt.rcv_wscale;
 294
 295	/* If we advertise zero window, disable fast path. */
 296	if (new_win == 0) {
 297		tp->pred_flags = 0;
 298		if (old_win)
 299			NET_INC_STATS(sock_net(sk),
 300				      LINUX_MIB_TCPTOZEROWINDOWADV);
 301	} else if (old_win == 0) {
 302		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
 303	}
 304
 305	return new_win;
 306}
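/* Illustrative userspace sketch (not from the kernel tree): the value
 * returned above goes on the wire right-shifted by the negotiated scale,
 * and the peer shifts it back.  Example values are made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned int rcv_wnd = 262144;                       /* bytes we accept */
	unsigned int wscale = 7;                             /* from SYN exchange */
	unsigned int on_wire = (rcv_wnd >> wscale) & 0xffff; /* 16-bit field */

	printf("field=%u decoded=%u\n", on_wire, on_wire << wscale);
	/* field=2048 decoded=262144 */
	return 0;
}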
 307
 308/* Packet ECN state for a SYN-ACK */
 309static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
 310{
 311	const struct tcp_sock *tp = tcp_sk(sk);
 312
 313	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
 314	if (!(tp->ecn_flags & TCP_ECN_OK))
 315		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
 316	else if (tcp_ca_needs_ecn(sk) ||
 317		 tcp_bpf_ca_needs_ecn(sk))
 318		INET_ECN_xmit(sk);
 319}
 320
 321/* Packet ECN state for a SYN.  */
 322static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
 323{
 324	struct tcp_sock *tp = tcp_sk(sk);
 325	bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
 326	bool use_ecn = sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
 327		tcp_ca_needs_ecn(sk) || bpf_needs_ecn;
 328
 329	if (!use_ecn) {
 330		const struct dst_entry *dst = __sk_dst_get(sk);
 331
 332		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
 333			use_ecn = true;
 334	}
 335
 336	tp->ecn_flags = 0;
 337
 338	if (use_ecn) {
 339		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
 340		tp->ecn_flags = TCP_ECN_OK;
 341		if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
 342			INET_ECN_xmit(sk);
 343	}
 344}
 345
 346static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
 347{
 348	if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback)
 349		/* tp->ecn_flags are cleared at a later point in time when
  350		 * SYN ACK is ultimately received.
 351		 */
 352		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
 353}
 354
 355static void
 356tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
 357{
 358	if (inet_rsk(req)->ecn_ok)
 359		th->ece = 1;
 360}
 361
  362/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 363 * be sent.
 364 */
 365static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
 366			 struct tcphdr *th, int tcp_header_len)
 367{
 368	struct tcp_sock *tp = tcp_sk(sk);
 369
 370	if (tp->ecn_flags & TCP_ECN_OK) {
 371		/* Not-retransmitted data segment: set ECT and inject CWR. */
 372		if (skb->len != tcp_header_len &&
 373		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
 374			INET_ECN_xmit(sk);
 375			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
 376				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
 377				th->cwr = 1;
 378				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 379			}
 380		} else if (!tcp_ca_needs_ecn(sk)) {
 381			/* ACK or retransmitted segment: clear ECT|CE */
 382			INET_ECN_dontxmit(sk);
 383		}
 384		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
 385			th->ece = 1;
 386	}
 387}
 388
 389/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 390 * auto increment end seqno.
 391 */
 392static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
 393{
 394	skb->ip_summed = CHECKSUM_PARTIAL;
 395
 396	TCP_SKB_CB(skb)->tcp_flags = flags;
 397	TCP_SKB_CB(skb)->sacked = 0;
 398
 399	tcp_skb_pcount_set(skb, 1);
 400
 401	TCP_SKB_CB(skb)->seq = seq;
 402	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
 403		seq++;
 404	TCP_SKB_CB(skb)->end_seq = seq;
 405}
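/* Illustrative note (not part of the kernel file): SYN and FIN each consume
 * one sequence number even though they carry no payload, hence the seq++
 * above.  With made-up numbers:
 *
 *	pure ACK at seq 1000:  seq = 1000, end_seq = 1000
 *	SYN      at seq 1000:  seq = 1000, end_seq = 1001
 *	FIN      at seq 1000:  seq = 1000, end_seq = 1001
 */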
 406
 407static inline bool tcp_urg_mode(const struct tcp_sock *tp)
 408{
 409	return tp->snd_una != tp->snd_up;
 410}
 411
 412#define OPTION_SACK_ADVERTISE	(1 << 0)
 413#define OPTION_TS		(1 << 1)
 414#define OPTION_MD5		(1 << 2)
 415#define OPTION_WSCALE		(1 << 3)
 416#define OPTION_FAST_OPEN_COOKIE	(1 << 8)
 417#define OPTION_SMC		(1 << 9)
 418#define OPTION_MPTCP		(1 << 10)
 419
 420static void smc_options_write(__be32 *ptr, u16 *options)
 421{
 422#if IS_ENABLED(CONFIG_SMC)
 423	if (static_branch_unlikely(&tcp_have_smc)) {
 424		if (unlikely(OPTION_SMC & *options)) {
 425			*ptr++ = htonl((TCPOPT_NOP  << 24) |
 426				       (TCPOPT_NOP  << 16) |
 427				       (TCPOPT_EXP <<  8) |
 428				       (TCPOLEN_EXP_SMC_BASE));
 429			*ptr++ = htonl(TCPOPT_SMC_MAGIC);
 430		}
 431	}
 432#endif
 433}
 434
 435struct tcp_out_options {
 436	u16 options;		/* bit field of OPTION_* */
 437	u16 mss;		/* 0 to disable */
 438	u8 ws;			/* window scale, 0 to disable */
 439	u8 num_sack_blocks;	/* number of SACK blocks to include */
 440	u8 hash_size;		/* bytes in hash_location */
 441	u8 bpf_opt_len;		/* length of BPF hdr option */
 442	__u8 *hash_location;	/* temporary pointer, overloaded */
 443	__u32 tsval, tsecr;	/* need to include OPTION_TS */
 444	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
 445	struct mptcp_out_options mptcp;
 446};
 447
 448static void mptcp_options_write(__be32 *ptr, const struct tcp_sock *tp,
 449				struct tcp_out_options *opts)
 450{
 451#if IS_ENABLED(CONFIG_MPTCP)
 452	if (unlikely(OPTION_MPTCP & opts->options))
 453		mptcp_write_options(ptr, tp, &opts->mptcp);
 454#endif
 455}
 456
 457#ifdef CONFIG_CGROUP_BPF
 458static int bpf_skops_write_hdr_opt_arg0(struct sk_buff *skb,
 459					enum tcp_synack_type synack_type)
 460{
 461	if (unlikely(!skb))
 462		return BPF_WRITE_HDR_TCP_CURRENT_MSS;
 463
 464	if (unlikely(synack_type == TCP_SYNACK_COOKIE))
 465		return BPF_WRITE_HDR_TCP_SYNACK_COOKIE;
 466
 467	return 0;
 468}
 469
 470/* req, syn_skb and synack_type are used when writing synack */
 471static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
 472				  struct request_sock *req,
 473				  struct sk_buff *syn_skb,
 474				  enum tcp_synack_type synack_type,
 475				  struct tcp_out_options *opts,
 476				  unsigned int *remaining)
 477{
 478	struct bpf_sock_ops_kern sock_ops;
 479	int err;
 480
 481	if (likely(!BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
 482					   BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG)) ||
 483	    !*remaining)
 484		return;
 485
 486	/* *remaining has already been aligned to 4 bytes, so *remaining >= 4 */
 487
 488	/* init sock_ops */
 489	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
 490
 491	sock_ops.op = BPF_SOCK_OPS_HDR_OPT_LEN_CB;
 492
 493	if (req) {
 494		/* The listen "sk" cannot be passed here because
  495		 * it is not locked.  It would not make much
  496		 * sense to do bpf_setsockopt(listen_sk) based
  497		 * on an individual connection request either.
 498		 *
 499		 * Thus, "req" is passed here and the cgroup-bpf-progs
 500		 * of the listen "sk" will be run.
 501		 *
  502		 * "req" is also used here for fastopen even though the "sk"
  503		 * here is a fullsock "child" sk.  This keeps the behavior
 504		 * consistent between fastopen and non-fastopen on
 505		 * the bpf programming side.
 506		 */
 507		sock_ops.sk = (struct sock *)req;
 508		sock_ops.syn_skb = syn_skb;
 509	} else {
 510		sock_owned_by_me(sk);
 511
 512		sock_ops.is_fullsock = 1;
 513		sock_ops.sk = sk;
 514	}
 515
 516	sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
 517	sock_ops.remaining_opt_len = *remaining;
 518	/* tcp_current_mss() does not pass a skb */
 519	if (skb)
 520		bpf_skops_init_skb(&sock_ops, skb, 0);
 521
 522	err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);
 523
 524	if (err || sock_ops.remaining_opt_len == *remaining)
 525		return;
 526
 527	opts->bpf_opt_len = *remaining - sock_ops.remaining_opt_len;
 528	/* round up to 4 bytes */
 529	opts->bpf_opt_len = (opts->bpf_opt_len + 3) & ~3;
 530
 531	*remaining -= opts->bpf_opt_len;
 532}
 533
 534static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
 535				    struct request_sock *req,
 536				    struct sk_buff *syn_skb,
 537				    enum tcp_synack_type synack_type,
 538				    struct tcp_out_options *opts)
 539{
 540	u8 first_opt_off, nr_written, max_opt_len = opts->bpf_opt_len;
 541	struct bpf_sock_ops_kern sock_ops;
 542	int err;
 543
 544	if (likely(!max_opt_len))
 545		return;
 546
 547	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
 548
 549	sock_ops.op = BPF_SOCK_OPS_WRITE_HDR_OPT_CB;
 550
 551	if (req) {
 552		sock_ops.sk = (struct sock *)req;
 553		sock_ops.syn_skb = syn_skb;
 554	} else {
 555		sock_owned_by_me(sk);
 556
 557		sock_ops.is_fullsock = 1;
 558		sock_ops.sk = sk;
 559	}
 560
 561	sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
 562	sock_ops.remaining_opt_len = max_opt_len;
 563	first_opt_off = tcp_hdrlen(skb) - max_opt_len;
 564	bpf_skops_init_skb(&sock_ops, skb, first_opt_off);
 565
 566	err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);
 567
 568	if (err)
 569		nr_written = 0;
 570	else
 571		nr_written = max_opt_len - sock_ops.remaining_opt_len;
 572
 573	if (nr_written < max_opt_len)
 574		memset(skb->data + first_opt_off + nr_written, TCPOPT_NOP,
 575		       max_opt_len - nr_written);
 576}
 577#else
 578static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
 579				  struct request_sock *req,
 580				  struct sk_buff *syn_skb,
 581				  enum tcp_synack_type synack_type,
 582				  struct tcp_out_options *opts,
 583				  unsigned int *remaining)
 584{
 585}
 586
 587static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
 588				    struct request_sock *req,
 589				    struct sk_buff *syn_skb,
 590				    enum tcp_synack_type synack_type,
 591				    struct tcp_out_options *opts)
 592{
 593}
 594#endif
 595
 596/* Write previously computed TCP options to the packet.
 597 *
 598 * Beware: Something in the Internet is very sensitive to the ordering of
  599 * TCP options; we learned this the hard way, so be careful here.
  600 * Luckily we can at least blame others for their non-compliance, but from
  601 * an inter-operability perspective it seems that we're somewhat stuck with
  602 * the ordering which we have been using if we want to keep working with
  603 * those broken things (not that it currently hurts anybody, as there is no
  604 * particular reason why the ordering would need to be changed).
 605 *
 606 * At least SACK_PERM as the first option is known to lead to a disaster
 607 * (but it may well be that other scenarios fail similarly).
 608 */
 609static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
 610			      struct tcp_out_options *opts)
 611{
 612	u16 options = opts->options;	/* mungable copy */
 613
 614	if (unlikely(OPTION_MD5 & options)) {
 615		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 616			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
 617		/* overload cookie hash location */
 618		opts->hash_location = (__u8 *)ptr;
 619		ptr += 4;
 620	}
 621
 622	if (unlikely(opts->mss)) {
 623		*ptr++ = htonl((TCPOPT_MSS << 24) |
 624			       (TCPOLEN_MSS << 16) |
 625			       opts->mss);
 626	}
 627
 628	if (likely(OPTION_TS & options)) {
 629		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
 630			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
 631				       (TCPOLEN_SACK_PERM << 16) |
 632				       (TCPOPT_TIMESTAMP << 8) |
 633				       TCPOLEN_TIMESTAMP);
 634			options &= ~OPTION_SACK_ADVERTISE;
 635		} else {
 636			*ptr++ = htonl((TCPOPT_NOP << 24) |
 637				       (TCPOPT_NOP << 16) |
 638				       (TCPOPT_TIMESTAMP << 8) |
 639				       TCPOLEN_TIMESTAMP);
 640		}
 641		*ptr++ = htonl(opts->tsval);
 642		*ptr++ = htonl(opts->tsecr);
 643	}
 644
 645	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
 646		*ptr++ = htonl((TCPOPT_NOP << 24) |
 647			       (TCPOPT_NOP << 16) |
 648			       (TCPOPT_SACK_PERM << 8) |
 649			       TCPOLEN_SACK_PERM);
 650	}
 651
 652	if (unlikely(OPTION_WSCALE & options)) {
 653		*ptr++ = htonl((TCPOPT_NOP << 24) |
 654			       (TCPOPT_WINDOW << 16) |
 655			       (TCPOLEN_WINDOW << 8) |
 656			       opts->ws);
 657	}
 658
 659	if (unlikely(opts->num_sack_blocks)) {
 660		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
 661			tp->duplicate_sack : tp->selective_acks;
 662		int this_sack;
 663
 664		*ptr++ = htonl((TCPOPT_NOP  << 24) |
 665			       (TCPOPT_NOP  << 16) |
 666			       (TCPOPT_SACK <<  8) |
 667			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
 668						     TCPOLEN_SACK_PERBLOCK)));
 669
 670		for (this_sack = 0; this_sack < opts->num_sack_blocks;
 671		     ++this_sack) {
 672			*ptr++ = htonl(sp[this_sack].start_seq);
 673			*ptr++ = htonl(sp[this_sack].end_seq);
 674		}
 675
 676		tp->rx_opt.dsack = 0;
 677	}
 678
 679	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
 680		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
 681		u8 *p = (u8 *)ptr;
 682		u32 len; /* Fast Open option length */
 683
 684		if (foc->exp) {
 685			len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
 686			*ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
 687				     TCPOPT_FASTOPEN_MAGIC);
 688			p += TCPOLEN_EXP_FASTOPEN_BASE;
 689		} else {
 690			len = TCPOLEN_FASTOPEN_BASE + foc->len;
 691			*p++ = TCPOPT_FASTOPEN;
 692			*p++ = len;
 693		}
 694
 695		memcpy(p, foc->val, foc->len);
 696		if ((len & 3) == 2) {
 697			p[foc->len] = TCPOPT_NOP;
 698			p[foc->len + 1] = TCPOPT_NOP;
 699		}
 700		ptr += (len + 3) >> 2;
 701	}
 702
 703	smc_options_write(ptr, &options);
 704
 705	mptcp_options_write(ptr, tp, opts);
 706}
 707
 708static void smc_set_option(const struct tcp_sock *tp,
 709			   struct tcp_out_options *opts,
 710			   unsigned int *remaining)
 711{
 712#if IS_ENABLED(CONFIG_SMC)
 713	if (static_branch_unlikely(&tcp_have_smc)) {
 714		if (tp->syn_smc) {
 715			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
 716				opts->options |= OPTION_SMC;
 717				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
 718			}
 719		}
 720	}
 721#endif
 722}
 723
 724static void smc_set_option_cond(const struct tcp_sock *tp,
 725				const struct inet_request_sock *ireq,
 726				struct tcp_out_options *opts,
 727				unsigned int *remaining)
 728{
 729#if IS_ENABLED(CONFIG_SMC)
 730	if (static_branch_unlikely(&tcp_have_smc)) {
 731		if (tp->syn_smc && ireq->smc_ok) {
 732			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
 733				opts->options |= OPTION_SMC;
 734				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
 735			}
 736		}
 737	}
 738#endif
 739}
 740
 741static void mptcp_set_option_cond(const struct request_sock *req,
 742				  struct tcp_out_options *opts,
 743				  unsigned int *remaining)
 744{
 745	if (rsk_is_mptcp(req)) {
 746		unsigned int size;
 747
 748		if (mptcp_synack_options(req, &size, &opts->mptcp)) {
 749			if (*remaining >= size) {
 750				opts->options |= OPTION_MPTCP;
 751				*remaining -= size;
 752			}
 753		}
 754	}
 755}
 756
 757/* Compute TCP options for SYN packets. This is not the final
 758 * network wire format yet.
 759 */
 760static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 761				struct tcp_out_options *opts,
 762				struct tcp_md5sig_key **md5)
 763{
 764	struct tcp_sock *tp = tcp_sk(sk);
 765	unsigned int remaining = MAX_TCP_OPTION_SPACE;
 766	struct tcp_fastopen_request *fastopen = tp->fastopen_req;
 767
 768	*md5 = NULL;
 769#ifdef CONFIG_TCP_MD5SIG
 770	if (static_branch_unlikely(&tcp_md5_needed) &&
 771	    rcu_access_pointer(tp->md5sig_info)) {
 772		*md5 = tp->af_specific->md5_lookup(sk, sk);
 773		if (*md5) {
 774			opts->options |= OPTION_MD5;
 775			remaining -= TCPOLEN_MD5SIG_ALIGNED;
 776		}
 777	}
 778#endif
 779
  780	/* We always send an MSS option.  The option bytes which will be seen in
  781	 * normal data packets, should timestamps be used, must be covered by the
  782	 * advertised MSS.  But we subtract them from tp->mss_cache so that
 783	 * calculations in tcp_sendmsg are simpler etc.  So account for this
 784	 * fact here if necessary.  If we don't do this correctly, as a
 785	 * receiver we won't recognize data packets as being full sized when we
 786	 * should, and thus we won't abide by the delayed ACK rules correctly.
 787	 * SACKs don't matter, we never delay an ACK when we have any of those
 788	 * going out.  */
 789	opts->mss = tcp_advertise_mss(sk);
 790	remaining -= TCPOLEN_MSS_ALIGNED;
 791
 792	if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) {
 793		opts->options |= OPTION_TS;
 794		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
 795		opts->tsecr = tp->rx_opt.ts_recent;
 796		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 797	}
 798	if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
 799		opts->ws = tp->rx_opt.rcv_wscale;
 800		opts->options |= OPTION_WSCALE;
 801		remaining -= TCPOLEN_WSCALE_ALIGNED;
 802	}
 803	if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) {
 804		opts->options |= OPTION_SACK_ADVERTISE;
 805		if (unlikely(!(OPTION_TS & opts->options)))
 806			remaining -= TCPOLEN_SACKPERM_ALIGNED;
 807	}
 808
 809	if (fastopen && fastopen->cookie.len >= 0) {
 810		u32 need = fastopen->cookie.len;
 811
 812		need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
 813					       TCPOLEN_FASTOPEN_BASE;
 814		need = (need + 3) & ~3U;  /* Align to 32 bits */
 815		if (remaining >= need) {
 816			opts->options |= OPTION_FAST_OPEN_COOKIE;
 817			opts->fastopen_cookie = &fastopen->cookie;
 818			remaining -= need;
 819			tp->syn_fastopen = 1;
 820			tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
 821		}
 822	}
 823
 824	smc_set_option(tp, opts, &remaining);
 825
 826	if (sk_is_mptcp(sk)) {
 827		unsigned int size;
 828
 829		if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) {
 830			opts->options |= OPTION_MPTCP;
 831			remaining -= size;
 832		}
 833	}
 834
 835	bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);
 836
 837	return MAX_TCP_OPTION_SPACE - remaining;
 838}
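/* Illustrative userspace sketch (not from the kernel tree): SYN option
 * space accounting with the *_ALIGNED sizes used above.  Out of 40 bytes
 * (MAX_TCP_OPTION_SPACE), MSS (4) + timestamps (12) + window scale (4)
 * leave 20; SACK_PERM rides for free inside the timestamp words, so a
 * fast open option with an 8-byte cookie (12 bytes aligned) still fits.
 */
#include <stdio.h>

int main(void)
{
	unsigned int remaining = 40;        /* MAX_TCP_OPTION_SPACE   */

	remaining -= 4;                     /* TCPOLEN_MSS_ALIGNED    */
	remaining -= 12;                    /* TCPOLEN_TSTAMP_ALIGNED */
	remaining -= 4;                     /* TCPOLEN_WSCALE_ALIGNED */
	/* SACK_PERM folded into the timestamp option: 0 extra bytes */
	remaining -= (2 + 8 + 3) & ~3U;     /* fast open base + cookie -> 12 */

	printf("left: %u\n", remaining);    /* left: 8 */
	return 0;
}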
 839
 840/* Set up TCP options for SYN-ACKs. */
 841static unsigned int tcp_synack_options(const struct sock *sk,
 842				       struct request_sock *req,
 843				       unsigned int mss, struct sk_buff *skb,
 844				       struct tcp_out_options *opts,
 845				       const struct tcp_md5sig_key *md5,
 846				       struct tcp_fastopen_cookie *foc,
 847				       enum tcp_synack_type synack_type,
 848				       struct sk_buff *syn_skb)
 849{
 850	struct inet_request_sock *ireq = inet_rsk(req);
 851	unsigned int remaining = MAX_TCP_OPTION_SPACE;
 852
 853#ifdef CONFIG_TCP_MD5SIG
 854	if (md5) {
 855		opts->options |= OPTION_MD5;
 856		remaining -= TCPOLEN_MD5SIG_ALIGNED;
 857
 858		/* We can't fit any SACK blocks in a packet with MD5 + TS
 859		 * options. There was discussion about disabling SACK
 860		 * rather than TS in order to fit in better with old,
 861		 * buggy kernels, but that was deemed to be unnecessary.
 862		 */
 863		if (synack_type != TCP_SYNACK_COOKIE)
 864			ireq->tstamp_ok &= !ireq->sack_ok;
 865	}
 866#endif
 867
 868	/* We always send an MSS option. */
 869	opts->mss = mss;
 870	remaining -= TCPOLEN_MSS_ALIGNED;
 871
 872	if (likely(ireq->wscale_ok)) {
 873		opts->ws = ireq->rcv_wscale;
 874		opts->options |= OPTION_WSCALE;
 875		remaining -= TCPOLEN_WSCALE_ALIGNED;
 876	}
 877	if (likely(ireq->tstamp_ok)) {
 878		opts->options |= OPTION_TS;
 879		opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off;
 880		opts->tsecr = req->ts_recent;
 881		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 882	}
 883	if (likely(ireq->sack_ok)) {
 884		opts->options |= OPTION_SACK_ADVERTISE;
 885		if (unlikely(!ireq->tstamp_ok))
 886			remaining -= TCPOLEN_SACKPERM_ALIGNED;
 887	}
 888	if (foc != NULL && foc->len >= 0) {
 889		u32 need = foc->len;
 890
 891		need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
 892				   TCPOLEN_FASTOPEN_BASE;
 893		need = (need + 3) & ~3U;  /* Align to 32 bits */
 894		if (remaining >= need) {
 895			opts->options |= OPTION_FAST_OPEN_COOKIE;
 896			opts->fastopen_cookie = foc;
 897			remaining -= need;
 898		}
 899	}
 900
 901	mptcp_set_option_cond(req, opts, &remaining);
 902
 903	smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);
 904
 905	bpf_skops_hdr_opt_len((struct sock *)sk, skb, req, syn_skb,
 906			      synack_type, opts, &remaining);
 907
 908	return MAX_TCP_OPTION_SPACE - remaining;
 909}
 910
 911/* Compute TCP options for ESTABLISHED sockets. This is not the
 912 * final wire format yet.
 913 */
 914static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
 915					struct tcp_out_options *opts,
 916					struct tcp_md5sig_key **md5)
 917{
 918	struct tcp_sock *tp = tcp_sk(sk);
 919	unsigned int size = 0;
 920	unsigned int eff_sacks;
 921
 922	opts->options = 0;
 923
 924	*md5 = NULL;
 925#ifdef CONFIG_TCP_MD5SIG
 926	if (static_branch_unlikely(&tcp_md5_needed) &&
 927	    rcu_access_pointer(tp->md5sig_info)) {
 928		*md5 = tp->af_specific->md5_lookup(sk, sk);
 929		if (*md5) {
 930			opts->options |= OPTION_MD5;
 931			size += TCPOLEN_MD5SIG_ALIGNED;
 932		}
 933	}
 934#endif
 935
 936	if (likely(tp->rx_opt.tstamp_ok)) {
 937		opts->options |= OPTION_TS;
 938		opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
 939		opts->tsecr = tp->rx_opt.ts_recent;
 940		size += TCPOLEN_TSTAMP_ALIGNED;
 941	}
 942
 943	/* MPTCP options have precedence over SACK for the limited TCP
  944	 * option space because an MPTCP connection would be forced to
 945	 * fall back to regular TCP if a required multipath option is
 946	 * missing. SACK still gets a chance to use whatever space is
 947	 * left.
 948	 */
 949	if (sk_is_mptcp(sk)) {
 950		unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
 951		unsigned int opt_size = 0;
 952
 953		if (mptcp_established_options(sk, skb, &opt_size, remaining,
 954					      &opts->mptcp)) {
 955			opts->options |= OPTION_MPTCP;
 956			size += opt_size;
 957		}
 958	}
 959
 960	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
 961	if (unlikely(eff_sacks)) {
 962		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
 963		if (unlikely(remaining < TCPOLEN_SACK_BASE_ALIGNED +
 964					 TCPOLEN_SACK_PERBLOCK))
 965			return size;
 966
 967		opts->num_sack_blocks =
 968			min_t(unsigned int, eff_sacks,
 969			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
 970			      TCPOLEN_SACK_PERBLOCK);
 971
 972		size += TCPOLEN_SACK_BASE_ALIGNED +
 973			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
 974	}
 975
 976	if (unlikely(BPF_SOCK_OPS_TEST_FLAG(tp,
 977					    BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG))) {
 978		unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
 979
 980		bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);
 981
 982		size = MAX_TCP_OPTION_SPACE - remaining;
 983	}
 984
 985	return size;
 986}
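/* Illustrative userspace sketch (not from the kernel tree): the SACK block
 * arithmetic above.  Each block costs TCPOLEN_SACK_PERBLOCK (8) bytes on
 * top of TCPOLEN_SACK_BASE_ALIGNED (4), so with timestamps on (12 bytes)
 * at most (40 - 12 - 4) / 8 = 3 blocks fit in an established packet.
 */
#include <stdio.h>

int main(void)
{
	unsigned int remaining = 40 - 12;              /* after timestamps */
	unsigned int max_blocks = (remaining - 4) / 8;

	printf("%u\n", max_blocks);                    /* 3 */
	return 0;
}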
 987
 988
 989/* TCP SMALL QUEUES (TSQ)
 990 *
  991 * TSQ's goal is to keep a small number of skbs per tcp flow in tx queues (qdisc+dev)
 992 * to reduce RTT and bufferbloat.
 993 * We do this using a special skb destructor (tcp_wfree).
 994 *
  995 * It's important that tcp_wfree() can be replaced by sock_wfree() in the
  996 * event the skb needs to be reallocated in a driver.
  997 * The invariant is that skb->truesize is subtracted from sk->sk_wmem_alloc.
 998 *
 999 * Since transmit from skb destructor is forbidden, we use a tasklet
1000 * to process all sockets that eventually need to send more skbs.
1001 * We use one tasklet per cpu, with its own queue of sockets.
1002 */
1003struct tsq_tasklet {
1004	struct tasklet_struct	tasklet;
1005	struct list_head	head; /* queue of tcp sockets */
1006};
1007static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
1008
1009static void tcp_tsq_write(struct sock *sk)
1010{
1011	if ((1 << sk->sk_state) &
1012	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
1013	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK)) {
1014		struct tcp_sock *tp = tcp_sk(sk);
1015
1016		if (tp->lost_out > tp->retrans_out &&
1017		    tp->snd_cwnd > tcp_packets_in_flight(tp)) {
1018			tcp_mstamp_refresh(tp);
1019			tcp_xmit_retransmit_queue(sk);
1020		}
1021
1022		tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
1023			       0, GFP_ATOMIC);
1024	}
1025}
1026
1027static void tcp_tsq_handler(struct sock *sk)
1028{
1029	bh_lock_sock(sk);
1030	if (!sock_owned_by_user(sk))
1031		tcp_tsq_write(sk);
1032	else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
1033		sock_hold(sk);
1034	bh_unlock_sock(sk);
1035}
1036/*
1037 * One tasklet per cpu tries to send more skbs.
1038 * We run in tasklet context but need to disable irqs when
1039 * transferring tsq->head because tcp_wfree() might
 1040 * interrupt us (non-NAPI drivers).
1041 */
1042static void tcp_tasklet_func(struct tasklet_struct *t)
1043{
1044	struct tsq_tasklet *tsq = from_tasklet(tsq,  t, tasklet);
1045	LIST_HEAD(list);
1046	unsigned long flags;
1047	struct list_head *q, *n;
1048	struct tcp_sock *tp;
1049	struct sock *sk;
1050
1051	local_irq_save(flags);
1052	list_splice_init(&tsq->head, &list);
1053	local_irq_restore(flags);
1054
1055	list_for_each_safe(q, n, &list) {
1056		tp = list_entry(q, struct tcp_sock, tsq_node);
1057		list_del(&tp->tsq_node);
1058
1059		sk = (struct sock *)tp;
1060		smp_mb__before_atomic();
1061		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);
1062
1063		tcp_tsq_handler(sk);
1064		sk_free(sk);
1065	}
1066}
1067
1068#define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
1069			  TCPF_WRITE_TIMER_DEFERRED |	\
1070			  TCPF_DELACK_TIMER_DEFERRED |	\
1071			  TCPF_MTU_REDUCED_DEFERRED)
1072/**
1073 * tcp_release_cb - tcp release_sock() callback
1074 * @sk: socket
1075 *
1076 * called from release_sock() to perform protocol dependent
1077 * actions before socket release.
1078 */
1079void tcp_release_cb(struct sock *sk)
1080{
1081	unsigned long flags, nflags;
1082
1083	/* perform an atomic operation only if at least one flag is set */
1084	do {
1085		flags = sk->sk_tsq_flags;
1086		if (!(flags & TCP_DEFERRED_ALL))
1087			return;
1088		nflags = flags & ~TCP_DEFERRED_ALL;
1089	} while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags);
1090
1091	if (flags & TCPF_TSQ_DEFERRED) {
1092		tcp_tsq_write(sk);
1093		__sock_put(sk);
1094	}
1095	/* Here begins the tricky part :
1096	 * We are called from release_sock() with :
1097	 * 1) BH disabled
1098	 * 2) sk_lock.slock spinlock held
1099	 * 3) socket owned by us (sk->sk_lock.owned == 1)
1100	 *
 1101	 * But the following code is meant to be called from BH handlers,
 1102	 * so we should keep BH disabled, but release socket ownership early.
 1103	 */
1104	sock_release_ownership(sk);
1105
1106	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
1107		tcp_write_timer_handler(sk);
1108		__sock_put(sk);
1109	}
1110	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
1111		tcp_delack_timer_handler(sk);
1112		__sock_put(sk);
1113	}
1114	if (flags & TCPF_MTU_REDUCED_DEFERRED) {
1115		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
1116		__sock_put(sk);
1117	}
1118}
1119EXPORT_SYMBOL(tcp_release_cb);
1120
1121void __init tcp_tasklet_init(void)
1122{
1123	int i;
1124
1125	for_each_possible_cpu(i) {
1126		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);
1127
1128		INIT_LIST_HEAD(&tsq->head);
1129		tasklet_setup(&tsq->tasklet, tcp_tasklet_func);
1130	}
1131}
1132
1133/*
1134 * Write buffer destructor automatically called from kfree_skb.
1135 * We can't xmit new skbs from this context, as we might already
1136 * hold qdisc lock.
1137 */
1138void tcp_wfree(struct sk_buff *skb)
1139{
1140	struct sock *sk = skb->sk;
1141	struct tcp_sock *tp = tcp_sk(sk);
1142	unsigned long flags, nval, oval;
1143
1144	/* Keep one reference on sk_wmem_alloc.
1145	 * Will be released by sk_free() from here or tcp_tasklet_func()
1146	 */
1147	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));
1148
1149	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
1150	 * Wait until our queues (qdisc + devices) are drained.
1151	 * This gives :
1152	 * - less callbacks to tcp_write_xmit(), reducing stress (batches)
1153	 * - chance for incoming ACK (processed by another cpu maybe)
1154	 *   to migrate this flow (skb->ooo_okay will be eventually set)
1155	 */
1156	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
1157		goto out;
1158
1159	for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
1160		struct tsq_tasklet *tsq;
1161		bool empty;
1162
1163		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
1164			goto out;
1165
1166		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED;
1167		nval = cmpxchg(&sk->sk_tsq_flags, oval, nval);
1168		if (nval != oval)
1169			continue;
1170
1171		/* queue this socket to tasklet queue */
1172		local_irq_save(flags);
1173		tsq = this_cpu_ptr(&tsq_tasklet);
1174		empty = list_empty(&tsq->head);
1175		list_add(&tp->tsq_node, &tsq->head);
1176		if (empty)
1177			tasklet_schedule(&tsq->tasklet);
1178		local_irq_restore(flags);
1179		return;
1180	}
1181out:
1182	sk_free(sk);
1183}
1184
1185/* Note: Called under soft irq.
1186 * We can call TCP stack right away, unless socket is owned by user.
1187 */
1188enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
1189{
1190	struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer);
1191	struct sock *sk = (struct sock *)tp;
1192
1193	tcp_tsq_handler(sk);
1194	sock_put(sk);
1195
1196	return HRTIMER_NORESTART;
1197}
1198
1199static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
1200				      u64 prior_wstamp)
1201{
1202	struct tcp_sock *tp = tcp_sk(sk);
1203
1204	if (sk->sk_pacing_status != SK_PACING_NONE) {
1205		unsigned long rate = sk->sk_pacing_rate;
1206
1207		/* Original sch_fq does not pace first 10 MSS
1208		 * Note that tp->data_segs_out overflows after 2^32 packets,
1209		 * this is a minor annoyance.
1210		 */
1211		if (rate != ~0UL && rate && tp->data_segs_out >= 10) {
1212			u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate);
1213			u64 credit = tp->tcp_wstamp_ns - prior_wstamp;
1214
1215			/* take into account OS jitter */
1216			len_ns -= min_t(u64, len_ns / 2, credit);
1217			tp->tcp_wstamp_ns += len_ns;
1218		}
1219	}
1220	list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
1221}
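/* Illustrative userspace sketch (not from the kernel tree): the pacing
 * delay charged above is len * NSEC_PER_SEC / rate.  A 1448-byte segment
 * paced at 10 Mbit/s is spaced about 1.16 ms from the previous one,
 * before the OS-jitter credit is subtracted.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long len = 1448;                /* bytes            */
	unsigned long long rate = 10000000ULL / 8;    /* 10 Mbit/s in B/s */
	unsigned long long len_ns = len * 1000000000ULL / rate;

	printf("%llu ns\n", len_ns);                  /* 1158400 ns */
	return 0;
}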
1222
1223INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
1224INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
1225INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb));
1226
 1227/* This routine actually transmits TCP packets queued by
1228 * tcp_do_sendmsg().  This is used by both the initial
1229 * transmission and possible later retransmissions.
1230 * All SKB's seen here are completely headerless.  It is our
1231 * job to build the TCP header, and pass the packet down to
1232 * IP so it can do the same plus pass the packet off to the
1233 * device.
1234 *
1235 * We are working here with either a clone of the original
1236 * SKB, or a fresh unique copy made by the retransmit engine.
1237 */
1238static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
1239			      int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
1240{
1241	const struct inet_connection_sock *icsk = inet_csk(sk);
1242	struct inet_sock *inet;
1243	struct tcp_sock *tp;
1244	struct tcp_skb_cb *tcb;
1245	struct tcp_out_options opts;
1246	unsigned int tcp_options_size, tcp_header_size;
1247	struct sk_buff *oskb = NULL;
1248	struct tcp_md5sig_key *md5;
1249	struct tcphdr *th;
1250	u64 prior_wstamp;
1251	int err;
1252
1253	BUG_ON(!skb || !tcp_skb_pcount(skb));
1254	tp = tcp_sk(sk);
1255	prior_wstamp = tp->tcp_wstamp_ns;
1256	tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
1257	skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
1258	if (clone_it) {
1259		TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
1260			- tp->snd_una;
1261		oskb = skb;
1262
1263		tcp_skb_tsorted_save(oskb) {
1264			if (unlikely(skb_cloned(oskb)))
1265				skb = pskb_copy(oskb, gfp_mask);
1266			else
1267				skb = skb_clone(oskb, gfp_mask);
1268		} tcp_skb_tsorted_restore(oskb);
1269
1270		if (unlikely(!skb))
1271			return -ENOBUFS;
1272		/* retransmit skbs might have a non zero value in skb->dev
1273		 * because skb->dev is aliased with skb->rbnode.rb_left
1274		 */
1275		skb->dev = NULL;
1276	}
1277
1278	inet = inet_sk(sk);
1279	tcb = TCP_SKB_CB(skb);
1280	memset(&opts, 0, sizeof(opts));
1281
1282	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
1283		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
1284	} else {
1285		tcp_options_size = tcp_established_options(sk, skb, &opts,
1286							   &md5);
1287		/* Force a PSH flag on all (GSO) packets to expedite GRO flush
 1288		 * at the receiver: this slightly improves GRO performance.
1289		 * Note that we do not force the PSH flag for non GSO packets,
1290		 * because they might be sent under high congestion events,
1291		 * and in this case it is better to delay the delivery of 1-MSS
1292		 * packets and thus the corresponding ACK packet that would
1293		 * release the following packet.
1294		 */
1295		if (tcp_skb_pcount(skb) > 1)
1296			tcb->tcp_flags |= TCPHDR_PSH;
1297	}
1298	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
1299
1300	/* if no packet is in qdisc/device queue, then allow XPS to select
1301	 * another queue. We can be called from tcp_tsq_handler()
1302	 * which holds one reference to sk.
1303	 *
1304	 * TODO: Ideally, in-flight pure ACK packets should not matter here.
1305	 * One way to get this would be to set skb->truesize = 2 on them.
1306	 */
1307	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);
1308
1309	/* If we had to use memory reserve to allocate this skb,
1310	 * this might cause drops if packet is looped back :
1311	 * Other socket might not have SOCK_MEMALLOC.
1312	 * Packets not looped back do not care about pfmemalloc.
1313	 */
1314	skb->pfmemalloc = 0;
1315
1316	skb_push(skb, tcp_header_size);
1317	skb_reset_transport_header(skb);
1318
1319	skb_orphan(skb);
1320	skb->sk = sk;
1321	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
1322	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
1323
1324	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
1325
1326	/* Build TCP header and checksum it. */
1327	th = (struct tcphdr *)skb->data;
1328	th->source		= inet->inet_sport;
1329	th->dest		= inet->inet_dport;
1330	th->seq			= htonl(tcb->seq);
1331	th->ack_seq		= htonl(rcv_nxt);
1332	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
1333					tcb->tcp_flags);
1334
1335	th->check		= 0;
1336	th->urg_ptr		= 0;
1337
 1338	/* The urg_mode check is necessary during a window probe below snd_una */
1339	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
1340		if (before(tp->snd_up, tcb->seq + 0x10000)) {
1341			th->urg_ptr = htons(tp->snd_up - tcb->seq);
1342			th->urg = 1;
1343		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
1344			th->urg_ptr = htons(0xFFFF);
1345			th->urg = 1;
1346		}
1347	}
1348
1349	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
1350	if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
1351		th->window      = htons(tcp_select_window(sk));
1352		tcp_ecn_send(sk, skb, th, tcp_header_size);
1353	} else {
1354		/* RFC1323: The window in SYN & SYN/ACK segments
1355		 * is never scaled.
1356		 */
1357		th->window	= htons(min(tp->rcv_wnd, 65535U));
1358	}
1359
1360	tcp_options_write((__be32 *)(th + 1), tp, &opts);
1361
1362#ifdef CONFIG_TCP_MD5SIG
1363	/* Calculate the MD5 hash, as we have all we need now */
1364	if (md5) {
1365		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1366		tp->af_specific->calc_md5_hash(opts.hash_location,
1367					       md5, sk, skb);
1368	}
1369#endif
1370
1371	/* BPF prog is the last one writing header option */
1372	bpf_skops_write_hdr_opt(sk, skb, NULL, NULL, 0, &opts);
1373
1374	INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check,
1375			   tcp_v6_send_check, tcp_v4_send_check,
1376			   sk, skb);
1377
1378	if (likely(tcb->tcp_flags & TCPHDR_ACK))
1379		tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
1380
1381	if (skb->len != tcp_header_size) {
1382		tcp_event_data_sent(tp, sk);
1383		tp->data_segs_out += tcp_skb_pcount(skb);
1384		tp->bytes_sent += skb->len - tcp_header_size;
1385	}
1386
1387	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
1388		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
1389			      tcp_skb_pcount(skb));
1390
1391	tp->segs_out += tcp_skb_pcount(skb);
1392	skb_set_hash_from_sk(skb, sk);
 1393	/* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */
1394	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
1395	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
1396
1397	/* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */
1398
1399	/* Cleanup our debris for IP stacks */
1400	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
1401			       sizeof(struct inet6_skb_parm)));
1402
1403	tcp_add_tx_delay(skb, tp);
1404
1405	err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit,
1406				 inet6_csk_xmit, ip_queue_xmit,
1407				 sk, skb, &inet->cork.fl);
1408
1409	if (unlikely(err > 0)) {
1410		tcp_enter_cwr(sk);
1411		err = net_xmit_eval(err);
1412	}
1413	if (!err && oskb) {
1414		tcp_update_skb_after_send(sk, oskb, prior_wstamp);
1415		tcp_rate_skb_sent(sk, oskb);
1416	}
1417	return err;
1418}
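/* Illustrative userspace sketch (not from the kernel tree): the combined
 * store into the word at byte offset 12 of the TCP header, as done above,
 * packs the 4-bit data offset (header length in 32-bit words) with the
 * flag bits.  For a 32-byte header carrying ACK|PSH (0x10|0x08):
 */
#include <stdio.h>

int main(void)
{
	unsigned int tcp_header_size = 32;  /* 20 bytes base + 12 of options */
	unsigned int flags = 0x18;          /* ACK | PSH                     */
	unsigned int word = ((tcp_header_size >> 2) << 12) | flags;

	printf("0x%04x\n", word);           /* 0x8018: doff=8, ACK|PSH */
	return 0;
}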
1419
1420static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
1421			    gfp_t gfp_mask)
1422{
1423	return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
1424				  tcp_sk(sk)->rcv_nxt);
1425}
1426
1427/* This routine just queues the buffer for sending.
1428 *
1429 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
 1430 * otherwise the socket can stall.
1431 */
1432static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
1433{
1434	struct tcp_sock *tp = tcp_sk(sk);
1435
1436	/* Advance write_seq and place onto the write_queue. */
1437	WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);
1438	__skb_header_release(skb);
1439	tcp_add_write_queue_tail(sk, skb);
1440	sk_wmem_queued_add(sk, skb->truesize);
1441	sk_mem_charge(sk, skb->truesize);
1442}
1443
1444/* Initialize TSO segments for a packet. */
1445static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1446{
1447	if (skb->len <= mss_now) {
1448		/* Avoid the costly divide in the normal
1449		 * non-TSO case.
1450		 */
1451		tcp_skb_pcount_set(skb, 1);
1452		TCP_SKB_CB(skb)->tcp_gso_size = 0;
1453	} else {
1454		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
1455		TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
1456	}
1457}
1458
1459/* Pcount in the middle of the write queue got changed, we need to do various
1460 * tweaks to fix counters
1461 */
1462static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1463{
1464	struct tcp_sock *tp = tcp_sk(sk);
1465
1466	tp->packets_out -= decr;
1467
1468	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1469		tp->sacked_out -= decr;
1470	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1471		tp->retrans_out -= decr;
1472	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1473		tp->lost_out -= decr;
1474
1475	/* Reno case is special. Sigh... */
1476	if (tcp_is_reno(tp) && decr > 0)
1477		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1478
1479	if (tp->lost_skb_hint &&
1480	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
1481	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
1482		tp->lost_cnt_hint -= decr;
1483
1484	tcp_verify_left_out(tp);
1485}
1486
1487static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
1488{
1489	return TCP_SKB_CB(skb)->txstamp_ack ||
1490		(skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
1491}
1492
1493static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
1494{
1495	struct skb_shared_info *shinfo = skb_shinfo(skb);
1496
1497	if (unlikely(tcp_has_tx_tstamp(skb)) &&
1498	    !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1499		struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1500		u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1501
1502		shinfo->tx_flags &= ~tsflags;
1503		shinfo2->tx_flags |= tsflags;
1504		swap(shinfo->tskey, shinfo2->tskey);
1505		TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
1506		TCP_SKB_CB(skb)->txstamp_ack = 0;
1507	}
1508}
1509
1510static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
1511{
1512	TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
1513	TCP_SKB_CB(skb)->eor = 0;
1514}
1515
1516/* Insert buff after skb on the write or rtx queue of sk.  */
1517static void tcp_insert_write_queue_after(struct sk_buff *skb,
1518					 struct sk_buff *buff,
1519					 struct sock *sk,
1520					 enum tcp_queue tcp_queue)
1521{
1522	if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE)
1523		__skb_queue_after(&sk->sk_write_queue, skb, buff);
1524	else
1525		tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
1526}
1527
1528/* Function to create two new TCP segments.  Shrinks the given segment
1529 * to the specified size and appends a new segment with the rest of the
1530 * packet to the list.  This won't be called frequently, I hope.
1531 * Remember, these are still headerless SKBs at this point.
1532 */
1533int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
1534		 struct sk_buff *skb, u32 len,
1535		 unsigned int mss_now, gfp_t gfp)
1536{
1537	struct tcp_sock *tp = tcp_sk(sk);
1538	struct sk_buff *buff;
1539	int nsize, old_factor;
1540	long limit;
1541	int nlen;
1542	u8 flags;
1543
1544	if (WARN_ON(len > skb->len))
1545		return -EINVAL;
1546
1547	nsize = skb_headlen(skb) - len;
1548	if (nsize < 0)
1549		nsize = 0;
1550
1551	/* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
1552	 * We need some allowance to not penalize applications setting small
1553	 * SO_SNDBUF values.
1554	 * Also allow first and last skb in retransmit queue to be split.
1555	 */
1556	limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
1557	if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
1558		     tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
1559		     skb != tcp_rtx_queue_head(sk) &&
1560		     skb != tcp_rtx_queue_tail(sk))) {
1561		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
1562		return -ENOMEM;
1563	}
1564
1565	if (skb_unclone(skb, gfp))
1566		return -ENOMEM;
1567
1568	/* Get a new skb... force flag on. */
1569	buff = sk_stream_alloc_skb(sk, nsize, gfp, true);
1570	if (!buff)
1571		return -ENOMEM; /* We'll just try again later. */
1572	skb_copy_decrypted(buff, skb);
1573	mptcp_skb_ext_copy(buff, skb);
1574
1575	sk_wmem_queued_add(sk, buff->truesize);
1576	sk_mem_charge(sk, buff->truesize);
1577	nlen = skb->len - len - nsize;
1578	buff->truesize += nlen;
1579	skb->truesize -= nlen;
1580
1581	/* Correct the sequence numbers. */
1582	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1583	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1584	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1585
1586	/* PSH and FIN should only be set in the second packet. */
1587	flags = TCP_SKB_CB(skb)->tcp_flags;
1588	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
1589	TCP_SKB_CB(buff)->tcp_flags = flags;
1590	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1591	tcp_skb_fragment_eor(skb, buff);
1592
1593	skb_split(skb, buff, len);
1594
1595	buff->ip_summed = CHECKSUM_PARTIAL;
1596
1597	buff->tstamp = skb->tstamp;
1598	tcp_fragment_tstamp(skb, buff);
1599
1600	old_factor = tcp_skb_pcount(skb);
1601
1602	/* Fix up tso_factor for both original and new SKB.  */
1603	tcp_set_skb_tso_segs(skb, mss_now);
1604	tcp_set_skb_tso_segs(buff, mss_now);
1605
1606	/* Update delivered info for the new segment */
1607	TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
1608
1609	/* If this packet has been sent out already, we must
1610	 * adjust the various packet counters.
1611	 */
1612	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
1613		int diff = old_factor - tcp_skb_pcount(skb) -
1614			tcp_skb_pcount(buff);
1615
1616		if (diff)
1617			tcp_adjust_pcount(sk, skb, diff);
1618	}
1619
1620	/* Link BUFF into the send queue. */
1621	__skb_header_release(buff);
1622	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1623	if (tcp_queue == TCP_FRAG_IN_RTX_QUEUE)
1624		list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor);
1625
1626	return 0;
1627}
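/* Illustrative note (not part of the kernel file): splitting a queued
 * segment covering [1000, 4000) at len = 1448 yields
 *
 *	skb:  seq = 1000, end_seq = 2448
 *	buff: seq = 2448, end_seq = 4000
 *
 * with PSH/FIN carried only by buff, matching the flag handling above.
 */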
1628
1629/* This is similar to __pskb_pull_tail(). The difference is that pulled
1630 * data is not copied, but immediately discarded.
1631 */
1632static int __pskb_trim_head(struct sk_buff *skb, int len)
1633{
1634	struct skb_shared_info *shinfo;
1635	int i, k, eat;
1636
1637	eat = min_t(int, len, skb_headlen(skb));
1638	if (eat) {
1639		__skb_pull(skb, eat);
1640		len -= eat;
1641		if (!len)
1642			return 0;
1643	}
1644	eat = len;
1645	k = 0;
1646	shinfo = skb_shinfo(skb);
1647	for (i = 0; i < shinfo->nr_frags; i++) {
1648		int size = skb_frag_size(&shinfo->frags[i]);
1649
1650		if (size <= eat) {
1651			skb_frag_unref(skb, i);
1652			eat -= size;
1653		} else {
1654			shinfo->frags[k] = shinfo->frags[i];
1655			if (eat) {
1656				skb_frag_off_add(&shinfo->frags[k], eat);
1657				skb_frag_size_sub(&shinfo->frags[k], eat);
1658				eat = 0;
1659			}
1660			k++;
1661		}
1662	}
1663	shinfo->nr_frags = k;
1664
1665	skb->data_len -= len;
1666	skb->len = skb->data_len;
1667	return len;
1668}
1669
1670/* Remove acked data from a packet in the transmit queue. */
1671int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
1672{
1673	u32 delta_truesize;
1674
1675	if (skb_unclone(skb, GFP_ATOMIC))
1676		return -ENOMEM;
1677
1678	delta_truesize = __pskb_trim_head(skb, len);
1679
1680	TCP_SKB_CB(skb)->seq += len;
1681	skb->ip_summed = CHECKSUM_PARTIAL;
1682
1683	if (delta_truesize) {
1684		skb->truesize	   -= delta_truesize;
1685		sk_wmem_queued_add(sk, -delta_truesize);
1686		sk_mem_uncharge(sk, delta_truesize);
1687	}
1688
1689	/* Any change of skb->len requires recalculation of tso factor. */
1690	if (tcp_skb_pcount(skb) > 1)
1691		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
1692
1693	return 0;
1694}
1695
1696/* Calculate MSS not accounting any TCP options.  */
1697static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
1698{
1699	const struct tcp_sock *tp = tcp_sk(sk);
1700	const struct inet_connection_sock *icsk = inet_csk(sk);
1701	int mss_now;
1702
1703	/* Calculate base mss without TCP options:
1704	   It is MMS_S - sizeof(tcphdr) of rfc1122
1705	 */
1706	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
1707
1708	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
1709	if (icsk->icsk_af_ops->net_frag_header_len) {
1710		const struct dst_entry *dst = __sk_dst_get(sk);
1711
1712		if (dst && dst_allfrag(dst))
1713			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
1714	}
1715
1716	/* Clamp it (mss_clamp does not include tcp options) */
1717	if (mss_now > tp->rx_opt.mss_clamp)
1718		mss_now = tp->rx_opt.mss_clamp;
1719
1720	/* Now subtract optional transport overhead */
1721	mss_now -= icsk->icsk_ext_hdr_len;
1722
1723	/* Then reserve room for full set of TCP options and 8 bytes of data */
1724	mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss);
1725	return mss_now;
1726}
1727
1728/* Calculate MSS. Not accounting for SACKs here.  */
1729int tcp_mtu_to_mss(struct sock *sk, int pmtu)
1730{
1731	/* Subtract TCP options size, not including SACKs */
1732	return __tcp_mtu_to_mss(sk, pmtu) -
1733	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
1734}
1735EXPORT_SYMBOL(tcp_mtu_to_mss);
1736
1737/* Inverse of above */
1738int tcp_mss_to_mtu(struct sock *sk, int mss)
1739{
1740	const struct tcp_sock *tp = tcp_sk(sk);
1741	const struct inet_connection_sock *icsk = inet_csk(sk);
1742	int mtu;
1743
1744	mtu = mss +
1745	      tp->tcp_header_len +
1746	      icsk->icsk_ext_hdr_len +
1747	      icsk->icsk_af_ops->net_header_len;
1748
1749	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
1750	if (icsk->icsk_af_ops->net_frag_header_len) {
1751		const struct dst_entry *dst = __sk_dst_get(sk);
1752
1753		if (dst && dst_allfrag(dst))
1754			mtu += icsk->icsk_af_ops->net_frag_header_len;
1755	}
1756	return mtu;
1757}
1758EXPORT_SYMBOL(tcp_mss_to_mtu);
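
/*
 * Editor's illustrative sketch (not part of the original file): for the
 * common IPv4 case with no IP options, no extension headers and no
 * RTAX_FEATURE_ALLFRAG, the two conversions above reduce to adding or
 * removing the fixed 20-byte IP and 20-byte TCP headers, so e.g. a
 * 1500-byte MTU maps to a 1460-byte MSS and back.
 */
static inline int example_ipv4_mtu_to_mss(int mtu)
{
	return mtu - 20 /* struct iphdr */ - 20 /* struct tcphdr */;
}

static inline int example_ipv4_mss_to_mtu(int mss)
{
	return mss + 20 + 20;	/* e.g. 1460 -> 1500 */
}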
1759
1760/* MTU probing init per socket */
1761void tcp_mtup_init(struct sock *sk)
1762{
1763	struct tcp_sock *tp = tcp_sk(sk);
1764	struct inet_connection_sock *icsk = inet_csk(sk);
1765	struct net *net = sock_net(sk);
1766
1767	icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
1768	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
1769			       icsk->icsk_af_ops->net_header_len;
1770	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
1771	icsk->icsk_mtup.probe_size = 0;
1772	if (icsk->icsk_mtup.enabled)
1773		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
1774}
1775EXPORT_SYMBOL(tcp_mtup_init);
1776
1777/* This function synchronizes snd mss to the current pmtu/exthdr set.
1778
1779   tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does
1780   NOT account for TCP options; it covers only the bare TCP header.
1781
1782   tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1783   It is minimum of user_mss and mss received with SYN.
1784   It also does not include TCP options.
1785
1786   inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
1787
1788   tp->mss_cache is current effective sending mss, including
1789   all tcp options except for SACKs. It is evaluated,
1790   taking into account current pmtu, but never exceeds
1791   tp->rx_opt.mss_clamp.
1792
1793   NOTE1. rfc1122 clearly states that advertised MSS
1794   DOES NOT include either tcp or ip options.
1795
1796   NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1797   are READ ONLY outside this function.		--ANK (980731)
1798 */
1799unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
1800{
1801	struct tcp_sock *tp = tcp_sk(sk);
1802	struct inet_connection_sock *icsk = inet_csk(sk);
1803	int mss_now;
1804
1805	if (icsk->icsk_mtup.search_high > pmtu)
1806		icsk->icsk_mtup.search_high = pmtu;
1807
1808	mss_now = tcp_mtu_to_mss(sk, pmtu);
1809	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
1810
1811	/* And store cached results */
1812	icsk->icsk_pmtu_cookie = pmtu;
1813	if (icsk->icsk_mtup.enabled)
1814		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1815	tp->mss_cache = mss_now;
1816
1817	return mss_now;
1818}
1819EXPORT_SYMBOL(tcp_sync_mss);
1820
1821/* Compute the current effective MSS, taking SACKs and IP options,
1822 * and even PMTU discovery events into account.
1823 */
1824unsigned int tcp_current_mss(struct sock *sk)
1825{
1826	const struct tcp_sock *tp = tcp_sk(sk);
1827	const struct dst_entry *dst = __sk_dst_get(sk);
1828	u32 mss_now;
1829	unsigned int header_len;
1830	struct tcp_out_options opts;
1831	struct tcp_md5sig_key *md5;
1832
1833	mss_now = tp->mss_cache;
1834
1835	if (dst) {
1836		u32 mtu = dst_mtu(dst);
1837		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
1838			mss_now = tcp_sync_mss(sk, mtu);
1839	}
1840
1841	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
1842		     sizeof(struct tcphdr);
1843	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
1844	 * some common options. If this is an odd packet (because we have SACK
1845	 * blocks etc) then our calculated header_len will be different, and
1846	 * we have to adjust mss_now correspondingly */
1847	if (header_len != tp->tcp_header_len) {
1848		int delta = (int) header_len - tp->tcp_header_len;
1849		mss_now -= delta;
1850	}
1851
1852	return mss_now;
1853}
1854
1855/* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
1856 * As additional protections, we do not touch cwnd in retransmission phases,
1857 * and if application hit its sndbuf limit recently.
1858 */
1859static void tcp_cwnd_application_limited(struct sock *sk)
1860{
1861	struct tcp_sock *tp = tcp_sk(sk);
1862
1863	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
1864	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1865		/* Limited by application or receiver window. */
1866		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
1867		u32 win_used = max(tp->snd_cwnd_used, init_win);
1868		if (win_used < tp->snd_cwnd) {
1869			tp->snd_ssthresh = tcp_current_ssthresh(sk);
1870			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
1871		}
1872		tp->snd_cwnd_used = 0;
1873	}
1874	tp->snd_cwnd_stamp = tcp_jiffies32;
1875}
1876
1877static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1878{
1879	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1880	struct tcp_sock *tp = tcp_sk(sk);
1881
1882	/* Track the maximum number of outstanding packets in each
1883	 * window, and remember whether we were cwnd-limited then.
1884	 */
1885	if (!before(tp->snd_una, tp->max_packets_seq) ||
1886	    tp->packets_out > tp->max_packets_out ||
1887	    is_cwnd_limited) {
1888		tp->max_packets_out = tp->packets_out;
1889		tp->max_packets_seq = tp->snd_nxt;
1890		tp->is_cwnd_limited = is_cwnd_limited;
1891	}
1892
1893	if (tcp_is_cwnd_limited(sk)) {
1894		/* Network is fed fully. */
1895		tp->snd_cwnd_used = 0;
1896		tp->snd_cwnd_stamp = tcp_jiffies32;
1897	} else {
1898		/* Network starves. */
1899		if (tp->packets_out > tp->snd_cwnd_used)
1900			tp->snd_cwnd_used = tp->packets_out;
1901
1902		if (sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle &&
1903		    (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
1904		    !ca_ops->cong_control)
1905			tcp_cwnd_application_limited(sk);
1906
1907		/* The following conditions together indicate the starvation
1908		 * is caused by insufficient sender buffer:
1909		 * 1) just sent some data (see tcp_write_xmit)
1910		 * 2) not cwnd limited (this else condition)
1911		 * 3) no more data to send (tcp_write_queue_empty())
1912		 * 4) application is hitting buffer limit (SOCK_NOSPACE)
1913		 */
1914		if (tcp_write_queue_empty(sk) && sk->sk_socket &&
1915		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
1916		    (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
1917			tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
1918	}
1919}
1920
1921/* Minshall's variant of the Nagle send check. */
1922static bool tcp_minshall_check(const struct tcp_sock *tp)
1923{
1924	return after(tp->snd_sml, tp->snd_una) &&
1925		!after(tp->snd_sml, tp->snd_nxt);
1926}
1927
1928/* Update snd_sml if this skb is under mss
1929 * Note that a TSO packet might end with a sub-mss segment
1930 * The test is really :
1931 * if ((skb->len % mss) != 0)
1932 *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1933 * But we can avoid doing the divide again given we already have
1934 *  skb_pcount = skb->len / mss_now
1935 */
1936static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1937				const struct sk_buff *skb)
1938{
1939	if (skb->len < tcp_skb_pcount(skb) * mss_now)
1940		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1941}
1942
1943/* Return false if the packet can be sent now without violating Nagle's rules:
1944 * 1. It is full sized. (provided by caller in %partial bool)
1945 * 2. Or it contains FIN. (already checked by caller)
1946 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1947 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1948 *    With Minshall's modification: all sent small packets are ACKed.
1949 */
1950static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1951			    int nonagle)
1952{
1953	return partial &&
1954		((nonagle & TCP_NAGLE_CORK) ||
1955		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1956}
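
/*
 * Editor's illustrative sketch (not part of the original file): the net
 * effect of tcp_minshall_check() and tcp_nagle_check() above.  A sub-mss
 * tail may go out unless TCP_CORK is set, or Nagle is enabled and an
 * earlier small packet is still unacknowledged.
 */
static inline bool example_can_send_partial(bool corked, bool nagle_on,
					    bool small_pkt_outstanding)
{
	if (corked)
		return false;
	return !(nagle_on && small_pkt_outstanding);
}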
1957
1958/* Return how many segs we'd like on a TSO packet,
1959 * to send one TSO packet per ms
1960 */
1961static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
1962			    int min_tso_segs)
1963{
1964	u32 bytes, segs;
1965
1966	bytes = min_t(unsigned long,
1967		      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift),
1968		      sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
1969
1970	/* Goal is to send at least one packet per ms,
1971	 * not one big TSO packet every 100 ms.
1972	 * This preserves ACK clocking and is consistent
1973	 * with tcp_tso_should_defer() heuristic.
1974	 */
1975	segs = max_t(u32, bytes / mss_now, min_tso_segs);
1976
1977	return segs;
1978}
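
/*
 * Editor's illustrative sketch (not part of the original file): with the
 * default sk_pacing_shift of 10, rate >> 10 approximates one millisecond
 * of payload at the pacing rate (bytes/sec), which is then expressed in
 * mss units, subject to the lower bound.
 */
static inline u32 example_tso_autosize(u64 pacing_rate_bps, u32 mss_now,
				       u32 min_segs)
{
	u64 bytes_per_ms = pacing_rate_bps >> 10;	/* ~rate / 1024 */

	return max_t(u32, div_u64(bytes_per_ms, mss_now), min_segs);
}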
1979
1980/* Return the number of segments we want in the skb we are transmitting.
1981 * See if congestion control module wants to decide; otherwise, autosize.
1982 */
1983static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
1984{
1985	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1986	u32 min_tso, tso_segs;
1987
1988	min_tso = ca_ops->min_tso_segs ?
1989			ca_ops->min_tso_segs(sk) :
1990			sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs;
1991
1992	tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
1993	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
1994}
1995
1996/* Returns the portion of skb which can be sent right away */
1997static unsigned int tcp_mss_split_point(const struct sock *sk,
1998					const struct sk_buff *skb,
1999					unsigned int mss_now,
2000					unsigned int max_segs,
2001					int nonagle)
2002{
2003	const struct tcp_sock *tp = tcp_sk(sk);
2004	u32 partial, needed, window, max_len;
2005
2006	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
2007	max_len = mss_now * max_segs;
2008
2009	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
2010		return max_len;
2011
2012	needed = min(skb->len, window);
2013
2014	if (max_len <= needed)
2015		return max_len;
2016
2017	partial = needed % mss_now;
2018	/* If last segment is not a full MSS, check if Nagle rules allow us
2019	 * to include this last segment in this skb.
2020	 * Otherwise, we'll split the skb at the last MSS boundary.
2021	 */
2022	if (tcp_nagle_check(partial != 0, tp, nonagle))
2023		return needed - partial;
2024
2025	return needed;
2026}
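
/*
 * Editor's illustrative sketch (not part of the original file): when the
 * sendable amount is not a multiple of mss and Nagle forbids the sub-mss
 * tail, the split point above falls back to the last full MSS boundary.
 */
static inline u32 example_trim_to_mss_boundary(u32 sendable, u32 mss)
{
	return sendable - (sendable % mss);	/* e.g. 4000, 1460 -> 2920 */
}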
2027
2028/* Can at least one segment of SKB be sent right now, according to the
2029 * congestion window rules?  If so, return how many segments are allowed.
2030 */
2031static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
2032					 const struct sk_buff *skb)
2033{
2034	u32 in_flight, cwnd, halfcwnd;
2035
2036	/* Don't be strict about the congestion window for the final FIN.  */
2037	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
2038	    tcp_skb_pcount(skb) == 1)
2039		return 1;
2040
2041	in_flight = tcp_packets_in_flight(tp);
2042	cwnd = tp->snd_cwnd;
2043	if (in_flight >= cwnd)
2044		return 0;
2045
2046	/* For better scheduling, ensure we have at least
2047	 * 2 GSO packets in flight.
2048	 */
2049	halfcwnd = max(cwnd >> 1, 1U);
2050	return min(halfcwnd, cwnd - in_flight);
2051}
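
/*
 * Editor's illustrative sketch (not part of the original file): the quota
 * above is the remaining congestion window, additionally capped at half
 * the window so that at least two GSO packets can be kept in flight.
 */
static inline u32 example_cwnd_quota(u32 cwnd, u32 in_flight)
{
	if (in_flight >= cwnd)
		return 0;
	return min(max(cwnd >> 1, 1U), cwnd - in_flight);
}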
2052
2053/* Initialize TSO state of a skb.
2054 * This must be invoked the first time we consider transmitting
2055 * SKB onto the wire.
2056 */
2057static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
2058{
2059	int tso_segs = tcp_skb_pcount(skb);
2060
2061	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
2062		tcp_set_skb_tso_segs(skb, mss_now);
2063		tso_segs = tcp_skb_pcount(skb);
2064	}
2065	return tso_segs;
2066}
2067
2068
2069/* Return true if the Nagle test allows this packet to be
2070 * sent now.
2071 */
2072static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
2073				  unsigned int cur_mss, int nonagle)
2074{
2075	/* The Nagle rule does not apply to frames that sit in the middle of the
2076	 * write_queue (they have no chance to get new data).
2077	 *
2078	 * This is implemented in the callers, where they modify the 'nonagle'
2079	 * argument based upon the location of SKB in the send queue.
2080	 */
2081	if (nonagle & TCP_NAGLE_PUSH)
2082		return true;
2083
2084	/* Don't use the nagle rule for urgent data (or for the final FIN). */
2085	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
2086		return true;
2087
2088	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
2089		return true;
2090
2091	return false;
2092}
2093
2094/* Does at least the first segment of SKB fit into the send window? */
2095static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
2096			     const struct sk_buff *skb,
2097			     unsigned int cur_mss)
2098{
2099	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
2100
2101	if (skb->len > cur_mss)
2102		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
2103
2104	return !after(end_seq, tcp_wnd_end(tp));
2105}
2106
2107/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
2108 * which is put after SKB on the list.  It is very much like
2109 * tcp_fragment() except that it may make several kinds of assumptions
2110 * in order to speed up the splitting operation.  In particular, we
2111 * know that all the data is in scatter-gather pages, and that the
2112 * packet has never been sent out before (and thus is not cloned).
2113 */
2114static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
2115			unsigned int mss_now, gfp_t gfp)
2116{
2117	int nlen = skb->len - len;
2118	struct sk_buff *buff;
2119	u8 flags;
2120
2121	/* All of a TSO frame must be composed of paged data.  */
2122	if (skb->len != skb->data_len)
2123		return tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
2124				    skb, len, mss_now, gfp);
2125
2126	buff = sk_stream_alloc_skb(sk, 0, gfp, true);
2127	if (unlikely(!buff))
2128		return -ENOMEM;
2129	skb_copy_decrypted(buff, skb);
2130	mptcp_skb_ext_copy(buff, skb);
2131
2132	sk_wmem_queued_add(sk, buff->truesize);
2133	sk_mem_charge(sk, buff->truesize);
2134	buff->truesize += nlen;
2135	skb->truesize -= nlen;
2136
2137	/* Correct the sequence numbers. */
2138	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
2139	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
2140	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
2141
2142	/* PSH and FIN should only be set in the second packet. */
2143	flags = TCP_SKB_CB(skb)->tcp_flags;
2144	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
2145	TCP_SKB_CB(buff)->tcp_flags = flags;
2146
2147	/* This packet was never sent out yet, so no SACK bits. */
2148	TCP_SKB_CB(buff)->sacked = 0;
2149
2150	tcp_skb_fragment_eor(skb, buff);
2151
2152	buff->ip_summed = CHECKSUM_PARTIAL;
2153	skb_split(skb, buff, len);
2154	tcp_fragment_tstamp(skb, buff);
2155
2156	/* Fix up tso_factor for both original and new SKB.  */
2157	tcp_set_skb_tso_segs(skb, mss_now);
2158	tcp_set_skb_tso_segs(buff, mss_now);
2159
2160	/* Link BUFF into the send queue. */
2161	__skb_header_release(buff);
2162	tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE);
2163
2164	return 0;
2165}
2166
2167/* Try to defer sending, if possible, in order to minimize the amount
2168 * of TSO splitting we do.  View it as a kind of TSO Nagle test.
2169 *
2170 * This algorithm is from John Heffner.
2171 */
2172static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
2173				 bool *is_cwnd_limited,
2174				 bool *is_rwnd_limited,
2175				 u32 max_segs)
2176{
2177	const struct inet_connection_sock *icsk = inet_csk(sk);
2178	u32 send_win, cong_win, limit, in_flight;
2179	struct tcp_sock *tp = tcp_sk(sk);
2180	struct sk_buff *head;
2181	int win_divisor;
2182	s64 delta;
2183
2184	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
2185		goto send_now;
2186
2187	/* Avoid bursty behavior by allowing defer
2188	 * only if the last write was recent (1 ms).
2189	 * Note that tp->tcp_wstamp_ns can be in the future if we have
2190	 * packets waiting in a qdisc or device for EDT delivery.
2191	 */
2192	delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC;
2193	if (delta > 0)
2194		goto send_now;
2195
2196	in_flight = tcp_packets_in_flight(tp);
2197
2198	BUG_ON(tcp_skb_pcount(skb) <= 1);
2199	BUG_ON(tp->snd_cwnd <= in_flight);
2200
2201	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
2202
2203	/* From in_flight test above, we know that cwnd > in_flight.  */
2204	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
2205
2206	limit = min(send_win, cong_win);
2207
2208	/* If a full-sized TSO skb can be sent, do it. */
2209	if (limit >= max_segs * tp->mss_cache)
2210		goto send_now;
2211
2212	/* An skb in the middle of the queue won't get more data; fully sendable already? */
2213	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
2214		goto send_now;
2215
2216	win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
2217	if (win_divisor) {
2218		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
2219
2220		/* If at least some fraction of a window is available,
2221		 * just use it.
2222		 */
2223		chunk /= win_divisor;
2224		if (limit >= chunk)
2225			goto send_now;
2226	} else {
2227		/* Different approach, try not to defer past a single
2228		 * ACK.  Receiver should ACK every other full sized
2229		 * frame, so if we have space for more than 3 frames
2230		 * then send now.
2231		 */
2232		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
2233			goto send_now;
2234	}
2235
2236	/* TODO : use tsorted_sent_queue ? */
2237	head = tcp_rtx_queue_head(sk);
2238	if (!head)
2239		goto send_now;
2240	delta = tp->tcp_clock_cache - head->tstamp;
2241	/* If next ACK is likely to come too late (half srtt), do not defer */
2242	if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
2243		goto send_now;
2244
2245	/* Ok, it looks like it is advisable to defer.
2246	 * Three cases are tracked :
2247	 * 1) We are cwnd-limited
2248	 * 2) We are rwnd-limited
2249	 * 3) We are application limited.
2250	 */
2251	if (cong_win < send_win) {
2252		if (cong_win <= skb->len) {
2253			*is_cwnd_limited = true;
2254			return true;
2255		}
2256	} else {
2257		if (send_win <= skb->len) {
2258			*is_rwnd_limited = true;
2259			return true;
2260		}
2261	}
2262
2263	/* If this packet won't get more data, do not wait. */
2264	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) ||
2265	    TCP_SKB_CB(skb)->eor)
2266		goto send_now;
2267
2268	return true;
2269
2270send_now:
2271	return false;
2272}
2273
2274static inline void tcp_mtu_check_reprobe(struct sock *sk)
2275{
2276	struct inet_connection_sock *icsk = inet_csk(sk);
2277	struct tcp_sock *tp = tcp_sk(sk);
2278	struct net *net = sock_net(sk);
2279	u32 interval;
2280	s32 delta;
2281
2282	interval = net->ipv4.sysctl_tcp_probe_interval;
2283	delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
2284	if (unlikely(delta >= interval * HZ)) {
2285		int mss = tcp_current_mss(sk);
2286
2287		/* Update current search range */
2288		icsk->icsk_mtup.probe_size = 0;
2289		icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
2290			sizeof(struct tcphdr) +
2291			icsk->icsk_af_ops->net_header_len;
2292		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
2293
2294		/* Update probe time stamp */
2295		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
2296	}
2297}
2298
2299static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
2300{
2301	struct sk_buff *skb, *next;
2302
2303	skb = tcp_send_head(sk);
2304	tcp_for_write_queue_from_safe(skb, next, sk) {
2305		if (len <= skb->len)
2306			break;
2307
2308		if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb))
2309			return false;
2310
2311		len -= skb->len;
2312	}
2313
2314	return true;
2315}
2316
2317/* Create a new MTU probe if we are ready.
2318 * MTU probing regularly attempts to increase the path MTU by
2319 * deliberately sending larger packets.  This discovers routing
2320 * changes resulting in larger path MTUs.
2321 *
2322 * Returns 0 if we should wait to probe (no cwnd available),
2323 *         1 if a probe was sent,
2324 *         -1 otherwise
2325 */
2326static int tcp_mtu_probe(struct sock *sk)
2327{
2328	struct inet_connection_sock *icsk = inet_csk(sk);
2329	struct tcp_sock *tp = tcp_sk(sk);
2330	struct sk_buff *skb, *nskb, *next;
2331	struct net *net = sock_net(sk);
2332	int probe_size;
2333	int size_needed;
2334	int copy, len;
2335	int mss_now;
2336	int interval;
2337
2338	/* Not currently probing/verifying,
2339	 * not in recovery,
2340	 * have enough cwnd, and
2341	 * not SACKing (the variable headers throw things off)
2342	 */
2343	if (likely(!icsk->icsk_mtup.enabled ||
2344		   icsk->icsk_mtup.probe_size ||
2345		   inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
2346		   tp->snd_cwnd < 11 ||
2347		   tp->rx_opt.num_sacks || tp->rx_opt.dsack))
2348		return -1;
2349
2350	/* Use binary search for probe_size between tcp_base_mss
2351	 * and the current mss_clamp. If (search_high - search_low) is
2352	 * smaller than a threshold, back off from probing.
2353	 */
2354	mss_now = tcp_current_mss(sk);
2355	probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
2356				    icsk->icsk_mtup.search_low) >> 1);
2357	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
2358	interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
2359	/* When misfortune happens, we are reprobing actively,
2360	 * and then the reprobe timer has expired. We stick with the current
2361	 * probing process by not resetting the search range to its original.
2362	 */
2363	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
2364		interval < net->ipv4.sysctl_tcp_probe_threshold) {
2365		/* Check whether enough time has elapsed for
2366		 * another round of probing.
2367		 */
2368		tcp_mtu_check_reprobe(sk);
2369		return -1;
2370	}
2371
2372	/* Have enough data in the send queue to probe? */
2373	if (tp->write_seq - tp->snd_nxt < size_needed)
2374		return -1;
2375
2376	if (tp->snd_wnd < size_needed)
2377		return -1;
2378	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
2379		return 0;
2380
2381	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
2382	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
2383		if (!tcp_packets_in_flight(tp))
2384			return -1;
2385		else
2386			return 0;
2387	}
2388
2389	if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
2390		return -1;
2391
2392	/* We're allowed to probe.  Build it now. */
2393	nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
2394	if (!nskb)
2395		return -1;
2396	sk_wmem_queued_add(sk, nskb->truesize);
2397	sk_mem_charge(sk, nskb->truesize);
2398
2399	skb = tcp_send_head(sk);
2400	skb_copy_decrypted(nskb, skb);
2401	mptcp_skb_ext_copy(nskb, skb);
2402
2403	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
2404	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
2405	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
2406	TCP_SKB_CB(nskb)->sacked = 0;
2407	nskb->csum = 0;
2408	nskb->ip_summed = CHECKSUM_PARTIAL;
2409
2410	tcp_insert_write_queue_before(nskb, skb, sk);
2411	tcp_highest_sack_replace(sk, skb, nskb);
2412
2413	len = 0;
2414	tcp_for_write_queue_from_safe(skb, next, sk) {
2415		copy = min_t(int, skb->len, probe_size - len);
2416		skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
2417
2418		if (skb->len <= copy) {
2419			/* We've eaten all the data from this skb.
2420			 * Throw it away. */
2421			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
2422			/* If this is the last SKB we copy and eor is set
2423			 * we need to propagate it to the new skb.
2424			 */
2425			TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
2426			tcp_skb_collapse_tstamp(nskb, skb);
2427			tcp_unlink_write_queue(skb, sk);
2428			sk_wmem_free_skb(sk, skb);
2429		} else {
2430			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
2431						   ~(TCPHDR_FIN|TCPHDR_PSH);
2432			if (!skb_shinfo(skb)->nr_frags) {
2433				skb_pull(skb, copy);
2434			} else {
2435				__pskb_trim_head(skb, copy);
2436				tcp_set_skb_tso_segs(skb, mss_now);
2437			}
2438			TCP_SKB_CB(skb)->seq += copy;
2439		}
2440
2441		len += copy;
2442
2443		if (len >= probe_size)
2444			break;
2445	}
2446	tcp_init_tso_segs(nskb, nskb->len);
2447
2448	/* We're ready to send.  If this fails, the probe will
2449	 * be resegmented into mss-sized pieces by tcp_write_xmit().
2450	 */
2451	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
2452		/* Decrement cwnd here because we are sending
2453		 * effectively two packets. */
2454		tp->snd_cwnd--;
2455		tcp_event_new_data_sent(sk, nskb);
2456
2457		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
2458		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
2459		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
2460
2461		return 1;
2462	}
2463
2464	return -1;
2465}
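
/*
 * Editor's illustrative sketch (not part of the original file): the probe
 * size chosen above is one step of a binary search over the MTU range.
 * A successful probe later raises search_low; a lost probe lowers
 * search_high (handled in the ACK and loss paths, not here).
 */
static inline int example_next_probe_mtu(int search_low, int search_high)
{
	return (search_low + search_high) >> 1;
}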
2466
2467static bool tcp_pacing_check(struct sock *sk)
2468{
2469	struct tcp_sock *tp = tcp_sk(sk);
2470
2471	if (!tcp_needs_internal_pacing(sk))
2472		return false;
2473
2474	if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache)
2475		return false;
2476
2477	if (!hrtimer_is_queued(&tp->pacing_timer)) {
2478		hrtimer_start(&tp->pacing_timer,
2479			      ns_to_ktime(tp->tcp_wstamp_ns),
2480			      HRTIMER_MODE_ABS_PINNED_SOFT);
2481		sock_hold(sk);
2482	}
2483	return true;
2484}
2485
2486/* TCP Small Queues:
2487 * Control the number of packets in qdisc/device queues to two packets or ~1 ms of data.
2488 * (These limits are doubled for retransmits)
2489 * This allows for:
2490 *  - better RTT estimation and ACK scheduling
2491 *  - faster recovery
2492 *  - high rates
2493 * Alas, some drivers / subsystems require a fair amount
2494 * of queued bytes to ensure line rate.
2495 * One example is wifi aggregation (802.11 AMPDU)
2496 */
2497static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2498				  unsigned int factor)
2499{
2500	unsigned long limit;
2501
2502	limit = max_t(unsigned long,
2503		      2 * skb->truesize,
2504		      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
2505	if (sk->sk_pacing_status == SK_PACING_NONE)
2506		limit = min_t(unsigned long, limit,
2507			      sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
2508	limit <<= factor;
2509
2510	if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
2511	    tcp_sk(sk)->tcp_tx_delay) {
2512		u64 extra_bytes = (u64)sk->sk_pacing_rate * tcp_sk(sk)->tcp_tx_delay;
2513
2514		/* TSQ is based on skb truesize sum (sk_wmem_alloc), so we
2515		 * approximate our needs assuming an ~100% skb->truesize overhead.
2516		 * USEC_PER_SEC is approximated by 2^20.
2517		 * do_div(extra_bytes, USEC_PER_SEC/2) is replaced by a right shift.
2518		 */
2519		extra_bytes >>= (20 - 1);
2520		limit += extra_bytes;
2521	}
2522	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
2523		/* Always send skb if rtx queue is empty.
2524		 * No need to wait for TX completion to call us back,
2525		 * after softirq/tasklet schedule.
2526		 * This helps when TX completions are delayed too much.
2527		 */
2528		if (tcp_rtx_queue_empty(sk))
2529			return false;
2530
2531		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2532		/* It is possible TX completion already happened
2533		 * before we set TSQ_THROTTLED, so we must
2534		 * test again the condition.
2535		 */
2536		smp_mb__after_atomic();
2537		if (refcount_read(&sk->sk_wmem_alloc) > limit)
2538			return true;
2539	}
2540	return false;
2541}
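
/*
 * Editor's illustrative sketch (not part of the original file): the core
 * TSQ budget computed above, ignoring the tcp_tx_delay correction.  It is
 * two skbs or ~1 ms of bytes at the pacing rate, whichever is larger,
 * doubled for retransmits (factor == 1).
 */
static inline unsigned long example_tsq_limit(unsigned long skb_truesize,
					      unsigned long pacing_rate,
					      unsigned int factor)
{
	unsigned long limit = max(2 * skb_truesize, pacing_rate >> 10);

	return limit << factor;
}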
2542
2543static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
2544{
2545	const u32 now = tcp_jiffies32;
2546	enum tcp_chrono old = tp->chrono_type;
2547
2548	if (old > TCP_CHRONO_UNSPEC)
2549		tp->chrono_stat[old - 1] += now - tp->chrono_start;
2550	tp->chrono_start = now;
2551	tp->chrono_type = new;
2552}
2553
2554void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
2555{
2556	struct tcp_sock *tp = tcp_sk(sk);
2557
2558	/* If there are multiple conditions worthy of tracking in a
2559	 * chronograph then the highest priority enum takes precedence
2560	 * over the other conditions, so that if something "more interesting"
2561	 * starts happening, we stop the previous chrono and start a new one.
2562	 */
2563	if (type > tp->chrono_type)
2564		tcp_chrono_set(tp, type);
2565}
2566
2567void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
2568{
2569	struct tcp_sock *tp = tcp_sk(sk);
2570
2571
2572	/* There are multiple conditions worthy of tracking in a
2573	 * chronograph, so that the highest priority enum takes
2574	 * precedence over the other conditions (see tcp_chrono_start).
2575	 * If a condition stops, we only stop chrono tracking if
2576	 * it's the "most interesting" (i.e. current) chrono we are
2577	 * tracking, and we start the busy chrono if we have pending data.
2578	 */
2579	if (tcp_rtx_and_write_queues_empty(sk))
2580		tcp_chrono_set(tp, TCP_CHRONO_UNSPEC);
2581	else if (type == tp->chrono_type)
2582		tcp_chrono_set(tp, TCP_CHRONO_BUSY);
2583}
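
/*
 * Editor's illustrative sketch (not part of the original file): the
 * priority rule behind tcp_chrono_start()/tcp_chrono_stop().  A newly
 * started chrono only takes over when its enum value ("interest level")
 * exceeds the one currently being tracked.
 */
static inline enum tcp_chrono example_chrono_next(enum tcp_chrono cur,
						  enum tcp_chrono started)
{
	return started > cur ? started : cur;
}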
2584
2585/* This routine writes packets to the network.  It advances the
2586 * send_head.  This happens as incoming acks open up the remote
2587 * window for us.
2588 *
2589 * LARGESEND note: !tcp_urg_mode is overkill, only frames between
2590 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2591 * account rare use of URG, this is not a big flaw.
2592 *
2593 * Send at most one packet when push_one > 0. Temporarily ignore
2594 * cwnd limit to force at most one packet out when push_one == 2.
2595 *
2596 * Returns true if no segments are in flight and we have queued segments,
2597 * but cannot send anything now because of SWS or another problem.
2598 */
2599static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2600			   int push_one, gfp_t gfp)
2601{
2602	struct tcp_sock *tp = tcp_sk(sk);
2603	struct sk_buff *skb;
2604	unsigned int tso_segs, sent_pkts;
2605	int cwnd_quota;
2606	int result;
2607	bool is_cwnd_limited = false, is_rwnd_limited = false;
2608	u32 max_segs;
2609
2610	sent_pkts = 0;
2611
2612	tcp_mstamp_refresh(tp);
2613	if (!push_one) {
2614		/* Do MTU probing. */
2615		result = tcp_mtu_probe(sk);
2616		if (!result) {
2617			return false;
2618		} else if (result > 0) {
2619			sent_pkts = 1;
2620		}
2621	}
2622
2623	max_segs = tcp_tso_segs(sk, mss_now);
2624	while ((skb = tcp_send_head(sk))) {
2625		unsigned int limit;
2626
2627		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
2628			/* "skb_mstamp_ns" is used as a start point for the retransmit timer */
2629			skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache;
2630			list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
2631			tcp_init_tso_segs(skb, mss_now);
2632			goto repair; /* Skip network transmission */
2633		}
2634
2635		if (tcp_pacing_check(sk))
2636			break;
2637
2638		tso_segs = tcp_init_tso_segs(skb, mss_now);
2639		BUG_ON(!tso_segs);
2640
2641		cwnd_quota = tcp_cwnd_test(tp, skb);
2642		if (!cwnd_quota) {
2643			if (push_one == 2)
2644				/* Force out a loss probe pkt. */
2645				cwnd_quota = 1;
2646			else
2647				break;
2648		}
2649
2650		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
2651			is_rwnd_limited = true;
2652			break;
2653		}
2654
2655		if (tso_segs == 1) {
2656			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2657						     (tcp_skb_is_last(sk, skb) ?
2658						      nonagle : TCP_NAGLE_PUSH))))
2659				break;
2660		} else {
2661			if (!push_one &&
2662			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
2663						 &is_rwnd_limited, max_segs))
2664				break;
2665		}
2666
2667		limit = mss_now;
2668		if (tso_segs > 1 && !tcp_urg_mode(tp))
2669			limit = tcp_mss_split_point(sk, skb, mss_now,
2670						    min_t(unsigned int,
2671							  cwnd_quota,
2672							  max_segs),
2673						    nonagle);
2674
2675		if (skb->len > limit &&
2676		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
2677			break;
2678
2679		if (tcp_small_queue_check(sk, skb, 0))
2680			break;
2681
2682		/* Argh, we hit an empty skb(), presumably a thread
2683		 * is sleeping in sendmsg()/sk_stream_wait_memory().
2684		 * We do not want to send a pure-ack packet and have
2685		 * a strange looking rtx queue with empty packet(s).
2686		 */
2687		if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq)
2688			break;
2689
2690		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
2691			break;
2692
2693repair:
2694		/* Advance the send_head.  This one is sent out.
2695		 * This call will increment packets_out.
2696		 */
2697		tcp_event_new_data_sent(sk, skb);
2698
2699		tcp_minshall_update(tp, mss_now, skb);
2700		sent_pkts += tcp_skb_pcount(skb);
2701
2702		if (push_one)
2703			break;
2704	}
2705
2706	if (is_rwnd_limited)
2707		tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
2708	else
2709		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
2710
2711	is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
2712	if (likely(sent_pkts || is_cwnd_limited))
2713		tcp_cwnd_validate(sk, is_cwnd_limited);
2714
2715	if (likely(sent_pkts)) {
2716		if (tcp_in_cwnd_reduction(sk))
2717			tp->prr_out += sent_pkts;
2718
2719		/* Send one loss probe per tail loss episode. */
2720		if (push_one != 2)
2721			tcp_schedule_loss_probe(sk, false);
2722		return false;
2723	}
2724	return !tp->packets_out && !tcp_write_queue_empty(sk);
2725}
2726
2727bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
2728{
2729	struct inet_connection_sock *icsk = inet_csk(sk);
2730	struct tcp_sock *tp = tcp_sk(sk);
2731	u32 timeout, rto_delta_us;
2732	int early_retrans;
2733
2734	/* Don't do any loss probe on a Fast Open connection before 3WHS
2735	 * finishes.
2736	 */
2737	if (rcu_access_pointer(tp->fastopen_rsk))
2738		return false;
2739
2740	early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
2741	/* Schedule a loss probe in 2*RTT for SACK capable connections
2742	 * not in loss recovery, that are either limited by cwnd or application.
2743	 */
2744	if ((early_retrans != 3 && early_retrans != 4) ||
2745	    !tp->packets_out || !tcp_is_sack(tp) ||
2746	    (icsk->icsk_ca_state != TCP_CA_Open &&
2747	     icsk->icsk_ca_state != TCP_CA_CWR))
2748		return false;
2749
2750	/* Probe timeout is 2*rtt. Add minimum RTO to account
2751	 * for delayed ack when there's one outstanding packet. If no RTT
2752	 * sample is available then probe after TCP_TIMEOUT_INIT.
2753	 */
2754	if (tp->srtt_us) {
2755		timeout = usecs_to_jiffies(tp->srtt_us >> 2);
2756		if (tp->packets_out == 1)
2757			timeout += TCP_RTO_MIN;
2758		else
2759			timeout += TCP_TIMEOUT_MIN;
2760	} else {
2761		timeout = TCP_TIMEOUT_INIT;
2762	}
2763
2764	/* If the RTO formula yields an earlier time, then use that time. */
2765	rto_delta_us = advancing_rto ?
2766			jiffies_to_usecs(inet_csk(sk)->icsk_rto) :
2767			tcp_rto_delta_us(sk);  /* How far in future is RTO? */
2768	if (rto_delta_us > 0)
2769		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
2770
2771	tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, TCP_RTO_MAX);
2772	return true;
2773}
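
/*
 * Editor's illustrative sketch (not part of the original file): the probe
 * timeout computed above.  tp->srtt_us stores 8 * srtt in usec, so
 * srtt_us >> 2 yields 2 * srtt; a delayed-ACK allowance is added when
 * exactly one packet is outstanding, a smaller floor otherwise.
 */
static inline u32 example_tlp_timeout_us(u32 srtt_us_x8, u32 packets_out,
					 u32 delack_us, u32 min_us)
{
	u32 timeout = srtt_us_x8 >> 2;	/* 2 * srtt */

	timeout += (packets_out == 1) ? delack_us : min_us;
	return timeout;
}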
2774
2775/* Thanks to skb fast clones, we can detect if a prior transmit of
2776 * a packet is still in a qdisc or driver queue.
2777 * In this case, there is very little point in doing a retransmit!
2778 */
2779static bool skb_still_in_host_queue(struct sock *sk,
2780				    const struct sk_buff *skb)
2781{
2782	if (unlikely(skb_fclone_busy(sk, skb))) {
2783		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2784		smp_mb__after_atomic();
2785		if (skb_fclone_busy(sk, skb)) {
2786			NET_INC_STATS(sock_net(sk),
2787				      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
2788			return true;
2789		}
2790	}
2791	return false;
2792}
2793
2794/* When the probe timeout (PTO) fires, try to send a new segment if possible, else
2795 * retransmit the last segment.
2796 */
2797void tcp_send_loss_probe(struct sock *sk)
2798{
2799	struct tcp_sock *tp = tcp_sk(sk);
2800	struct sk_buff *skb;
2801	int pcount;
2802	int mss = tcp_current_mss(sk);
2803
2804	/* At most one outstanding TLP */
2805	if (tp->tlp_high_seq)
2806		goto rearm_timer;
2807
2808	tp->tlp_retrans = 0;
2809	skb = tcp_send_head(sk);
2810	if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
2811		pcount = tp->packets_out;
2812		tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2813		if (tp->packets_out > pcount)
2814			goto probe_sent;
2815		goto rearm_timer;
2816	}
2817	skb = skb_rb_last(&sk->tcp_rtx_queue);
2818	if (unlikely(!skb)) {
2819		WARN_ONCE(tp->packets_out,
2820			  "invalid inflight: %u state %u cwnd %u mss %d\n",
2821			  tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
2822		inet_csk(sk)->icsk_pending = 0;
2823		return;
2824	}
2825
2826	if (skb_still_in_host_queue(sk, skb))
2827		goto rearm_timer;
2828
2829	pcount = tcp_skb_pcount(skb);
2830	if (WARN_ON(!pcount))
2831		goto rearm_timer;
2832
2833	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
2834		if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
2835					  (pcount - 1) * mss, mss,
2836					  GFP_ATOMIC)))
2837			goto rearm_timer;
2838		skb = skb_rb_next(skb);
2839	}
2840
2841	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
2842		goto rearm_timer;
2843
2844	if (__tcp_retransmit_skb(sk, skb, 1))
2845		goto rearm_timer;
2846
2847	tp->tlp_retrans = 1;
2848
2849probe_sent:
2850	/* Record snd_nxt for loss detection. */
2851	tp->tlp_high_seq = tp->snd_nxt;
2852
2853	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
2854	/* Reset s.t. tcp_rearm_rto will restart timer from now */
2855	inet_csk(sk)->icsk_pending = 0;
2856rearm_timer:
2857	tcp_rearm_rto(sk);
2858}
2859
2860/* Push out any pending frames which were held back due to
2861 * TCP_CORK or attempt at coalescing tiny packets.
2862 * The socket must be locked by the caller.
2863 */
2864void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
2865			       int nonagle)
2866{
2867	/* If we are closed, the bytes will have to remain here.
2868	 * In time closedown will finish, we empty the write queue and
2869	 * all will be happy.
2870	 */
2871	if (unlikely(sk->sk_state == TCP_CLOSE))
2872		return;
2873
2874	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
2875			   sk_gfp_mask(sk, GFP_ATOMIC)))
2876		tcp_check_probe_timer(sk);
2877}
2878
2879/* Send _single_ skb sitting at the send head. This function requires
2880 * true push pending frames to set up the probe timer etc.
2881 */
2882void tcp_push_one(struct sock *sk, unsigned int mss_now)
2883{
2884	struct sk_buff *skb = tcp_send_head(sk);
2885
2886	BUG_ON(!skb || skb->len < mss_now);
2887
2888	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2889}
2890
2891/* This function returns the amount that we can raise the
2892 * usable window based on the following constraints
2893 *
2894 * 1. The window can never be shrunk once it is offered (RFC 793)
2895 * 2. We limit memory per socket
2896 *
2897 * RFC 1122:
2898 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
2899 *  RECV.NEXT + RCV.WIN fixed until:
2900 *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
2901 *
2902 * i.e. don't raise the right edge of the window until you can raise
2903 * it at least MSS bytes.
2904 *
2905 * Unfortunately, the recommended algorithm breaks header prediction,
2906 * since header prediction assumes th->window stays fixed.
2907 *
2908 * Strictly speaking, keeping th->window fixed violates the receiver
2909 * side SWS prevention criteria. The problem is that under this rule
2910 * a stream of single byte packets will cause the right side of the
2911 * window to always advance by a single byte.
2912 *
2913 * Of course, if the sender implements sender side SWS prevention
2914 * then this will not be a problem.
2915 *
2916 * BSD seems to make the following compromise:
2917 *
2918 *	If the free space is less than the 1/4 of the maximum
2919 *	space available and the free space is less than 1/2 mss,
2920 *	then set the window to 0.
2921 *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
2922 *	Otherwise, just prevent the window from shrinking
2923 *	and from being larger than the largest representable value.
2924 *
2925 * This prevents incremental opening of the window in the regime
2926 * where TCP is limited by the speed of the reader side taking
2927 * data out of the TCP receive queue. It does nothing about
2928 * those cases where the window is constrained on the sender side
2929 * because the pipeline is full.
2930 *
2931 * BSD also seems to "accidentally" limit itself to windows that are a
2932 * multiple of MSS, at least until the free space gets quite small.
2933 * This would appear to be a side effect of the mbuf implementation.
2934 * Combining these two algorithms results in the observed behavior
2935 * of having a fixed window size at almost all times.
2936 *
2937 * Below we obtain similar behavior by forcing the offered window to
2938 * a multiple of the mss when it is feasible to do so.
2939 *
2940 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
2941 * Regular options like TIMESTAMP are taken into account.
2942 */
2943u32 __tcp_select_window(struct sock *sk)
2944{
2945	struct inet_connection_sock *icsk = inet_csk(sk);
2946	struct tcp_sock *tp = tcp_sk(sk);
2947	/* MSS for the peer's data.  Previous versions used mss_clamp
2948	 * here.  I don't know if the value based on our guesses
2949	 * of peer's MSS is better for the performance.  It's more correct
2950	 * but may be worse for the performance because of rcv_mss
2951	 * fluctuations.  --SAW  1998/11/1
2952	 */
2953	int mss = icsk->icsk_ack.rcv_mss;
2954	int free_space = tcp_space(sk);
2955	int allowed_space = tcp_full_space(sk);
2956	int full_space, window;
2957
2958	if (sk_is_mptcp(sk))
2959		mptcp_space(sk, &free_space, &allowed_space);
2960
2961	full_space = min_t(int, tp->window_clamp, allowed_space);
2962
2963	if (unlikely(mss > full_space)) {
2964		mss = full_space;
2965		if (mss <= 0)
2966			return 0;
2967	}
2968	if (free_space < (full_space >> 1)) {
2969		icsk->icsk_ack.quick = 0;
2970
2971		if (tcp_under_memory_pressure(sk))
2972			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
2973					       4U * tp->advmss);
2974
2975		/* free_space might become our new window, make sure we don't
2976		 * increase it due to wscale.
2977		 */
2978		free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
2979
2980		/* if free space is less than mss estimate, or is below 1/16th
2981		 * of the maximum allowed, try to move to zero-window, else
2982		 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
2983		 * new incoming data is dropped due to memory limits.
2984		 * With a large window, the mss test triggers way too late
2985		 * to announce a zero window in time, before the rmem limit kicks in.
2986		 */
2987		if (free_space < (allowed_space >> 4) || free_space < mss)
2988			return 0;
2989	}
2990
2991	if (free_space > tp->rcv_ssthresh)
2992		free_space = tp->rcv_ssthresh;
2993
2994	/* Don't do rounding if we are using window scaling, since the
2995	 * scaled window will not line up with the MSS boundary anyway.
2996	 */
2997	if (tp->rx_opt.rcv_wscale) {
2998		window = free_space;
2999
3000		/* Advertise enough space so that it won't get scaled away.
3001		 * Important case: prevent zero window announcement if
3002		 * 1<<rcv_wscale > mss.
3003		 */
3004		window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale));
3005	} else {
3006		window = tp->rcv_wnd;
3007		/* Get the largest window that is a nice multiple of mss.
3008		 * Window clamp already applied above.
3009		 * If our current window offering is within 1 mss of the
3010		 * free space we just keep it. This prevents the divide
3011		 * and multiply from happening most of the time.
3012		 * We also don't do any window rounding when the free space
3013		 * is too small.
3014		 */
3015		if (window <= free_space - mss || window > free_space)
3016			window = rounddown(free_space, mss);
3017		else if (mss == full_space &&
3018			 free_space > window + (full_space >> 1))
3019			window = free_space;
3020	}
3021
3022	return window;
3023}
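
/*
 * Editor's illustrative sketch (not part of the original file): the two
 * rounding strategies used above.  With window scaling the advertisement
 * is aligned up to the scale granularity so it cannot be scaled away to
 * zero; without scaling it is rounded down to a multiple of mss.
 */
static inline u32 example_round_window(u32 free_space, u32 mss, u8 wscale)
{
	if (wscale)
		return ALIGN(free_space, 1U << wscale);
	return rounddown(free_space, mss);
}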
3024
3025void tcp_skb_collapse_tstamp(struct sk_buff *skb,
3026			     const struct sk_buff *next_skb)
3027{
3028	if (unlikely(tcp_has_tx_tstamp(next_skb))) {
3029		const struct skb_shared_info *next_shinfo =
3030			skb_shinfo(next_skb);
3031		struct skb_shared_info *shinfo = skb_shinfo(skb);
3032
3033		shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
3034		shinfo->tskey = next_shinfo->tskey;
3035		TCP_SKB_CB(skb)->txstamp_ack |=
3036			TCP_SKB_CB(next_skb)->txstamp_ack;
3037	}
3038}
3039
3040/* Collapses two adjacent SKB's during retransmission. */
3041static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
3042{
3043	struct tcp_sock *tp = tcp_sk(sk);
3044	struct sk_buff *next_skb = skb_rb_next(skb);
3045	int next_skb_size;
3046
3047	next_skb_size = next_skb->len;
3048
3049	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
3050
3051	if (next_skb_size) {
3052		if (next_skb_size <= skb_availroom(skb))
3053			skb_copy_bits(next_skb, 0, skb_put(skb, next_skb_size),
3054				      next_skb_size);
3055		else if (!tcp_skb_shift(skb, next_skb, 1, next_skb_size))
3056			return false;
3057	}
3058	tcp_highest_sack_replace(sk, next_skb, skb);
3059
3060	/* Update sequence range on original skb. */
3061	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
3062
3063	/* Merge over control information. This moves PSH/FIN etc. over */
3064	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
3065
3066	/* All done, get rid of second SKB and account for it so
3067	 * packet counting does not break.
3068	 */
3069	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
3070	TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
3071
3072	/* changed transmit queue under us so clear hints */
3073	tcp_clear_retrans_hints_partial(tp);
3074	if (next_skb == tp->retransmit_skb_hint)
3075		tp->retransmit_skb_hint = skb;
3076
3077	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
3078
3079	tcp_skb_collapse_tstamp(skb, next_skb);
3080
3081	tcp_rtx_queue_unlink_and_free(next_skb, sk);
3082	return true;
3083}
3084
3085/* Check if coalescing SKBs is legal. */
3086static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
3087{
3088	if (tcp_skb_pcount(skb) > 1)
3089		return false;
3090	if (skb_cloned(skb))
3091		return false;
3092	/* Some heuristics for collapsing over SACK'd could be invented */
3093	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
3094		return false;
3095
3096	return true;
3097}
3098
3099/* Collapse packets in the retransmit queue to create fewer
3100 * packets on the wire. This is only done on retransmission.
3101 */
3102static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
3103				     int space)
3104{
3105	struct tcp_sock *tp = tcp_sk(sk);
3106	struct sk_buff *skb = to, *tmp;
3107	bool first = true;
3108
3109	if (!sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)
3110		return;
3111	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
3112		return;
3113
3114	skb_rbtree_walk_from_safe(skb, tmp) {
3115		if (!tcp_can_collapse(sk, skb))
3116			break;
3117
3118		if (!tcp_skb_can_collapse(to, skb))
3119			break;
3120
3121		space -= skb->len;
3122
3123		if (first) {
3124			first = false;
3125			continue;
3126		}
3127
3128		if (space < 0)
3129			break;
3130
3131		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
3132			break;
3133
3134		if (!tcp_collapse_retrans(sk, to))
3135			break;
3136	}
3137}
3138
3139/* This retransmits one SKB.  Policy decisions and retransmit queue
3140 * state updates are done by the caller.  Returns non-zero if an
3141 * error occurred which prevented the send.
3142 */
3143int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
3144{
3145	struct inet_connection_sock *icsk = inet_csk(sk);
3146	struct tcp_sock *tp = tcp_sk(sk);
3147	unsigned int cur_mss;
3148	int diff, len, err;
3149
3150
3151	/* Inconclusive MTU probe */
3152	if (icsk->icsk_mtup.probe_size)
3153		icsk->icsk_mtup.probe_size = 0;
3154
3155	if (skb_still_in_host_queue(sk, skb))
3156		return -EBUSY;
3157
3158	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
3159		if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
3160			WARN_ON_ONCE(1);
3161			return -EINVAL;
3162		}
3163		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
3164			return -ENOMEM;
3165	}
3166
3167	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
3168		return -EHOSTUNREACH; /* Routing failure or similar. */
3169
3170	cur_mss = tcp_current_mss(sk);
3171
3172	/* If the receiver has shrunk its window, and skb is out of
3173	 * the new window, do not retransmit it. The exception is the
3174	 * case when the window is shrunk to zero. In this case
3175	 * our retransmit serves as a zero window probe.
3176	 */
3177	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
3178	    TCP_SKB_CB(skb)->seq != tp->snd_una)
3179		return -EAGAIN;
3180
3181	len = cur_mss * segs;
3182	if (skb->len > len) {
3183		if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
3184				 cur_mss, GFP_ATOMIC))
3185			return -ENOMEM; /* We'll try again later. */
3186	} else {
3187		if (skb_unclone(skb, GFP_ATOMIC))
3188			return -ENOMEM;
3189
3190		diff = tcp_skb_pcount(skb);
3191		tcp_set_skb_tso_segs(skb, cur_mss);
3192		diff -= tcp_skb_pcount(skb);
3193		if (diff)
3194			tcp_adjust_pcount(sk, skb, diff);
3195		if (skb->len < cur_mss)
3196			tcp_retrans_try_collapse(sk, skb, cur_mss);
3197	}
3198
3199	/* RFC3168, section 6.1.1.1. ECN fallback */
3200	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
3201		tcp_ecn_clear_syn(sk, skb);
3202
3203	/* Update global and local TCP statistics. */
3204	segs = tcp_skb_pcount(skb);
3205	TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
3206	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
3207		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
3208	tp->total_retrans += segs;
3209	tp->bytes_retrans += skb->len;
3210
3211	/* make sure skb->data is aligned on arches that require it
3212	 * and check if ack-trimming & collapsing extended the headroom
3213	 * beyond what csum_start can cover.
3214	 */
3215	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
3216		     skb_headroom(skb) >= 0xFFFF)) {
3217		struct sk_buff *nskb;
3218
3219		tcp_skb_tsorted_save(skb) {
3220			nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
3221			if (nskb) {
3222				nskb->dev = NULL;
3223				err = tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC);
3224			} else {
3225				err = -ENOBUFS;
3226			}
3227		} tcp_skb_tsorted_restore(skb);
3228
3229		if (!err) {
3230			tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns);
3231			tcp_rate_skb_sent(sk, skb);
3232		}
3233	} else {
3234		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3235	}
3236
3237	/* To avoid taking spuriously low RTT samples based on a timestamp
3238	 * for a transmit that never happened, always mark EVER_RETRANS
3239	 */
3240	TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
3241
3242	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG))
3243		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
3244				  TCP_SKB_CB(skb)->seq, segs, err);
3245
3246	if (likely(!err)) {
3247		trace_tcp_retransmit_skb(sk, skb);
3248	} else if (err != -EBUSY) {
3249		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
3250	}
3251	return err;
3252}
3253
3254int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
3255{
3256	struct tcp_sock *tp = tcp_sk(sk);
3257	int err = __tcp_retransmit_skb(sk, skb, segs);
3258
3259	if (err == 0) {
3260#if FASTRETRANS_DEBUG > 0
3261		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
3262			net_dbg_ratelimited("retrans_out leaked\n");
3263		}
3264#endif
3265		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
3266		tp->retrans_out += tcp_skb_pcount(skb);
3267	}
3268
3269	/* Save stamp of the first (attempted) retransmit. */
3270	if (!tp->retrans_stamp)
3271		tp->retrans_stamp = tcp_skb_timestamp(skb);
3272
3273	if (tp->undo_retrans < 0)
3274		tp->undo_retrans = 0;
3275	tp->undo_retrans += tcp_skb_pcount(skb);
3276	return err;
3277}
3278
3279/* This gets called after a retransmit timeout, and the initially
3280 * retransmitted data is acknowledged.  It tries to continue
3281 * resending the rest of the retransmit queue, until either
3282 * we've sent it all or the congestion window limit is reached.
3283 */
3284void tcp_xmit_retransmit_queue(struct sock *sk)
3285{
3286	const struct inet_connection_sock *icsk = inet_csk(sk);
3287	struct sk_buff *skb, *rtx_head, *hole = NULL;
3288	struct tcp_sock *tp = tcp_sk(sk);
3289	bool rearm_timer = false;
3290	u32 max_segs;
3291	int mib_idx;
3292
3293	if (!tp->packets_out)
3294		return;
3295
3296	rtx_head = tcp_rtx_queue_head(sk);
3297	skb = tp->retransmit_skb_hint ?: rtx_head;
3298	max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
3299	skb_rbtree_walk_from(skb) {
3300		__u8 sacked;
3301		int segs;
3302
3303		if (tcp_pacing_check(sk))
3304			break;
3305
3306		/* we could do better than to assign each time */
3307		if (!hole)
3308			tp->retransmit_skb_hint = skb;
3309
3310		segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
3311		if (segs <= 0)
3312			break;
3313		sacked = TCP_SKB_CB(skb)->sacked;
3314		/* In case tcp_shift_skb_data() has aggregated large skbs,
3315		 * we need to make sure we are not sending overly big TSO packets
3316		 */
3317		segs = min_t(int, segs, max_segs);
3318
3319		if (tp->retrans_out >= tp->lost_out) {
3320			break;
3321		} else if (!(sacked & TCPCB_LOST)) {
3322			if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
3323				hole = skb;
3324			continue;
3325
3326		} else {
3327			if (icsk->icsk_ca_state != TCP_CA_Loss)
3328				mib_idx = LINUX_MIB_TCPFASTRETRANS;
3329			else
3330				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
3331		}
3332
3333		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
3334			continue;
3335
3336		if (tcp_small_queue_check(sk, skb, 1))
3337			break;
3338
3339		if (tcp_retransmit_skb(sk, skb, segs))
3340			break;
3341
3342		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
3343
3344		if (tcp_in_cwnd_reduction(sk))
3345			tp->prr_out += tcp_skb_pcount(skb);
3346
3347		if (skb == rtx_head &&
3348		    icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
3349			rearm_timer = true;
3350
3351	}
3352	if (rearm_timer)
3353		tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
3354				     inet_csk(sk)->icsk_rto,
3355				     TCP_RTO_MAX);
3356}
3357
3358/* We allow memory limits to be exceeded for FIN packets to expedite
3359 * connection tear down and (memory) recovery.
3360 * Otherwise tcp_send_fin() could be tempted to either delay FIN
3361 * or even be forced to close flow without any FIN.
3362 * In general, we want to allow one skb per socket to avoid hangs
3363 * with edge trigger epoll()
3364 */
3365void sk_forced_mem_schedule(struct sock *sk, int size)
3366{
3367	int amt;
3368
3369	if (size <= sk->sk_forward_alloc)
3370		return;
3371	amt = sk_mem_pages(size);
3372	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
3373	sk_memory_allocated_add(sk, amt);
3374
3375	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3376		mem_cgroup_charge_skmem(sk->sk_memcg, amt);
3377}
3378
3379/* Send a FIN. The caller locks the socket for us.
3380 * We should try to send a FIN packet really hard, but eventually give up.
3381 */
3382void tcp_send_fin(struct sock *sk)
3383{
3384	struct sk_buff *skb, *tskb, *tail = tcp_write_queue_tail(sk);
3385	struct tcp_sock *tp = tcp_sk(sk);
3386
3387	/* Optimization: tack the FIN onto the tail skb if there is one skb in
3388	 * the write queue and it was not yet sent, or we are under memory
3389	 * pressure. Note: in the latter case, the FIN packet will be sent after
3390	 * a timeout, as the TCP stack thinks it has already been transmitted.
3391	 */
3392	tskb = tail;
3393	if (!tskb && tcp_under_memory_pressure(sk))
3394		tskb = skb_rb_last(&sk->tcp_rtx_queue);
3395
3396	if (tskb) {
3397		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
3398		TCP_SKB_CB(tskb)->end_seq++;
3399		tp->write_seq++;
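		/* The FIN consumes one sequence number, hence the matching
		 * end_seq and write_seq increments above.
		 */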
3400		if (!tail) {
3401			/* This means tskb was already sent.
3402			 * Pretend we included the FIN on previous transmit.
3403			 * We need to set tp->snd_nxt to the value it would have
3404			 * if FIN had been sent. This is because retransmit path
3405			 * does not change tp->snd_nxt.
3406			 */
3407			WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1);
3408			return;
3409		}
3410	} else {
3411		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
3412		if (unlikely(!skb))
3413			return;
3414
3415		INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
3416		skb_reserve(skb, MAX_TCP_HEADER);
3417		sk_forced_mem_schedule(sk, skb->truesize);
3418		/* FIN eats a sequence byte; write_seq is advanced by tcp_queue_skb(). */
3419		tcp_init_nondata_skb(skb, tp->write_seq,
3420				     TCPHDR_ACK | TCPHDR_FIN);
3421		tcp_queue_skb(sk, skb);
3422	}
3423	__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
3424}
3425
3426/* We get here when a process closes a file descriptor (either due to
3427 * an explicit close() or as a byproduct of exit()'ing) and there
3428 * was unread data in the receive queue.  This behavior is recommended
3429 * by RFC 2525, section 2.17.  -DaveM
3430 */
3431void tcp_send_active_reset(struct sock *sk, gfp_t priority)
3432{
3433	struct sk_buff *skb;
3434
3435	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
3436
3437	/* NOTE: No TCP options attached and we never retransmit this. */
3438	skb = alloc_skb(MAX_TCP_HEADER, priority);
3439	if (!skb) {
3440		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3441		return;
3442	}
3443
3444	/* Reserve space for headers and prepare control bits. */
3445	skb_reserve(skb, MAX_TCP_HEADER);
3446	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
3447			     TCPHDR_ACK | TCPHDR_RST);
3448	tcp_mstamp_refresh(tcp_sk(sk));
3449	/* Send it off. */
3450	if (tcp_transmit_skb(sk, skb, 0, priority))
3451		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3452
3453	/* The skb argument of trace_tcp_send_reset() keeps the skb that caused
3454	 * the RST; the skb here is different from that troublesome skb, so use NULL.
3455	 */
3456	trace_tcp_send_reset(sk, NULL);
3457}
3458
3459/* Send a crossed SYN-ACK during socket establishment.
3460 * WARNING: This routine must only be called when we have already sent
3461 * a SYN packet that crossed the incoming SYN that caused this routine
3462 * to get called. If this assumption fails then the initial rcv_wnd
3463 * and rcv_wscale values will not be correct.
3464 */
3465int tcp_send_synack(struct sock *sk)
3466{
3467	struct sk_buff *skb;
3468
3469	skb = tcp_rtx_queue_head(sk);
3470	if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
3471		pr_err("%s: wrong queue state\n", __func__);
3472		return -EFAULT;
3473	}
3474	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
3475		if (skb_cloned(skb)) {
3476			struct sk_buff *nskb;
3477
3478			tcp_skb_tsorted_save(skb) {
3479				nskb = skb_copy(skb, GFP_ATOMIC);
3480			} tcp_skb_tsorted_restore(skb);
3481			if (!nskb)
3482				return -ENOMEM;
3483			INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
3484			tcp_highest_sack_replace(sk, skb, nskb);
3485			tcp_rtx_queue_unlink_and_free(skb, sk);
3486			__skb_header_release(nskb);
3487			tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
3488			sk_wmem_queued_add(sk, nskb->truesize);
3489			sk_mem_charge(sk, nskb->truesize);
3490			skb = nskb;
3491		}
3492
3493		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
3494		tcp_ecn_send_synack(sk, skb);
3495	}
3496	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3497}
3498
3499/**
3500 * tcp_make_synack - Allocate one skb and build a SYNACK packet.
3501 * @sk: listener socket
3502 * @dst: dst entry attached to the SYNACK. It is consumed and caller
3503 *       should not use it again.
3504 * @req: request_sock pointer
3505 * @foc: cookie for tcp fast open
3506 * @synack_type: Type of synack to prepare
3507 * @syn_skb: SYN packet just received.  It could be NULL for rtx case.
3508 */
3509struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
3510				struct request_sock *req,
3511				struct tcp_fastopen_cookie *foc,
3512				enum tcp_synack_type synack_type,
3513				struct sk_buff *syn_skb)
3514{
3515	struct inet_request_sock *ireq = inet_rsk(req);
3516	const struct tcp_sock *tp = tcp_sk(sk);
3517	struct tcp_md5sig_key *md5 = NULL;
3518	struct tcp_out_options opts;
3519	struct sk_buff *skb;
3520	int tcp_header_size;
3521	struct tcphdr *th;
3522	int mss;
3523	u64 now;
3524
3525	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
3526	if (unlikely(!skb)) {
3527		dst_release(dst);
3528		return NULL;
3529	}
3530	/* Reserve space for headers. */
3531	skb_reserve(skb, MAX_TCP_HEADER);
3532
3533	switch (synack_type) {
3534	case TCP_SYNACK_NORMAL:
3535		skb_set_owner_w(skb, req_to_sk(req));
3536		break;
3537	case TCP_SYNACK_COOKIE:
3538		/* Under synflood, we do not attach skb to a socket,
3539		 * to avoid false sharing.
3540		 */
3541		break;
3542	case TCP_SYNACK_FASTOPEN:
3543		/* sk is a const pointer, because we want to express that multiple
3544		 * cpus might call us concurrently.
3545		 * sk->sk_wmem_alloc is an atomic, so we can promote it to rw.
3546		 */
3547		skb_set_owner_w(skb, (struct sock *)sk);
3548		break;
3549	}
3550	skb_dst_set(skb, dst);
3551
3552	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3553
3554	memset(&opts, 0, sizeof(opts));
3555	now = tcp_clock_ns();
3556#ifdef CONFIG_SYN_COOKIES
3557	if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok))
3558		skb->skb_mstamp_ns = cookie_init_timestamp(req, now);
3559	else
3560#endif
3561	{
3562		skb->skb_mstamp_ns = now;
3563		if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */
3564			tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
3565	}
3566
3567#ifdef CONFIG_TCP_MD5SIG
3568	rcu_read_lock();
3569	md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
3570#endif
3571	skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
3572	/* bpf program will be interested in the tcp_flags */
3573	TCP_SKB_CB(skb)->tcp_flags = TCPHDR_SYN | TCPHDR_ACK;
3574	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
3575					     foc, synack_type,
3576					     syn_skb) + sizeof(*th);
3577
3578	skb_push(skb, tcp_header_size);
3579	skb_reset_transport_header(skb);
3580
3581	th = (struct tcphdr *)skb->data;
3582	memset(th, 0, sizeof(struct tcphdr));
3583	th->syn = 1;
3584	th->ack = 1;
3585	tcp_ecn_make_synack(req, th);
3586	th->source = htons(ireq->ir_num);
3587	th->dest = ireq->ir_rmt_port;
3588	skb->mark = ireq->ir_mark;
3589	skb->ip_summed = CHECKSUM_PARTIAL;
3590	th->seq = htonl(tcp_rsk(req)->snt_isn);
3591	/* XXX data is queued and acked as is. No buffer/window check */
3592	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
3593
3594	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
3595	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
3596	tcp_options_write((__be32 *)(th + 1), NULL, &opts);
3597	th->doff = (tcp_header_size >> 2);
3598	__TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
3599
3600#ifdef CONFIG_TCP_MD5SIG
3601	/* Okay, we have all we need - do the md5 hash if needed */
3602	if (md5)
3603		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
3604					       md5, req_to_sk(req), skb);
3605	rcu_read_unlock();
3606#endif
3607
3608	bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb,
3609				synack_type, &opts);
3610
3611	skb->skb_mstamp_ns = now;
3612	tcp_add_tx_delay(skb, tp);
3613
3614	return skb;
3615}
3616EXPORT_SYMBOL(tcp_make_synack);
3617
3618static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
3619{
3620	struct inet_connection_sock *icsk = inet_csk(sk);
3621	const struct tcp_congestion_ops *ca;
3622	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
3623
3624	if (ca_key == TCP_CA_UNSPEC)
3625		return;
3626
3627	rcu_read_lock();
3628	ca = tcp_ca_find_key(ca_key);
3629	if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
3630		bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner);
3631		icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
3632		icsk->icsk_ca_ops = ca;
3633	}
3634	rcu_read_unlock();
3635}
3636
3637/* Do all connect socket setups that can be done AF independent. */
3638static void tcp_connect_init(struct sock *sk)
3639{
3640	const struct dst_entry *dst = __sk_dst_get(sk);
3641	struct tcp_sock *tp = tcp_sk(sk);
3642	__u8 rcv_wscale;
3643	u32 rcv_wnd;
3644
3645	/* We'll fix this up when we get a response from the other end.
3646	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
3647	 */
3648	tp->tcp_header_len = sizeof(struct tcphdr);
3649	if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
3650		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
3651
3652#ifdef CONFIG_TCP_MD5SIG
3653	if (tp->af_specific->md5_lookup(sk, sk))
3654		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
3655#endif
3656
3657	/* If the user gave us TCP_MAXSEG, record it to clamp the MSS */
3658	if (tp->rx_opt.user_mss)
3659		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
3660	tp->max_window = 0;
3661	tcp_mtup_init(sk);
3662	tcp_sync_mss(sk, dst_mtu(dst));
3663
3664	tcp_ca_dst_init(sk, dst);
3665
3666	if (!tp->window_clamp)
3667		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
3668	tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3669
3670	tcp_initialize_rcv_mss(sk);
3671
3672	/* limit the window selection if the user enforces a smaller rx buffer */
3673	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
3674	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
3675		tp->window_clamp = tcp_full_space(sk);
3676
3677	rcv_wnd = tcp_rwnd_init_bpf(sk);
3678	if (rcv_wnd == 0)
3679		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
3680
3681	tcp_select_initial_window(sk, tcp_full_space(sk),
3682				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
3683				  &tp->rcv_wnd,
3684				  &tp->window_clamp,
3685				  sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
3686				  &rcv_wscale,
3687				  rcv_wnd);
3688
3689	tp->rx_opt.rcv_wscale = rcv_wscale;
3690	tp->rcv_ssthresh = tp->rcv_wnd;
3691
3692	sk->sk_err = 0;
3693	sock_reset_flag(sk, SOCK_DONE);
3694	tp->snd_wnd = 0;
3695	tcp_init_wl(tp, 0);
3696	tcp_write_queue_purge(sk);
3697	tp->snd_una = tp->write_seq;
3698	tp->snd_sml = tp->write_seq;
3699	tp->snd_up = tp->write_seq;
3700	WRITE_ONCE(tp->snd_nxt, tp->write_seq);
3701
3702	if (likely(!tp->repair))
3703		tp->rcv_nxt = 0;
3704	else
3705		tp->rcv_tstamp = tcp_jiffies32;
3706	tp->rcv_wup = tp->rcv_nxt;
3707	WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
3708
3709	inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
3710	inet_csk(sk)->icsk_retransmits = 0;
3711	tcp_clear_retrans(tp);
3712}
3713
3714static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
3715{
3716	struct tcp_sock *tp = tcp_sk(sk);
3717	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
3718
3719	tcb->end_seq += skb->len;
3720	__skb_header_release(skb);
3721	sk_wmem_queued_add(sk, skb->truesize);
3722	sk_mem_charge(sk, skb->truesize);
3723	WRITE_ONCE(tp->write_seq, tcb->end_seq);
3724	tp->packets_out += tcp_skb_pcount(skb);
3725}
3726
3727/* Build and send a SYN with data and (cached) Fast Open cookie. However,
3728 * queue a data-only packet after the regular SYN, such that regular SYNs
3729 * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
3730 * only the SYN sequence, the data are retransmitted in the first ACK.
3731 * If the cookie is not cached or another error occurs, fall back to sending a
3732 * regular SYN with Fast Open cookie request option.
3733 */
3734static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3735{
3736	struct tcp_sock *tp = tcp_sk(sk);
3737	struct tcp_fastopen_request *fo = tp->fastopen_req;
3738	int space, err = 0;
3739	struct sk_buff *syn_data;
3740
3741	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
3742	if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
3743		goto fallback;
3744
3745	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
3746	 * user-MSS. Reserve maximum option space for middleboxes that add
3747	 * private TCP options. The cost is reduced data space in SYN :(
3748	 */
3749	tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
3750
3751	space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
3752		MAX_TCP_OPTION_SPACE;
3753
3754	space = min_t(size_t, space, fo->size);
3755
3756	/* limit to order-0 allocations */
3757	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
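	/* space is now the SYN payload budget: derived from the path MSS,
	 * less the full MAX_TCP_OPTION_SPACE (40 bytes) of option space,
	 * capped by the amount of pending user data and by an order-0 skb.
	 */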
3758
3759	syn_data = sk_stream_alloc_skb(sk, space, sk->sk_allocation, false);
3760	if (!syn_data)
3761		goto fallback;
3762	syn_data->ip_summed = CHECKSUM_PARTIAL;
3763	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
3764	if (space) {
3765		int copied = copy_from_iter(skb_put(syn_data, space), space,
3766					    &fo->data->msg_iter);
3767		if (unlikely(!copied)) {
3768			tcp_skb_tsorted_anchor_cleanup(syn_data);
3769			kfree_skb(syn_data);
3770			goto fallback;
3771		}
3772		if (copied != space) {
3773			skb_trim(syn_data, copied);
3774			space = copied;
3775		}
3776		skb_zcopy_set(syn_data, fo->uarg, NULL);
3777	}
3778	/* No more data pending in inet_wait_for_connect() */
3779	if (space == fo->size)
3780		fo->data = NULL;
3781	fo->copied = space;
3782
3783	tcp_connect_queue_skb(sk, syn_data);
3784	if (syn_data->len)
3785		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
3786
3787	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
3788
3789	syn->skb_mstamp_ns = syn_data->skb_mstamp_ns;
3790
3791	/* Now the full SYN+DATA has been cloned and sent (or not);
3792	 * remove the SYN from the original skb (syn_data)
3793	 * that we keep in the write queue in case of a retransmit, as we
3794	 * also have the SYN packet (with no data) in the same queue.
3795	 */
3796	TCP_SKB_CB(syn_data)->seq++;
3797	TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
3798	if (!err) {
3799		tp->syn_data = (fo->copied > 0);
3800		tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data);
3801		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
3802		goto done;
3803	}
3804
3805	/* data was not sent, put it in write_queue */
3806	__skb_queue_tail(&sk->sk_write_queue, syn_data);
3807	tp->packets_out -= tcp_skb_pcount(syn_data);
3808
3809fallback:
3810	/* Send a regular SYN with Fast Open cookie request option */
3811	if (fo->cookie.len > 0)
3812		fo->cookie.len = 0;
3813	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
3814	if (err)
3815		tp->syn_fastopen = 0;
3816done:
3817	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
3818	return err;
3819}
3820
3821/* Build a SYN and send it off. */
3822int tcp_connect(struct sock *sk)
3823{
3824	struct tcp_sock *tp = tcp_sk(sk);
3825	struct sk_buff *buff;
3826	int err;
3827
3828	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL);
3829
3830	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
3831		return -EHOSTUNREACH; /* Routing failure or similar. */
3832
3833	tcp_connect_init(sk);
3834
3835	if (unlikely(tp->repair)) {
3836		tcp_finish_connect(sk, NULL);
3837		return 0;
3838	}
3839
3840	buff = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
3841	if (unlikely(!buff))
3842		return -ENOBUFS;
3843
3844	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
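	/* The SYN consumes one sequence number: write_seq advances here and
	 * tcp_init_nondata_skb() bumps end_seq to match.
	 */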
3845	tcp_mstamp_refresh(tp);
3846	tp->retrans_stamp = tcp_time_stamp(tp);
3847	tcp_connect_queue_skb(sk, buff);
3848	tcp_ecn_send_syn(sk, buff);
3849	tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
3850
3851	/* Send off SYN; include data in Fast Open. */
3852	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
3853	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
3854	if (err == -ECONNREFUSED)
3855		return err;
3856
3857	/* We change tp->snd_nxt after the tcp_transmit_skb() call
3858	 * in order to make this packet get counted in tcpOutSegs.
3859	 */
3860	WRITE_ONCE(tp->snd_nxt, tp->write_seq);
3861	tp->pushed_seq = tp->write_seq;
3862	buff = tcp_send_head(sk);
3863	if (unlikely(buff)) {
3864		WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);
3865		tp->pushed_seq	= TCP_SKB_CB(buff)->seq;
3866	}
3867	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
3868
3869	/* Timer for repeating the SYN until an answer. */
3870	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
3871				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
3872	return 0;
3873}
3874EXPORT_SYMBOL(tcp_connect);
3875
3876/* Send out a delayed ack, the caller does the policy checking
3877 * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
3878 * for details.
3879 */
3880void tcp_send_delayed_ack(struct sock *sk)
3881{
3882	struct inet_connection_sock *icsk = inet_csk(sk);
3883	int ato = icsk->icsk_ack.ato;
3884	unsigned long timeout;
3885
3886	if (ato > TCP_DELACK_MIN) {
3887		const struct tcp_sock *tp = tcp_sk(sk);
3888		int max_ato = HZ / 2;
3889
3890		if (inet_csk_in_pingpong_mode(sk) ||
3891		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
3892			max_ato = TCP_DELACK_MAX;
3893
3894		/* Slow path, intersegment interval is "high". */
3895
3896		/* If some rtt estimate is known, use it to bound delayed ack.
3897		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
3898		 * directly.
3899		 */
3900		if (tp->srtt_us) {
3901			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
3902					TCP_DELACK_MIN);
3903
3904			if (rtt < max_ato)
3905				max_ato = rtt;
3906		}
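		/* e.g. tp->srtt_us == 8 * 40000 (a 40 ms smoothed RTT,
		 * stored left-shifted by 3) bounds max_ato to roughly
		 * 40 ms worth of jiffies.
		 */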
3907
3908		ato = min(ato, max_ato);
3909	}
3910
3911	ato = min_t(u32, ato, inet_csk(sk)->icsk_delack_max);
3912
3913	/* Stay within the limit we were given */
3914	timeout = jiffies + ato;
3915
3916	/* Use the new timeout only if there wasn't an older one earlier. */
3917	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
3918		/* If delack timer is about to expire, send ACK now. */
3919		if (time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
3920			tcp_send_ack(sk);
3921			return;
3922		}
3923
3924		if (!time_before(timeout, icsk->icsk_ack.timeout))
3925			timeout = icsk->icsk_ack.timeout;
3926	}
3927	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
3928	icsk->icsk_ack.timeout = timeout;
3929	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
3930}
3931
3932/* This routine sends an ack and also updates the window. */
3933void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
3934{
3935	struct sk_buff *buff;
3936
3937	/* If we have been reset, we may not send again. */
3938	if (sk->sk_state == TCP_CLOSE)
3939		return;
3940
3941	/* We are not putting this on the write queue, so
3942	 * tcp_transmit_skb() will set the ownership to this
3943	 * sock.
3944	 */
3945	buff = alloc_skb(MAX_TCP_HEADER,
3946			 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
3947	if (unlikely(!buff)) {
3948		struct inet_connection_sock *icsk = inet_csk(sk);
3949		unsigned long delay;
3950
3951		delay = TCP_DELACK_MAX << icsk->icsk_ack.retry;
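		/* e.g. icsk_ack.retry == 2 delays this fallback ACK by
		 * TCP_DELACK_MAX << 2; the delay doubles after each failed
		 * allocation until it would reach TCP_RTO_MAX.
		 */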
3952		if (delay < TCP_RTO_MAX)
3953			icsk->icsk_ack.retry++;
3954		inet_csk_schedule_ack(sk);
3955		icsk->icsk_ack.ato = TCP_ATO_MIN;
3956		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, delay, TCP_RTO_MAX);
3957		return;
3958	}
3959
3960	/* Reserve space for headers and prepare control bits. */
3961	skb_reserve(buff, MAX_TCP_HEADER);
3962	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
3963
3964	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
3965	 * too much.
3966	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
3967	 */
3968	skb_set_tcp_pure_ack(buff);
3969
3970	/* Send it off, this clears delayed acks for us. */
3971	__tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
3972}
3973EXPORT_SYMBOL_GPL(__tcp_send_ack);
3974
3975void tcp_send_ack(struct sock *sk)
3976{
3977	__tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
3978}
3979
3980/* This routine sends a packet with an out-of-date sequence
3981 * number. It assumes the other end will try to ack it.
3982 *
3983 * Question: what should we do in urgent mode?
3984 * 4.4BSD forces sending a single byte of data. We cannot send
3985 * out-of-window data, because we have SND.NXT==SND.MAX...
3986 *
3987 * Current solution: send TWO zero-length segments in urgent mode:
3988 * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, and another,
3989 * out-of-date with SND.UNA-1, to probe the window.
3990 */
3991static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
3992{
3993	struct tcp_sock *tp = tcp_sk(sk);
3994	struct sk_buff *skb;
3995
3996	/* We don't queue it, tcp_transmit_skb() sets ownership. */
3997	skb = alloc_skb(MAX_TCP_HEADER,
3998			sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
3999	if (!skb)
4000		return -1;
4001
4002	/* Reserve space for headers and set control bits. */
4003	skb_reserve(skb, MAX_TCP_HEADER);
4004	/* Use a previous sequence.  This should cause the other
4005	 * end to send an ack.  Don't queue or clone SKB, just
4006	 * send it.
4007	 */
4008	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
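	/* SND.UNA - 1 is already acknowledged, so the segment is out of
	 * window and merely elicits an ACK; in urgent mode SND.UNA itself
	 * is used so the urgent pointer can be delivered.
	 */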
4009	NET_INC_STATS(sock_net(sk), mib);
4010	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
4011}
4012
4013/* Called from setsockopt( ... TCP_REPAIR ) */
4014void tcp_send_window_probe(struct sock *sk)
4015{
4016	if (sk->sk_state == TCP_ESTABLISHED) {
4017		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
4018		tcp_mstamp_refresh(tcp_sk(sk));
4019		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
4020	}
4021}
4022
4023/* Initiate keepalive or window probe from timer. */
4024int tcp_write_wakeup(struct sock *sk, int mib)
4025{
4026	struct tcp_sock *tp = tcp_sk(sk);
4027	struct sk_buff *skb;
4028
4029	if (sk->sk_state == TCP_CLOSE)
4030		return -1;
4031
4032	skb = tcp_send_head(sk);
4033	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
4034		int err;
4035		unsigned int mss = tcp_current_mss(sk);
4036		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
4037
4038		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
4039			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
4040
4041		/* We are probing the opening of a window
4042		 * but the window size is != 0;
4043		 * this must have been a result of SWS avoidance (sender).
4044		 */
4045		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
4046		    skb->len > mss) {
4047			seg_size = min(seg_size, mss);
4048			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
4049			if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
4050					 skb, seg_size, mss, GFP_ATOMIC))
4051				return -1;
4052		} else if (!tcp_skb_pcount(skb))
4053			tcp_set_skb_tso_segs(skb, mss);
4054
4055		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
4056		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
4057		if (!err)
4058			tcp_event_new_data_sent(sk, skb);
4059		return err;
4060	} else {
4061		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
4062			tcp_xmit_probe_skb(sk, 1, mib);
4063		return tcp_xmit_probe_skb(sk, 0, mib);
4064	}
4065}
4066
4067/* A window probe timeout has occurred.  If the window is not closed,
4068 * send a partial packet, else a zero-window probe.
4069 */
4070void tcp_send_probe0(struct sock *sk)
4071{
4072	struct inet_connection_sock *icsk = inet_csk(sk);
4073	struct tcp_sock *tp = tcp_sk(sk);
4074	struct net *net = sock_net(sk);
4075	unsigned long timeout;
4076	int err;
4077
4078	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
4079
4080	if (tp->packets_out || tcp_write_queue_empty(sk)) {
4081		/* Cancel probe timer, if it is not required. */
4082		icsk->icsk_probes_out = 0;
4083		icsk->icsk_backoff = 0;
4084		icsk->icsk_probes_tstamp = 0;
4085		return;
4086	}
4087
4088	icsk->icsk_probes_out++;
4089	if (err <= 0) {
4090		if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
4091			icsk->icsk_backoff++;
4092		timeout = tcp_probe0_when(sk, TCP_RTO_MAX);
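		/* tcp_probe0_when() backs off exponentially: roughly the
		 * base RTO shifted left by icsk_backoff, capped at
		 * TCP_RTO_MAX.
		 */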
4093	} else {
4094		 * If the packet was not sent due to local congestion,
4095		 * let senders fight for local resources conservatively.
4096		 */
4097		timeout = TCP_RESOURCE_PROBE_INTERVAL;
4098	}
4099
4100	timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout);
4101	tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX);
4102}
4103
4104int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
4105{
4106	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
4107	struct flowi fl;
4108	int res;
4109
4110	tcp_rsk(req)->txhash = net_tx_rndhash();
4111	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL,
4112				  NULL);
4113	if (!res) {
4114		__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
4115		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
4116		if (unlikely(tcp_passive_fastopen(sk)))
4117			tcp_sk(sk)->total_retrans++;
4118		trace_tcp_retransmit_synack(sk, req);
4119	}
4120	return res;
4121}
4122EXPORT_SYMBOL(tcp_rtx_synack);
v6.13.7
  37
  38#define pr_fmt(fmt) "TCP: " fmt
  39
  40#include <net/tcp.h>
  41#include <net/mptcp.h>
  42#include <net/proto_memory.h>
  43
  44#include <linux/compiler.h>
  45#include <linux/gfp.h>
  46#include <linux/module.h>
  47#include <linux/static_key.h>
  48#include <linux/skbuff_ref.h>
  49
  50#include <trace/events/tcp.h>
  51
  52/* Refresh clocks of a TCP socket,
  53 * ensuring monotonically increasing values.
  54 */
  55void tcp_mstamp_refresh(struct tcp_sock *tp)
  56{
  57	u64 val = tcp_clock_ns();
  58
  59	tp->tcp_clock_cache = val;
  60	tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC);
  61}
  62
  63static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
  64			   int push_one, gfp_t gfp);
  65
  66/* Account for new data that has been sent to the network. */
  67static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
  68{
  69	struct inet_connection_sock *icsk = inet_csk(sk);
  70	struct tcp_sock *tp = tcp_sk(sk);
  71	unsigned int prior_packets = tp->packets_out;
  72
  73	WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);
  74
  75	__skb_unlink(skb, &sk->sk_write_queue);
  76	tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);
  77
  78	if (tp->highest_sack == NULL)
  79		tp->highest_sack = skb;
  80
  81	tp->packets_out += tcp_skb_pcount(skb);
  82	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
  83		tcp_rearm_rto(sk);
  84
  85	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
  86		      tcp_skb_pcount(skb));
  87	tcp_check_space(sk);
  88}
  89
  90/* SND.NXT, if window was not shrunk or the amount of shrunk was less than one
  91 * window scaling factor due to loss of precision.
  92 * If window has been shrunk, what should we make? It is not clear at all.
  93 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
  94 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
  95 * invalid. OK, let's make this for now:
  96 */
  97static inline __u32 tcp_acceptable_seq(const struct sock *sk)
  98{
  99	const struct tcp_sock *tp = tcp_sk(sk);
 100
 101	if (!before(tcp_wnd_end(tp), tp->snd_nxt) ||
 102	    (tp->rx_opt.wscale_ok &&
 103	     ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))
 104		return tp->snd_nxt;
 105	else
 106		return tcp_wnd_end(tp);
 107}
 108
 109/* Calculate mss to advertise in SYN segment.
 110 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 111 *
 112 * 1. It is independent of path mtu.
 113 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 114 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 115 *    attached devices, because some buggy hosts are confused by
 116 *    large MSS.
 117 * 4. We do not make 3, we advertise MSS, calculated from first
 118 *    hop device mtu, but allow to raise it to ip_rt_min_advmss.
 119 *    This may be overridden via information stored in routing table.
 120 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 121 *    probably even Jumbo".
 122 */
 123static __u16 tcp_advertise_mss(struct sock *sk)
 124{
 125	struct tcp_sock *tp = tcp_sk(sk);
 126	const struct dst_entry *dst = __sk_dst_get(sk);
 127	int mss = tp->advmss;
 128
 129	if (dst) {
 130		unsigned int metric = dst_metric_advmss(dst);
 131
 132		if (metric < mss) {
 133			mss = metric;
 134			tp->advmss = mss;
 135		}
 136	}
 137
 138	return (__u16)mss;
 139}
 140
 141/* RFC2861. Reset CWND after an idle period longer than RTO to the "restart window".
 142 * This is the first part of cwnd validation mechanism.
 143 */
 144void tcp_cwnd_restart(struct sock *sk, s32 delta)
 145{
 146	struct tcp_sock *tp = tcp_sk(sk);
 147	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
 148	u32 cwnd = tcp_snd_cwnd(tp);
 149
 150	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
 151
 152	tp->snd_ssthresh = tcp_current_ssthresh(sk);
 153	restart_cwnd = min(restart_cwnd, cwnd);
 154
 155	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
 156		cwnd >>= 1;
 157	tcp_snd_cwnd_set(tp, max(cwnd, restart_cwnd));
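	/* e.g. an idle time just over 2 * RTO with cwnd == 40 and
	 * restart_cwnd == 10 halves cwnd twice: 40 -> 20 -> 10.
	 */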
 158	tp->snd_cwnd_stamp = tcp_jiffies32;
 159	tp->snd_cwnd_used = 0;
 160}
 161
 162/* Congestion state accounting after a packet has been sent. */
 163static void tcp_event_data_sent(struct tcp_sock *tp,
 164				struct sock *sk)
 165{
 166	struct inet_connection_sock *icsk = inet_csk(sk);
 167	const u32 now = tcp_jiffies32;
 168
 169	if (tcp_packets_in_flight(tp) == 0)
 170		tcp_ca_event(sk, CA_EVENT_TX_START);
 171
 172	tp->lsndtime = now;
 173
 174	/* If it is a reply within ato of the last received
 175	 * packet, increase the pingpong count.
 176	 */
 177	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
 178		inet_csk_inc_pingpong_cnt(sk);
 179}
 180
 181/* Account for an ACK we sent. */
 182static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt)
 183{
 184	struct tcp_sock *tp = tcp_sk(sk);
 185
 186	if (unlikely(tp->compressed_ack)) {
 187		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
 188			      tp->compressed_ack);
 189		tp->compressed_ack = 0;
 190		if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
 191			__sock_put(sk);
 192	}
 193
 194	if (unlikely(rcv_nxt != tp->rcv_nxt))
 195		return;  /* Special ACK sent by DCTCP to reflect ECN */
 196	tcp_dec_quickack_mode(sk);
 197	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 198}
 199
 200/* Determine a window scaling and initial window to offer.
 201 * Based on the assumption that the given amount of space
 202 * will be offered. Store the results in the tp structure.
 203 * NOTE: for smooth operation initial space offering should
 204 * be a multiple of mss if possible. We assume here that mss >= 1.
 205 * This MUST be enforced by all callers.
 206 */
 207void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
 208			       __u32 *rcv_wnd, __u32 *__window_clamp,
 209			       int wscale_ok, __u8 *rcv_wscale,
 210			       __u32 init_rcv_wnd)
 211{
 212	unsigned int space = (__space < 0 ? 0 : __space);
 213	u32 window_clamp = READ_ONCE(*__window_clamp);
 214
 215	/* If no clamp is set, set it to the maximum possible scaled window */
 216	if (window_clamp == 0)
 217		window_clamp = (U16_MAX << TCP_MAX_WSCALE);
 218	space = min(window_clamp, space);
 219
 220	/* Quantize space offering to a multiple of mss if possible. */
 221	if (space > mss)
 222		space = rounddown(space, mss);
 223
 224	/* NOTE: offering an initial window larger than 32767
 225	 * will break some buggy TCP stacks. If the admin tells us
 226	 * it is likely we could be speaking with such a buggy stack
 227	 * we will truncate our initial window offering to 32K-1
 228	 * unless the remote has sent us a window scaling option,
 229	 * which we interpret as a sign the remote TCP is not
 230	 * misinterpreting the window field as a signed quantity.
 231	 */
 232	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
 233		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
 234	else
 235		(*rcv_wnd) = space;
 236
 237	if (init_rcv_wnd)
 238		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
 239
 240	*rcv_wscale = 0;
 241	if (wscale_ok) {
 242		/* Set window scaling on max possible window */
 243		space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
 244		space = max_t(u32, space, READ_ONCE(sysctl_rmem_max));
 245		space = min_t(u32, space, window_clamp);
 246		*rcv_wscale = clamp_t(int, ilog2(space) - 15,
 247				      0, TCP_MAX_WSCALE);
 248	}
 249	/* Set the clamp no higher than max representable value */
 250	WRITE_ONCE(*__window_clamp,
 251		   min_t(__u32, U16_MAX << (*rcv_wscale), window_clamp));
 252}
 253EXPORT_SYMBOL(tcp_select_initial_window);
 254
 255/* Choose a new window to advertise, update state in tcp_sock for the
 256 * socket, and return result with RFC1323 scaling applied.  The return
 257 * value can be stuffed directly into th->window for an outgoing
 258 * frame.
 259 */
 260static u16 tcp_select_window(struct sock *sk)
 261{
 262	struct tcp_sock *tp = tcp_sk(sk);
 263	struct net *net = sock_net(sk);
 264	u32 old_win = tp->rcv_wnd;
 265	u32 cur_win, new_win;
 266
 267	/* Make the window 0 if we failed to queue the data because we
 268	 * are out of memory.
 269	 */
 270	if (unlikely(inet_csk(sk)->icsk_ack.pending & ICSK_ACK_NOMEM)) {
 271		tp->pred_flags = 0;
 272		tp->rcv_wnd = 0;
 273		tp->rcv_wup = tp->rcv_nxt;
 274		return 0;
 275	}
 276
 277	cur_win = tcp_receive_window(tp);
 278	new_win = __tcp_select_window(sk);
 279	if (new_win < cur_win) {
 280		/* Danger Will Robinson!
 281		 * Don't update rcv_wup/rcv_wnd here or else
 282		 * we will not be able to advertise a zero
 283		 * window in time.  --DaveM
 284		 *
 285		 * Relax Will Robinson.
 286		 */
 287		if (!READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) || !tp->rx_opt.rcv_wscale) {
 288			/* Never shrink the offered window */
 289			if (new_win == 0)
 290				NET_INC_STATS(net, LINUX_MIB_TCPWANTZEROWINDOWADV);
 291			new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
 292		}
 293	}
 294
 295	tp->rcv_wnd = new_win;
 296	tp->rcv_wup = tp->rcv_nxt;
 297
 298	/* Make sure we do not exceed the maximum possible
 299	 * scaled window.
 300	 */
 301	if (!tp->rx_opt.rcv_wscale &&
 302	    READ_ONCE(net->ipv4.sysctl_tcp_workaround_signed_windows))
 303		new_win = min(new_win, MAX_TCP_WINDOW);
 304	else
 305		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
 306
 307	/* RFC1323 scaling applied */
 308	new_win >>= tp->rx_opt.rcv_wscale;
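	/* The peer left-shifts this value by rcv_wscale again, so e.g.
	 * new_win == 131072 with rcv_wscale == 2 goes on the wire as 32768.
	 */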
 309
 310	/* If we advertise zero window, disable fast path. */
 311	if (new_win == 0) {
 312		tp->pred_flags = 0;
 313		if (old_win)
 314			NET_INC_STATS(net, LINUX_MIB_TCPTOZEROWINDOWADV);
 315	} else if (old_win == 0) {
 316		NET_INC_STATS(net, LINUX_MIB_TCPFROMZEROWINDOWADV);
 317	}
 318
 319	return new_win;
 320}
 321
 322/* Packet ECN state for a SYN-ACK */
 323static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
 324{
 325	const struct tcp_sock *tp = tcp_sk(sk);
 326
 327	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
 328	if (!(tp->ecn_flags & TCP_ECN_OK))
 329		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
 330	else if (tcp_ca_needs_ecn(sk) ||
 331		 tcp_bpf_ca_needs_ecn(sk))
 332		INET_ECN_xmit(sk);
 333}
 334
 335/* Packet ECN state for a SYN.  */
 336static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
 337{
 338	struct tcp_sock *tp = tcp_sk(sk);
 339	bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
 340	bool use_ecn = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn) == 1 ||
 341		tcp_ca_needs_ecn(sk) || bpf_needs_ecn;
 342
 343	if (!use_ecn) {
 344		const struct dst_entry *dst = __sk_dst_get(sk);
 345
 346		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
 347			use_ecn = true;
 348	}
 349
 350	tp->ecn_flags = 0;
 351
 352	if (use_ecn) {
 353		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
 354		tp->ecn_flags = TCP_ECN_OK;
 355		if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
 356			INET_ECN_xmit(sk);
 357	}
 358}
 359
 360static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
 361{
 362	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback))
 363		/* tp->ecn_flags are cleared at a later point in time when
 364		 * the SYN ACK is ultimately received.
 365		 */
 366		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
 367}
 368
 369static void
 370tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
 371{
 372	if (inet_rsk(req)->ecn_ok)
 373		th->ece = 1;
 374}
 375
 376/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 377 * be sent.
 378 */
 379static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
 380			 struct tcphdr *th, int tcp_header_len)
 381{
 382	struct tcp_sock *tp = tcp_sk(sk);
 383
 384	if (tp->ecn_flags & TCP_ECN_OK) {
 385		/* Not-retransmitted data segment: set ECT and inject CWR. */
 386		if (skb->len != tcp_header_len &&
 387		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
 388			INET_ECN_xmit(sk);
 389			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
 390				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
 391				th->cwr = 1;
 392				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 393			}
 394		} else if (!tcp_ca_needs_ecn(sk)) {
 395			/* ACK or retransmitted segment: clear ECT|CE */
 396			INET_ECN_dontxmit(sk);
 397		}
 398		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
 399			th->ece = 1;
 400	}
 401}
 402
 403/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 404 * auto increment end seqno.
 405 */
 406static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
 407{
 408	skb->ip_summed = CHECKSUM_PARTIAL;
 409
 410	TCP_SKB_CB(skb)->tcp_flags = flags;
 411
 412	tcp_skb_pcount_set(skb, 1);
 413
 414	TCP_SKB_CB(skb)->seq = seq;
 415	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
 416		seq++;
 417	TCP_SKB_CB(skb)->end_seq = seq;
 418}
 419
 420static inline bool tcp_urg_mode(const struct tcp_sock *tp)
 421{
 422	return tp->snd_una != tp->snd_up;
 423}
 424
 425#define OPTION_SACK_ADVERTISE	BIT(0)
 426#define OPTION_TS		BIT(1)
 427#define OPTION_MD5		BIT(2)
 428#define OPTION_WSCALE		BIT(3)
 429#define OPTION_FAST_OPEN_COOKIE	BIT(8)
 430#define OPTION_SMC		BIT(9)
 431#define OPTION_MPTCP		BIT(10)
 432#define OPTION_AO		BIT(11)
 433
 434static void smc_options_write(__be32 *ptr, u16 *options)
 435{
 436#if IS_ENABLED(CONFIG_SMC)
 437	if (static_branch_unlikely(&tcp_have_smc)) {
 438		if (unlikely(OPTION_SMC & *options)) {
 439			*ptr++ = htonl((TCPOPT_NOP  << 24) |
 440				       (TCPOPT_NOP  << 16) |
 441				       (TCPOPT_EXP <<  8) |
 442				       (TCPOLEN_EXP_SMC_BASE));
 443			*ptr++ = htonl(TCPOPT_SMC_MAGIC);
 444		}
 445	}
 446#endif
 447}
 448
 449struct tcp_out_options {
 450	u16 options;		/* bit field of OPTION_* */
 451	u16 mss;		/* 0 to disable */
 452	u8 ws;			/* window scale, 0 to disable */
 453	u8 num_sack_blocks;	/* number of SACK blocks to include */
 454	u8 hash_size;		/* bytes in hash_location */
 455	u8 bpf_opt_len;		/* length of BPF hdr option */
 456	__u8 *hash_location;	/* temporary pointer, overloaded */
 457	__u32 tsval, tsecr;	/* need to include OPTION_TS */
 458	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
 459	struct mptcp_out_options mptcp;
 460};
 461
 462static void mptcp_options_write(struct tcphdr *th, __be32 *ptr,
 463				struct tcp_sock *tp,
 464				struct tcp_out_options *opts)
 465{
 466#if IS_ENABLED(CONFIG_MPTCP)
 467	if (unlikely(OPTION_MPTCP & opts->options))
 468		mptcp_write_options(th, ptr, tp, &opts->mptcp);
 469#endif
 470}
 471
 472#ifdef CONFIG_CGROUP_BPF
 473static int bpf_skops_write_hdr_opt_arg0(struct sk_buff *skb,
 474					enum tcp_synack_type synack_type)
 475{
 476	if (unlikely(!skb))
 477		return BPF_WRITE_HDR_TCP_CURRENT_MSS;
 478
 479	if (unlikely(synack_type == TCP_SYNACK_COOKIE))
 480		return BPF_WRITE_HDR_TCP_SYNACK_COOKIE;
 481
 482	return 0;
 483}
 484
 485/* req, syn_skb and synack_type are used when writing synack */
 486static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
 487				  struct request_sock *req,
 488				  struct sk_buff *syn_skb,
 489				  enum tcp_synack_type synack_type,
 490				  struct tcp_out_options *opts,
 491				  unsigned int *remaining)
 492{
 493	struct bpf_sock_ops_kern sock_ops;
 494	int err;
 495
 496	if (likely(!BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
 497					   BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG)) ||
 498	    !*remaining)
 499		return;
 500
 501	/* *remaining has already been aligned to 4 bytes, so *remaining >= 4 */
 502
 503	/* init sock_ops */
 504	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
 505
 506	sock_ops.op = BPF_SOCK_OPS_HDR_OPT_LEN_CB;
 507
 508	if (req) {
 509		/* The listen "sk" cannot be passed here because
 510		 * it is not locked.  It would not make much
 511		 * sense to do bpf_setsockopt(listen_sk) based
 512		 * on an individual connection request either.
 513		 *
 514		 * Thus, "req" is passed here and the cgroup-bpf-progs
 515		 * of the listen "sk" will be run.
 516		 *
 517		 * "req" is also used here for fastopen even though the "sk" here
 518		 * is a fullsock "child" sk.  This keeps the behavior
 519		 * consistent between fastopen and non-fastopen on
 520		 * the bpf programming side.
 521		 */
 522		sock_ops.sk = (struct sock *)req;
 523		sock_ops.syn_skb = syn_skb;
 524	} else {
 525		sock_owned_by_me(sk);
 526
 527		sock_ops.is_fullsock = 1;
 528		sock_ops.sk = sk;
 529	}
 530
 531	sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
 532	sock_ops.remaining_opt_len = *remaining;
 533	/* tcp_current_mss() does not pass a skb */
 534	if (skb)
 535		bpf_skops_init_skb(&sock_ops, skb, 0);
 536
 537	err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);
 538
 539	if (err || sock_ops.remaining_opt_len == *remaining)
 540		return;
 541
 542	opts->bpf_opt_len = *remaining - sock_ops.remaining_opt_len;
 543	/* round up to 4 bytes */
 544	opts->bpf_opt_len = (opts->bpf_opt_len + 3) & ~3;
 545
 546	*remaining -= opts->bpf_opt_len;
 547}
 548
 549static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
 550				    struct request_sock *req,
 551				    struct sk_buff *syn_skb,
 552				    enum tcp_synack_type synack_type,
 553				    struct tcp_out_options *opts)
 554{
 555	u8 first_opt_off, nr_written, max_opt_len = opts->bpf_opt_len;
 556	struct bpf_sock_ops_kern sock_ops;
 557	int err;
 558
 559	if (likely(!max_opt_len))
 560		return;
 561
 562	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
 563
 564	sock_ops.op = BPF_SOCK_OPS_WRITE_HDR_OPT_CB;
 565
 566	if (req) {
 567		sock_ops.sk = (struct sock *)req;
 568		sock_ops.syn_skb = syn_skb;
 569	} else {
 570		sock_owned_by_me(sk);
 571
 572		sock_ops.is_fullsock = 1;
 573		sock_ops.sk = sk;
 574	}
 575
 576	sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
 577	sock_ops.remaining_opt_len = max_opt_len;
 578	first_opt_off = tcp_hdrlen(skb) - max_opt_len;
 579	bpf_skops_init_skb(&sock_ops, skb, first_opt_off);
 580
 581	err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);
 582
 583	if (err)
 584		nr_written = 0;
 585	else
 586		nr_written = max_opt_len - sock_ops.remaining_opt_len;
 587
 588	if (nr_written < max_opt_len)
 589		memset(skb->data + first_opt_off + nr_written, TCPOPT_NOP,
 590		       max_opt_len - nr_written);
 591}
 592#else
 593static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
 594				  struct request_sock *req,
 595				  struct sk_buff *syn_skb,
 596				  enum tcp_synack_type synack_type,
 597				  struct tcp_out_options *opts,
 598				  unsigned int *remaining)
 599{
 600}
 601
 602static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
 603				    struct request_sock *req,
 604				    struct sk_buff *syn_skb,
 605				    enum tcp_synack_type synack_type,
 606				    struct tcp_out_options *opts)
 607{
 608}
 609#endif
 610
 611static __be32 *process_tcp_ao_options(struct tcp_sock *tp,
 612				      const struct tcp_request_sock *tcprsk,
 613				      struct tcp_out_options *opts,
 614				      struct tcp_key *key, __be32 *ptr)
 615{
 616#ifdef CONFIG_TCP_AO
 617	u8 maclen = tcp_ao_maclen(key->ao_key);
 618
 619	if (tcprsk) {
 620		u8 aolen = maclen + sizeof(struct tcp_ao_hdr);
 621
 622		*ptr++ = htonl((TCPOPT_AO << 24) | (aolen << 16) |
 623			       (tcprsk->ao_keyid << 8) |
 624			       (tcprsk->ao_rcv_next));
 625	} else {
 626		struct tcp_ao_key *rnext_key;
 627		struct tcp_ao_info *ao_info;
 628
 629		ao_info = rcu_dereference_check(tp->ao_info,
 630			lockdep_sock_is_held(&tp->inet_conn.icsk_inet.sk));
 631		rnext_key = READ_ONCE(ao_info->rnext_key);
 632		if (WARN_ON_ONCE(!rnext_key))
 633			return ptr;
 634		*ptr++ = htonl((TCPOPT_AO << 24) |
 635			       (tcp_ao_len(key->ao_key) << 16) |
 636			       (key->ao_key->sndid << 8) |
 637			       (rnext_key->rcvid));
 638	}
 639	opts->hash_location = (__u8 *)ptr;
 640	ptr += maclen / sizeof(*ptr);
 641	if (unlikely(maclen % sizeof(*ptr))) {
 642		memset(ptr, TCPOPT_NOP, sizeof(*ptr));
 643		ptr++;
 644	}
 645#endif
 646	return ptr;
 647}
 648
 649/* Write previously computed TCP options to the packet.
 650 *
 651 * Beware: Something in the Internet is very sensitive to the ordering of
 652 * TCP options; we learned this the hard way, so be careful here.
 653 * Luckily we can at least blame others for their non-compliance, but from
 654 * an inter-operability perspective it seems that we're somewhat stuck with
 655 * the ordering we have been using if we want to keep working with
 656 * those broken things (not that it currently hurts anybody, as there isn't
 657 * a particular reason why the ordering would need to be changed).
 658 *
 659 * At least SACK_PERM as the first option is known to lead to a disaster
 660 * (but it may well be that other scenarios fail similarly).
 661 */
 662static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp,
 663			      const struct tcp_request_sock *tcprsk,
 664			      struct tcp_out_options *opts,
 665			      struct tcp_key *key)
 666{
 667	__be32 *ptr = (__be32 *)(th + 1);
 668	u16 options = opts->options;	/* mungable copy */
 669
 670	if (tcp_key_is_md5(key)) {
 671		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
 672			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
 673		/* overload cookie hash location */
 674		opts->hash_location = (__u8 *)ptr;
 675		ptr += 4;
 676	} else if (tcp_key_is_ao(key)) {
 677		ptr = process_tcp_ao_options(tp, tcprsk, opts, key, ptr);
 678	}
 679	if (unlikely(opts->mss)) {
 680		*ptr++ = htonl((TCPOPT_MSS << 24) |
 681			       (TCPOLEN_MSS << 16) |
 682			       opts->mss);
 683	}
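	/* Each option word is built MSB-first; e.g. an MSS of 1460 is
	 * emitted as 0x020405b4: kind 2, length 4, value 0x05b4.
	 */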
 684
 685	if (likely(OPTION_TS & options)) {
 686		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
 687			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
 688				       (TCPOLEN_SACK_PERM << 16) |
 689				       (TCPOPT_TIMESTAMP << 8) |
 690				       TCPOLEN_TIMESTAMP);
 691			options &= ~OPTION_SACK_ADVERTISE;
 692		} else {
 693			*ptr++ = htonl((TCPOPT_NOP << 24) |
 694				       (TCPOPT_NOP << 16) |
 695				       (TCPOPT_TIMESTAMP << 8) |
 696				       TCPOLEN_TIMESTAMP);
 697		}
 698		*ptr++ = htonl(opts->tsval);
 699		*ptr++ = htonl(opts->tsecr);
 700	}
 701
 702	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
 703		*ptr++ = htonl((TCPOPT_NOP << 24) |
 704			       (TCPOPT_NOP << 16) |
 705			       (TCPOPT_SACK_PERM << 8) |
 706			       TCPOLEN_SACK_PERM);
 707	}
 708
 709	if (unlikely(OPTION_WSCALE & options)) {
 710		*ptr++ = htonl((TCPOPT_NOP << 24) |
 711			       (TCPOPT_WINDOW << 16) |
 712			       (TCPOLEN_WINDOW << 8) |
 713			       opts->ws);
 714	}
 715
 716	if (unlikely(opts->num_sack_blocks)) {
 717		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
 718			tp->duplicate_sack : tp->selective_acks;
 719		int this_sack;
 720
 721		*ptr++ = htonl((TCPOPT_NOP  << 24) |
 722			       (TCPOPT_NOP  << 16) |
 723			       (TCPOPT_SACK <<  8) |
 724			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
 725						     TCPOLEN_SACK_PERBLOCK)));
 726
 727		for (this_sack = 0; this_sack < opts->num_sack_blocks;
 728		     ++this_sack) {
 729			*ptr++ = htonl(sp[this_sack].start_seq);
 730			*ptr++ = htonl(sp[this_sack].end_seq);
 731		}
 732
 733		tp->rx_opt.dsack = 0;
 734	}
 735
 736	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
 737		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
 738		u8 *p = (u8 *)ptr;
 739		u32 len; /* Fast Open option length */
 740
 741		if (foc->exp) {
 742			len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
 743			*ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
 744				     TCPOPT_FASTOPEN_MAGIC);
 745			p += TCPOLEN_EXP_FASTOPEN_BASE;
 746		} else {
 747			len = TCPOLEN_FASTOPEN_BASE + foc->len;
 748			*p++ = TCPOPT_FASTOPEN;
 749			*p++ = len;
 750		}
 751
 752		memcpy(p, foc->val, foc->len);
 753		if ((len & 3) == 2) {
 754			p[foc->len] = TCPOPT_NOP;
 755			p[foc->len + 1] = TCPOPT_NOP;
 756		}
 757		ptr += (len + 3) >> 2;
 758	}
 759
 760	smc_options_write(ptr, &options);
 761
 762	mptcp_options_write(th, ptr, tp, opts);
 763}
 764
 765static void smc_set_option(const struct tcp_sock *tp,
 766			   struct tcp_out_options *opts,
 767			   unsigned int *remaining)
 768{
 769#if IS_ENABLED(CONFIG_SMC)
 770	if (static_branch_unlikely(&tcp_have_smc)) {
 771		if (tp->syn_smc) {
 772			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
 773				opts->options |= OPTION_SMC;
 774				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
 775			}
 776		}
 777	}
 778#endif
 779}
 780
 781static void smc_set_option_cond(const struct tcp_sock *tp,
 782				const struct inet_request_sock *ireq,
 783				struct tcp_out_options *opts,
 784				unsigned int *remaining)
 785{
 786#if IS_ENABLED(CONFIG_SMC)
 787	if (static_branch_unlikely(&tcp_have_smc)) {
 788		if (tp->syn_smc && ireq->smc_ok) {
 789			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
 790				opts->options |= OPTION_SMC;
 791				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
 792			}
 793		}
 794	}
 795#endif
 796}
 797
 798static void mptcp_set_option_cond(const struct request_sock *req,
 799				  struct tcp_out_options *opts,
 800				  unsigned int *remaining)
 801{
 802	if (rsk_is_mptcp(req)) {
 803		unsigned int size;
 804
 805		if (mptcp_synack_options(req, &size, &opts->mptcp)) {
 806			if (*remaining >= size) {
 807				opts->options |= OPTION_MPTCP;
 808				*remaining -= size;
 809			}
 810		}
 811	}
 812}
 813
 814/* Compute TCP options for SYN packets. This is not the final
 815 * network wire format yet.
 816 */
 817static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 818				struct tcp_out_options *opts,
 819				struct tcp_key *key)
 820{
 821	struct tcp_sock *tp = tcp_sk(sk);
 822	unsigned int remaining = MAX_TCP_OPTION_SPACE;
 823	struct tcp_fastopen_request *fastopen = tp->fastopen_req;
 824	bool timestamps;
 825
 826	/* Better than switch (key.type) as it has static branches */
 827	if (tcp_key_is_md5(key)) {
 828		timestamps = false;
 829		opts->options |= OPTION_MD5;
 830		remaining -= TCPOLEN_MD5SIG_ALIGNED;
 831	} else {
 832		timestamps = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps);
 833		if (tcp_key_is_ao(key)) {
 834			opts->options |= OPTION_AO;
 835			remaining -= tcp_ao_len_aligned(key->ao_key);
 836		}
 837	}
 838
 839	/* We always get an MSS option.  The option bytes which will be seen in
 840	 * normal data packets, should timestamps be used, must be included in
 841	 * the MSS advertised.  But we subtract them from tp->mss_cache so that
 842	 * calculations in tcp_sendmsg are simpler etc.  So account for this
 843	 * fact here if necessary.  If we don't do this correctly, as a
 844	 * receiver we won't recognize data packets as being full sized when we
 845	 * should, and thus we won't abide by the delayed ACK rules correctly.
 846	 * SACKs don't matter; we never delay an ACK when we have any of those
 847	 * going out.  */
 848	opts->mss = tcp_advertise_mss(sk);
 849	remaining -= TCPOLEN_MSS_ALIGNED;
 850
 851	if (likely(timestamps)) {
 852		opts->options |= OPTION_TS;
 853		opts->tsval = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) + tp->tsoffset;
 854		opts->tsecr = tp->rx_opt.ts_recent;
 855		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 856	}
 857	if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling))) {
 858		opts->ws = tp->rx_opt.rcv_wscale;
 859		opts->options |= OPTION_WSCALE;
 860		remaining -= TCPOLEN_WSCALE_ALIGNED;
 861	}
 862	if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_sack))) {
 863		opts->options |= OPTION_SACK_ADVERTISE;
 864		if (unlikely(!(OPTION_TS & opts->options)))
 865			remaining -= TCPOLEN_SACKPERM_ALIGNED;
 866	}
 867
 868	if (fastopen && fastopen->cookie.len >= 0) {
 869		u32 need = fastopen->cookie.len;
 870
 871		need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
 872					       TCPOLEN_FASTOPEN_BASE;
 873		need = (need + 3) & ~3U;  /* Align to 32 bits */
 874		if (remaining >= need) {
 875			opts->options |= OPTION_FAST_OPEN_COOKIE;
 876			opts->fastopen_cookie = &fastopen->cookie;
 877			remaining -= need;
 878			tp->syn_fastopen = 1;
 879			tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
 880		}
 881	}
 882
 883	smc_set_option(tp, opts, &remaining);
 884
 885	if (sk_is_mptcp(sk)) {
 886		unsigned int size;
 887
 888		if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) {
 889			if (remaining >= size) {
 890				opts->options |= OPTION_MPTCP;
 891				remaining -= size;
 892			}
 893		}
 894	}
 895
 896	bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);
 897
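	/* A typical SYN with MSS + timestamps + window scaling consumes
	 * 4 + 12 + 4 = 20 of the 40 option bytes (SACK_PERM shares the
	 * timestamp word), leaving room for a Fast Open cookie and friends.
	 */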
 898	return MAX_TCP_OPTION_SPACE - remaining;
 899}
 900
 901/* Set up TCP options for SYN-ACKs. */
 902static unsigned int tcp_synack_options(const struct sock *sk,
 903				       struct request_sock *req,
 904				       unsigned int mss, struct sk_buff *skb,
 905				       struct tcp_out_options *opts,
 906				       const struct tcp_key *key,
 907				       struct tcp_fastopen_cookie *foc,
 908				       enum tcp_synack_type synack_type,
 909				       struct sk_buff *syn_skb)
 910{
 911	struct inet_request_sock *ireq = inet_rsk(req);
 912	unsigned int remaining = MAX_TCP_OPTION_SPACE;
 913
 914	if (tcp_key_is_md5(key)) {
 915		opts->options |= OPTION_MD5;
 916		remaining -= TCPOLEN_MD5SIG_ALIGNED;
 917
 918		/* We can't fit any SACK blocks in a packet with MD5 + TS
 919		 * options. There was discussion about disabling SACK
 920		 * rather than TS in order to fit in better with old,
 921		 * buggy kernels, but that was deemed to be unnecessary.
 922		 */
 923		if (synack_type != TCP_SYNACK_COOKIE)
 924			ireq->tstamp_ok &= !ireq->sack_ok;
 925	} else if (tcp_key_is_ao(key)) {
 926		opts->options |= OPTION_AO;
 927		remaining -= tcp_ao_len_aligned(key->ao_key);
 928		ireq->tstamp_ok &= !ireq->sack_ok;
 929	}
 930
 931	/* We always send an MSS option. */
 932	opts->mss = mss;
 933	remaining -= TCPOLEN_MSS_ALIGNED;
 934
 935	if (likely(ireq->wscale_ok)) {
 936		opts->ws = ireq->rcv_wscale;
 937		opts->options |= OPTION_WSCALE;
 938		remaining -= TCPOLEN_WSCALE_ALIGNED;
 939	}
 940	if (likely(ireq->tstamp_ok)) {
 941		opts->options |= OPTION_TS;
 942		opts->tsval = tcp_skb_timestamp_ts(tcp_rsk(req)->req_usec_ts, skb) +
 943			      tcp_rsk(req)->ts_off;
 944		opts->tsecr = READ_ONCE(req->ts_recent);
 945		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 946	}
 947	if (likely(ireq->sack_ok)) {
 948		opts->options |= OPTION_SACK_ADVERTISE;
 949		if (unlikely(!ireq->tstamp_ok))
 950			remaining -= TCPOLEN_SACKPERM_ALIGNED;
 951	}
 952	if (foc != NULL && foc->len >= 0) {
 953		u32 need = foc->len;
 954
 955		need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
 956				   TCPOLEN_FASTOPEN_BASE;
 957		need = (need + 3) & ~3U;  /* Align to 32 bits */
 958		if (remaining >= need) {
 959			opts->options |= OPTION_FAST_OPEN_COOKIE;
 960			opts->fastopen_cookie = foc;
 961			remaining -= need;
 962		}
 963	}
 964
 965	mptcp_set_option_cond(req, opts, &remaining);
 966
 967	smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);
 968
 969	bpf_skops_hdr_opt_len((struct sock *)sk, skb, req, syn_skb,
 970			      synack_type, opts, &remaining);
 971
 972	return MAX_TCP_OPTION_SPACE - remaining;
 973}
 974
 975/* Compute TCP options for ESTABLISHED sockets. This is not the
 976 * final wire format yet.
 977 */
 978static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
 979					struct tcp_out_options *opts,
 980					struct tcp_key *key)
 981{
 982	struct tcp_sock *tp = tcp_sk(sk);
 983	unsigned int size = 0;
 984	unsigned int eff_sacks;
 985
 986	opts->options = 0;
 987
 988	/* Better than switch (key.type) as it has static branches */
 989	if (tcp_key_is_md5(key)) {
 990		opts->options |= OPTION_MD5;
 991		size += TCPOLEN_MD5SIG_ALIGNED;
 992	} else if (tcp_key_is_ao(key)) {
 993		opts->options |= OPTION_AO;
 994		size += tcp_ao_len_aligned(key->ao_key);
 995	}
 996
 997	if (likely(tp->rx_opt.tstamp_ok)) {
 998		opts->options |= OPTION_TS;
 999		opts->tsval = skb ? tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) +
1000				tp->tsoffset : 0;
1001		opts->tsecr = tp->rx_opt.ts_recent;
1002		size += TCPOLEN_TSTAMP_ALIGNED;
1003	}
1004
1005	/* MPTCP options have precedence over SACK for the limited TCP
1006	 * option space because an MPTCP connection would be forced to
1007	 * fall back to regular TCP if a required multipath option is
1008	 * missing. SACK still gets a chance to use whatever space is
1009	 * left.
1010	 */
1011	if (sk_is_mptcp(sk)) {
1012		unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
1013		unsigned int opt_size = 0;
1014
1015		if (mptcp_established_options(sk, skb, &opt_size, remaining,
1016					      &opts->mptcp)) {
1017			opts->options |= OPTION_MPTCP;
1018			size += opt_size;
1019		}
1020	}
1021
1022	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
1023	if (unlikely(eff_sacks)) {
1024		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
1025		if (unlikely(remaining < TCPOLEN_SACK_BASE_ALIGNED +
1026					 TCPOLEN_SACK_PERBLOCK))
1027			return size;
1028
1029		opts->num_sack_blocks =
1030			min_t(unsigned int, eff_sacks,
1031			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
1032			      TCPOLEN_SACK_PERBLOCK);
1033
1034		size += TCPOLEN_SACK_BASE_ALIGNED +
1035			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
1036	}
1037
1038	if (unlikely(BPF_SOCK_OPS_TEST_FLAG(tp,
1039					    BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG))) {
1040		unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
1041
1042		bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);
1043
1044		size = MAX_TCP_OPTION_SPACE - remaining;
1045	}
1046
1047	return size;
1048}
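
/* Worked example, assuming timestamps are negotiated: size starts at
 * TCPOLEN_TSTAMP_ALIGNED (12), leaving 28 bytes, so at most
 * (28 - TCPOLEN_SACK_BASE_ALIGNED) / TCPOLEN_SACK_PERBLOCK =
 * (28 - 4) / 8 = 3 SACK blocks fit, for 12 + 4 + 3 * 8 = 40 bytes,
 * i.e. a completely full option space.
 */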
1049
1050
1051/* TCP SMALL QUEUES (TSQ)
1052 *
1053 * The TSQ goal is to keep a small number of skbs per tcp flow in the
1054 * tx queues (qdisc + device) to reduce RTT and bufferbloat.
1055 * We do this using a special skb destructor (tcp_wfree).
1056 *
1057 * It's important that tcp_wfree() can be replaced by sock_wfree() in
1058 * the event the skb needs to be reallocated in a driver.
1059 * The invariant is that skb->truesize is subtracted from sk->sk_wmem_alloc.
1060 *
1061 * Since transmit from skb destructor is forbidden, we use a tasklet
1062 * to process all sockets that eventually need to send more skbs.
1063 * We use one tasklet per cpu, with its own queue of sockets.
1064 */
1065struct tsq_tasklet {
1066	struct tasklet_struct	tasklet;
1067	struct list_head	head; /* queue of tcp sockets */
1068};
1069static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
1070
1071static void tcp_tsq_write(struct sock *sk)
1072{
1073	if ((1 << sk->sk_state) &
1074	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
1075	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK)) {
1076		struct tcp_sock *tp = tcp_sk(sk);
1077
1078		if (tp->lost_out > tp->retrans_out &&
1079		    tcp_snd_cwnd(tp) > tcp_packets_in_flight(tp)) {
1080			tcp_mstamp_refresh(tp);
1081			tcp_xmit_retransmit_queue(sk);
1082		}
1083
1084		tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
1085			       0, GFP_ATOMIC);
1086	}
1087}
1088
1089static void tcp_tsq_handler(struct sock *sk)
1090{
1091	bh_lock_sock(sk);
1092	if (!sock_owned_by_user(sk))
1093		tcp_tsq_write(sk);
1094	else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
1095		sock_hold(sk);
1096	bh_unlock_sock(sk);
1097}
1098/*
1099 * One tasklet per cpu tries to send more skbs.
1100 * We run in tasklet context but need to disable irqs when
1101 * transferring tsq->head because tcp_wfree() might
1102 * interrupt us (non-NAPI drivers).
1103 */
1104static void tcp_tasklet_func(struct tasklet_struct *t)
1105{
1106	struct tsq_tasklet *tsq = from_tasklet(tsq,  t, tasklet);
1107	LIST_HEAD(list);
1108	unsigned long flags;
1109	struct list_head *q, *n;
1110	struct tcp_sock *tp;
1111	struct sock *sk;
1112
1113	local_irq_save(flags);
1114	list_splice_init(&tsq->head, &list);
1115	local_irq_restore(flags);
1116
1117	list_for_each_safe(q, n, &list) {
1118		tp = list_entry(q, struct tcp_sock, tsq_node);
1119		list_del(&tp->tsq_node);
1120
1121		sk = (struct sock *)tp;
1122		smp_mb__before_atomic();
1123		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);
1124
1125		tcp_tsq_handler(sk);
1126		sk_free(sk);
1127	}
1128}
1129
1130#define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
1131			  TCPF_WRITE_TIMER_DEFERRED |	\
1132			  TCPF_DELACK_TIMER_DEFERRED |	\
1133			  TCPF_MTU_REDUCED_DEFERRED |	\
1134			  TCPF_ACK_DEFERRED)
1135/**
1136 * tcp_release_cb - tcp release_sock() callback
1137 * @sk: socket
1138 *
1139 * called from release_sock() to perform protocol dependent
1140 * actions before socket release.
1141 */
1142void tcp_release_cb(struct sock *sk)
1143{
1144	unsigned long flags = smp_load_acquire(&sk->sk_tsq_flags);
1145	unsigned long nflags;
1146
1147	/* perform an atomic operation only if at least one flag is set */
1148	do {
1149		if (!(flags & TCP_DEFERRED_ALL))
1150			return;
1151		nflags = flags & ~TCP_DEFERRED_ALL;
1152	} while (!try_cmpxchg(&sk->sk_tsq_flags, &flags, nflags));
1153
1154	if (flags & TCPF_TSQ_DEFERRED) {
1155		tcp_tsq_write(sk);
1156		__sock_put(sk);
1157	}
1158
1159	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
1160		tcp_write_timer_handler(sk);
1161		__sock_put(sk);
1162	}
1163	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
1164		tcp_delack_timer_handler(sk);
1165		__sock_put(sk);
1166	}
1167	if (flags & TCPF_MTU_REDUCED_DEFERRED) {
1168		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
1169		__sock_put(sk);
1170	}
1171	if ((flags & TCPF_ACK_DEFERRED) && inet_csk_ack_scheduled(sk))
1172		tcp_send_ack(sk);
1173}
1174EXPORT_SYMBOL(tcp_release_cb);
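
/* Typical deferral sequence, as a sketch: a TCP timer fires in softirq
 * context while the socket is owned by the user, so its handler only
 * sets a flag such as TCPF_DELACK_TIMER_DEFERRED and takes a socket
 * reference. When the user later calls release_sock(), tcp_release_cb()
 * clears the flag, runs tcp_delack_timer_handler() and drops the
 * reference with __sock_put().
 */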
1175
1176void __init tcp_tasklet_init(void)
1177{
1178	int i;
1179
1180	for_each_possible_cpu(i) {
1181		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);
1182
1183		INIT_LIST_HEAD(&tsq->head);
1184		tasklet_setup(&tsq->tasklet, tcp_tasklet_func);
1185	}
1186}
1187
1188/*
1189 * Write buffer destructor automatically called from kfree_skb.
1190 * We can't xmit new skbs from this context, as we might already
1191 * hold qdisc lock.
1192 */
1193void tcp_wfree(struct sk_buff *skb)
1194{
1195	struct sock *sk = skb->sk;
1196	struct tcp_sock *tp = tcp_sk(sk);
1197	unsigned long flags, nval, oval;
1198	struct tsq_tasklet *tsq;
1199	bool empty;
1200
1201	/* Keep one reference on sk_wmem_alloc.
1202	 * Will be released by sk_free() from here or tcp_tasklet_func()
1203	 */
1204	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));
1205
1206	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
1207	 * Wait until our queues (qdisc + devices) are drained.
1208	 * This gives :
1209	 * - less callbacks to tcp_write_xmit(), reducing stress (batches)
1210	 * - chance for incoming ACK (processed by another cpu maybe)
1211	 *   to migrate this flow (skb->ooo_okay will be eventually set)
1212	 */
1213	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
1214		goto out;
1215
1216	oval = smp_load_acquire(&sk->sk_tsq_flags);
1217	do {
1218		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
1219			goto out;
1220
1221		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED;
1222	} while (!try_cmpxchg(&sk->sk_tsq_flags, &oval, nval));
1223
1224	/* queue this socket to tasklet queue */
1225	local_irq_save(flags);
1226	tsq = this_cpu_ptr(&tsq_tasklet);
1227	empty = list_empty(&tsq->head);
1228	list_add(&tp->tsq_node, &tsq->head);
1229	if (empty)
1230		tasklet_schedule(&tsq->tasklet);
1231	local_irq_restore(flags);
1232	return;
1233out:
1234	sk_free(sk);
1235}
1236
1237/* Note: Called under soft irq.
1238 * We can call TCP stack right away, unless socket is owned by user.
1239 */
1240enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
1241{
1242	struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer);
1243	struct sock *sk = (struct sock *)tp;
1244
1245	tcp_tsq_handler(sk);
1246	sock_put(sk);
1247
1248	return HRTIMER_NORESTART;
1249}
1250
1251static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
1252				      u64 prior_wstamp)
1253{
1254	struct tcp_sock *tp = tcp_sk(sk);
1255
1256	if (sk->sk_pacing_status != SK_PACING_NONE) {
1257		unsigned long rate = READ_ONCE(sk->sk_pacing_rate);
1258
1259		/* Original sch_fq does not pace first 10 MSS
1260		 * Note that tp->data_segs_out overflows after 2^32 packets,
1261		 * this is a minor annoyance.
1262		 */
1263		if (rate != ~0UL && rate && tp->data_segs_out >= 10) {
1264			u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate);
1265			u64 credit = tp->tcp_wstamp_ns - prior_wstamp;
1266
1267			/* take into account OS jitter */
1268			len_ns -= min_t(u64, len_ns / 2, credit);
1269			tp->tcp_wstamp_ns += len_ns;
1270		}
1271	}
1272	list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
1273}
1274
1275INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
1276INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
1277INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb));
1278
1279/* This routine actually transmits TCP packets queued in by
1280 * tcp_do_sendmsg().  This is used by both the initial
1281 * transmission and possible later retransmissions.
1282 * All SKB's seen here are completely headerless.  It is our
1283 * job to build the TCP header, and pass the packet down to
1284 * IP so it can do the same plus pass the packet off to the
1285 * device.
1286 *
1287 * We are working here with either a clone of the original
1288 * SKB, or a fresh unique copy made by the retransmit engine.
1289 */
1290static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
1291			      int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
1292{
1293	const struct inet_connection_sock *icsk = inet_csk(sk);
1294	struct inet_sock *inet;
1295	struct tcp_sock *tp;
1296	struct tcp_skb_cb *tcb;
1297	struct tcp_out_options opts;
1298	unsigned int tcp_options_size, tcp_header_size;
1299	struct sk_buff *oskb = NULL;
1300	struct tcp_key key;
1301	struct tcphdr *th;
1302	u64 prior_wstamp;
1303	int err;
1304
1305	BUG_ON(!skb || !tcp_skb_pcount(skb));
1306	tp = tcp_sk(sk);
1307	prior_wstamp = tp->tcp_wstamp_ns;
1308	tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
1309	skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC);
1310	if (clone_it) {
1311		oskb = skb;
1312
1313		tcp_skb_tsorted_save(oskb) {
1314			if (unlikely(skb_cloned(oskb)))
1315				skb = pskb_copy(oskb, gfp_mask);
1316			else
1317				skb = skb_clone(oskb, gfp_mask);
1318		} tcp_skb_tsorted_restore(oskb);
1319
1320		if (unlikely(!skb))
1321			return -ENOBUFS;
1322		/* retransmit skbs might have a non zero value in skb->dev
1323		 * because skb->dev is aliased with skb->rbnode.rb_left
1324		 */
1325		skb->dev = NULL;
1326	}
1327
1328	inet = inet_sk(sk);
1329	tcb = TCP_SKB_CB(skb);
1330	memset(&opts, 0, sizeof(opts));
1331
1332	tcp_get_current_key(sk, &key);
1333	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
1334		tcp_options_size = tcp_syn_options(sk, skb, &opts, &key);
1335	} else {
1336		tcp_options_size = tcp_established_options(sk, skb, &opts, &key);
1337		/* Force a PSH flag on all (GSO) packets to expedite GRO flush
1338		 * at the receiver: this slightly improves GRO performance.
1339		 * Note that we do not force the PSH flag for non GSO packets,
1340		 * because they might be sent under high congestion events,
1341		 * and in this case it is better to delay the delivery of 1-MSS
1342		 * packets and thus the corresponding ACK packet that would
1343		 * release the following packet.
1344		 */
1345		if (tcp_skb_pcount(skb) > 1)
1346			tcb->tcp_flags |= TCPHDR_PSH;
1347	}
1348	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
1349
1350	/* We set skb->ooo_okay to one if this packet can select
1351	 * a different TX queue than prior packets of this flow,
1352	 * to avoid self-inflicted reorders.
1353	 * The 'other' queue decision is based on current cpu number
1354	 * if XPS is enabled, or sk->sk_txhash otherwise.
1355	 * We can switch to another (and better) queue if:
1356	 * 1) No packet with payload is in qdisc/device queues.
1357	 *    Delays in TX completion can defeat the test
1358	 *    even if packets were already sent.
1359	 * 2) Or rtx queue is empty.
1360	 *    This mitigates above case if ACK packets for
1361	 *    all prior packets were already processed.
1362	 */
1363	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) ||
1364			tcp_rtx_queue_empty(sk);
1365
1366	/* If we had to use memory reserve to allocate this skb,
1367	 * this might cause drops if packet is looped back :
1368	 * Other socket might not have SOCK_MEMALLOC.
1369	 * Packets not looped back do not care about pfmemalloc.
1370	 */
1371	skb->pfmemalloc = 0;
1372
1373	skb_push(skb, tcp_header_size);
1374	skb_reset_transport_header(skb);
1375
1376	skb_orphan(skb);
1377	skb->sk = sk;
1378	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
1379	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
1380
1381	skb_set_dst_pending_confirm(skb, READ_ONCE(sk->sk_dst_pending_confirm));
1382
1383	/* Build TCP header and checksum it. */
1384	th = (struct tcphdr *)skb->data;
1385	th->source		= inet->inet_sport;
1386	th->dest		= inet->inet_dport;
1387	th->seq			= htonl(tcb->seq);
1388	th->ack_seq		= htonl(rcv_nxt);
1389	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
1390					tcb->tcp_flags);
1391
1392	th->check		= 0;
1393	th->urg_ptr		= 0;
1394
1395	/* The urg_mode check is necessary during a below snd_una win probe */
1396	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
1397		if (before(tp->snd_up, tcb->seq + 0x10000)) {
1398			th->urg_ptr = htons(tp->snd_up - tcb->seq);
1399			th->urg = 1;
1400		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
1401			th->urg_ptr = htons(0xFFFF);
1402			th->urg = 1;
1403		}
1404	}
1405
1406	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
1407	if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
1408		th->window      = htons(tcp_select_window(sk));
1409		tcp_ecn_send(sk, skb, th, tcp_header_size);
1410	} else {
1411		/* RFC1323: The window in SYN & SYN/ACK segments
1412		 * is never scaled.
1413		 */
1414		th->window	= htons(min(tp->rcv_wnd, 65535U));
1415	}
1416
1417	tcp_options_write(th, tp, NULL, &opts, &key);
1418
1419	if (tcp_key_is_md5(&key)) {
1420#ifdef CONFIG_TCP_MD5SIG
1421		/* Calculate the MD5 hash, as we have all we need now */
1422		sk_gso_disable(sk);
1423		tp->af_specific->calc_md5_hash(opts.hash_location,
1424					       key.md5_key, sk, skb);
1425#endif
1426	} else if (tcp_key_is_ao(&key)) {
1427		int err;
1428
1429		err = tcp_ao_transmit_skb(sk, skb, key.ao_key, th,
1430					  opts.hash_location);
1431		if (err) {
1432			kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED);
1433			return -ENOMEM;
1434		}
1435	}
1436
1437	/* BPF prog is the last one writing header option */
1438	bpf_skops_write_hdr_opt(sk, skb, NULL, NULL, 0, &opts);
1439
1440	INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check,
1441			   tcp_v6_send_check, tcp_v4_send_check,
1442			   sk, skb);
1443
1444	if (likely(tcb->tcp_flags & TCPHDR_ACK))
1445		tcp_event_ack_sent(sk, rcv_nxt);
1446
1447	if (skb->len != tcp_header_size) {
1448		tcp_event_data_sent(tp, sk);
1449		tp->data_segs_out += tcp_skb_pcount(skb);
1450		tp->bytes_sent += skb->len - tcp_header_size;
1451	}
1452
1453	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
1454		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
1455			      tcp_skb_pcount(skb));
1456
1457	tp->segs_out += tcp_skb_pcount(skb);
1458	skb_set_hash_from_sk(skb, sk);
1459	/* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */
1460	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
1461	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
1462
1463	/* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */
1464
1465	/* Cleanup our debris for IP stacks */
1466	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
1467			       sizeof(struct inet6_skb_parm)));
1468
1469	tcp_add_tx_delay(skb, tp);
1470
1471	err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit,
1472				 inet6_csk_xmit, ip_queue_xmit,
1473				 sk, skb, &inet->cork.fl);
1474
1475	if (unlikely(err > 0)) {
1476		tcp_enter_cwr(sk);
1477		err = net_xmit_eval(err);
1478	}
1479	if (!err && oskb) {
1480		tcp_update_skb_after_send(sk, oskb, prior_wstamp);
1481		tcp_rate_skb_sent(sk, oskb);
1482	}
1483	return err;
1484}
1485
1486static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
1487			    gfp_t gfp_mask)
1488{
1489	return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
1490				  tcp_sk(sk)->rcv_nxt);
1491}
1492
1493/* This routine just queues the buffer for sending.
1494 *
1495 * NOTE: the probe0 timer is not checked; do not forget
1496 * tcp_push_pending_frames(), otherwise the socket can stall.
1497 */
1498static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
1499{
1500	struct tcp_sock *tp = tcp_sk(sk);
1501
1502	/* Advance write_seq and place onto the write_queue. */
1503	WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);
1504	__skb_header_release(skb);
1505	tcp_add_write_queue_tail(sk, skb);
1506	sk_wmem_queued_add(sk, skb->truesize);
1507	sk_mem_charge(sk, skb->truesize);
1508}
1509
1510/* Initialize TSO segments for a packet. */
1511static int tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1512{
1513	int tso_segs;
1514
1515	if (skb->len <= mss_now) {
1516		/* Avoid the costly divide in the normal
1517		 * non-TSO case.
1518		 */
1519		TCP_SKB_CB(skb)->tcp_gso_size = 0;
1520		tcp_skb_pcount_set(skb, 1);
1521		return 1;
1522	}
1523	TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
1524	tso_segs = DIV_ROUND_UP(skb->len, mss_now);
1525	tcp_skb_pcount_set(skb, tso_segs);
1526	return tso_segs;
1527}
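
/* Example: with mss_now = 1460, a 4380 byte skb gets
 * tcp_gso_size = 1460 and pcount = DIV_ROUND_UP(4380, 1460) = 3,
 * while a 1000 byte skb takes the fast path (gso_size = 0, pcount = 1).
 */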
1528
1529/* Pcount in the middle of the write queue got changed, we need to do various
1530 * tweaks to fix counters
1531 */
1532static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1533{
1534	struct tcp_sock *tp = tcp_sk(sk);
1535
1536	tp->packets_out -= decr;
1537
1538	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1539		tp->sacked_out -= decr;
1540	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1541		tp->retrans_out -= decr;
1542	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1543		tp->lost_out -= decr;
1544
1545	/* Reno case is special. Sigh... */
1546	if (tcp_is_reno(tp) && decr > 0)
1547		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
1548
1549	if (tp->lost_skb_hint &&
1550	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
1551	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
1552		tp->lost_cnt_hint -= decr;
1553
1554	tcp_verify_left_out(tp);
1555}
1556
1557static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
1558{
1559	return TCP_SKB_CB(skb)->txstamp_ack ||
1560		(skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
1561}
1562
1563static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
1564{
1565	struct skb_shared_info *shinfo = skb_shinfo(skb);
1566
1567	if (unlikely(tcp_has_tx_tstamp(skb)) &&
1568	    !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1569		struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1570		u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1571
1572		shinfo->tx_flags &= ~tsflags;
1573		shinfo2->tx_flags |= tsflags;
1574		swap(shinfo->tskey, shinfo2->tskey);
1575		TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
1576		TCP_SKB_CB(skb)->txstamp_ack = 0;
1577	}
1578}
1579
1580static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
1581{
1582	TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
1583	TCP_SKB_CB(skb)->eor = 0;
1584}
1585
1586/* Insert buff after skb on the write or rtx queue of sk.  */
1587static void tcp_insert_write_queue_after(struct sk_buff *skb,
1588					 struct sk_buff *buff,
1589					 struct sock *sk,
1590					 enum tcp_queue tcp_queue)
1591{
1592	if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE)
1593		__skb_queue_after(&sk->sk_write_queue, skb, buff);
1594	else
1595		tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
1596}
1597
1598/* Function to create two new TCP segments.  Shrinks the given segment
1599 * to the specified size and appends a new segment with the rest of the
1600 * packet to the list.  This won't be called frequently, I hope.
1601 * Remember, these are still headerless SKBs at this point.
1602 */
1603int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
1604		 struct sk_buff *skb, u32 len,
1605		 unsigned int mss_now, gfp_t gfp)
1606{
1607	struct tcp_sock *tp = tcp_sk(sk);
1608	struct sk_buff *buff;
1609	int old_factor;
1610	long limit;
1611	int nlen;
1612	u8 flags;
1613
1614	if (WARN_ON(len > skb->len))
1615		return -EINVAL;
1616
1617	DEBUG_NET_WARN_ON_ONCE(skb_headlen(skb));
1618
1619	/* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
1620	 * We need some allowance to not penalize applications setting small
1621	 * SO_SNDBUF values.
1622	 * Also allow first and last skb in retransmit queue to be split.
1623	 */
1624	limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_LEGACY_MAX_SIZE);
1625	if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
1626		     tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
1627		     skb != tcp_rtx_queue_head(sk) &&
1628		     skb != tcp_rtx_queue_tail(sk))) {
1629		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
1630		return -ENOMEM;
1631	}
1632
1633	if (skb_unclone_keeptruesize(skb, gfp))
1634		return -ENOMEM;
1635
1636	/* Get a new skb... force flag on. */
1637	buff = tcp_stream_alloc_skb(sk, gfp, true);
1638	if (!buff)
1639		return -ENOMEM; /* We'll just try again later. */
1640	skb_copy_decrypted(buff, skb);
1641	mptcp_skb_ext_copy(buff, skb);
1642
1643	sk_wmem_queued_add(sk, buff->truesize);
1644	sk_mem_charge(sk, buff->truesize);
1645	nlen = skb->len - len;
1646	buff->truesize += nlen;
1647	skb->truesize -= nlen;
1648
1649	/* Correct the sequence numbers. */
1650	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1651	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1652	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1653
1654	/* PSH and FIN should only be set in the second packet. */
1655	flags = TCP_SKB_CB(skb)->tcp_flags;
1656	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
1657	TCP_SKB_CB(buff)->tcp_flags = flags;
1658	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1659	tcp_skb_fragment_eor(skb, buff);
1660
1661	skb_split(skb, buff, len);
1662
1663	skb_set_delivery_time(buff, skb->tstamp, SKB_CLOCK_MONOTONIC);
1664	tcp_fragment_tstamp(skb, buff);
1665
1666	old_factor = tcp_skb_pcount(skb);
1667
1668	/* Fix up tso_factor for both original and new SKB.  */
1669	tcp_set_skb_tso_segs(skb, mss_now);
1670	tcp_set_skb_tso_segs(buff, mss_now);
1671
1672	/* Update delivered info for the new segment */
1673	TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
1674
1675	/* If this packet has been sent out already, we must
1676	 * adjust the various packet counters.
1677	 */
1678	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
1679		int diff = old_factor - tcp_skb_pcount(skb) -
1680			tcp_skb_pcount(buff);
1681
1682		if (diff)
1683			tcp_adjust_pcount(sk, skb, diff);
1684	}
1685
1686	/* Link BUFF into the send queue. */
1687	__skb_header_release(buff);
1688	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1689	if (tcp_queue == TCP_FRAG_IN_RTX_QUEUE)
1690		list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor);
1691
1692	return 0;
1693}
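
/* Example: fragmenting an skb covering [1000, 4000) at len = 1460
 * leaves skb as [1000, 2460) with PSH/FIN cleared, and inserts buff
 * as [2460, 4000) right after it, carrying the original flags.
 */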
1694
1695/* This is similar to __pskb_pull_tail(). The difference is that pulled
1696 * data is not copied, but immediately discarded.
1697 */
1698static int __pskb_trim_head(struct sk_buff *skb, int len)
1699{
1700	struct skb_shared_info *shinfo;
1701	int i, k, eat;
1702
1703	DEBUG_NET_WARN_ON_ONCE(skb_headlen(skb));
1704	eat = len;
1705	k = 0;
1706	shinfo = skb_shinfo(skb);
1707	for (i = 0; i < shinfo->nr_frags; i++) {
1708		int size = skb_frag_size(&shinfo->frags[i]);
1709
1710		if (size <= eat) {
1711			skb_frag_unref(skb, i);
1712			eat -= size;
1713		} else {
1714			shinfo->frags[k] = shinfo->frags[i];
1715			if (eat) {
1716				skb_frag_off_add(&shinfo->frags[k], eat);
1717				skb_frag_size_sub(&shinfo->frags[k], eat);
1718				eat = 0;
1719			}
1720			k++;
1721		}
1722	}
1723	shinfo->nr_frags = k;
1724
1725	skb->data_len -= len;
1726	skb->len = skb->data_len;
1727	return len;
1728}
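
/* Example: trimming len = 1500 from an skb with two frags of 1000 and
 * 2000 bytes drops the first frag entirely (eat = 1000), then advances
 * the second frag by the remaining 500 bytes, leaving a single frag of
 * 1500 bytes and nr_frags = 1.
 */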
1729
1730/* Remove acked data from a packet in the transmit queue. */
1731int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
1732{
1733	u32 delta_truesize;
1734
1735	if (skb_unclone_keeptruesize(skb, GFP_ATOMIC))
1736		return -ENOMEM;
1737
1738	delta_truesize = __pskb_trim_head(skb, len);
1739
1740	TCP_SKB_CB(skb)->seq += len;
1741
1742	skb->truesize	   -= delta_truesize;
1743	sk_wmem_queued_add(sk, -delta_truesize);
1744	if (!skb_zcopy_pure(skb))
1745		sk_mem_uncharge(sk, delta_truesize);
1746
1747	/* Any change of skb->len requires recalculation of tso factor. */
1748	if (tcp_skb_pcount(skb) > 1)
1749		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
1750
1751	return 0;
1752}
1753
1754	/* Calculate MSS, not accounting for any TCP options.  */
1755static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
1756{
1757	const struct tcp_sock *tp = tcp_sk(sk);
1758	const struct inet_connection_sock *icsk = inet_csk(sk);
1759	int mss_now;
1760
1761	/* Calculate base mss without TCP options:
1762	   It is MMS_S - sizeof(tcphdr) per RFC 1122.
1763	 */
1764	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
1765
1766	/* Clamp it (mss_clamp does not include tcp options) */
1767	if (mss_now > tp->rx_opt.mss_clamp)
1768		mss_now = tp->rx_opt.mss_clamp;
1769
1770	/* Now subtract optional transport overhead */
1771	mss_now -= icsk->icsk_ext_hdr_len;
1772
1773	/* Then reserve room for full set of TCP options and 8 bytes of data */
1774	mss_now = max(mss_now,
1775		      READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss));
1776	return mss_now;
1777}
1778
1779/* Calculate MSS. Not accounting for SACKs here.  */
1780int tcp_mtu_to_mss(struct sock *sk, int pmtu)
1781{
1782	/* Subtract TCP options size, not including SACKs */
1783	return __tcp_mtu_to_mss(sk, pmtu) -
1784	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
1785}
1786EXPORT_SYMBOL(tcp_mtu_to_mss);
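
/* Example, assuming IPv4 without IP options: a PMTU of 1500 gives
 * __tcp_mtu_to_mss() = 1500 - 20 - 20 = 1460. If timestamps were
 * negotiated (tcp_header_len = 20 + 12 = 32), tcp_mtu_to_mss()
 * returns 1460 - 12 = 1448 bytes of payload per segment.
 */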
1787
1788/* Inverse of above */
1789int tcp_mss_to_mtu(struct sock *sk, int mss)
1790{
1791	const struct tcp_sock *tp = tcp_sk(sk);
1792	const struct inet_connection_sock *icsk = inet_csk(sk);
1793
1794	return mss +
1795	      tp->tcp_header_len +
1796	      icsk->icsk_ext_hdr_len +
1797	      icsk->icsk_af_ops->net_header_len;
1798}
1799EXPORT_SYMBOL(tcp_mss_to_mtu);
1800
1801/* MTU probing init per socket */
1802void tcp_mtup_init(struct sock *sk)
1803{
1804	struct tcp_sock *tp = tcp_sk(sk);
1805	struct inet_connection_sock *icsk = inet_csk(sk);
1806	struct net *net = sock_net(sk);
1807
1808	icsk->icsk_mtup.enabled = READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing) > 1;
1809	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
1810			       icsk->icsk_af_ops->net_header_len;
1811	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, READ_ONCE(net->ipv4.sysctl_tcp_base_mss));
1812	icsk->icsk_mtup.probe_size = 0;
1813	if (icsk->icsk_mtup.enabled)
1814		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
1815}
1816EXPORT_SYMBOL(tcp_mtup_init);
1817
1818/* This function synchronizes snd mss to the current pmtu/exthdr set.
1819
1820   tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does
1821   NOT account for TCP options, but includes only the bare TCP header.
1822
1823   tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1824   It is minimum of user_mss and mss received with SYN.
1825   It also does not include TCP options.
1826
1827   inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
1828
1829   tp->mss_cache is current effective sending mss, including
1830   all tcp options except for SACKs. It is evaluated,
1831   taking into account current pmtu, but never exceeds
1832   tp->rx_opt.mss_clamp.
1833
1834   NOTE1. rfc1122 clearly states that advertised MSS
1835   DOES NOT include either tcp or ip options.
1836
1837   NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1838   are READ ONLY outside this function.		--ANK (980731)
1839 */
1840unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
1841{
1842	struct tcp_sock *tp = tcp_sk(sk);
1843	struct inet_connection_sock *icsk = inet_csk(sk);
1844	int mss_now;
1845
1846	if (icsk->icsk_mtup.search_high > pmtu)
1847		icsk->icsk_mtup.search_high = pmtu;
1848
1849	mss_now = tcp_mtu_to_mss(sk, pmtu);
1850	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
1851
1852	/* And store cached results */
1853	icsk->icsk_pmtu_cookie = pmtu;
1854	if (icsk->icsk_mtup.enabled)
1855		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1856	tp->mss_cache = mss_now;
1857
1858	return mss_now;
1859}
1860EXPORT_SYMBOL(tcp_sync_mss);
1861
1862/* Compute the current effective MSS, taking SACKs and IP options,
1863 * and even PMTU discovery events into account.
1864 */
1865unsigned int tcp_current_mss(struct sock *sk)
1866{
1867	const struct tcp_sock *tp = tcp_sk(sk);
1868	const struct dst_entry *dst = __sk_dst_get(sk);
1869	u32 mss_now;
1870	unsigned int header_len;
1871	struct tcp_out_options opts;
1872	struct tcp_key key;
1873
1874	mss_now = tp->mss_cache;
1875
1876	if (dst) {
1877		u32 mtu = dst_mtu(dst);
1878		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
1879			mss_now = tcp_sync_mss(sk, mtu);
1880	}
1881	tcp_get_current_key(sk, &key);
1882	header_len = tcp_established_options(sk, NULL, &opts, &key) +
1883		     sizeof(struct tcphdr);
1884	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
1885	 * some common options. If this is an odd packet (because we have SACK
1886	 * blocks etc) then our calculated header_len will be different, and
1887	 * we have to adjust mss_now correspondingly */
1888	if (header_len != tp->tcp_header_len) {
1889		int delta = (int) header_len - tp->tcp_header_len;
1890		mss_now -= delta;
1891	}
1892
1893	return mss_now;
1894}
1895
1896/* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
1897 * As additional protections, we do not touch cwnd in retransmission phases,
1898 * nor if the application has hit its sndbuf limit recently.
1899 */
1900static void tcp_cwnd_application_limited(struct sock *sk)
1901{
1902	struct tcp_sock *tp = tcp_sk(sk);
1903
1904	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
1905	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1906		/* Limited by application or receiver window. */
1907		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
1908		u32 win_used = max(tp->snd_cwnd_used, init_win);
1909		if (win_used < tcp_snd_cwnd(tp)) {
1910			tp->snd_ssthresh = tcp_current_ssthresh(sk);
1911			tcp_snd_cwnd_set(tp, (tcp_snd_cwnd(tp) + win_used) >> 1);
1912		}
1913		tp->snd_cwnd_used = 0;
1914	}
1915	tp->snd_cwnd_stamp = tcp_jiffies32;
1916}
1917
1918static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1919{
1920	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1921	struct tcp_sock *tp = tcp_sk(sk);
1922
1923	/* Track the strongest available signal of the degree to which the cwnd
1924	 * is fully utilized. If cwnd-limited then remember that fact for the
1925	 * current window. If not cwnd-limited then track the maximum number of
1926	 * outstanding packets in the current window. (If cwnd-limited then we
1927	 * chose to not update tp->max_packets_out to avoid an extra else
1928	 * clause with no functional impact.)
1929	 */
1930	if (!before(tp->snd_una, tp->cwnd_usage_seq) ||
1931	    is_cwnd_limited ||
1932	    (!tp->is_cwnd_limited &&
1933	     tp->packets_out > tp->max_packets_out)) {
1934		tp->is_cwnd_limited = is_cwnd_limited;
1935		tp->max_packets_out = tp->packets_out;
1936		tp->cwnd_usage_seq = tp->snd_nxt;
1937	}
1938
1939	if (tcp_is_cwnd_limited(sk)) {
1940		/* Network is fed fully. */
1941		tp->snd_cwnd_used = 0;
1942		tp->snd_cwnd_stamp = tcp_jiffies32;
1943	} else {
1944		/* Network starves. */
1945		if (tp->packets_out > tp->snd_cwnd_used)
1946			tp->snd_cwnd_used = tp->packets_out;
1947
1948		if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) &&
1949		    (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
1950		    !ca_ops->cong_control)
1951			tcp_cwnd_application_limited(sk);
1952
1953		/* The following conditions together indicate the starvation
1954		 * is caused by insufficient sender buffer:
1955		 * 1) just sent some data (see tcp_write_xmit)
1956		 * 2) not cwnd limited (this else condition)
1957		 * 3) no more data to send (tcp_write_queue_empty())
1958		 * 4) application is hitting buffer limit (SOCK_NOSPACE)
1959		 */
1960		if (tcp_write_queue_empty(sk) && sk->sk_socket &&
1961		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
1962		    (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
1963			tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
1964	}
1965}
1966
1967/* Minshall's variant of the Nagle send check. */
1968static bool tcp_minshall_check(const struct tcp_sock *tp)
1969{
1970	return after(tp->snd_sml, tp->snd_una) &&
1971		!after(tp->snd_sml, tp->snd_nxt);
1972}
1973
1974/* Update snd_sml if this skb is under mss
1975 * Note that a TSO packet might end with a sub-mss segment
1976 * The test is really :
1977 * if ((skb->len % mss) != 0)
1978 *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1979 * But we can avoid doing the divide again given we already have
1980 *  skb_pcount = skb->len / mss_now
1981 */
1982static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1983				const struct sk_buff *skb)
1984{
1985	if (skb->len < tcp_skb_pcount(skb) * mss_now)
1986		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1987}
1988
1989/* Return false if the packet can be sent now without violating Nagle's rules:
1990 * 1. It is full sized. (provided by caller in %partial bool)
1991 * 2. Or it contains FIN. (already checked by caller)
1992 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1993 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1994 *    With Minshall's modification: all sent small packets are ACKed.
1995 */
1996static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1997			    int nonagle)
1998{
1999	return partial &&
2000		((nonagle & TCP_NAGLE_CORK) ||
2001		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
2002}
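
/* Example: with nonagle == 0, a sub-mss tail segment is held back for
 * as long as an earlier small packet (tracked via tp->snd_sml) remains
 * unacknowledged; once it is ACKed, tcp_minshall_check() turns false
 * and the partial segment may be sent.
 */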
2003
2004/* Return how many segs we'd like on a TSO packet,
2005 * depending on current pacing rate, and how close the peer is.
2006 *
2007 * Rationale is:
2008 * - For close peers, we rather send bigger packets to reduce
2009 *   cpu costs, because occasional losses will be repaired fast.
2010 * - For long distance/rtt flows, we would like to get ACK clocking
2011 *   with 1 ACK per ms.
2012 *
2013 * Use min_rtt to help adapt TSO burst size, with smaller min_rtt resulting
2014 * in bigger TSO bursts. We cut the RTT-based allowance in half
2015 * for every 2^9 usec (aka 512 us) of RTT, so that the RTT-based allowance
2016 * is below 1500 bytes after 6 * ~500 usec = 3ms.
2017 */
2018static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
2019			    int min_tso_segs)
2020{
2021	unsigned long bytes;
2022	u32 r;
2023
2024	bytes = READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift);
2025
2026	r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log);
2027	if (r < BITS_PER_TYPE(sk->sk_gso_max_size))
2028		bytes += sk->sk_gso_max_size >> r;
2029
2030	bytes = min_t(unsigned long, bytes, sk->sk_gso_max_size);
2031
2032	return max_t(u32, bytes / mss_now, min_tso_segs);
2033}
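
/* Worked example, assuming default sysctls (sk_pacing_shift = 10,
 * tcp_tso_rtt_log = 9) and a legacy 64KB sk_gso_max_size: at
 * sk_pacing_rate = 125 MB/s (1 Gbit/s), bytes = 125e6 >> 10 ~= 122 KB.
 * A flow with min_rtt = 1024 usec gets r = 2, adding 64KB >> 2 = 16 KB,
 * after which the clamp to sk_gso_max_size leaves 65536 bytes, i.e.
 * 65536 / 1460 = 44 segments per TSO packet.
 */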
2034
2035/* Return the number of segments we want in the skb we are transmitting.
2036 * See if congestion control module wants to decide; otherwise, autosize.
2037 */
2038static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
2039{
2040	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
2041	u32 min_tso, tso_segs;
2042
2043	min_tso = ca_ops->min_tso_segs ?
2044			ca_ops->min_tso_segs(sk) :
2045			READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
2046
2047	tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
2048	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
2049}
2050
2051/* Returns the portion of skb which can be sent right away */
2052static unsigned int tcp_mss_split_point(const struct sock *sk,
2053					const struct sk_buff *skb,
2054					unsigned int mss_now,
2055					unsigned int max_segs,
2056					int nonagle)
2057{
2058	const struct tcp_sock *tp = tcp_sk(sk);
2059	u32 partial, needed, window, max_len;
2060
2061	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
2062	max_len = mss_now * max_segs;
2063
2064	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
2065		return max_len;
2066
2067	needed = min(skb->len, window);
2068
2069	if (max_len <= needed)
2070		return max_len;
2071
2072	partial = needed % mss_now;
2073	/* If last segment is not a full MSS, check if Nagle rules allow us
2074	 * to include this last segment in this skb.
2075	 * Otherwise, we'll split the skb at last MSS boundary
2076	 */
2077	if (tcp_nagle_check(partial != 0, tp, nonagle))
2078		return needed - partial;
2079
2080	return needed;
2081}
2082
2083/* Can at least one segment of SKB be sent right now, according to the
2084 * congestion window rules?  If so, return how many segments are allowed.
2085 */
2086static u32 tcp_cwnd_test(const struct tcp_sock *tp)
2087{
2088	u32 in_flight, cwnd, halfcwnd;
2089
2090	in_flight = tcp_packets_in_flight(tp);
2091	cwnd = tcp_snd_cwnd(tp);
2092	if (in_flight >= cwnd)
2093		return 0;
2094
2095	/* For better scheduling, ensure we have at least
2096	 * 2 GSO packets in flight.
2097	 */
2098	halfcwnd = max(cwnd >> 1, 1U);
2099	return min(halfcwnd, cwnd - in_flight);
2100}
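
/* Example: with cwnd = 10 and in_flight = 7, halfcwnd = 5 and the
 * result is min(5, 10 - 7) = 3 allowed segments; once in_flight
 * reaches 10, the test returns 0 and we must wait for ACKs.
 */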
2101
2102/* Initialize TSO state of a skb.
2103 * This must be invoked the first time we consider transmitting
2104 * SKB onto the wire.
2105 */
2106static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
2107{
2108	int tso_segs = tcp_skb_pcount(skb);
2109
2110	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now))
2111		return tcp_set_skb_tso_segs(skb, mss_now);
2112
2113	return tso_segs;
2114}
2115
2116
2117/* Return true if the Nagle test allows this packet to be
2118 * sent now.
2119 */
2120static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
2121				  unsigned int cur_mss, int nonagle)
2122{
2123	/* The Nagle rule does not apply to frames that sit in the middle of
2124	 * the write_queue (they have no chance to get new data).
2125	 *
2126	 * This is implemented in the callers, where they modify the 'nonagle'
2127	 * argument based upon the location of SKB in the send queue.
2128	 */
2129	if (nonagle & TCP_NAGLE_PUSH)
2130		return true;
2131
2132	/* Don't use the Nagle rule for urgent data (or for the final FIN). */
2133	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
2134		return true;
2135
2136	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
2137		return true;
2138
2139	return false;
2140}
2141
2142/* Does at least the first segment of SKB fit into the send window? */
2143static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
2144			     const struct sk_buff *skb,
2145			     unsigned int cur_mss)
2146{
2147	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
2148
2149	if (skb->len > cur_mss)
2150		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
2151
2152	return !after(end_seq, tcp_wnd_end(tp));
2153}
2154
2155/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
2156 * which is put after SKB on the list.  It is very much like
2157 * tcp_fragment() except that it may make several kinds of assumptions
2158 * in order to speed up the splitting operation.  In particular, we
2159 * know that all the data is in scatter-gather pages, and that the
2160 * packet has never been sent out before (and thus is not cloned).
2161 */
2162static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
2163			unsigned int mss_now, gfp_t gfp)
2164{
2165	int nlen = skb->len - len;
2166	struct sk_buff *buff;
2167	u8 flags;
2168
2169	/* All of a TSO frame must be composed of paged data.  */
2170	DEBUG_NET_WARN_ON_ONCE(skb->len != skb->data_len);
2171
2172	buff = tcp_stream_alloc_skb(sk, gfp, true);
2173	if (unlikely(!buff))
2174		return -ENOMEM;
2175	skb_copy_decrypted(buff, skb);
2176	mptcp_skb_ext_copy(buff, skb);
2177
2178	sk_wmem_queued_add(sk, buff->truesize);
2179	sk_mem_charge(sk, buff->truesize);
2180	buff->truesize += nlen;
2181	skb->truesize -= nlen;
2182
2183	/* Correct the sequence numbers. */
2184	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
2185	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
2186	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
2187
2188	/* PSH and FIN should only be set in the second packet. */
2189	flags = TCP_SKB_CB(skb)->tcp_flags;
2190	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
2191	TCP_SKB_CB(buff)->tcp_flags = flags;
2192
2193	tcp_skb_fragment_eor(skb, buff);
2194
2195	skb_split(skb, buff, len);
2196	tcp_fragment_tstamp(skb, buff);
2197
2198	/* Fix up tso_factor for both original and new SKB.  */
2199	tcp_set_skb_tso_segs(skb, mss_now);
2200	tcp_set_skb_tso_segs(buff, mss_now);
2201
2202	/* Link BUFF into the send queue. */
2203	__skb_header_release(buff);
2204	tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE);
2205
2206	return 0;
2207}
2208
2209/* Try to defer sending, if possible, in order to minimize the amount
2210 * of TSO splitting we do.  View it as a kind of TSO Nagle test.
2211 *
2212 * This algorithm is from John Heffner.
2213 */
2214static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
2215				 bool *is_cwnd_limited,
2216				 bool *is_rwnd_limited,
2217				 u32 max_segs)
2218{
2219	const struct inet_connection_sock *icsk = inet_csk(sk);
2220	u32 send_win, cong_win, limit, in_flight;
2221	struct tcp_sock *tp = tcp_sk(sk);
2222	struct sk_buff *head;
2223	int win_divisor;
2224	s64 delta;
2225
2226	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
2227		goto send_now;
2228
2229	/* Avoid bursty behavior by allowing defer
2230	 * only if the last write was recent (1 ms).
2231	 * Note that tp->tcp_wstamp_ns can be in the future if we have
2232	 * packets waiting in a qdisc or device for EDT delivery.
2233	 */
2234	delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC;
2235	if (delta > 0)
2236		goto send_now;
2237
2238	in_flight = tcp_packets_in_flight(tp);
2239
2240	BUG_ON(tcp_skb_pcount(skb) <= 1);
2241	BUG_ON(tcp_snd_cwnd(tp) <= in_flight);
2242
2243	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
2244
2245	/* From in_flight test above, we know that cwnd > in_flight.  */
2246	cong_win = (tcp_snd_cwnd(tp) - in_flight) * tp->mss_cache;
2247
2248	limit = min(send_win, cong_win);
2249
2250	/* If a full-sized TSO skb can be sent, do it. */
2251	if (limit >= max_segs * tp->mss_cache)
2252		goto send_now;
2253
2254	/* Middle of queue won't get any more data; fully sendable already? */
2255	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
2256		goto send_now;
2257
2258	win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
2259	if (win_divisor) {
2260		u32 chunk = min(tp->snd_wnd, tcp_snd_cwnd(tp) * tp->mss_cache);
2261
2262		/* If at least some fraction of a window is available,
2263		 * just use it.
2264		 */
2265		chunk /= win_divisor;
2266		if (limit >= chunk)
2267			goto send_now;
2268	} else {
2269		/* Different approach, try not to defer past a single
2270		 * ACK.  Receiver should ACK every other full sized
2271		 * frame, so if we have space for more than 3 frames
2272		 * then send now.
2273		 */
2274		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
2275			goto send_now;
2276	}
2277
2278	/* TODO : use tsorted_sent_queue ? */
2279	head = tcp_rtx_queue_head(sk);
2280	if (!head)
2281		goto send_now;
2282	delta = tp->tcp_clock_cache - head->tstamp;
2283	/* If next ACK is likely to come too late (half srtt), do not defer */
2284	if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
2285		goto send_now;
2286
2287	/* Ok, it looks like it is advisable to defer.
2288	 * Three cases are tracked :
2289	 * 1) We are cwnd-limited
2290	 * 2) We are rwnd-limited
2291	 * 3) We are application limited.
2292	 */
2293	if (cong_win < send_win) {
2294		if (cong_win <= skb->len) {
2295			*is_cwnd_limited = true;
2296			return true;
2297		}
2298	} else {
2299		if (send_win <= skb->len) {
2300			*is_rwnd_limited = true;
2301			return true;
2302		}
2303	}
2304
2305	/* If this packet won't get more data, do not wait. */
2306	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) ||
2307	    TCP_SKB_CB(skb)->eor)
2308		goto send_now;
2309
2310	return true;
2311
2312send_now:
2313	return false;
2314}
2315
2316static inline void tcp_mtu_check_reprobe(struct sock *sk)
2317{
2318	struct inet_connection_sock *icsk = inet_csk(sk);
2319	struct tcp_sock *tp = tcp_sk(sk);
2320	struct net *net = sock_net(sk);
2321	u32 interval;
2322	s32 delta;
2323
2324	interval = READ_ONCE(net->ipv4.sysctl_tcp_probe_interval);
2325	delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
2326	if (unlikely(delta >= interval * HZ)) {
2327		int mss = tcp_current_mss(sk);
2328
2329		/* Update current search range */
2330		icsk->icsk_mtup.probe_size = 0;
2331		icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
2332			sizeof(struct tcphdr) +
2333			icsk->icsk_af_ops->net_header_len;
2334		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
2335
2336		/* Update probe time stamp */
2337		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
2338	}
2339}
2340
2341static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
2342{
2343	struct sk_buff *skb, *next;
2344
2345	skb = tcp_send_head(sk);
2346	tcp_for_write_queue_from_safe(skb, next, sk) {
2347		if (len <= skb->len)
2348			break;
2349
2350		if (tcp_has_tx_tstamp(skb) || !tcp_skb_can_collapse(skb, next))
2351			return false;
2352
2353		len -= skb->len;
2354	}
2355
2356	return true;
2357}
2358
2359static int tcp_clone_payload(struct sock *sk, struct sk_buff *to,
2360			     int probe_size)
2361{
2362	skb_frag_t *lastfrag = NULL, *fragto = skb_shinfo(to)->frags;
2363	int i, todo, len = 0, nr_frags = 0;
2364	const struct sk_buff *skb;
2365
2366	if (!sk_wmem_schedule(sk, to->truesize + probe_size))
2367		return -ENOMEM;
2368
2369	skb_queue_walk(&sk->sk_write_queue, skb) {
2370		const skb_frag_t *fragfrom = skb_shinfo(skb)->frags;
2371
2372		if (skb_headlen(skb))
2373			return -EINVAL;
2374
2375		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, fragfrom++) {
2376			if (len >= probe_size)
2377				goto commit;
2378			todo = min_t(int, skb_frag_size(fragfrom),
2379				     probe_size - len);
2380			len += todo;
2381			if (lastfrag &&
2382			    skb_frag_page(fragfrom) == skb_frag_page(lastfrag) &&
2383			    skb_frag_off(fragfrom) == skb_frag_off(lastfrag) +
2384						      skb_frag_size(lastfrag)) {
2385				skb_frag_size_add(lastfrag, todo);
2386				continue;
2387			}
2388			if (unlikely(nr_frags == MAX_SKB_FRAGS))
2389				return -E2BIG;
2390			skb_frag_page_copy(fragto, fragfrom);
2391			skb_frag_off_copy(fragto, fragfrom);
2392			skb_frag_size_set(fragto, todo);
2393			nr_frags++;
2394			lastfrag = fragto++;
2395		}
2396	}
2397commit:
2398	WARN_ON_ONCE(len != probe_size);
2399	for (i = 0; i < nr_frags; i++)
2400		skb_frag_ref(to, i);
2401
2402	skb_shinfo(to)->nr_frags = nr_frags;
2403	to->truesize += probe_size;
2404	to->len += probe_size;
2405	to->data_len += probe_size;
2406	__skb_header_release(to);
2407	return 0;
2408}
2409
2410/* tcp_mtu_probe() and tcp_grow_skb() can both eat an skb (src) if
2411 * all its payload was moved to another one (dst).
2412 * Make sure to transfer tcp_flags, eor, and tstamp.
2413 */
2414static void tcp_eat_one_skb(struct sock *sk,
2415			    struct sk_buff *dst,
2416			    struct sk_buff *src)
2417{
2418	TCP_SKB_CB(dst)->tcp_flags |= TCP_SKB_CB(src)->tcp_flags;
2419	TCP_SKB_CB(dst)->eor = TCP_SKB_CB(src)->eor;
2420	tcp_skb_collapse_tstamp(dst, src);
2421	tcp_unlink_write_queue(src, sk);
2422	tcp_wmem_free_skb(sk, src);
2423}
2424
2425/* Create a new MTU probe if we are ready.
2426 * MTU probing regularly attempts to increase the path MTU by
2427 * deliberately sending larger packets.  This discovers routing
2428 * changes resulting in larger path MTUs.
2429 *
2430 * Returns 0 if we should wait to probe (no cwnd available),
2431 *         1 if a probe was sent,
2432 *         -1 otherwise
2433 */
2434static int tcp_mtu_probe(struct sock *sk)
2435{
2436	struct inet_connection_sock *icsk = inet_csk(sk);
2437	struct tcp_sock *tp = tcp_sk(sk);
2438	struct sk_buff *skb, *nskb, *next;
2439	struct net *net = sock_net(sk);
2440	int probe_size;
2441	int size_needed;
2442	int copy, len;
2443	int mss_now;
2444	int interval;
2445
2446	/* Not currently probing/verifying,
2447	 * not in recovery,
2448	 * have enough cwnd, and
2449	 * not SACKing (the variable headers throw things off)
2450	 */
2451	if (likely(!icsk->icsk_mtup.enabled ||
2452		   icsk->icsk_mtup.probe_size ||
2453		   inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
2454		   tcp_snd_cwnd(tp) < 11 ||
2455		   tp->rx_opt.num_sacks || tp->rx_opt.dsack))
2456		return -1;
2457
2458	/* Use binary search for probe_size between tcp_mss_base
2459	 * and the current mss_clamp. If (search_high - search_low) is
2460	 * smaller than a threshold, back off from probing.
2461	 */
2462	mss_now = tcp_current_mss(sk);
2463	probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
2464				    icsk->icsk_mtup.search_low) >> 1);
2465	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
2466	interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
2467	/* When misfortune happens, we are actively reprobing and the
2468	 * reprobe timer has expired. We stick with the current probing
2469	 * process by not resetting the search range to its original value.
2470	 */
2471	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
2472	    interval < READ_ONCE(net->ipv4.sysctl_tcp_probe_threshold)) {
2473		/* Check whether enough time has elapsed for
2474		 * another round of probing.
2475		 */
2476		tcp_mtu_check_reprobe(sk);
2477		return -1;
2478	}
2479
2480	/* Have enough data in the send queue to probe? */
2481	if (tp->write_seq - tp->snd_nxt < size_needed)
2482		return -1;
2483
2484	if (tp->snd_wnd < size_needed)
2485		return -1;
2486	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
2487		return 0;
2488
2489	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
2490	if (tcp_packets_in_flight(tp) + 2 > tcp_snd_cwnd(tp)) {
2491		if (!tcp_packets_in_flight(tp))
2492			return -1;
2493		else
2494			return 0;
2495	}
2496
2497	if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
2498		return -1;
2499
2500	/* We're allowed to probe.  Build it now. */
2501	nskb = tcp_stream_alloc_skb(sk, GFP_ATOMIC, false);
2502	if (!nskb)
2503		return -1;
2504
2505	/* build the payload, and be prepared to abort if this fails. */
2506	if (tcp_clone_payload(sk, nskb, probe_size)) {
2507		tcp_skb_tsorted_anchor_cleanup(nskb);
2508		consume_skb(nskb);
2509		return -1;
2510	}
2511	sk_wmem_queued_add(sk, nskb->truesize);
2512	sk_mem_charge(sk, nskb->truesize);
2513
2514	skb = tcp_send_head(sk);
2515	skb_copy_decrypted(nskb, skb);
2516	mptcp_skb_ext_copy(nskb, skb);
2517
2518	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
2519	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
2520	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
2521
2522	tcp_insert_write_queue_before(nskb, skb, sk);
2523	tcp_highest_sack_replace(sk, skb, nskb);
2524
2525	len = 0;
2526	tcp_for_write_queue_from_safe(skb, next, sk) {
2527		copy = min_t(int, skb->len, probe_size - len);
2528
2529		if (skb->len <= copy) {
2530			tcp_eat_one_skb(sk, nskb, skb);
2531		} else {
2532			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
2533						   ~(TCPHDR_FIN|TCPHDR_PSH);
2534			__pskb_trim_head(skb, copy);
2535			tcp_set_skb_tso_segs(skb, mss_now);
2536			TCP_SKB_CB(skb)->seq += copy;
2537		}
2538
2539		len += copy;
2540
2541		if (len >= probe_size)
2542			break;
2543	}
2544	tcp_init_tso_segs(nskb, nskb->len);
2545
2546	/* We're ready to send.  If this fails, the probe will
2547	 * be resegmented into mss-sized pieces by tcp_write_xmit().
2548	 */
2549	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
2550		/* Decrement cwnd here because we are sending
2551		 * effectively two packets. */
2552		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1);
2553		tcp_event_new_data_sent(sk, nskb);
2554
2555		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
2556		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
2557		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
2558
2559		return 1;
2560	}
2561
2562	return -1;
2563}
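
/* Probe sizing example, assuming IPv4 and no extra TCP option headers
 * (tcp_header_len = 20): with a base MSS of 1024, search_low =
 * 1024 + 40 = 1064; with mss_clamp = 1460, search_high = 1500. The
 * first probe then targets an MTU of (1064 + 1500) / 2 = 1282, i.e.
 * probe_size = 1242 bytes, and with reordering = 3 and
 * mss_cache = 1460, size_needed = 1242 + 4 * 1460 = 7082 bytes must
 * already be queued before we probe.
 */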
2564
2565static bool tcp_pacing_check(struct sock *sk)
2566{
2567	struct tcp_sock *tp = tcp_sk(sk);
2568
2569	if (!tcp_needs_internal_pacing(sk))
2570		return false;
2571
2572	if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache)
2573		return false;
2574
2575	if (!hrtimer_is_queued(&tp->pacing_timer)) {
2576		hrtimer_start(&tp->pacing_timer,
2577			      ns_to_ktime(tp->tcp_wstamp_ns),
2578			      HRTIMER_MODE_ABS_PINNED_SOFT);
2579		sock_hold(sk);
2580	}
2581	return true;
2582}
2583
2584static bool tcp_rtx_queue_empty_or_single_skb(const struct sock *sk)
2585{
2586	const struct rb_node *node = sk->tcp_rtx_queue.rb_node;
2587
2588	/* No skb in the rtx queue. */
2589	if (!node)
2590		return true;
2591
2592	/* Only one skb in rtx queue. */
2593	return !node->rb_left && !node->rb_right;
2594}
2595
2596/* TCP Small Queues :
2597 * Control number of packets in qdisc/devices to two packets / or ~1 ms.
2598 * (These limits are doubled for retransmits)
2599 * This allows for :
2600 *  - better RTT estimation and ACK scheduling
2601 *  - faster recovery
2602 *  - high rates
2603 * Alas, some drivers / subsystems require a fair amount
2604 * of queued bytes to ensure line rate.
2605 * One example is wifi aggregation (802.11 AMPDU)
2606 */
2607static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2608				  unsigned int factor)
2609{
2610	unsigned long limit;
2611
2612	limit = max_t(unsigned long,
2613		      2 * skb->truesize,
2614		      READ_ONCE(sk->sk_pacing_rate) >> READ_ONCE(sk->sk_pacing_shift));
2615	if (sk->sk_pacing_status == SK_PACING_NONE)
2616		limit = min_t(unsigned long, limit,
2617			      READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes));
2618	limit <<= factor;
2619
2620	if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
2621	    tcp_sk(sk)->tcp_tx_delay) {
2622		u64 extra_bytes = (u64)READ_ONCE(sk->sk_pacing_rate) *
2623				  tcp_sk(sk)->tcp_tx_delay;
2624
2625		/* TSQ is based on skb truesize sum (sk_wmem_alloc), so we
2626		 * approximate our needs assuming an ~100% skb->truesize overhead.
2627		 * USEC_PER_SEC is approximated by 2^20.
2628		 * do_div(extra_bytes, USEC_PER_SEC/2) is replaced by a right shift.
2629		 */
2630		extra_bytes >>= (20 - 1);
2631		limit += extra_bytes;
2632	}
2633	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
2634		/* Always send skb if rtx queue is empty or has one skb.
2635		 * No need to wait for TX completion to call us back,
2636		 * after softirq/tasklet schedule.
2637		 * This helps when TX completions are delayed too much.
2638		 */
2639		if (tcp_rtx_queue_empty_or_single_skb(sk))
2640			return false;
2641
2642		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2643		/* It is possible TX completion already happened
2644		 * before we set TSQ_THROTTLED, so we must
2645		 * test the condition again.
2646		 */
2647		smp_mb__after_atomic();
2648		if (refcount_read(&sk->sk_wmem_alloc) > limit)
2649			return true;
2650	}
2651	return false;
2652}
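/* Worked example for the TSQ limit above (assumed values): with
 * sk_pacing_rate = 125 MB/s (~1 Gbit/s) and the default
 * sk_pacing_shift = 10, rate >> shift is ~122 KB, i.e. roughly 1 ms
 * worth of bytes may sit in qdisc/device queues; factor = 1, used for
 * retransmits, doubles this to ~244 KB.  When no pacing is in effect
 * (SK_PACING_NONE), the result is further capped by
 * sysctl_tcp_limit_output_bytes.  With tcp_tx_delay = 1000 us,
 * extra_bytes = (125e6 * 1000) >> 19 is ~238 KB, i.e. roughly
 * rate * delay * 2 / USEC_PER_SEC including the assumed ~100%
 * truesize overhead.
 */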
2653
2654static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
2655{
2656	const u32 now = tcp_jiffies32;
2657	enum tcp_chrono old = tp->chrono_type;
2658
2659	if (old > TCP_CHRONO_UNSPEC)
2660		tp->chrono_stat[old - 1] += now - tp->chrono_start;
2661	tp->chrono_start = now;
2662	tp->chrono_type = new;
2663}
2664
2665void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
2666{
2667	struct tcp_sock *tp = tcp_sk(sk);
2668
2669	/* If there are multiple conditions worthy of tracking in a
2670	 * chronograph then the highest priority enum takes precedence
2671	 * over the other conditions. So if something "more interesting"
2672	 * starts happening, we stop the previous chrono and start a new one.
2673	 */
2674	if (type > tp->chrono_type)
2675		tcp_chrono_set(tp, type);
2676}
2677
2678void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
2679{
2680	struct tcp_sock *tp = tcp_sk(sk);
2681
2682
2683	/* There are multiple conditions worthy of tracking in a
2684	 * chronograph, so that the highest priority enum takes
2685	 * precedence over the other conditions (see tcp_chrono_start).
2686	 * If a condition stops, we only stop chrono tracking if
2687	 * it's the "most interesting" or current chrono we are
2688	 * tracking, and start the busy chrono if we have pending data.
2689	 */
2690	if (tcp_rtx_and_write_queues_empty(sk))
2691		tcp_chrono_set(tp, TCP_CHRONO_UNSPEC);
2692	else if (type == tp->chrono_type)
2693		tcp_chrono_set(tp, TCP_CHRONO_BUSY);
2694}
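/* Example of the precedence rule above: the enum order is
 * TCP_CHRONO_UNSPEC < TCP_CHRONO_BUSY < TCP_CHRONO_RWND_LIMITED <
 * TCP_CHRONO_SNDBUF_LIMITED.  While an RWND_LIMITED chrono runs,
 * tcp_chrono_start(sk, TCP_CHRONO_BUSY) is a no-op; stopping
 * RWND_LIMITED with data still queued falls back to the BUSY chrono,
 * and stopping it with empty queues ends tracking (UNSPEC).
 */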
2695
2696/* First skb in the write queue is smaller than ideal packet size.
2697 * Check if we can move payload from the second skb in the queue.
2698 */
2699static void tcp_grow_skb(struct sock *sk, struct sk_buff *skb, int amount)
2700{
2701	struct sk_buff *next_skb = skb->next;
2702	unsigned int nlen;
2703
2704	if (tcp_skb_is_last(sk, skb))
2705		return;
2706
2707	if (!tcp_skb_can_collapse(skb, next_skb))
2708		return;
2709
2710	nlen = min_t(u32, amount, next_skb->len);
2711	if (!nlen || !skb_shift(skb, next_skb, nlen))
2712		return;
2713
2714	TCP_SKB_CB(skb)->end_seq += nlen;
2715	TCP_SKB_CB(next_skb)->seq += nlen;
2716
2717	if (!next_skb->len) {
2718		/* In case FIN is set, we need to update end_seq */
2719		TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
2720
2721		tcp_eat_one_skb(sk, skb, next_skb);
2722	}
2723}
2724
2725/* This routine writes packets to the network.  It advances the
2726 * send_head.  This happens as incoming acks open up the remote
2727 * window for us.
2728 *
2729 * LARGESEND note: !tcp_urg_mode is overkill, only frames between
2730 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2731 * account rare use of URG, this is not a big flaw.
2732 *
2733 * Send at most one packet when push_one > 0. Temporarily ignore
2734 * cwnd limit to force at most one packet out when push_one == 2.
2735	 *
2736 * Returns true, if no segments are in flight and we have queued segments,
2737 * but cannot send anything now because of SWS or another problem.
2738 */
2739static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2740			   int push_one, gfp_t gfp)
2741{
2742	struct tcp_sock *tp = tcp_sk(sk);
2743	struct sk_buff *skb;
2744	unsigned int tso_segs, sent_pkts;
2745	u32 cwnd_quota, max_segs;
2746	int result;
2747	bool is_cwnd_limited = false, is_rwnd_limited = false;
2748
2749	sent_pkts = 0;
2750
2751	tcp_mstamp_refresh(tp);
2752	if (!push_one) {
2753		/* Do MTU probing. */
2754		result = tcp_mtu_probe(sk);
2755		if (!result) {
2756			return false;
2757		} else if (result > 0) {
2758			sent_pkts = 1;
2759		}
2760	}
2761
2762	max_segs = tcp_tso_segs(sk, mss_now);
2763	while ((skb = tcp_send_head(sk))) {
2764		unsigned int limit;
2765		int missing_bytes;
2766
2767		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
2768			/* "skb_mstamp_ns" is used as a start point for the retransmit timer */
2769			tp->tcp_wstamp_ns = tp->tcp_clock_cache;
2770			skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC);
2771			list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
2772			tcp_init_tso_segs(skb, mss_now);
2773			goto repair; /* Skip network transmission */
2774		}
2775
2776		if (tcp_pacing_check(sk))
2777			break;
2778
2779		cwnd_quota = tcp_cwnd_test(tp);
2780		if (!cwnd_quota) {
2781			if (push_one == 2)
2782				/* Force out a loss probe pkt. */
2783				cwnd_quota = 1;
2784			else
2785				break;
2786		}
2787		cwnd_quota = min(cwnd_quota, max_segs);
2788		missing_bytes = cwnd_quota * mss_now - skb->len;
2789		if (missing_bytes > 0)
2790			tcp_grow_skb(sk, skb, missing_bytes);
2791
2792		tso_segs = tcp_set_skb_tso_segs(skb, mss_now);
2793
2794		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
2795			is_rwnd_limited = true;
2796			break;
2797		}
2798
2799		if (tso_segs == 1) {
2800			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2801						     (tcp_skb_is_last(sk, skb) ?
2802						      nonagle : TCP_NAGLE_PUSH))))
2803				break;
2804		} else {
2805			if (!push_one &&
2806			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
2807						 &is_rwnd_limited, max_segs))
2808				break;
2809		}
2810
2811		limit = mss_now;
2812		if (tso_segs > 1 && !tcp_urg_mode(tp))
2813			limit = tcp_mss_split_point(sk, skb, mss_now,
2814						    cwnd_quota,
2815						    nonagle);
2816
2817		if (skb->len > limit &&
2818		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
2819			break;
2820
2821		if (tcp_small_queue_check(sk, skb, 0))
2822			break;
2823
2824		/* Argh, we hit an empty skb, presumably a thread
2825		 * is sleeping in sendmsg()/sk_stream_wait_memory().
2826		 * We do not want to send a pure-ack packet and have
2827		 * a strange looking rtx queue with empty packet(s).
2828		 */
2829		if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq)
2830			break;
2831
2832		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
2833			break;
2834
2835repair:
2836		/* Advance the send_head.  This one is sent out.
2837		 * This call will increment packets_out.
2838		 */
2839		tcp_event_new_data_sent(sk, skb);
2840
2841		tcp_minshall_update(tp, mss_now, skb);
2842		sent_pkts += tcp_skb_pcount(skb);
2843
2844		if (push_one)
2845			break;
2846	}
2847
2848	if (is_rwnd_limited)
2849		tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
2850	else
2851		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
2852
2853	is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp));
2854	if (likely(sent_pkts || is_cwnd_limited))
2855		tcp_cwnd_validate(sk, is_cwnd_limited);
2856
2857	if (likely(sent_pkts)) {
2858		if (tcp_in_cwnd_reduction(sk))
2859			tp->prr_out += sent_pkts;
2860
2861		/* Send one loss probe per tail loss episode. */
2862		if (push_one != 2)
2863			tcp_schedule_loss_probe(sk, false);
2864		return false;
2865	}
2866	return !tp->packets_out && !tcp_write_queue_empty(sk);
2867}
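/* Worked example for the quota/grow step above (assumed values): with
 * mss_now = 1448, cwnd_quota = 4 and skb->len = 2000, missing_bytes =
 * 4 * 1448 - 2000 = 3792, so tcp_grow_skb() may pull up to 3792 bytes
 * from the next skb before TSO sizing, letting this skb fill the whole
 * quota instead of leaving a runt segment at the tail.
 */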
2868
2869bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
2870{
2871	struct inet_connection_sock *icsk = inet_csk(sk);
2872	struct tcp_sock *tp = tcp_sk(sk);
2873	u32 timeout, timeout_us, rto_delta_us;
2874	int early_retrans;
2875
2876	/* Don't do any loss probe on a Fast Open connection before 3WHS
2877	 * finishes.
2878	 */
2879	if (rcu_access_pointer(tp->fastopen_rsk))
2880		return false;
2881
2882	early_retrans = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_early_retrans);
2883	/* Schedule a loss probe in 2*RTT for SACK capable connections
2884	 * not in loss recovery, that are either limited by cwnd or application.
2885	 */
2886	if ((early_retrans != 3 && early_retrans != 4) ||
2887	    !tp->packets_out || !tcp_is_sack(tp) ||
2888	    (icsk->icsk_ca_state != TCP_CA_Open &&
2889	     icsk->icsk_ca_state != TCP_CA_CWR))
2890		return false;
2891
2892	/* Probe timeout is 2*rtt. Add minimum RTO to account
2893	 * for delayed ack when there's one outstanding packet. If no RTT
2894	 * sample is available then probe after TCP_TIMEOUT_INIT.
2895	 */
2896	if (tp->srtt_us) {
2897		timeout_us = tp->srtt_us >> 2;
2898		if (tp->packets_out == 1)
2899			timeout_us += tcp_rto_min_us(sk);
2900		else
2901			timeout_us += TCP_TIMEOUT_MIN_US;
2902		timeout = usecs_to_jiffies(timeout_us);
2903	} else {
2904		timeout = TCP_TIMEOUT_INIT;
2905	}
2906
2907	/* If the RTO formula yields an earlier time, then use that time. */
2908	rto_delta_us = advancing_rto ?
2909			jiffies_to_usecs(inet_csk(sk)->icsk_rto) :
2910			tcp_rto_delta_us(sk);  /* How far in future is RTO? */
2911	if (rto_delta_us > 0)
2912		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
2913
2914	tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, TCP_RTO_MAX);
2915	return true;
2916}
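/* Worked example for the PTO above (assumed values): tp->srtt_us
 * stores 8 * srtt, so srtt_us >> 2 equals 2 * srtt.  With srtt = 50 ms
 * (srtt_us = 400000), the base timeout is 100 ms; with exactly one
 * packet in flight, the min RTO (200 ms by default) is added to cover
 * a delayed ACK, giving 300 ms.  The result is then clamped so that
 * the probe never fires later than the pending RTO would.
 */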
2917
2918/* Thanks to skb fast clones, we can detect if a prior transmit of
2919 * a packet is still in a qdisc or driver queue.
2920	 * In this case, there is very little point in doing a retransmit!
2921 */
2922static bool skb_still_in_host_queue(struct sock *sk,
2923				    const struct sk_buff *skb)
2924{
2925	if (unlikely(skb_fclone_busy(sk, skb))) {
2926		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2927		smp_mb__after_atomic();
2928		if (skb_fclone_busy(sk, skb)) {
2929			NET_INC_STATS(sock_net(sk),
2930				      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
2931			return true;
2932		}
2933	}
2934	return false;
2935}
2936
2937	/* When probe timeout (PTO) fires, try to send a new segment if possible, else
2938 * retransmit the last segment.
2939 */
2940void tcp_send_loss_probe(struct sock *sk)
2941{
2942	struct tcp_sock *tp = tcp_sk(sk);
2943	struct sk_buff *skb;
2944	int pcount;
2945	int mss = tcp_current_mss(sk);
2946
2947	/* At most one outstanding TLP */
2948	if (tp->tlp_high_seq)
2949		goto rearm_timer;
2950
2951	tp->tlp_retrans = 0;
2952	skb = tcp_send_head(sk);
2953	if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
2954		pcount = tp->packets_out;
2955		tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2956		if (tp->packets_out > pcount)
2957			goto probe_sent;
2958		goto rearm_timer;
2959	}
2960	skb = skb_rb_last(&sk->tcp_rtx_queue);
2961	if (unlikely(!skb)) {
2962		tcp_warn_once(sk, tp->packets_out, "invalid inflight: ");
2963		smp_store_release(&inet_csk(sk)->icsk_pending, 0);
2964		return;
2965	}
2966
2967	if (skb_still_in_host_queue(sk, skb))
2968		goto rearm_timer;
2969
2970	pcount = tcp_skb_pcount(skb);
2971	if (WARN_ON(!pcount))
2972		goto rearm_timer;
2973
2974	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
2975		if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
2976					  (pcount - 1) * mss, mss,
2977					  GFP_ATOMIC)))
2978			goto rearm_timer;
2979		skb = skb_rb_next(skb);
2980	}
2981
2982	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
2983		goto rearm_timer;
2984
2985	if (__tcp_retransmit_skb(sk, skb, 1))
2986		goto rearm_timer;
2987
2988	tp->tlp_retrans = 1;
2989
2990probe_sent:
2991	/* Record snd_nxt for loss detection. */
2992	tp->tlp_high_seq = tp->snd_nxt;
2993
2994	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
2995	/* Reset s.t. tcp_rearm_rto will restart timer from now */
2996	smp_store_release(&inet_csk(sk)->icsk_pending, 0);
2997rearm_timer:
2998	tcp_rearm_rto(sk);
2999}
3000
3001/* Push out any pending frames which were held back due to
3002 * TCP_CORK or attempt at coalescing tiny packets.
3003 * The socket must be locked by the caller.
3004 */
3005void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
3006			       int nonagle)
3007{
3008	/* If we are closed, the bytes will have to remain here.
3009	 * In time closedown will finish, we empty the write queue and
3010	 * all will be happy.
3011	 */
3012	if (unlikely(sk->sk_state == TCP_CLOSE))
3013		return;
3014
3015	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
3016			   sk_gfp_mask(sk, GFP_ATOMIC)))
3017		tcp_check_probe_timer(sk);
3018}
3019
3020	/* Send the _single_ skb sitting at the send head. Use the full
3021	 * __tcp_push_pending_frames() when the probe timer etc. must be set up.
3022 */
3023void tcp_push_one(struct sock *sk, unsigned int mss_now)
3024{
3025	struct sk_buff *skb = tcp_send_head(sk);
3026
3027	BUG_ON(!skb || skb->len < mss_now);
3028
3029	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
3030}
3031
3032/* This function returns the amount that we can raise the
3033 * usable window based on the following constraints
3034 *
3035 * 1. The window can never be shrunk once it is offered (RFC 793)
3036 * 2. We limit memory per socket
3037 *
3038 * RFC 1122:
3039 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
3040	 *  RCV.NXT + RCV.WND fixed until:
3041	 *  RCV.BUFF - RCV.USER - RCV.WND >= min(1/2 RCV.BUFF, MSS)"
3042 *
3043 * i.e. don't raise the right edge of the window until you can raise
3044 * it at least MSS bytes.
3045 *
3046 * Unfortunately, the recommended algorithm breaks header prediction,
3047 * since header prediction assumes th->window stays fixed.
3048 *
3049 * Strictly speaking, keeping th->window fixed violates the receiver
3050 * side SWS prevention criteria. The problem is that under this rule
3051 * a stream of single byte packets will cause the right side of the
3052 * window to always advance by a single byte.
3053 *
3054 * Of course, if the sender implements sender side SWS prevention
3055 * then this will not be a problem.
3056 *
3057 * BSD seems to make the following compromise:
3058 *
3059	 *	If the free space is less than 1/4 of the maximum
3060 *	space available and the free space is less than 1/2 mss,
3061 *	then set the window to 0.
3062 *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
3063 *	Otherwise, just prevent the window from shrinking
3064 *	and from being larger than the largest representable value.
3065 *
3066 * This prevents incremental opening of the window in the regime
3067 * where TCP is limited by the speed of the reader side taking
3068 * data out of the TCP receive queue. It does nothing about
3069 * those cases where the window is constrained on the sender side
3070 * because the pipeline is full.
3071 *
3072 * BSD also seems to "accidentally" limit itself to windows that are a
3073 * multiple of MSS, at least until the free space gets quite small.
3074 * This would appear to be a side effect of the mbuf implementation.
3075 * Combining these two algorithms results in the observed behavior
3076 * of having a fixed window size at almost all times.
3077 *
3078 * Below we obtain similar behavior by forcing the offered window to
3079 * a multiple of the mss when it is feasible to do so.
3080 *
3081 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
3082 * Regular options like TIMESTAMP are taken into account.
3083 */
3084u32 __tcp_select_window(struct sock *sk)
3085{
3086	struct inet_connection_sock *icsk = inet_csk(sk);
3087	struct tcp_sock *tp = tcp_sk(sk);
3088	struct net *net = sock_net(sk);
3089	/* MSS for the peer's data.  Previous versions used mss_clamp
3090	 * here.  I don't know if the value based on our guesses
3091	 * of peer's MSS is better for the performance.  It's more correct
3092	 * but may be worse for the performance because of rcv_mss
3093	 * fluctuations.  --SAW  1998/11/1
3094	 */
3095	int mss = icsk->icsk_ack.rcv_mss;
3096	int free_space = tcp_space(sk);
3097	int allowed_space = tcp_full_space(sk);
3098	int full_space, window;
3099
3100	if (sk_is_mptcp(sk))
3101		mptcp_space(sk, &free_space, &allowed_space);
3102
3103	full_space = min_t(int, tp->window_clamp, allowed_space);
3104
3105	if (unlikely(mss > full_space)) {
3106		mss = full_space;
3107		if (mss <= 0)
3108			return 0;
3109	}
3110
3111	/* Only allow window shrink if the sysctl is enabled and we have
3112	 * a non-zero scaling factor in effect.
3113	 */
3114	if (READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) && tp->rx_opt.rcv_wscale)
3115		goto shrink_window_allowed;
3116
3117	/* do not allow window to shrink */
3118
3119	if (free_space < (full_space >> 1)) {
3120		icsk->icsk_ack.quick = 0;
3121
3122		if (tcp_under_memory_pressure(sk))
3123			tcp_adjust_rcv_ssthresh(sk);
3124
3125		/* free_space might become our new window, make sure we don't
3126		 * increase it due to wscale.
3127		 */
3128		free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
3129
3130		/* if free space is less than mss estimate, or is below 1/16th
3131		 * of the maximum allowed, try to move to zero-window, else
3132		 * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
3133		 * new incoming data is dropped due to memory limits.
3134		 * With a large window, the mss test triggers way too late
3135		 * to announce a zero window in time, before the rmem limit kicks in.
3136		 */
3137		if (free_space < (allowed_space >> 4) || free_space < mss)
3138			return 0;
3139	}
3140
3141	if (free_space > tp->rcv_ssthresh)
3142		free_space = tp->rcv_ssthresh;
3143
3144	/* Don't do rounding if we are using window scaling, since the
3145	 * scaled window will not line up with the MSS boundary anyway.
3146	 */
3147	if (tp->rx_opt.rcv_wscale) {
3148		window = free_space;
3149
3150		/* Advertise enough space so that it won't get scaled away.
3151		 * Important case: prevent zero window announcement if
3152		 * 1<<rcv_wscale > mss.
3153		 */
3154		window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale));
3155	} else {
3156		window = tp->rcv_wnd;
3157		/* Get the largest window that is a nice multiple of mss.
3158		 * Window clamp already applied above.
3159		 * If our current window offering is within 1 mss of the
3160		 * free space we just keep it. This prevents the divide
3161		 * and multiply from happening most of the time.
3162		 * We also don't do any window rounding when the free space
3163		 * is too small.
3164		 */
3165		if (window <= free_space - mss || window > free_space)
3166			window = rounddown(free_space, mss);
3167		else if (mss == full_space &&
3168			 free_space > window + (full_space >> 1))
3169			window = free_space;
3170	}
3171
3172	return window;
3173
3174shrink_window_allowed:
3175	/* new window should always be an exact multiple of scaling factor */
3176	free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
3177
3178	if (free_space < (full_space >> 1)) {
3179		icsk->icsk_ack.quick = 0;
3180
3181		if (tcp_under_memory_pressure(sk))
3182			tcp_adjust_rcv_ssthresh(sk);
3183
3184		/* if free space is too low, return a zero window */
3185		if (free_space < (allowed_space >> 4) || free_space < mss ||
3186			free_space < (1 << tp->rx_opt.rcv_wscale))
3187			return 0;
3188	}
3189
3190	if (free_space > tp->rcv_ssthresh) {
3191		free_space = tp->rcv_ssthresh;
3192		/* new window should always be an exact multiple of scaling factor
3193		 *
3194		 * For this case, we ALIGN "up" (increase free_space) because
3195		 * we know free_space is not zero here, it has been reduced from
3196		 * the memory-based limit, and rcv_ssthresh is not a hard limit
3197		 * (unlike sk_rcvbuf).
3198		 */
3199		free_space = ALIGN(free_space, (1 << tp->rx_opt.rcv_wscale));
3200	}
3201
3202	return free_space;
3203}
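/* Worked example for the no-wscale rounding above (assumed values):
 * with mss = 1460 and free_space = 10000, a current window of 8760
 * satisfies free_space - mss < window <= free_space and is kept as-is,
 * skipping the divide; a stale window of 2920 would instead be
 * recomputed as rounddown(10000, 1460) = 8760.
 */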
3204
3205void tcp_skb_collapse_tstamp(struct sk_buff *skb,
3206			     const struct sk_buff *next_skb)
3207{
3208	if (unlikely(tcp_has_tx_tstamp(next_skb))) {
3209		const struct skb_shared_info *next_shinfo =
3210			skb_shinfo(next_skb);
3211		struct skb_shared_info *shinfo = skb_shinfo(skb);
3212
3213		shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
3214		shinfo->tskey = next_shinfo->tskey;
3215		TCP_SKB_CB(skb)->txstamp_ack |=
3216			TCP_SKB_CB(next_skb)->txstamp_ack;
3217	}
3218}
3219
3220/* Collapses two adjacent SKB's during retransmission. */
3221static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
3222{
3223	struct tcp_sock *tp = tcp_sk(sk);
3224	struct sk_buff *next_skb = skb_rb_next(skb);
3225	int next_skb_size;
3226
3227	next_skb_size = next_skb->len;
3228
3229	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
3230
3231	if (next_skb_size && !tcp_skb_shift(skb, next_skb, 1, next_skb_size))
3232		return false;
3233
3234	tcp_highest_sack_replace(sk, next_skb, skb);
3235
3236	/* Update sequence range on original skb. */
3237	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
3238
3239	/* Merge over control information. This moves PSH/FIN etc. over */
3240	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
3241
3242	/* All done, get rid of second SKB and account for it so
3243	 * packet counting does not break.
3244	 */
3245	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
3246	TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
3247
3248	/* changed transmit queue under us so clear hints */
3249	tcp_clear_retrans_hints_partial(tp);
3250	if (next_skb == tp->retransmit_skb_hint)
3251		tp->retransmit_skb_hint = skb;
3252
3253	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
3254
3255	tcp_skb_collapse_tstamp(skb, next_skb);
3256
3257	tcp_rtx_queue_unlink_and_free(next_skb, sk);
3258	return true;
3259}
3260
3261/* Check if coalescing SKBs is legal. */
3262static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
3263{
3264	if (tcp_skb_pcount(skb) > 1)
3265		return false;
3266	if (skb_cloned(skb))
3267		return false;
3268	if (!skb_frags_readable(skb))
3269		return false;
3270	/* Some heuristics for collapsing over SACK'd could be invented */
3271	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
3272		return false;
3273
3274	return true;
3275}
3276
3277	/* Collapse packets in the retransmit queue to create fewer
3278	 * packets on the wire. This is only done on retransmission.
3279 */
3280static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
3281				     int space)
3282{
3283	struct tcp_sock *tp = tcp_sk(sk);
3284	struct sk_buff *skb = to, *tmp;
3285	bool first = true;
3286
3287	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse))
3288		return;
3289	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
3290		return;
3291
3292	skb_rbtree_walk_from_safe(skb, tmp) {
3293		if (!tcp_can_collapse(sk, skb))
3294			break;
3295
3296		if (!tcp_skb_can_collapse(to, skb))
3297			break;
3298
3299		space -= skb->len;
3300
3301		if (first) {
3302			first = false;
3303			continue;
3304		}
3305
3306		if (space < 0)
3307			break;
3308
3309		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
3310			break;
3311
3312		if (!tcp_collapse_retrans(sk, to))
3313			break;
3314	}
3315}
3316
3317/* This retransmits one SKB.  Policy decisions and retransmit queue
3318 * state updates are done by the caller.  Returns non-zero if an
3319 * error occurred which prevented the send.
3320 */
3321int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
3322{
3323	struct inet_connection_sock *icsk = inet_csk(sk);
3324	struct tcp_sock *tp = tcp_sk(sk);
3325	unsigned int cur_mss;
3326	int diff, len, err;
3327	int avail_wnd;
3328
3329	/* Inconclusive MTU probe */
3330	if (icsk->icsk_mtup.probe_size)
3331		icsk->icsk_mtup.probe_size = 0;
3332
3333	if (skb_still_in_host_queue(sk, skb))
3334		return -EBUSY;
3335
3336start:
3337	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
3338		if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
3339			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
3340			TCP_SKB_CB(skb)->seq++;
3341			goto start;
3342		}
3343		if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
3344			WARN_ON_ONCE(1);
3345			return -EINVAL;
3346		}
3347		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
3348			return -ENOMEM;
3349	}
3350
3351	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
3352		return -EHOSTUNREACH; /* Routing failure or similar. */
3353
3354	cur_mss = tcp_current_mss(sk);
3355	avail_wnd = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
3356
3357	/* If receiver has shrunk his window, and skb is out of
3358	 * new window, do not retransmit it. The exception is the
3359	 * case when the window is shrunk to zero. In this case
3360	 * our retransmit of one segment serves as a zero window probe.
3361	 */
3362	if (avail_wnd <= 0) {
3363		if (TCP_SKB_CB(skb)->seq != tp->snd_una)
3364			return -EAGAIN;
3365		avail_wnd = cur_mss;
3366	}
3367
3368	len = cur_mss * segs;
3369	if (len > avail_wnd) {
3370		len = rounddown(avail_wnd, cur_mss);
3371		if (!len)
3372			len = avail_wnd;
3373	}
3374	if (skb->len > len) {
3375		if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
3376				 cur_mss, GFP_ATOMIC))
3377			return -ENOMEM; /* We'll try again later. */
3378	} else {
3379		if (skb_unclone_keeptruesize(skb, GFP_ATOMIC))
3380			return -ENOMEM;
3381
3382		diff = tcp_skb_pcount(skb);
3383		tcp_set_skb_tso_segs(skb, cur_mss);
3384		diff -= tcp_skb_pcount(skb);
3385		if (diff)
3386			tcp_adjust_pcount(sk, skb, diff);
3387		avail_wnd = min_t(int, avail_wnd, cur_mss);
3388		if (skb->len < avail_wnd)
3389			tcp_retrans_try_collapse(sk, skb, avail_wnd);
3390	}
3391
3392	/* RFC3168, section 6.1.1.1. ECN fallback */
3393	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
3394		tcp_ecn_clear_syn(sk, skb);
3395
3396	/* Update global and local TCP statistics. */
3397	segs = tcp_skb_pcount(skb);
3398	TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
3399	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
3400		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
3401	tp->total_retrans += segs;
3402	tp->bytes_retrans += skb->len;
3403
3404	/* make sure skb->data is aligned on arches that require it
3405	 * and check if ack-trimming & collapsing extended the headroom
3406	 * beyond what csum_start can cover.
3407	 */
3408	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
3409		     skb_headroom(skb) >= 0xFFFF)) {
3410		struct sk_buff *nskb;
3411
3412		tcp_skb_tsorted_save(skb) {
3413			nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
3414			if (nskb) {
3415				nskb->dev = NULL;
3416				err = tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC);
3417			} else {
3418				err = -ENOBUFS;
3419			}
3420		} tcp_skb_tsorted_restore(skb);
3421
3422		if (!err) {
3423			tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns);
3424			tcp_rate_skb_sent(sk, skb);
3425		}
3426	} else {
3427		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3428	}
3429
3430	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG))
3431		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
3432				  TCP_SKB_CB(skb)->seq, segs, err);
3433
3434	if (likely(!err)) {
3435		trace_tcp_retransmit_skb(sk, skb);
3436	} else if (err != -EBUSY) {
3437		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
3438	}
3439
3440	/* To avoid taking spuriously low RTT samples based on a timestamp
3441	 * for a transmit that never happened, always mark EVER_RETRANS
3442	 */
3443	TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
3444
3445	return err;
3446}
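/* Worked example for the length clamp above (assumed values): with
 * cur_mss = 1000, segs = 3 and avail_wnd = 2500, len = 3000 exceeds
 * the window and is rounded down to 2000; an skb longer than that is
 * split via tcp_fragment(), so the retransmit stays inside the
 * receiver's window while remaining a multiple of the MSS.
 */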
3447
3448int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
3449{
3450	struct tcp_sock *tp = tcp_sk(sk);
3451	int err = __tcp_retransmit_skb(sk, skb, segs);
3452
3453	if (err == 0) {
3454#if FASTRETRANS_DEBUG > 0
3455		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
3456			net_dbg_ratelimited("retrans_out leaked\n");
3457		}
3458#endif
3459		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
3460		tp->retrans_out += tcp_skb_pcount(skb);
3461	}
3462
3463	/* Save stamp of the first (attempted) retransmit. */
3464	if (!tp->retrans_stamp)
3465		tp->retrans_stamp = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb);
3466
3467	if (tp->undo_retrans < 0)
3468		tp->undo_retrans = 0;
3469	tp->undo_retrans += tcp_skb_pcount(skb);
3470	return err;
3471}
3472
3473/* This gets called after a retransmit timeout, and the initially
3474 * retransmitted data is acknowledged.  It tries to continue
3475 * resending the rest of the retransmit queue, until either
3476 * we've sent it all or the congestion window limit is reached.
3477 */
3478void tcp_xmit_retransmit_queue(struct sock *sk)
3479{
3480	const struct inet_connection_sock *icsk = inet_csk(sk);
3481	struct sk_buff *skb, *rtx_head, *hole = NULL;
3482	struct tcp_sock *tp = tcp_sk(sk);
3483	bool rearm_timer = false;
3484	u32 max_segs;
3485	int mib_idx;
3486
3487	if (!tp->packets_out)
3488		return;
3489
3490	rtx_head = tcp_rtx_queue_head(sk);
3491	skb = tp->retransmit_skb_hint ?: rtx_head;
3492	max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
3493	skb_rbtree_walk_from(skb) {
3494		__u8 sacked;
3495		int segs;
3496
3497		if (tcp_pacing_check(sk))
3498			break;
3499
3500		/* we could do better than to assign each time */
3501		if (!hole)
3502			tp->retransmit_skb_hint = skb;
3503
3504		segs = tcp_snd_cwnd(tp) - tcp_packets_in_flight(tp);
3505		if (segs <= 0)
3506			break;
3507		sacked = TCP_SKB_CB(skb)->sacked;
3508		/* In case tcp_shift_skb_data() has aggregated large skbs,
3509		 * we need to make sure we are not sending too big TSO packets
3510		 */
3511		segs = min_t(int, segs, max_segs);
3512
3513		if (tp->retrans_out >= tp->lost_out) {
3514			break;
3515		} else if (!(sacked & TCPCB_LOST)) {
3516			if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
3517				hole = skb;
3518			continue;
3519
3520		} else {
3521			if (icsk->icsk_ca_state != TCP_CA_Loss)
3522				mib_idx = LINUX_MIB_TCPFASTRETRANS;
3523			else
3524				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
3525		}
3526
3527		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
3528			continue;
3529
3530		if (tcp_small_queue_check(sk, skb, 1))
3531			break;
3532
3533		if (tcp_retransmit_skb(sk, skb, segs))
3534			break;
3535
3536		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
3537
3538		if (tcp_in_cwnd_reduction(sk))
3539			tp->prr_out += tcp_skb_pcount(skb);
3540
3541		if (skb == rtx_head &&
3542		    icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
3543			rearm_timer = true;
3544
3545	}
3546	if (rearm_timer)
3547		tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
3548				     inet_csk(sk)->icsk_rto,
3549				     TCP_RTO_MAX);
3550}
3551
3552	/* We allow exceeding memory limits for FIN packets to expedite
3553 * connection tear down and (memory) recovery.
3554 * Otherwise tcp_send_fin() could be tempted to either delay FIN
3555 * or even be forced to close flow without any FIN.
3556 * In general, we want to allow one skb per socket to avoid hangs
3557 * with edge trigger epoll()
3558 */
3559void sk_forced_mem_schedule(struct sock *sk, int size)
3560{
3561	int delta, amt;
3562
3563	delta = size - sk->sk_forward_alloc;
3564	if (delta <= 0)
3565		return;
3566	amt = sk_mem_pages(delta);
3567	sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
3568	sk_memory_allocated_add(sk, amt);
3569
3570	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3571		mem_cgroup_charge_skmem(sk->sk_memcg, amt,
3572					gfp_memcg_charge() | __GFP_NOFAIL);
3573}
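/* Worked example for the forced charge above (assumed values): with
 * sk_forward_alloc = 1000 and size = 3000, delta = 2000; on a 4 KB
 * page system sk_mem_pages(2000) = 1, so forward_alloc grows by one
 * page (4096 bytes) and one page is charged to protocol memory (and to
 * the memcg with __GFP_NOFAIL, so queuing the FIN cannot fail here).
 */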
3574
3575/* Send a FIN. The caller locks the socket for us.
3576 * We should try to send a FIN packet really hard, but eventually give up.
3577 */
3578void tcp_send_fin(struct sock *sk)
3579{
3580	struct sk_buff *skb, *tskb, *tail = tcp_write_queue_tail(sk);
3581	struct tcp_sock *tp = tcp_sk(sk);
3582
3583	/* Optimization, tack on the FIN if we have one skb in write queue and
3584	 * this skb was not yet sent, or we are under memory pressure.
3585	 * Note: in the latter case, the FIN packet will be sent after a timeout,
3586	 * as the TCP stack thinks it has already been transmitted.
3587	 */
3588	tskb = tail;
3589	if (!tskb && tcp_under_memory_pressure(sk))
3590		tskb = skb_rb_last(&sk->tcp_rtx_queue);
3591
3592	if (tskb) {
3593		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
3594		TCP_SKB_CB(tskb)->end_seq++;
3595		tp->write_seq++;
3596		if (!tail) {
3597			/* This means tskb was already sent.
3598			 * Pretend we included the FIN on previous transmit.
3599			 * We need to set tp->snd_nxt to the value it would have
3600			 * if FIN had been sent. This is because retransmit path
3601			 * does not change tp->snd_nxt.
3602			 */
3603			WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1);
3604			return;
3605		}
3606	} else {
3607		skb = alloc_skb_fclone(MAX_TCP_HEADER,
3608				       sk_gfp_mask(sk, GFP_ATOMIC |
3609						       __GFP_NOWARN));
3610		if (unlikely(!skb))
3611			return;
3612
3613		INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
3614		skb_reserve(skb, MAX_TCP_HEADER);
3615		sk_forced_mem_schedule(sk, skb->truesize);
3616		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
3617		tcp_init_nondata_skb(skb, tp->write_seq,
3618				     TCPHDR_ACK | TCPHDR_FIN);
3619		tcp_queue_skb(sk, skb);
3620	}
3621	__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
3622}
3623
3624/* We get here when a process closes a file descriptor (either due to
3625 * an explicit close() or as a byproduct of exit()'ing) and there
3626 * was unread data in the receive queue.  This behavior is recommended
3627 * by RFC 2525, section 2.17.  -DaveM
3628 */
3629void tcp_send_active_reset(struct sock *sk, gfp_t priority,
3630			   enum sk_rst_reason reason)
3631{
3632	struct sk_buff *skb;
3633
3634	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
3635
3636	/* NOTE: No TCP options attached and we never retransmit this. */
3637	skb = alloc_skb(MAX_TCP_HEADER, priority);
3638	if (!skb) {
3639		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3640		return;
3641	}
3642
3643	/* Reserve space for headers and prepare control bits. */
3644	skb_reserve(skb, MAX_TCP_HEADER);
3645	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
3646			     TCPHDR_ACK | TCPHDR_RST);
3647	tcp_mstamp_refresh(tcp_sk(sk));
3648	/* Send it off. */
3649	if (tcp_transmit_skb(sk, skb, 0, priority))
3650		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3651
3652	/* The skb argument of trace_tcp_send_reset() carries the skb that caused
3653	 * the RST; the skb here is different from the troublesome one, so pass NULL.
3654	 */
3655	trace_tcp_send_reset(sk, NULL, reason);
3656}
3657
3658/* Send a crossed SYN-ACK during socket establishment.
3659 * WARNING: This routine must only be called when we have already sent
3660 * a SYN packet that crossed the incoming SYN that caused this routine
3661 * to get called. If this assumption fails then the initial rcv_wnd
3662 * and rcv_wscale values will not be correct.
3663 */
3664int tcp_send_synack(struct sock *sk)
3665{
3666	struct sk_buff *skb;
3667
3668	skb = tcp_rtx_queue_head(sk);
3669	if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
3670		pr_err("%s: wrong queue state\n", __func__);
3671		return -EFAULT;
3672	}
3673	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
3674		if (skb_cloned(skb)) {
3675			struct sk_buff *nskb;
3676
3677			tcp_skb_tsorted_save(skb) {
3678				nskb = skb_copy(skb, GFP_ATOMIC);
3679			} tcp_skb_tsorted_restore(skb);
3680			if (!nskb)
3681				return -ENOMEM;
3682			INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
3683			tcp_highest_sack_replace(sk, skb, nskb);
3684			tcp_rtx_queue_unlink_and_free(skb, sk);
3685			__skb_header_release(nskb);
3686			tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
3687			sk_wmem_queued_add(sk, nskb->truesize);
3688			sk_mem_charge(sk, nskb->truesize);
3689			skb = nskb;
3690		}
3691
3692		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
3693		tcp_ecn_send_synack(sk, skb);
3694	}
3695	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3696}
3697
3698/**
3699 * tcp_make_synack - Allocate one skb and build a SYNACK packet.
3700 * @sk: listener socket
3701 * @dst: dst entry attached to the SYNACK. It is consumed and caller
3702 *       should not use it again.
3703 * @req: request_sock pointer
3704 * @foc: cookie for tcp fast open
3705 * @synack_type: Type of synack to prepare
3706 * @syn_skb: SYN packet just received.  It could be NULL for rtx case.
3707 */
3708struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
3709				struct request_sock *req,
3710				struct tcp_fastopen_cookie *foc,
3711				enum tcp_synack_type synack_type,
3712				struct sk_buff *syn_skb)
3713{
3714	struct inet_request_sock *ireq = inet_rsk(req);
3715	const struct tcp_sock *tp = tcp_sk(sk);
3716	struct tcp_out_options opts;
3717	struct tcp_key key = {};
3718	struct sk_buff *skb;
3719	int tcp_header_size;
3720	struct tcphdr *th;
3721	int mss;
3722	u64 now;
3723
3724	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
3725	if (unlikely(!skb)) {
3726		dst_release(dst);
3727		return NULL;
3728	}
3729	/* Reserve space for headers. */
3730	skb_reserve(skb, MAX_TCP_HEADER);
3731
3732	switch (synack_type) {
3733	case TCP_SYNACK_NORMAL:
3734		skb_set_owner_edemux(skb, req_to_sk(req));
3735		break;
3736	case TCP_SYNACK_COOKIE:
3737		/* Under synflood, we do not attach skb to a socket,
3738		 * to avoid false sharing.
3739		 */
3740		break;
3741	case TCP_SYNACK_FASTOPEN:
3742		/* sk is a const pointer, because we want to express that multiple
3743		 * cpus might call us concurrently.
3744		 * sk->sk_wmem_alloc is an atomic, so we can safely promote it to rw.
3745		 */
3746		skb_set_owner_w(skb, (struct sock *)sk);
3747		break;
3748	}
3749	skb_dst_set(skb, dst);
3750
3751	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3752
3753	memset(&opts, 0, sizeof(opts));
3754	now = tcp_clock_ns();
3755#ifdef CONFIG_SYN_COOKIES
3756	if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok))
3757		skb_set_delivery_time(skb, cookie_init_timestamp(req, now),
3758				      SKB_CLOCK_MONOTONIC);
3759	else
3760#endif
3761	{
3762		skb_set_delivery_time(skb, now, SKB_CLOCK_MONOTONIC);
3763		if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */
3764			tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
3765	}
3766
3767#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
3768	rcu_read_lock();
3769#endif
3770	if (tcp_rsk_used_ao(req)) {
3771#ifdef CONFIG_TCP_AO
3772		struct tcp_ao_key *ao_key = NULL;
3773		u8 keyid = tcp_rsk(req)->ao_keyid;
3774		u8 rnext = tcp_rsk(req)->ao_rcv_next;
3775
3776		ao_key = tcp_sk(sk)->af_specific->ao_lookup(sk, req_to_sk(req),
3777							    keyid, -1);
3778		/* If there is no matching key - avoid sending anything,
3779		 * especially unsigned segments. It could try harder and look up
3780		 * another peer-matching key, but the peer has requested
3781		 * ao_keyid (RFC5925 RNextKeyID), so let's keep it simple here.
3782		 */
3783		if (unlikely(!ao_key)) {
3784			trace_tcp_ao_synack_no_key(sk, keyid, rnext);
3785			rcu_read_unlock();
3786			kfree_skb(skb);
3787			net_warn_ratelimited("TCP-AO: the keyid %u from SYN packet is not present - not sending SYNACK\n",
3788					     keyid);
3789			return NULL;
3790		}
3791		key.ao_key = ao_key;
3792		key.type = TCP_KEY_AO;
3793#endif
3794	} else {
3795#ifdef CONFIG_TCP_MD5SIG
3796		key.md5_key = tcp_rsk(req)->af_specific->req_md5_lookup(sk,
3797					req_to_sk(req));
3798		if (key.md5_key)
3799			key.type = TCP_KEY_MD5;
3800#endif
3801	}
3802	skb_set_hash(skb, READ_ONCE(tcp_rsk(req)->txhash), PKT_HASH_TYPE_L4);
3803	/* bpf program will be interested in the tcp_flags */
3804	TCP_SKB_CB(skb)->tcp_flags = TCPHDR_SYN | TCPHDR_ACK;
3805	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts,
3806					     &key, foc, synack_type, syn_skb)
3807					+ sizeof(*th);
3808
3809	skb_push(skb, tcp_header_size);
3810	skb_reset_transport_header(skb);
3811
3812	th = (struct tcphdr *)skb->data;
3813	memset(th, 0, sizeof(struct tcphdr));
3814	th->syn = 1;
3815	th->ack = 1;
3816	tcp_ecn_make_synack(req, th);
3817	th->source = htons(ireq->ir_num);
3818	th->dest = ireq->ir_rmt_port;
3819	skb->mark = ireq->ir_mark;
3820	skb->ip_summed = CHECKSUM_PARTIAL;
3821	th->seq = htonl(tcp_rsk(req)->snt_isn);
3822	/* XXX data is queued and acked as is. No buffer/window check */
3823	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
3824
3825	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
3826	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
3827	tcp_options_write(th, NULL, tcp_rsk(req), &opts, &key);
3828	th->doff = (tcp_header_size >> 2);
3829	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
3830
3831	/* Okay, we have all we need - do the md5 hash if needed */
3832	if (tcp_key_is_md5(&key)) {
3833#ifdef CONFIG_TCP_MD5SIG
3834		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
3835					key.md5_key, req_to_sk(req), skb);
3836#endif
3837	} else if (tcp_key_is_ao(&key)) {
3838#ifdef CONFIG_TCP_AO
3839		tcp_rsk(req)->af_specific->ao_synack_hash(opts.hash_location,
3840					key.ao_key, req, skb,
3841					opts.hash_location - (u8 *)th, 0);
3842#endif
3843	}
3844#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
3845	rcu_read_unlock();
3846#endif
3847
3848	bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb,
3849				synack_type, &opts);
3850
3851	skb_set_delivery_time(skb, now, SKB_CLOCK_MONOTONIC);
3852	tcp_add_tx_delay(skb, tp);
3853
3854	return skb;
3855}
3856EXPORT_SYMBOL(tcp_make_synack);
3857
3858static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
3859{
3860	struct inet_connection_sock *icsk = inet_csk(sk);
3861	const struct tcp_congestion_ops *ca;
3862	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
3863
3864	if (ca_key == TCP_CA_UNSPEC)
3865		return;
3866
3867	rcu_read_lock();
3868	ca = tcp_ca_find_key(ca_key);
3869	if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
3870		bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner);
3871		icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
3872		icsk->icsk_ca_ops = ca;
3873	}
3874	rcu_read_unlock();
3875}
3876
3877/* Do all connect socket setups that can be done AF independent. */
3878static void tcp_connect_init(struct sock *sk)
3879{
3880	const struct dst_entry *dst = __sk_dst_get(sk);
3881	struct tcp_sock *tp = tcp_sk(sk);
3882	__u8 rcv_wscale;
3883	u32 rcv_wnd;
3884
3885	/* We'll fix this up when we get a response from the other end.
3886	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
3887	 */
3888	tp->tcp_header_len = sizeof(struct tcphdr);
3889	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps))
3890		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
3891
3892	tcp_ao_connect_init(sk);
3893
3894	/* If user gave his TCP_MAXSEG, record it to clamp */
3895	if (tp->rx_opt.user_mss)
3896		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
3897	tp->max_window = 0;
3898	tcp_mtup_init(sk);
3899	tcp_sync_mss(sk, dst_mtu(dst));
3900
3901	tcp_ca_dst_init(sk, dst);
3902
3903	if (!tp->window_clamp)
3904		WRITE_ONCE(tp->window_clamp, dst_metric(dst, RTAX_WINDOW));
3905	tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3906
3907	tcp_initialize_rcv_mss(sk);
3908
3909	/* limit the window selection if the user enforces a smaller rx buffer */
3910	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
3911	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
3912		WRITE_ONCE(tp->window_clamp, tcp_full_space(sk));
3913
3914	rcv_wnd = tcp_rwnd_init_bpf(sk);
3915	if (rcv_wnd == 0)
3916		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
3917
3918	tcp_select_initial_window(sk, tcp_full_space(sk),
3919				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
3920				  &tp->rcv_wnd,
3921				  &tp->window_clamp,
3922				  READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling),
3923				  &rcv_wscale,
3924				  rcv_wnd);
3925
3926	tp->rx_opt.rcv_wscale = rcv_wscale;
3927	tp->rcv_ssthresh = tp->rcv_wnd;
3928
3929	WRITE_ONCE(sk->sk_err, 0);
3930	sock_reset_flag(sk, SOCK_DONE);
3931	tp->snd_wnd = 0;
3932	tcp_init_wl(tp, 0);
3933	tcp_write_queue_purge(sk);
3934	tp->snd_una = tp->write_seq;
3935	tp->snd_sml = tp->write_seq;
3936	tp->snd_up = tp->write_seq;
3937	WRITE_ONCE(tp->snd_nxt, tp->write_seq);
3938
3939	if (likely(!tp->repair))
3940		tp->rcv_nxt = 0;
3941	else
3942		tp->rcv_tstamp = tcp_jiffies32;
3943	tp->rcv_wup = tp->rcv_nxt;
3944	WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
3945
3946	inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
3947	inet_csk(sk)->icsk_retransmits = 0;
3948	tcp_clear_retrans(tp);
3949}
3950
3951static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
3952{
3953	struct tcp_sock *tp = tcp_sk(sk);
3954	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
3955
3956	tcb->end_seq += skb->len;
3957	__skb_header_release(skb);
3958	sk_wmem_queued_add(sk, skb->truesize);
3959	sk_mem_charge(sk, skb->truesize);
3960	WRITE_ONCE(tp->write_seq, tcb->end_seq);
3961	tp->packets_out += tcp_skb_pcount(skb);
3962}
3963
3964/* Build and send a SYN with data and (cached) Fast Open cookie. However,
3965 * queue a data-only packet after the regular SYN, such that regular SYNs
3966 * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
3967 * only the SYN sequence, the data are retransmitted in the first ACK.
3968 * If the cookie is not cached or another error occurs, fall back to sending a
3969 * regular SYN with a Fast Open cookie request option.
3970 */
3971static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3972{
3973	struct inet_connection_sock *icsk = inet_csk(sk);
3974	struct tcp_sock *tp = tcp_sk(sk);
3975	struct tcp_fastopen_request *fo = tp->fastopen_req;
3976	struct page_frag *pfrag = sk_page_frag(sk);
3977	struct sk_buff *syn_data;
3978	int space, err = 0;
3979
3980	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
3981	if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
3982		goto fallback;
3983
3984	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
3985	 * user-MSS. Reserve maximum option space for middleboxes that add
3986	 * private TCP options. The cost is reduced data space in SYN :(
3987	 */
3988	tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
3989	/* Sync mss_cache after updating the mss_clamp */
3990	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
3991
3992	space = __tcp_mtu_to_mss(sk, icsk->icsk_pmtu_cookie) -
3993		MAX_TCP_OPTION_SPACE;
3994
3995	space = min_t(size_t, space, fo->size);
3996
3997	if (space &&
3998	    !skb_page_frag_refill(min_t(size_t, space, PAGE_SIZE),
3999				  pfrag, sk->sk_allocation))
4000		goto fallback;
4001	syn_data = tcp_stream_alloc_skb(sk, sk->sk_allocation, false);
4002	if (!syn_data)
4003		goto fallback;
4004	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
4005	if (space) {
4006		space = min_t(size_t, space, pfrag->size - pfrag->offset);
4007		space = tcp_wmem_schedule(sk, space);
4008	}
4009	if (space) {
4010		space = copy_page_from_iter(pfrag->page, pfrag->offset,
4011					    space, &fo->data->msg_iter);
4012		if (unlikely(!space)) {
4013			tcp_skb_tsorted_anchor_cleanup(syn_data);
4014			kfree_skb(syn_data);
4015			goto fallback;
4016		}
4017		skb_fill_page_desc(syn_data, 0, pfrag->page,
4018				   pfrag->offset, space);
4019		page_ref_inc(pfrag->page);
4020		pfrag->offset += space;
4021		skb_len_add(syn_data, space);
4022		skb_zcopy_set(syn_data, fo->uarg, NULL);
4023	}
4024	/* No more data pending in inet_wait_for_connect() */
4025	if (space == fo->size)
4026		fo->data = NULL;
4027	fo->copied = space;
4028
4029	tcp_connect_queue_skb(sk, syn_data);
4030	if (syn_data->len)
4031		tcp_chrono_start(sk, TCP_CHRONO_BUSY);
4032
4033	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);
4034
4035	skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, SKB_CLOCK_MONOTONIC);
4036
4037	/* Now the full SYN+DATA has been cloned and sent (or not),
4038	 * remove the SYN from the original skb (syn_data)
4039	 * we keep in write queue in case of a retransmit, as we
4040	 * also have the SYN packet (with no data) in the same queue.
4041	 */
4042	TCP_SKB_CB(syn_data)->seq++;
4043	TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
4044	if (!err) {
4045		tp->syn_data = (fo->copied > 0);
4046		tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data);
4047		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
4048		goto done;
4049	}
4050
4051	/* data was not sent, put it in write_queue */
4052	__skb_queue_tail(&sk->sk_write_queue, syn_data);
4053	tp->packets_out -= tcp_skb_pcount(syn_data);
4054
4055fallback:
4056	/* Send a regular SYN with Fast Open cookie request option */
4057	if (fo->cookie.len > 0)
4058		fo->cookie.len = 0;
4059	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
4060	if (err)
4061		tp->syn_fastopen = 0;
4062done:
4063	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
4064	return err;
4065}
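/* Worked example for the SYN-data sizing above (assumed values): if
 * __tcp_mtu_to_mss() yields 1460, reserving MAX_TCP_OPTION_SPACE
 * (40 bytes) for middlebox-added options leaves space = 1420 bytes of
 * payload in the SYN, further capped by the pending fo->size and by
 * what a single page fragment can hold.
 */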
4066
4067/* Build a SYN and send it off. */
4068int tcp_connect(struct sock *sk)
4069{
4070	struct tcp_sock *tp = tcp_sk(sk);
4071	struct sk_buff *buff;
4072	int err;
4073
4074	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL);
4075
4076#if defined(CONFIG_TCP_MD5SIG) && defined(CONFIG_TCP_AO)
4077	/* Has to be checked late, after setting daddr/saddr/ops.
4078	 * Return an error if the peer has both an md5 and a tcp-ao key
4079	 * configured, as this is ambiguous.
4080	 */
4081	if (unlikely(rcu_dereference_protected(tp->md5sig_info,
4082					       lockdep_sock_is_held(sk)))) {
4083		bool needs_ao = !!tp->af_specific->ao_lookup(sk, sk, -1, -1);
4084		bool needs_md5 = !!tp->af_specific->md5_lookup(sk, sk);
4085		struct tcp_ao_info *ao_info;
4086
4087		ao_info = rcu_dereference_check(tp->ao_info,
4088						lockdep_sock_is_held(sk));
4089		if (ao_info) {
4090			/* This is an extra check: tcp_ao_required() in
4091			 * tcp_v{4,6}_parse_md5_keys() should prevent adding
4092			 * md5 keys on ao_required socket.
4093			 */
4094			needs_ao |= ao_info->ao_required;
4095			WARN_ON_ONCE(ao_info->ao_required && needs_md5);
4096		}
4097		if (needs_md5 && needs_ao)
4098			return -EKEYREJECTED;
4099
4100		/* If we have a matching md5 key and no matching tcp-ao key
4101		 * then free up ao_info if allocated.
4102		 */
4103		if (needs_md5) {
4104			tcp_ao_destroy_sock(sk, false);
4105		} else if (needs_ao) {
4106			tcp_clear_md5_list(sk);
4107			kfree(rcu_replace_pointer(tp->md5sig_info, NULL,
4108						  lockdep_sock_is_held(sk)));
4109		}
4110	}
4111#endif
4112#ifdef CONFIG_TCP_AO
4113	if (unlikely(rcu_dereference_protected(tp->ao_info,
4114					       lockdep_sock_is_held(sk)))) {
4115		/* Don't allow connecting if ao is configured but no
4116		 * matching key is found.
4117		 */
4118		if (!tp->af_specific->ao_lookup(sk, sk, -1, -1))
4119			return -EKEYREJECTED;
4120	}
4121#endif
4122
4123	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
4124		return -EHOSTUNREACH; /* Routing failure or similar. */
4125
4126	tcp_connect_init(sk);
4127
4128	if (unlikely(tp->repair)) {
4129		tcp_finish_connect(sk, NULL);
4130		return 0;
4131	}
4132
4133	buff = tcp_stream_alloc_skb(sk, sk->sk_allocation, true);
4134	if (unlikely(!buff))
4135		return -ENOBUFS;
4136
4137	/* SYN eats a sequence byte, write_seq updated by
4138	 * tcp_connect_queue_skb().
4139	 */
4140	tcp_init_nondata_skb(buff, tp->write_seq, TCPHDR_SYN);
4141	tcp_mstamp_refresh(tp);
4142	tp->retrans_stamp = tcp_time_stamp_ts(tp);
4143	tcp_connect_queue_skb(sk, buff);
4144	tcp_ecn_send_syn(sk, buff);
4145	tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
4146
4147	/* Send off SYN; include data in Fast Open. */
4148	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
4149	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
4150	if (err == -ECONNREFUSED)
4151		return err;
4152
4153	/* We change tp->snd_nxt after the tcp_transmit_skb() call
4154	 * in order to make this packet get counted in tcpOutSegs.
4155	 */
4156	WRITE_ONCE(tp->snd_nxt, tp->write_seq);
4157	tp->pushed_seq = tp->write_seq;
4158	buff = tcp_send_head(sk);
4159	if (unlikely(buff)) {
4160		WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);
4161		tp->pushed_seq	= TCP_SKB_CB(buff)->seq;
4162	}
4163	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
4164
4165	/* Timer for repeating the SYN until an answer. */
4166	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
4167				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
4168	return 0;
4169}
4170EXPORT_SYMBOL(tcp_connect);
4171
4172u32 tcp_delack_max(const struct sock *sk)
4173{
4174	u32 delack_from_rto_min = max(tcp_rto_min(sk), 2) - 1;
4175
4176	return min(inet_csk(sk)->icsk_delack_max, delack_from_rto_min);
4177}
4178
4179/* Send out a delayed ack, the caller does the policy checking
4180 * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
4181 * for details.
4182 */
4183void tcp_send_delayed_ack(struct sock *sk)
4184{
4185	struct inet_connection_sock *icsk = inet_csk(sk);
4186	int ato = icsk->icsk_ack.ato;
4187	unsigned long timeout;
4188
4189	if (ato > TCP_DELACK_MIN) {
4190		const struct tcp_sock *tp = tcp_sk(sk);
4191		int max_ato = HZ / 2;
4192
4193		if (inet_csk_in_pingpong_mode(sk) ||
4194		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
4195			max_ato = TCP_DELACK_MAX;
4196
4197		/* Slow path, intersegment interval is "high". */
4198
4199		/* If some rtt estimate is known, use it to bound delayed ack.
4200		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
4201		 * directly.
4202		 */
4203		if (tp->srtt_us) {
4204			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
4205					TCP_DELACK_MIN);
4206
4207			if (rtt < max_ato)
4208				max_ato = rtt;
4209		}
4210
4211		ato = min(ato, max_ato);
4212	}
4213
4214	ato = min_t(u32, ato, tcp_delack_max(sk));
4215
4216	/* Stay within the limit we were given */
4217	timeout = jiffies + ato;
4218
4219	/* Use the new timeout only if there wasn't an older one already. */
4220	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
4221		/* If delack timer is about to expire, send ACK now. */
4222		if (time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
4223			tcp_send_ack(sk);
4224			return;
4225		}
4226
4227		if (!time_before(timeout, icsk->icsk_ack.timeout))
4228			timeout = icsk->icsk_ack.timeout;
4229	}
4230	smp_store_release(&icsk->icsk_ack.pending,
4231			  icsk->icsk_ack.pending | ICSK_ACK_SCHED | ICSK_ACK_TIMER);
4232	icsk->icsk_ack.timeout = timeout;
4233	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
4234}
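/* Worked example for the delayed-ACK timer above (assumed values):
 * with ato = 200 ms and srtt = 60 ms, max_ato shrinks to the 60 ms
 * rtt estimate, so ato becomes 60 ms.  If an already-armed delack
 * timer is due within ato/4 of now, the ACK is sent immediately
 * rather than re-armed.
 */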
4235
4236/* This routine sends an ack and also updates the window. */
4237void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
4238{
4239	struct sk_buff *buff;
4240
4241	/* If we have been reset, we may not send again. */
4242	if (sk->sk_state == TCP_CLOSE)
4243		return;
4244
4245	/* We are not putting this on the write queue, so
4246	 * tcp_transmit_skb() will set the ownership to this
4247	 * sock.
4248	 */
4249	buff = alloc_skb(MAX_TCP_HEADER,
4250			 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
4251	if (unlikely(!buff)) {
4252		struct inet_connection_sock *icsk = inet_csk(sk);
4253		unsigned long delay;
4254
4255		delay = TCP_DELACK_MAX << icsk->icsk_ack.retry;
4256		if (delay < TCP_RTO_MAX)
4257			icsk->icsk_ack.retry++;
4258		inet_csk_schedule_ack(sk);
4259		icsk->icsk_ack.ato = TCP_ATO_MIN;
4260		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, delay, TCP_RTO_MAX);
4261		return;
4262	}
4263
4264	/* Reserve space for headers and prepare control bits. */
4265	skb_reserve(buff, MAX_TCP_HEADER);
4266	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
4267
4268	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
4269	 * too much.
4270	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
4271	 */
4272	skb_set_tcp_pure_ack(buff);
4273
4274	/* Send it off, this clears delayed acks for us. */
4275	__tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
4276}
4277EXPORT_SYMBOL_GPL(__tcp_send_ack);
4278
4279void tcp_send_ack(struct sock *sk)
4280{
4281	__tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
4282}
4283
4284/* This routine sends a packet with an out of date sequence
4285 * number. It assumes the other end will try to ack it.
4286 *
4287 * Question: what should we send while in urgent mode?
4288 * 4.4BSD forces sending single byte of data. We cannot send
4289 * out of window data, because we have SND.NXT==SND.MAX...
4290 *
4291 * Current solution: to send TWO zero-length segments in urgent mode:
4292 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
4293 * out-of-date with SND.UNA-1 to probe window.
4294 */
4295static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
4296{
4297	struct tcp_sock *tp = tcp_sk(sk);
4298	struct sk_buff *skb;
4299
4300	/* We don't queue it, tcp_transmit_skb() sets ownership. */
4301	skb = alloc_skb(MAX_TCP_HEADER,
4302			sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
4303	if (!skb)
4304		return -1;
4305
4306	/* Reserve space for headers and set control bits. */
4307	skb_reserve(skb, MAX_TCP_HEADER);
4308	/* Use a previous sequence.  This should cause the other
4309	 * end to send an ack.  Don't queue or clone SKB, just
4310	 * send it.
4311	 */
4312	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
4313	NET_INC_STATS(sock_net(sk), mib);
4314	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
4315}
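/*
 * Illustrative sketch (not kernel code): the sequence number chosen by
 * tcp_xmit_probe_skb() above.  A plain window probe uses SND.UNA - 1, an
 * already-ACKed sequence that elicits an ACK without being accepted as
 * new data; the extra urgent-mode probe uses SND.UNA itself so the
 * urgent pointer is delivered.  The helper name is an assumption.
 */
static unsigned int probe_seq(unsigned int snd_una, int urgent)
{
	/* Mirrors tp->snd_una - !urgent above: !urgent is 1 for the
	 * ordinary probe and 0 for the urgent-mode companion probe.
	 */
	return snd_una - !urgent;
}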
4316
4317/* Called from setsockopt(... TCP_REPAIR), when repair mode is turned off. */
4318void tcp_send_window_probe(struct sock *sk)
4319{
4320	if (sk->sk_state == TCP_ESTABLISHED) {
4321		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
4322		tcp_mstamp_refresh(tcp_sk(sk));
4323		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
4324	}
4325}
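/*
 * Illustrative userspace sketch (assumption, not kernel code): the probe
 * above fires when a checkpoint/restore tool leaves repair mode on an
 * established socket.  Requires CAP_NET_ADMIN; writing
 * TCP_REPAIR_OFF_NO_WP (-1) instead of 0 would suppress the probe.
 */
#include <netinet/in.h>
#include <sys/socket.h>

#ifndef TCP_REPAIR
#define TCP_REPAIR 19			/* from linux/tcp.h */
#endif

static int leave_repair_mode(int fd)
{
	int off = 0;			/* 0 = leave repair mode, send probe */

	return setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &off, sizeof(off));
}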
4326
4327/* Initiate keepalive or window probe from timer. */
4328int tcp_write_wakeup(struct sock *sk, int mib)
4329{
4330	struct tcp_sock *tp = tcp_sk(sk);
4331	struct sk_buff *skb;
4332
4333	if (sk->sk_state == TCP_CLOSE)
4334		return -1;
4335
4336	skb = tcp_send_head(sk);
4337	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
4338		int err;
4339		unsigned int mss = tcp_current_mss(sk);
4340		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
4341
4342		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
4343			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
4344
4345		/* We are probing the opening of a window, but the
4346		 * window size is != 0: this must have been the result of
4347		 * sender-side SWS avoidance (see the sketch after this function).
4348		 */
4349		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
4350		    skb->len > mss) {
4351			seg_size = min(seg_size, mss);
4352			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
4353			if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
4354					 skb, seg_size, mss, GFP_ATOMIC))
4355				return -1;
4356		} else if (!tcp_skb_pcount(skb))
4357			tcp_set_skb_tso_segs(skb, mss);
4358
4359		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
4360		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
4361		if (!err)
4362			tcp_event_new_data_sent(sk, skb);
4363		return err;
4364	} else {
4365		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
4366			tcp_xmit_probe_skb(sk, 1, mib);
4367		return tcp_xmit_probe_skb(sk, 0, mib);
4368	}
4369}
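/*
 * Illustrative sketch (not kernel code): the size decision inside
 * tcp_write_wakeup() above.  When the remaining window is smaller than
 * the head skb, or the skb spans more than one MSS, only min(window, mss)
 * bytes are sent so the probe fits both the receive window and a single
 * segment.  The helper name is an assumption for illustration.
 */
static unsigned int wakeup_send_len(unsigned int window_left,
				    unsigned int skb_len, unsigned int mss)
{
	if (window_left < skb_len || skb_len > mss)
		return window_left < mss ? window_left : mss;
	return skb_len;		/* whole skb fits: send it unfragmented */
}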
4370
4371/* A window probe timeout has occurred.  If the window is not closed,
4372 * send a partial packet; otherwise send a zero-window probe.
4373 */
4374void tcp_send_probe0(struct sock *sk)
4375{
4376	struct inet_connection_sock *icsk = inet_csk(sk);
4377	struct tcp_sock *tp = tcp_sk(sk);
4378	struct net *net = sock_net(sk);
4379	unsigned long timeout;
4380	int err;
4381
4382	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);
4383
4384	if (tp->packets_out || tcp_write_queue_empty(sk)) {
4385		/* Cancel probe timer, if it is not required. */
4386		icsk->icsk_probes_out = 0;
4387		icsk->icsk_backoff = 0;
4388		icsk->icsk_probes_tstamp = 0;
4389		return;
4390	}
4391
4392	icsk->icsk_probes_out++;
4393	if (err <= 0) {
4394		if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2))
4395			icsk->icsk_backoff++;
4396		timeout = tcp_probe0_when(sk, TCP_RTO_MAX);
4397	} else {
4398		/* If the packet was not sent due to local congestion,
4399		 * let senders contend for local resources conservatively.
4400		 */
4401		timeout = TCP_RESOURCE_PROBE_INTERVAL;
4402	}
4403
4404	timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout);
4405	tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX);
4406}
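/*
 * Illustrative sketch (not kernel code): roughly what tcp_probe0_when()
 * computes for the backoff branch above: the base RTO, floored at
 * TCP_RTO_MIN, doubled once per backoff step and capped at the supplied
 * maximum.  Names and millisecond constants are assumptions.
 */
#define EX_RTO_MIN	200		/* ms, like TCP_RTO_MIN (HZ/5) */

static unsigned long probe0_timeout_ms(unsigned long rto_ms,
				       unsigned int backoff,
				       unsigned long max_ms)
{
	unsigned long base = rto_ms > EX_RTO_MIN ? rto_ms : EX_RTO_MIN;
	unsigned long long when = (unsigned long long)base << backoff;

	return when < max_ms ? (unsigned long)when : max_ms;
}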
4407
4408int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
4409{
4410	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
4411	struct flowi fl;
4412	int res;
4413
4414	/* Paired with WRITE_ONCE() in sock_setsockopt() */
4415	if (READ_ONCE(sk->sk_txrehash) == SOCK_TXREHASH_ENABLED)
4416		WRITE_ONCE(tcp_rsk(req)->txhash, net_tx_rndhash());
4417	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL,
4418				  NULL);
4419	if (!res) {
4420		TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
4421		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
4422		if (unlikely(tcp_passive_fastopen(sk))) {
4423		/* sk has the const attribute because listeners are lockless.
4424		 * However, in this case we are dealing with a passive fastopen
4425		 * socket, thus we can change the total_retrans value.
4426		 */
4427			tcp_sk_rw(sk)->total_retrans++;
4428		}
4429		trace_tcp_retransmit_synack(sk, req);
4430	}
4431	return res;
4432}
4433EXPORT_SYMBOL(tcp_rtx_synack);
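/*
 * Illustrative sketch (not kernel code): the READ_ONCE()/WRITE_ONCE()
 * pairing noted above for sk_txrehash, modeled with volatile accesses.
 * One side publishes with a WRITE_ONCE()-style store; the lockless
 * reader samples with a READ_ONCE()-style load, so the compiler can
 * neither tear nor refetch the access.  Macro and variable names are
 * assumptions for illustration.
 */
#define EX_WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))
#define EX_READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))

static int ex_txrehash;

static void ex_setsockopt_side(int v)
{
	EX_WRITE_ONCE(ex_txrehash, v);		/* publisher, as in sock_setsockopt() */
}

static int ex_lockless_reader(void)
{
	return EX_READ_ONCE(ex_txrehash);	/* reader, as in tcp_rtx_synack() */
}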