   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		Implementation of the Transmission Control Protocol(TCP).
   7 *
   8 * Authors:	Ross Biro
   9 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  10 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
  11 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
  12 *		Florian La Roche, <flla@stud.uni-sb.de>
  13 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
  14 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
  15 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  16 *		Matthew Dillon, <dillon@apollo.west.oic.com>
  17 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
  18 *		Jorge Cwik, <jorge@laser.satlink.net>
  19 */
  20
  21/*
  22 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
  23 *				:	Fragmentation on mtu decrease
  24 *				:	Segment collapse on retransmit
  25 *				:	AF independence
  26 *
  27 *		Linus Torvalds	:	send_delayed_ack
  28 *		David S. Miller	:	Charge memory using the right skb
  29 *					during syn/ack processing.
  30 *		David S. Miller :	Output engine completely rewritten.
  31 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
  32 *		Cacophonix Gaul :	draft-minshall-nagle-01
  33 *		J Hadi Salim	:	ECN support
  34 *
  35 */
  36
  37#include <net/tcp.h>
  38
  39#include <linux/compiler.h>
  40#include <linux/gfp.h>
  41#include <linux/module.h>
  42
   43/* People can turn this off for buggy TCPs found in printers etc. */
  44int sysctl_tcp_retrans_collapse __read_mostly = 1;
  45
  46/* People can turn this on to work with those rare, broken TCPs that
  47 * interpret the window field as a signed quantity.
  48 */
  49int sysctl_tcp_workaround_signed_windows __read_mostly = 0;
  50
  51/* This limits the percentage of the congestion window which we
  52 * will allow a single TSO frame to consume.  Building TSO frames
  53 * which are too large can cause TCP streams to be bursty.
  54 */
  55int sysctl_tcp_tso_win_divisor __read_mostly = 3;
  56
  57int sysctl_tcp_mtu_probing __read_mostly = 0;
  58int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;
  59
  60/* By default, RFC2861 behavior.  */
  61int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
  62
  63int sysctl_tcp_cookie_size __read_mostly = 0; /* TCP_COOKIE_MAX */
  64EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size);
  65
  66
  67/* Account for new data that has been sent to the network. */
  68static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
  69{
  70	struct tcp_sock *tp = tcp_sk(sk);
  71	unsigned int prior_packets = tp->packets_out;
  72
  73	tcp_advance_send_head(sk, skb);
  74	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
  75
  76	/* Don't override Nagle indefinitely with F-RTO */
  77	if (tp->frto_counter == 2)
  78		tp->frto_counter = 3;
  79
  80	tp->packets_out += tcp_skb_pcount(skb);
  81	if (!prior_packets)
  82		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
  83					  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
  84}
  85
  86/* SND.NXT, if window was not shrunk.
   87 * If the window has been shrunk, what should we do? It is not clear at all.
   88 * Using SND.UNA we will fail to open the window, SND.NXT is out of window. :-(
   89 * Anything in between SND.UNA...SND.UNA+SND.WND can also already be
   90 * invalid. OK, let's do this for now:
  91 */
  92static inline __u32 tcp_acceptable_seq(struct sock *sk)
  93{
  94	struct tcp_sock *tp = tcp_sk(sk);
  95
  96	if (!before(tcp_wnd_end(tp), tp->snd_nxt))
  97		return tp->snd_nxt;
  98	else
  99		return tcp_wnd_end(tp);
 100}
 101
 102/* Calculate mss to advertise in SYN segment.
 103 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 104 *
 105 * 1. It is independent of path mtu.
  106 * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
 107 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 108 *    attached devices, because some buggy hosts are confused by
 109 *    large MSS.
  110 * 4. We do not implement 3; we advertise an MSS calculated from the
  111 *    first hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 112 *    This may be overridden via information stored in routing table.
 113 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 114 *    probably even Jumbo".
 115 */
 116static __u16 tcp_advertise_mss(struct sock *sk)
 117{
 118	struct tcp_sock *tp = tcp_sk(sk);
 119	struct dst_entry *dst = __sk_dst_get(sk);
 120	int mss = tp->advmss;
 121
 122	if (dst) {
 123		unsigned int metric = dst_metric_advmss(dst);
 124
 125		if (metric < mss) {
 126			mss = metric;
 127			tp->advmss = mss;
 128		}
 129	}
 130
 131	return (__u16)mss;
 132}
 133
  134/* RFC2861. Reset CWND after an idle period longer than RTO to the
  135 * "restart window". This is the first part of the cwnd validation mechanism. */
 136static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
 137{
 138	struct tcp_sock *tp = tcp_sk(sk);
 139	s32 delta = tcp_time_stamp - tp->lsndtime;
 140	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
 141	u32 cwnd = tp->snd_cwnd;
 142
 143	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
 144
 145	tp->snd_ssthresh = tcp_current_ssthresh(sk);
 146	restart_cwnd = min(restart_cwnd, cwnd);
 147
 148	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
 149		cwnd >>= 1;
 150	tp->snd_cwnd = max(cwnd, restart_cwnd);
 151	tp->snd_cwnd_stamp = tcp_time_stamp;
 152	tp->snd_cwnd_used = 0;
 153}
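
/* Editor's note: a standalone sketch (not kernel code) of the RFC2861
 * restart computation above -- cwnd is halved once for every full RTO of
 * idle time, but never drops below the restart window.  The helper name
 * and the numbers in main() are hypothetical illustrations.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t cwnd_after_idle(uint32_t cwnd, uint32_t restart_cwnd,
				int32_t idle, int32_t rto)
{
	while ((idle -= rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	return cwnd > restart_cwnd ? cwnd : restart_cwnd;
}

int main(void)
{
	/* 40-segment cwnd, restart window 4, idle for 3.5 RTOs:
	 * three halvings, 40 -> 20 -> 10 -> 5. */
	printf("%u\n", cwnd_after_idle(40, 4, 7, 2));	/* prints 5 */
	return 0;
}
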
 154
 155/* Congestion state accounting after a packet has been sent. */
 156static void tcp_event_data_sent(struct tcp_sock *tp,
 157				struct sk_buff *skb, struct sock *sk)
 158{
 159	struct inet_connection_sock *icsk = inet_csk(sk);
 160	const u32 now = tcp_time_stamp;
 161
 162	if (sysctl_tcp_slow_start_after_idle &&
 163	    (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto))
 164		tcp_cwnd_restart(sk, __sk_dst_get(sk));
 165
 166	tp->lsndtime = now;
 167
  168	/* If this is a reply sent within ato of the last
  169	 * received packet, enter pingpong mode.
  170	 */
 171	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
 172		icsk->icsk_ack.pingpong = 1;
 173}
 174
 175/* Account for an ACK we sent. */
 176static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
 177{
 178	tcp_dec_quickack_mode(sk, pkts);
 179	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 180}
 181
 182/* Determine a window scaling and initial window to offer.
 183 * Based on the assumption that the given amount of space
 184 * will be offered. Store the results in the tp structure.
 185 * NOTE: for smooth operation initial space offering should
 186 * be a multiple of mss if possible. We assume here that mss >= 1.
 187 * This MUST be enforced by all callers.
 188 */
 189void tcp_select_initial_window(int __space, __u32 mss,
 190			       __u32 *rcv_wnd, __u32 *window_clamp,
 191			       int wscale_ok, __u8 *rcv_wscale,
 192			       __u32 init_rcv_wnd)
 193{
 194	unsigned int space = (__space < 0 ? 0 : __space);
 195
  196	/* If no clamp is set, set the clamp to the max possible scaled window */
 197	if (*window_clamp == 0)
 198		(*window_clamp) = (65535 << 14);
 199	space = min(*window_clamp, space);
 200
 201	/* Quantize space offering to a multiple of mss if possible. */
 202	if (space > mss)
 203		space = (space / mss) * mss;
 204
 205	/* NOTE: offering an initial window larger than 32767
 206	 * will break some buggy TCP stacks. If the admin tells us
 207	 * it is likely we could be speaking with such a buggy stack
 208	 * we will truncate our initial window offering to 32K-1
 209	 * unless the remote has sent us a window scaling option,
 210	 * which we interpret as a sign the remote TCP is not
 211	 * misinterpreting the window field as a signed quantity.
 212	 */
 213	if (sysctl_tcp_workaround_signed_windows)
 214		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
 215	else
 216		(*rcv_wnd) = space;
 217
 218	(*rcv_wscale) = 0;
 219	if (wscale_ok) {
 220		/* Set window scaling on max possible window
 221		 * See RFC1323 for an explanation of the limit to 14
 222		 */
 223		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
 224		space = min_t(u32, space, *window_clamp);
 225		while (space > 65535 && (*rcv_wscale) < 14) {
 226			space >>= 1;
 227			(*rcv_wscale)++;
 228		}
 229	}
 230
  231	/* Set the initial window to a value large enough for senders starting with
 232	 * initial congestion window of TCP_DEFAULT_INIT_RCVWND. Place
 233	 * a limit on the initial window when mss is larger than 1460.
 234	 */
 235	if (mss > (1 << *rcv_wscale)) {
 236		int init_cwnd = TCP_DEFAULT_INIT_RCVWND;
 237		if (mss > 1460)
 238			init_cwnd =
 239			max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
 240		/* when initializing use the value from init_rcv_wnd
 241		 * rather than the default from above
 242		 */
 243		if (init_rcv_wnd)
 244			*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
 245		else
 246			*rcv_wnd = min(*rcv_wnd, init_cwnd * mss);
 247	}
 248
 249	/* Set the clamp no higher than max representable value */
 250	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
 251}
 252EXPORT_SYMBOL(tcp_select_initial_window);
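
/* Editor's note: a standalone sketch (not kernel code) of how the loop
 * above derives the window-scale shift; the 65535 ceiling and the limit
 * of 14 come from RFC 1323, as the comment says.  Names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t pick_rcv_wscale(uint32_t space)
{
	uint8_t wscale = 0;

	while (space > 65535 && wscale < 14) {
		space >>= 1;
		wscale++;
	}
	return wscale;
}

int main(void)
{
	/* A 4 MB buffer: 4194304 >> 6 = 65536 is still too large, while
	 * 4194304 >> 7 = 32768 fits, so the loop settles on 7. */
	printf("%d\n", pick_rcv_wscale(4 * 1024 * 1024));	/* prints 7 */
	return 0;
}
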
 253
  254/* Choose a new window to advertise, update state in tcp_sock for the
 255 * socket, and return result with RFC1323 scaling applied.  The return
 256 * value can be stuffed directly into th->window for an outgoing
 257 * frame.
 258 */
 259static u16 tcp_select_window(struct sock *sk)
 260{
 261	struct tcp_sock *tp = tcp_sk(sk);
 262	u32 cur_win = tcp_receive_window(tp);
 263	u32 new_win = __tcp_select_window(sk);
 264
 265	/* Never shrink the offered window */
 266	if (new_win < cur_win) {
 267		/* Danger Will Robinson!
 268		 * Don't update rcv_wup/rcv_wnd here or else
 269		 * we will not be able to advertise a zero
 270		 * window in time.  --DaveM
 271		 *
 272		 * Relax Will Robinson.
 273		 */
 274		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
 275	}
 276	tp->rcv_wnd = new_win;
 277	tp->rcv_wup = tp->rcv_nxt;
 278
 279	/* Make sure we do not exceed the maximum possible
 280	 * scaled window.
 281	 */
 282	if (!tp->rx_opt.rcv_wscale && sysctl_tcp_workaround_signed_windows)
 283		new_win = min(new_win, MAX_TCP_WINDOW);
 284	else
 285		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
 286
 287	/* RFC1323 scaling applied */
 288	new_win >>= tp->rx_opt.rcv_wscale;
 289
 290	/* If we advertise zero window, disable fast path. */
 291	if (new_win == 0)
 292		tp->pred_flags = 0;
 293
 294	return new_win;
 295}
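
/* Editor's note: a standalone sketch of the ALIGN() step above.  Because
 * the advertised value is right-shifted by rcv_wscale, the window can only
 * move in steps of (1 << rcv_wscale) bytes; rounding the current window up
 * to that granularity keeps the advertisement from ever shrinking.  The
 * numbers are hypothetical.
 */
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* With wscale 7 the granularity is 128 bytes: 10000 rounds up to
	 * 10112 = 79 * 128, which scales to exactly 79 on the wire. */
	printf("%d\n", ALIGN(10000, 1 << 7));	/* prints 10112 */
	return 0;
}
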
 296
 297/* Packet ECN state for a SYN-ACK */
 298static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
 299{
 300	TCP_SKB_CB(skb)->flags &= ~TCPHDR_CWR;
 301	if (!(tp->ecn_flags & TCP_ECN_OK))
 302		TCP_SKB_CB(skb)->flags &= ~TCPHDR_ECE;
 303}
 304
 305/* Packet ECN state for a SYN.  */
 306static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
 307{
 308	struct tcp_sock *tp = tcp_sk(sk);
 309
 310	tp->ecn_flags = 0;
 311	if (sysctl_tcp_ecn == 1) {
 312		TCP_SKB_CB(skb)->flags |= TCPHDR_ECE | TCPHDR_CWR;
 313		tp->ecn_flags = TCP_ECN_OK;
 314	}
 315}
 316
 317static __inline__ void
 318TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
 319{
 320	if (inet_rsk(req)->ecn_ok)
 321		th->ece = 1;
 322}
 323
 324/* Set up ECN state for a packet on a ESTABLISHED socket that is about to
 325 * be sent.
 326 */
 327static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
 328				int tcp_header_len)
 329{
 330	struct tcp_sock *tp = tcp_sk(sk);
 331
 332	if (tp->ecn_flags & TCP_ECN_OK) {
 333		/* Not-retransmitted data segment: set ECT and inject CWR. */
 334		if (skb->len != tcp_header_len &&
 335		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
 336			INET_ECN_xmit(sk);
 337			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
 338				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
 339				tcp_hdr(skb)->cwr = 1;
 340				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 341			}
 342		} else {
 343			/* ACK or retransmitted segment: clear ECT|CE */
 344			INET_ECN_dontxmit(sk);
 345		}
 346		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
 347			tcp_hdr(skb)->ece = 1;
 348	}
 349}
 350
  351/* Construct the common control bits of a non-data skb. If SYN/FIN is
  352 * present, auto-increment the end seqno.
 353 */
 354static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
 355{
 356	skb->ip_summed = CHECKSUM_PARTIAL;
 357	skb->csum = 0;
 358
 359	TCP_SKB_CB(skb)->flags = flags;
 360	TCP_SKB_CB(skb)->sacked = 0;
 361
 362	skb_shinfo(skb)->gso_segs = 1;
 363	skb_shinfo(skb)->gso_size = 0;
 364	skb_shinfo(skb)->gso_type = 0;
 365
 366	TCP_SKB_CB(skb)->seq = seq;
 367	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
 368		seq++;
 369	TCP_SKB_CB(skb)->end_seq = seq;
 370}
 371
 372static inline int tcp_urg_mode(const struct tcp_sock *tp)
 373{
 374	return tp->snd_una != tp->snd_up;
 375}
 376
 377#define OPTION_SACK_ADVERTISE	(1 << 0)
 378#define OPTION_TS		(1 << 1)
 379#define OPTION_MD5		(1 << 2)
 380#define OPTION_WSCALE		(1 << 3)
 381#define OPTION_COOKIE_EXTENSION	(1 << 4)
 382
 383struct tcp_out_options {
 384	u8 options;		/* bit field of OPTION_* */
 385	u8 ws;			/* window scale, 0 to disable */
 386	u8 num_sack_blocks;	/* number of SACK blocks to include */
 387	u8 hash_size;		/* bytes in hash_location */
 388	u16 mss;		/* 0 to disable */
 389	__u32 tsval, tsecr;	/* need to include OPTION_TS */
 390	__u8 *hash_location;	/* temporary pointer, overloaded */
 391};
 392
 393/* The sysctl int routines are generic, so check consistency here.
 394 */
 395static u8 tcp_cookie_size_check(u8 desired)
 396{
 397	int cookie_size;
 398
 399	if (desired > 0)
 400		/* previously specified */
 401		return desired;
 402
 403	cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size);
 404	if (cookie_size <= 0)
 405		/* no default specified */
 406		return 0;
 407
 408	if (cookie_size <= TCP_COOKIE_MIN)
 409		/* value too small, specify minimum */
 410		return TCP_COOKIE_MIN;
 411
 412	if (cookie_size >= TCP_COOKIE_MAX)
 413		/* value too large, specify maximum */
 414		return TCP_COOKIE_MAX;
 415
 416	if (cookie_size & 1)
 417		/* 8-bit multiple, illegal, fix it */
 418		cookie_size++;
 419
 420	return (u8)cookie_size;
 421}
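
/* Editor's note: a standalone restatement of the clamping above, assuming
 * the v3.1 values TCP_COOKIE_MIN = 8 and TCP_COOKIE_MAX = 16.  Odd sizes
 * are rounded up so the cookie stays 16-bit aligned.  The helper name is
 * hypothetical.
 */
#include <stdio.h>

static int cookie_clamp(int size, int min, int max)
{
	if (size <= min)
		return min;
	if (size >= max)
		return max;
	return size + (size & 1);	/* round odd values up to even */
}

int main(void)
{
	printf("%d %d %d\n", cookie_clamp(5, 8, 16),
	       cookie_clamp(11, 8, 16), cookie_clamp(40, 8, 16));
	/* prints: 8 12 16 */
	return 0;
}
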
 422
 423/* Write previously computed TCP options to the packet.
 424 *
  425 * Beware: something in the Internet is very sensitive to the ordering of
  426 * TCP options; we learned this the hard way, so be careful here.
  427 * Luckily we can at least blame others for their non-compliance, but from
  428 * an interoperability perspective we seem to be somewhat stuck with
  429 * the ordering we have been using if we want to keep working with
  430 * those broken things (not that it currently hurts anybody, as there is no
  431 * particular reason why the ordering would need to be changed).
 432 *
 433 * At least SACK_PERM as the first option is known to lead to a disaster
 434 * (but it may well be that other scenarios fail similarly).
 435 */
 436static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
 437			      struct tcp_out_options *opts)
 438{
 439	u8 options = opts->options;	/* mungable copy */
 440
 441	/* Having both authentication and cookies for security is redundant,
 442	 * and there's certainly not enough room.  Instead, the cookie-less
 443	 * extension variant is proposed.
 444	 *
 445	 * Consider the pessimal case with authentication.  The options
 446	 * could look like:
 447	 *   COOKIE|MD5(20) + MSS(4) + SACK|TS(12) + WSCALE(4) == 40
 448	 */
 449	if (unlikely(OPTION_MD5 & options)) {
 450		if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
 451			*ptr++ = htonl((TCPOPT_COOKIE << 24) |
 452				       (TCPOLEN_COOKIE_BASE << 16) |
 453				       (TCPOPT_MD5SIG << 8) |
 454				       TCPOLEN_MD5SIG);
 455		} else {
 456			*ptr++ = htonl((TCPOPT_NOP << 24) |
 457				       (TCPOPT_NOP << 16) |
 458				       (TCPOPT_MD5SIG << 8) |
 459				       TCPOLEN_MD5SIG);
 460		}
 461		options &= ~OPTION_COOKIE_EXTENSION;
 462		/* overload cookie hash location */
 463		opts->hash_location = (__u8 *)ptr;
 464		ptr += 4;
 465	}
 466
 467	if (unlikely(opts->mss)) {
 468		*ptr++ = htonl((TCPOPT_MSS << 24) |
 469			       (TCPOLEN_MSS << 16) |
 470			       opts->mss);
 471	}
 472
 473	if (likely(OPTION_TS & options)) {
 474		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
 475			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
 476				       (TCPOLEN_SACK_PERM << 16) |
 477				       (TCPOPT_TIMESTAMP << 8) |
 478				       TCPOLEN_TIMESTAMP);
 479			options &= ~OPTION_SACK_ADVERTISE;
 480		} else {
 481			*ptr++ = htonl((TCPOPT_NOP << 24) |
 482				       (TCPOPT_NOP << 16) |
 483				       (TCPOPT_TIMESTAMP << 8) |
 484				       TCPOLEN_TIMESTAMP);
 485		}
 486		*ptr++ = htonl(opts->tsval);
 487		*ptr++ = htonl(opts->tsecr);
 488	}
 489
  490	/* The specification requires the cookie after the timestamp, so do it now.
 491	 *
 492	 * Consider the pessimal case without authentication.  The options
 493	 * could look like:
 494	 *   MSS(4) + SACK|TS(12) + COOKIE(20) + WSCALE(4) == 40
 495	 */
 496	if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
 497		__u8 *cookie_copy = opts->hash_location;
 498		u8 cookie_size = opts->hash_size;
 499
 500		/* 8-bit multiple handled in tcp_cookie_size_check() above,
 501		 * and elsewhere.
 502		 */
 503		if (0x2 & cookie_size) {
 504			__u8 *p = (__u8 *)ptr;
 505
 506			/* 16-bit multiple */
 507			*p++ = TCPOPT_COOKIE;
 508			*p++ = TCPOLEN_COOKIE_BASE + cookie_size;
 509			*p++ = *cookie_copy++;
 510			*p++ = *cookie_copy++;
 511			ptr++;
 512			cookie_size -= 2;
 513		} else {
 514			/* 32-bit multiple */
 515			*ptr++ = htonl(((TCPOPT_NOP << 24) |
 516					(TCPOPT_NOP << 16) |
 517					(TCPOPT_COOKIE << 8) |
 518					TCPOLEN_COOKIE_BASE) +
 519				       cookie_size);
 520		}
 521
 522		if (cookie_size > 0) {
 523			memcpy(ptr, cookie_copy, cookie_size);
 524			ptr += (cookie_size / 4);
 525		}
 526	}
 527
 528	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
 529		*ptr++ = htonl((TCPOPT_NOP << 24) |
 530			       (TCPOPT_NOP << 16) |
 531			       (TCPOPT_SACK_PERM << 8) |
 532			       TCPOLEN_SACK_PERM);
 533	}
 534
 535	if (unlikely(OPTION_WSCALE & options)) {
 536		*ptr++ = htonl((TCPOPT_NOP << 24) |
 537			       (TCPOPT_WINDOW << 16) |
 538			       (TCPOLEN_WINDOW << 8) |
 539			       opts->ws);
 540	}
 541
 542	if (unlikely(opts->num_sack_blocks)) {
 543		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
 544			tp->duplicate_sack : tp->selective_acks;
 545		int this_sack;
 546
 547		*ptr++ = htonl((TCPOPT_NOP  << 24) |
 548			       (TCPOPT_NOP  << 16) |
 549			       (TCPOPT_SACK <<  8) |
 550			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
 551						     TCPOLEN_SACK_PERBLOCK)));
 552
 553		for (this_sack = 0; this_sack < opts->num_sack_blocks;
 554		     ++this_sack) {
 555			*ptr++ = htonl(sp[this_sack].start_seq);
 556			*ptr++ = htonl(sp[this_sack].end_seq);
 557		}
 558
 559		tp->rx_opt.dsack = 0;
 560	}
 561}
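
/* Editor's note: a standalone illustration of how one 32-bit option word
 * above is packed.  Using the standard option kinds (NOP = 1, TIMESTAMP
 * kind = 8, length = 10 per RFC 1323), the usual timestamp prefix becomes
 * the bytes 01 01 08 0a on the wire.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t word = htonl((1u << 24) | (1u << 16) | (8u << 8) | 10u);

	/* After htonl() the first byte sent is the high-order one: NOP,
	 * NOP, kind 8, length 10, then the tsval/tsecr words follow. */
	printf("0x%08x\n", ntohl(word));	/* prints 0x0101080a */
	return 0;
}
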
 562
 563/* Compute TCP options for SYN packets. This is not the final
 564 * network wire format yet.
 565 */
 566static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 567				struct tcp_out_options *opts,
 568				struct tcp_md5sig_key **md5) {
 569	struct tcp_sock *tp = tcp_sk(sk);
 570	struct tcp_cookie_values *cvp = tp->cookie_values;
 571	unsigned remaining = MAX_TCP_OPTION_SPACE;
 572	u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
 573			 tcp_cookie_size_check(cvp->cookie_desired) :
 574			 0;
 575
 576#ifdef CONFIG_TCP_MD5SIG
 577	*md5 = tp->af_specific->md5_lookup(sk, sk);
 578	if (*md5) {
 579		opts->options |= OPTION_MD5;
 580		remaining -= TCPOLEN_MD5SIG_ALIGNED;
 581	}
 582#else
 583	*md5 = NULL;
 584#endif
 585
  586	/* We always get an MSS option.  If timestamps are in use, the option
  587	 * bytes that will appear in normal data packets must be counted in
  588	 * the MSS advertised.  But we subtract them from tp->mss_cache so that
  589	 * calculations in tcp_sendmsg are simpler etc.  So account for this
  590	 * fact here if necessary.  If we don't do this correctly, as a
  591	 * receiver we won't recognize data packets as being full sized when we
  592	 * should, and thus we won't abide by the delayed ACK rules correctly.
  593	 * SACKs don't matter; we never delay an ACK when we have any of those
  594	 * going out.  */
 595	opts->mss = tcp_advertise_mss(sk);
 596	remaining -= TCPOLEN_MSS_ALIGNED;
 597
 598	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
 599		opts->options |= OPTION_TS;
 600		opts->tsval = TCP_SKB_CB(skb)->when;
 601		opts->tsecr = tp->rx_opt.ts_recent;
 602		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 603	}
 604	if (likely(sysctl_tcp_window_scaling)) {
 605		opts->ws = tp->rx_opt.rcv_wscale;
 606		opts->options |= OPTION_WSCALE;
 607		remaining -= TCPOLEN_WSCALE_ALIGNED;
 608	}
 609	if (likely(sysctl_tcp_sack)) {
 610		opts->options |= OPTION_SACK_ADVERTISE;
 611		if (unlikely(!(OPTION_TS & opts->options)))
 612			remaining -= TCPOLEN_SACKPERM_ALIGNED;
 613	}
 614
 615	/* Note that timestamps are required by the specification.
 616	 *
 617	 * Odd numbers of bytes are prohibited by the specification, ensuring
 618	 * that the cookie is 16-bit aligned, and the resulting cookie pair is
 619	 * 32-bit aligned.
 620	 */
 621	if (*md5 == NULL &&
 622	    (OPTION_TS & opts->options) &&
 623	    cookie_size > 0) {
 624		int need = TCPOLEN_COOKIE_BASE + cookie_size;
 625
 626		if (0x2 & need) {
 627			/* 32-bit multiple */
 628			need += 2; /* NOPs */
 629
 630			if (need > remaining) {
 631				/* try shrinking cookie to fit */
 632				cookie_size -= 2;
 633				need -= 4;
 634			}
 635		}
 636		while (need > remaining && TCP_COOKIE_MIN <= cookie_size) {
 637			cookie_size -= 4;
 638			need -= 4;
 639		}
 640		if (TCP_COOKIE_MIN <= cookie_size) {
 641			opts->options |= OPTION_COOKIE_EXTENSION;
 642			opts->hash_location = (__u8 *)&cvp->cookie_pair[0];
 643			opts->hash_size = cookie_size;
 644
 645			/* Remember for future incarnations. */
 646			cvp->cookie_desired = cookie_size;
 647
 648			if (cvp->cookie_desired != cvp->cookie_pair_size) {
 649				/* Currently use random bytes as a nonce,
 650				 * assuming these are completely unpredictable
 651				 * by hostile users of the same system.
 652				 */
 653				get_random_bytes(&cvp->cookie_pair[0],
 654						 cookie_size);
 655				cvp->cookie_pair_size = cookie_size;
 656			}
 657
 658			remaining -= need;
 659		}
 660	}
 661	return MAX_TCP_OPTION_SPACE - remaining;
 662}
 663
 664/* Set up TCP options for SYN-ACKs. */
 665static unsigned tcp_synack_options(struct sock *sk,
 666				   struct request_sock *req,
 667				   unsigned mss, struct sk_buff *skb,
 668				   struct tcp_out_options *opts,
 669				   struct tcp_md5sig_key **md5,
 670				   struct tcp_extend_values *xvp)
 671{
 672	struct inet_request_sock *ireq = inet_rsk(req);
 673	unsigned remaining = MAX_TCP_OPTION_SPACE;
 674	u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ?
 675			 xvp->cookie_plus :
 676			 0;
 677
 678#ifdef CONFIG_TCP_MD5SIG
 679	*md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
 680	if (*md5) {
 681		opts->options |= OPTION_MD5;
 682		remaining -= TCPOLEN_MD5SIG_ALIGNED;
 683
 684		/* We can't fit any SACK blocks in a packet with MD5 + TS
 685		 * options. There was discussion about disabling SACK
 686		 * rather than TS in order to fit in better with old,
 687		 * buggy kernels, but that was deemed to be unnecessary.
 688		 */
 689		ireq->tstamp_ok &= !ireq->sack_ok;
 690	}
 691#else
 692	*md5 = NULL;
 693#endif
 694
 695	/* We always send an MSS option. */
 696	opts->mss = mss;
 697	remaining -= TCPOLEN_MSS_ALIGNED;
 698
 699	if (likely(ireq->wscale_ok)) {
 700		opts->ws = ireq->rcv_wscale;
 701		opts->options |= OPTION_WSCALE;
 702		remaining -= TCPOLEN_WSCALE_ALIGNED;
 703	}
 704	if (likely(ireq->tstamp_ok)) {
 705		opts->options |= OPTION_TS;
 706		opts->tsval = TCP_SKB_CB(skb)->when;
 707		opts->tsecr = req->ts_recent;
 708		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 709	}
 710	if (likely(ireq->sack_ok)) {
 711		opts->options |= OPTION_SACK_ADVERTISE;
 712		if (unlikely(!ireq->tstamp_ok))
 713			remaining -= TCPOLEN_SACKPERM_ALIGNED;
 714	}
 715
 716	/* Similar rationale to tcp_syn_options() applies here, too.
 717	 * If the <SYN> options fit, the same options should fit now!
 718	 */
 719	if (*md5 == NULL &&
 720	    ireq->tstamp_ok &&
 721	    cookie_plus > TCPOLEN_COOKIE_BASE) {
 722		int need = cookie_plus; /* has TCPOLEN_COOKIE_BASE */
 723
 724		if (0x2 & need) {
 725			/* 32-bit multiple */
 726			need += 2; /* NOPs */
 727		}
 728		if (need <= remaining) {
 729			opts->options |= OPTION_COOKIE_EXTENSION;
 730			opts->hash_size = cookie_plus - TCPOLEN_COOKIE_BASE;
 731			remaining -= need;
 732		} else {
 733			/* There's no error return, so flag it. */
 734			xvp->cookie_out_never = 1; /* true */
 735			opts->hash_size = 0;
 736		}
 737	}
 738	return MAX_TCP_OPTION_SPACE - remaining;
 739}
 740
 741/* Compute TCP options for ESTABLISHED sockets. This is not the
 742 * final wire format yet.
 743 */
 744static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
 745					struct tcp_out_options *opts,
 746					struct tcp_md5sig_key **md5) {
 747	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
 748	struct tcp_sock *tp = tcp_sk(sk);
 749	unsigned size = 0;
 750	unsigned int eff_sacks;
 751
 752#ifdef CONFIG_TCP_MD5SIG
 753	*md5 = tp->af_specific->md5_lookup(sk, sk);
 754	if (unlikely(*md5)) {
 755		opts->options |= OPTION_MD5;
 756		size += TCPOLEN_MD5SIG_ALIGNED;
 757	}
 758#else
 759	*md5 = NULL;
 760#endif
 761
 762	if (likely(tp->rx_opt.tstamp_ok)) {
 763		opts->options |= OPTION_TS;
 764		opts->tsval = tcb ? tcb->when : 0;
 765		opts->tsecr = tp->rx_opt.ts_recent;
 766		size += TCPOLEN_TSTAMP_ALIGNED;
 767	}
 768
 769	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
 770	if (unlikely(eff_sacks)) {
 771		const unsigned remaining = MAX_TCP_OPTION_SPACE - size;
 772		opts->num_sack_blocks =
 773			min_t(unsigned, eff_sacks,
 774			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
 775			      TCPOLEN_SACK_PERBLOCK);
 776		size += TCPOLEN_SACK_BASE_ALIGNED +
 777			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
 778	}
 779
 780	return size;
 781}
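
/* Editor's note: a standalone worked example of the SACK budget above,
 * assuming the usual constants (MAX_TCP_OPTION_SPACE = 40,
 * TCPOLEN_TSTAMP_ALIGNED = 12, TCPOLEN_SACK_BASE_ALIGNED = 4,
 * TCPOLEN_SACK_PERBLOCK = 8).
 */
#include <stdio.h>

int main(void)
{
	int remaining = 40 - 12;	/* option space left after timestamps */

	/* (28 - 4) / 8 = 3: at most three SACK blocks fit per segment. */
	printf("%d\n", (remaining - 4) / 8);	/* prints 3 */
	return 0;
}
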
 782
  783/* This routine actually transmits TCP packets queued up by
  784 * tcp_do_sendmsg().  It is used by both the initial
 785 * transmission and possible later retransmissions.
 786 * All SKB's seen here are completely headerless.  It is our
 787 * job to build the TCP header, and pass the packet down to
 788 * IP so it can do the same plus pass the packet off to the
 789 * device.
 790 *
 791 * We are working here with either a clone of the original
 792 * SKB, or a fresh unique copy made by the retransmit engine.
 793 */
 794static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 795			    gfp_t gfp_mask)
 796{
 797	const struct inet_connection_sock *icsk = inet_csk(sk);
 798	struct inet_sock *inet;
 799	struct tcp_sock *tp;
 800	struct tcp_skb_cb *tcb;
 801	struct tcp_out_options opts;
 802	unsigned tcp_options_size, tcp_header_size;
 803	struct tcp_md5sig_key *md5;
 804	struct tcphdr *th;
 805	int err;
 806
 807	BUG_ON(!skb || !tcp_skb_pcount(skb));
 808
 809	/* If congestion control is doing timestamping, we must
 810	 * take such a timestamp before we potentially clone/copy.
 811	 */
 812	if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
 813		__net_timestamp(skb);
 814
 815	if (likely(clone_it)) {
 816		if (unlikely(skb_cloned(skb)))
 817			skb = pskb_copy(skb, gfp_mask);
 818		else
 819			skb = skb_clone(skb, gfp_mask);
 820		if (unlikely(!skb))
 821			return -ENOBUFS;
 822	}
 823
 824	inet = inet_sk(sk);
 825	tp = tcp_sk(sk);
 826	tcb = TCP_SKB_CB(skb);
 827	memset(&opts, 0, sizeof(opts));
 828
 829	if (unlikely(tcb->flags & TCPHDR_SYN))
 830		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
 831	else
 832		tcp_options_size = tcp_established_options(sk, skb, &opts,
 833							   &md5);
 834	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
 835
 836	if (tcp_packets_in_flight(tp) == 0) {
 837		tcp_ca_event(sk, CA_EVENT_TX_START);
 838		skb->ooo_okay = 1;
 839	} else
 840		skb->ooo_okay = 0;
 841
 842	skb_push(skb, tcp_header_size);
 843	skb_reset_transport_header(skb);
 844	skb_set_owner_w(skb, sk);
 845
 846	/* Build TCP header and checksum it. */
 847	th = tcp_hdr(skb);
 848	th->source		= inet->inet_sport;
 849	th->dest		= inet->inet_dport;
 850	th->seq			= htonl(tcb->seq);
 851	th->ack_seq		= htonl(tp->rcv_nxt);
 852	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
 853					tcb->flags);
 854
 855	if (unlikely(tcb->flags & TCPHDR_SYN)) {
 856		/* RFC1323: The window in SYN & SYN/ACK segments
 857		 * is never scaled.
 858		 */
 859		th->window	= htons(min(tp->rcv_wnd, 65535U));
 860	} else {
 861		th->window	= htons(tcp_select_window(sk));
 862	}
 863	th->check		= 0;
 864	th->urg_ptr		= 0;
 865
 866	/* The urg_mode check is necessary during a below snd_una win probe */
 867	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
 868		if (before(tp->snd_up, tcb->seq + 0x10000)) {
 869			th->urg_ptr = htons(tp->snd_up - tcb->seq);
 870			th->urg = 1;
 871		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
 872			th->urg_ptr = htons(0xFFFF);
 873			th->urg = 1;
 874		}
 875	}
 876
 877	tcp_options_write((__be32 *)(th + 1), tp, &opts);
 878	if (likely((tcb->flags & TCPHDR_SYN) == 0))
 879		TCP_ECN_send(sk, skb, tcp_header_size);
 880
 881#ifdef CONFIG_TCP_MD5SIG
 882	/* Calculate the MD5 hash, as we have all we need now */
 883	if (md5) {
 884		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
 885		tp->af_specific->calc_md5_hash(opts.hash_location,
 886					       md5, sk, NULL, skb);
 887	}
 888#endif
 889
 890	icsk->icsk_af_ops->send_check(sk, skb);
 891
 892	if (likely(tcb->flags & TCPHDR_ACK))
 893		tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
 894
 895	if (skb->len != tcp_header_size)
 896		tcp_event_data_sent(tp, skb, sk);
 897
 898	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
 899		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
 900			      tcp_skb_pcount(skb));
 901
 902	err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl);
 903	if (likely(err <= 0))
 904		return err;
 905
 906	tcp_enter_cwr(sk, 1);
 907
 908	return net_xmit_eval(err);
 909}
 910
 911/* This routine just queues the buffer for sending.
 912 *
  913 * NOTE: the probe0 timer is not checked; do not forget tcp_push_pending_frames,
  914 * otherwise the socket can stall.
 915 */
 916static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 917{
 918	struct tcp_sock *tp = tcp_sk(sk);
 919
 920	/* Advance write_seq and place onto the write_queue. */
 921	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
 922	skb_header_release(skb);
 923	tcp_add_write_queue_tail(sk, skb);
 924	sk->sk_wmem_queued += skb->truesize;
 925	sk_mem_charge(sk, skb->truesize);
 926}
 927
 928/* Initialize TSO segments for a packet. */
 929static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
 930				 unsigned int mss_now)
 931{
 932	if (skb->len <= mss_now || !sk_can_gso(sk) ||
 933	    skb->ip_summed == CHECKSUM_NONE) {
 934		/* Avoid the costly divide in the normal
 935		 * non-TSO case.
 936		 */
 937		skb_shinfo(skb)->gso_segs = 1;
 938		skb_shinfo(skb)->gso_size = 0;
 939		skb_shinfo(skb)->gso_type = 0;
 940	} else {
 941		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
 942		skb_shinfo(skb)->gso_size = mss_now;
 943		skb_shinfo(skb)->gso_type = sk->sk_gso_type;
 944	}
 945}
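
/* Editor's note: a standalone sketch of the pcount computation above -- a
 * TSO skb counts as DIV_ROUND_UP(len, mss) wire segments.  Numbers are
 * hypothetical.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* A 10000-byte payload at mss 1448 occupies 7 segments
	 * (6 * 1448 = 8688 is not enough, 7 * 1448 = 10136 is). */
	printf("%d\n", DIV_ROUND_UP(10000, 1448));	/* prints 7 */
	return 0;
}
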
 946
  947/* When a modification to fackets_out becomes necessary, we need to check
  948 * whether skb is counted in fackets_out or not.
  949 */
 950static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
 951				   int decr)
 952{
 953	struct tcp_sock *tp = tcp_sk(sk);
 954
 955	if (!tp->sacked_out || tcp_is_reno(tp))
 956		return;
 957
 958	if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
 959		tp->fackets_out -= decr;
 960}
 961
  962/* The pcount of an skb in the middle of the write queue got changed; we need
  963 * to do various tweaks to fix the counters.
  964 */
 965static void tcp_adjust_pcount(struct sock *sk, struct sk_buff *skb, int decr)
 966{
 967	struct tcp_sock *tp = tcp_sk(sk);
 968
 969	tp->packets_out -= decr;
 970
 971	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
 972		tp->sacked_out -= decr;
 973	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
 974		tp->retrans_out -= decr;
 975	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
 976		tp->lost_out -= decr;
 977
 978	/* Reno case is special. Sigh... */
 979	if (tcp_is_reno(tp) && decr > 0)
 980		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
 981
 982	tcp_adjust_fackets_out(sk, skb, decr);
 983
 984	if (tp->lost_skb_hint &&
 985	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
 986	    (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
 987		tp->lost_cnt_hint -= decr;
 988
 989	tcp_verify_left_out(tp);
 990}
 991
 992/* Function to create two new TCP segments.  Shrinks the given segment
 993 * to the specified size and appends a new segment with the rest of the
 994 * packet to the list.  This won't be called frequently, I hope.
 995 * Remember, these are still headerless SKBs at this point.
 996 */
 997int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
 998		 unsigned int mss_now)
 999{
1000	struct tcp_sock *tp = tcp_sk(sk);
1001	struct sk_buff *buff;
1002	int nsize, old_factor;
1003	int nlen;
1004	u8 flags;
1005
1006	if (WARN_ON(len > skb->len))
1007		return -EINVAL;
1008
1009	nsize = skb_headlen(skb) - len;
1010	if (nsize < 0)
1011		nsize = 0;
1012
1013	if (skb_cloned(skb) &&
1014	    skb_is_nonlinear(skb) &&
1015	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
1016		return -ENOMEM;
1017
1018	/* Get a new skb... force flag on. */
1019	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
1020	if (buff == NULL)
1021		return -ENOMEM; /* We'll just try again later. */
1022
1023	sk->sk_wmem_queued += buff->truesize;
1024	sk_mem_charge(sk, buff->truesize);
1025	nlen = skb->len - len - nsize;
1026	buff->truesize += nlen;
1027	skb->truesize -= nlen;
1028
1029	/* Correct the sequence numbers. */
1030	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1031	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1032	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1033
1034	/* PSH and FIN should only be set in the second packet. */
1035	flags = TCP_SKB_CB(skb)->flags;
1036	TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
1037	TCP_SKB_CB(buff)->flags = flags;
1038	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1039
1040	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) {
1041		/* Copy and checksum data tail into the new buffer. */
1042		buff->csum = csum_partial_copy_nocheck(skb->data + len,
1043						       skb_put(buff, nsize),
1044						       nsize, 0);
1045
1046		skb_trim(skb, len);
1047
1048		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
1049	} else {
1050		skb->ip_summed = CHECKSUM_PARTIAL;
1051		skb_split(skb, buff, len);
1052	}
1053
1054	buff->ip_summed = skb->ip_summed;
1055
 1056	/* Looks stupid, but our code really uses the `when' field of
 1057	 * skbs which it has never sent before. --ANK
 1058	 */
1059	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
1060	buff->tstamp = skb->tstamp;
1061
1062	old_factor = tcp_skb_pcount(skb);
1063
1064	/* Fix up tso_factor for both original and new SKB.  */
1065	tcp_set_skb_tso_segs(sk, skb, mss_now);
1066	tcp_set_skb_tso_segs(sk, buff, mss_now);
1067
1068	/* If this packet has been sent out already, we must
1069	 * adjust the various packet counters.
1070	 */
1071	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
1072		int diff = old_factor - tcp_skb_pcount(skb) -
1073			tcp_skb_pcount(buff);
1074
1075		if (diff)
1076			tcp_adjust_pcount(sk, skb, diff);
1077	}
1078
1079	/* Link BUFF into the send queue. */
1080	skb_header_release(buff);
1081	tcp_insert_write_queue_after(skb, buff, sk);
1082
1083	return 0;
1084}
1085
1086/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 1087 * eventually). The difference is that the pulled data is not copied, but
 1088 * immediately discarded.
1089 */
1090static void __pskb_trim_head(struct sk_buff *skb, int len)
1091{
1092	int i, k, eat;
1093
1094	eat = len;
1095	k = 0;
1096	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1097		if (skb_shinfo(skb)->frags[i].size <= eat) {
1098			put_page(skb_shinfo(skb)->frags[i].page);
1099			eat -= skb_shinfo(skb)->frags[i].size;
1100		} else {
1101			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1102			if (eat) {
1103				skb_shinfo(skb)->frags[k].page_offset += eat;
1104				skb_shinfo(skb)->frags[k].size -= eat;
1105				eat = 0;
1106			}
1107			k++;
1108		}
1109	}
1110	skb_shinfo(skb)->nr_frags = k;
1111
1112	skb_reset_tail_pointer(skb);
1113	skb->data_len -= len;
1114	skb->len = skb->data_len;
1115}
1116
1117/* Remove acked data from a packet in the transmit queue. */
1118int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
1119{
1120	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
1121		return -ENOMEM;
1122
1123	/* If len == headlen, we avoid __skb_pull to preserve alignment. */
1124	if (unlikely(len < skb_headlen(skb)))
1125		__skb_pull(skb, len);
1126	else
1127		__pskb_trim_head(skb, len - skb_headlen(skb));
1128
1129	TCP_SKB_CB(skb)->seq += len;
1130	skb->ip_summed = CHECKSUM_PARTIAL;
1131
1132	skb->truesize	     -= len;
1133	sk->sk_wmem_queued   -= len;
1134	sk_mem_uncharge(sk, len);
1135	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
1136
1137	/* Any change of skb->len requires recalculation of tso
1138	 * factor and mss.
1139	 */
1140	if (tcp_skb_pcount(skb) > 1)
1141		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk));
1142
1143	return 0;
1144}
1145
1146/* Calculate MSS. Not accounting for SACKs here.  */
1147int tcp_mtu_to_mss(struct sock *sk, int pmtu)
1148{
1149	struct tcp_sock *tp = tcp_sk(sk);
1150	struct inet_connection_sock *icsk = inet_csk(sk);
1151	int mss_now;
1152
1153	/* Calculate base mss without TCP options:
1154	   It is MMS_S - sizeof(tcphdr) of rfc1122
1155	 */
1156	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
1157
1158	/* Clamp it (mss_clamp does not include tcp options) */
1159	if (mss_now > tp->rx_opt.mss_clamp)
1160		mss_now = tp->rx_opt.mss_clamp;
1161
1162	/* Now subtract optional transport overhead */
1163	mss_now -= icsk->icsk_ext_hdr_len;
1164
1165	/* Then reserve room for full set of TCP options and 8 bytes of data */
1166	if (mss_now < 48)
1167		mss_now = 48;
1168
1169	/* Now subtract TCP options size, not including SACKs */
1170	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);
1171
1172	return mss_now;
1173}
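
/* Editor's note: a standalone worked example of the arithmetic above for
 * plain IPv4 with no extension headers, assuming a 20-byte IP header and
 * negotiated timestamps (so tcp_header_len = 20 + 12).  Values are
 * hypothetical.
 */
#include <stdio.h>

int main(void)
{
	int pmtu = 1500;
	int mss = pmtu - 20 /* net_header_len */ - 20 /* sizeof(tcphdr) */;

	/* 1460 bytes before options; the 12 timestamp bytes already folded
	 * into tcp_header_len come off as well: 1448 bytes of payload. */
	printf("%d\n", mss - 12);	/* prints 1448 */
	return 0;
}
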
1174
1175/* Inverse of above */
1176int tcp_mss_to_mtu(struct sock *sk, int mss)
1177{
1178	struct tcp_sock *tp = tcp_sk(sk);
1179	struct inet_connection_sock *icsk = inet_csk(sk);
1180	int mtu;
1181
1182	mtu = mss +
1183	      tp->tcp_header_len +
1184	      icsk->icsk_ext_hdr_len +
1185	      icsk->icsk_af_ops->net_header_len;
1186
1187	return mtu;
1188}
1189
1190/* MTU probing init per socket */
1191void tcp_mtup_init(struct sock *sk)
1192{
1193	struct tcp_sock *tp = tcp_sk(sk);
1194	struct inet_connection_sock *icsk = inet_csk(sk);
1195
1196	icsk->icsk_mtup.enabled = sysctl_tcp_mtu_probing > 1;
1197	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
1198			       icsk->icsk_af_ops->net_header_len;
1199	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, sysctl_tcp_base_mss);
1200	icsk->icsk_mtup.probe_size = 0;
1201}
1202EXPORT_SYMBOL(tcp_mtup_init);
1203
 1204/* This function synchronizes snd mss to the current pmtu/exthdr set.
 1205
 1206   tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
 1207   account for TCP options; it includes only the bare TCP header.
1208
1209   tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1210   It is minimum of user_mss and mss received with SYN.
1211   It also does not include TCP options.
1212
1213   inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
1214
1215   tp->mss_cache is current effective sending mss, including
1216   all tcp options except for SACKs. It is evaluated,
1217   taking into account current pmtu, but never exceeds
1218   tp->rx_opt.mss_clamp.
1219
1220   NOTE1. rfc1122 clearly states that advertised MSS
1221   DOES NOT include either tcp or ip options.
1222
1223   NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1224   are READ ONLY outside this function.		--ANK (980731)
1225 */
1226unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
1227{
1228	struct tcp_sock *tp = tcp_sk(sk);
1229	struct inet_connection_sock *icsk = inet_csk(sk);
1230	int mss_now;
1231
1232	if (icsk->icsk_mtup.search_high > pmtu)
1233		icsk->icsk_mtup.search_high = pmtu;
1234
1235	mss_now = tcp_mtu_to_mss(sk, pmtu);
1236	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
1237
1238	/* And store cached results */
1239	icsk->icsk_pmtu_cookie = pmtu;
1240	if (icsk->icsk_mtup.enabled)
1241		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1242	tp->mss_cache = mss_now;
1243
1244	return mss_now;
1245}
1246EXPORT_SYMBOL(tcp_sync_mss);
1247
1248/* Compute the current effective MSS, taking SACKs and IP options,
1249 * and even PMTU discovery events into account.
1250 */
1251unsigned int tcp_current_mss(struct sock *sk)
1252{
1253	struct tcp_sock *tp = tcp_sk(sk);
1254	struct dst_entry *dst = __sk_dst_get(sk);
1255	u32 mss_now;
1256	unsigned header_len;
1257	struct tcp_out_options opts;
1258	struct tcp_md5sig_key *md5;
1259
1260	mss_now = tp->mss_cache;
1261
1262	if (dst) {
1263		u32 mtu = dst_mtu(dst);
1264		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
1265			mss_now = tcp_sync_mss(sk, mtu);
1266	}
1267
1268	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
1269		     sizeof(struct tcphdr);
1270	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
1271	 * some common options. If this is an odd packet (because we have SACK
1272	 * blocks etc) then our calculated header_len will be different, and
1273	 * we have to adjust mss_now correspondingly */
1274	if (header_len != tp->tcp_header_len) {
1275		int delta = (int) header_len - tp->tcp_header_len;
1276		mss_now -= delta;
1277	}
1278
1279	return mss_now;
1280}
1281
1282/* Congestion window validation. (RFC2861) */
1283static void tcp_cwnd_validate(struct sock *sk)
1284{
1285	struct tcp_sock *tp = tcp_sk(sk);
1286
1287	if (tp->packets_out >= tp->snd_cwnd) {
 1288		/* Network is fed fully. */
1289		tp->snd_cwnd_used = 0;
1290		tp->snd_cwnd_stamp = tcp_time_stamp;
1291	} else {
1292		/* Network starves. */
1293		if (tp->packets_out > tp->snd_cwnd_used)
1294			tp->snd_cwnd_used = tp->packets_out;
1295
1296		if (sysctl_tcp_slow_start_after_idle &&
1297		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
1298			tcp_cwnd_application_limited(sk);
1299	}
1300}
1301
1302/* Returns the portion of skb which can be sent right away without
1303 * introducing MSS oddities to segment boundaries. In rare cases where
1304 * mss_now != mss_cache, we will request caller to create a small skb
1305 * per input skb which could be mostly avoided here (if desired).
1306 *
1307 * We explicitly want to create a request for splitting write queue tail
1308 * to a small skb for Nagle purposes while avoiding unnecessary modulos,
1309 * thus all the complexity (cwnd_len is always MSS multiple which we
1310 * return whenever allowed by the other factors). Basically we need the
1311 * modulo only when the receiver window alone is the limiting factor or
1312 * when we would be allowed to send the split-due-to-Nagle skb fully.
1313 */
1314static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb,
1315					unsigned int mss_now, unsigned int cwnd)
1316{
1317	struct tcp_sock *tp = tcp_sk(sk);
1318	u32 needed, window, cwnd_len;
1319
1320	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1321	cwnd_len = mss_now * cwnd;
1322
1323	if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk)))
1324		return cwnd_len;
1325
1326	needed = min(skb->len, window);
1327
1328	if (cwnd_len <= needed)
1329		return cwnd_len;
1330
1331	return needed - needed % mss_now;
1332}
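
/* Editor's note: a standalone worked example of the final modulo above.
 * When the receive window (not cwnd) is the limiting factor, the split
 * point is rounded down to an MSS boundary.  Numbers are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned int mss = 1448, window = 5000, skb_len = 8000;
	unsigned int needed = skb_len < window ? skb_len : window;

	/* 5000 - (5000 % 1448) = 4344 = 3 full segments. */
	printf("%u\n", needed - needed % mss);	/* prints 4344 */
	return 0;
}
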
1333
1334/* Can at least one segment of SKB be sent right now, according to the
1335 * congestion window rules?  If so, return how many segments are allowed.
1336 */
1337static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
1338					 struct sk_buff *skb)
1339{
1340	u32 in_flight, cwnd;
1341
1342	/* Don't be strict about the congestion window for the final FIN.  */
1343	if ((TCP_SKB_CB(skb)->flags & TCPHDR_FIN) && tcp_skb_pcount(skb) == 1)
1344		return 1;
1345
1346	in_flight = tcp_packets_in_flight(tp);
1347	cwnd = tp->snd_cwnd;
1348	if (in_flight < cwnd)
1349		return (cwnd - in_flight);
1350
1351	return 0;
1352}
1353
1354/* Initialize TSO state of a skb.
1355 * This must be invoked the first time we consider transmitting
1356 * SKB onto the wire.
1357 */
1358static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
1359			     unsigned int mss_now)
1360{
1361	int tso_segs = tcp_skb_pcount(skb);
1362
1363	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
1364		tcp_set_skb_tso_segs(sk, skb, mss_now);
1365		tso_segs = tcp_skb_pcount(skb);
1366	}
1367	return tso_segs;
1368}
1369
1370/* Minshall's variant of the Nagle send check. */
1371static inline int tcp_minshall_check(const struct tcp_sock *tp)
1372{
1373	return after(tp->snd_sml, tp->snd_una) &&
1374		!after(tp->snd_sml, tp->snd_nxt);
1375}
1376
 1377/* Return 0, if the packet can be sent now without violating Nagle's rules:
1378 * 1. It is full sized.
1379 * 2. Or it contains FIN. (already checked by caller)
1380 * 3. Or TCP_NODELAY was set.
1381 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1382 *    With Minshall's modification: all sent small packets are ACKed.
1383 */
1384static inline int tcp_nagle_check(const struct tcp_sock *tp,
1385				  const struct sk_buff *skb,
1386				  unsigned mss_now, int nonagle)
1387{
1388	return skb->len < mss_now &&
1389		((nonagle & TCP_NAGLE_CORK) ||
1390		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1391}
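
/* Editor's note: a standalone restatement of Minshall's check above.  It
 * asks whether the last small segment sent (snd_sml) is still unacked,
 * i.e. falls in (snd_una, snd_nxt]; sequence space wraps mod 2^32, hence
 * the signed-difference comparison.  Names mirror the fields above.
 */
#include <stdint.h>
#include <stdio.h>

static int seq_after(uint32_t a, uint32_t b)	/* a > b in sequence space */
{
	return (int32_t)(a - b) > 0;
}

static int minshall_check(uint32_t snd_sml, uint32_t snd_una, uint32_t snd_nxt)
{
	return seq_after(snd_sml, snd_una) && !seq_after(snd_sml, snd_nxt);
}

int main(void)
{
	/* A small segment ending at 1005 is still unacked (una = 1000,
	 * nxt = 1010), so further sub-MSS data must wait. */
	printf("%d\n", minshall_check(1005, 1000, 1010));	/* prints 1 */
	return 0;
}
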
1392
1393/* Return non-zero if the Nagle test allows this packet to be
1394 * sent now.
1395 */
1396static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
1397				 unsigned int cur_mss, int nonagle)
1398{
 1399	/* The Nagle rule does not apply to frames which sit in the middle of the
 1400	 * write_queue (they have no chance to get new data).
1401	 *
1402	 * This is implemented in the callers, where they modify the 'nonagle'
1403	 * argument based upon the location of SKB in the send queue.
1404	 */
1405	if (nonagle & TCP_NAGLE_PUSH)
1406		return 1;
1407
1408	/* Don't use the nagle rule for urgent data (or for the final FIN).
1409	 * Nagle can be ignored during F-RTO too (see RFC4138).
1410	 */
1411	if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
1412	    (TCP_SKB_CB(skb)->flags & TCPHDR_FIN))
1413		return 1;
1414
1415	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
1416		return 1;
1417
1418	return 0;
1419}
1420
1421/* Does at least the first segment of SKB fit into the send window? */
1422static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb,
1423				   unsigned int cur_mss)
1424{
1425	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
1426
1427	if (skb->len > cur_mss)
1428		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
1429
1430	return !after(end_seq, tcp_wnd_end(tp));
1431}
1432
1433/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
1434 * should be put on the wire right now.  If so, it returns the number of
1435 * packets allowed by the congestion window.
1436 */
1437static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
1438				 unsigned int cur_mss, int nonagle)
1439{
1440	struct tcp_sock *tp = tcp_sk(sk);
1441	unsigned int cwnd_quota;
1442
1443	tcp_init_tso_segs(sk, skb, cur_mss);
1444
1445	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
1446		return 0;
1447
1448	cwnd_quota = tcp_cwnd_test(tp, skb);
1449	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
1450		cwnd_quota = 0;
1451
1452	return cwnd_quota;
1453}
1454
1455/* Test if sending is allowed right now. */
1456int tcp_may_send_now(struct sock *sk)
1457{
1458	struct tcp_sock *tp = tcp_sk(sk);
1459	struct sk_buff *skb = tcp_send_head(sk);
1460
1461	return skb &&
1462		tcp_snd_test(sk, skb, tcp_current_mss(sk),
1463			     (tcp_skb_is_last(sk, skb) ?
1464			      tp->nonagle : TCP_NAGLE_PUSH));
1465}
1466
1467/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
1468 * which is put after SKB on the list.  It is very much like
1469 * tcp_fragment() except that it may make several kinds of assumptions
1470 * in order to speed up the splitting operation.  In particular, we
1471 * know that all the data is in scatter-gather pages, and that the
1472 * packet has never been sent out before (and thus is not cloned).
1473 */
1474static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1475			unsigned int mss_now, gfp_t gfp)
1476{
1477	struct sk_buff *buff;
1478	int nlen = skb->len - len;
1479	u8 flags;
1480
1481	/* All of a TSO frame must be composed of paged data.  */
1482	if (skb->len != skb->data_len)
1483		return tcp_fragment(sk, skb, len, mss_now);
1484
1485	buff = sk_stream_alloc_skb(sk, 0, gfp);
1486	if (unlikely(buff == NULL))
1487		return -ENOMEM;
1488
1489	sk->sk_wmem_queued += buff->truesize;
1490	sk_mem_charge(sk, buff->truesize);
1491	buff->truesize += nlen;
1492	skb->truesize -= nlen;
1493
1494	/* Correct the sequence numbers. */
1495	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1496	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1497	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1498
1499	/* PSH and FIN should only be set in the second packet. */
1500	flags = TCP_SKB_CB(skb)->flags;
1501	TCP_SKB_CB(skb)->flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
1502	TCP_SKB_CB(buff)->flags = flags;
1503
1504	/* This packet was never sent out yet, so no SACK bits. */
1505	TCP_SKB_CB(buff)->sacked = 0;
1506
1507	buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1508	skb_split(skb, buff, len);
1509
1510	/* Fix up tso_factor for both original and new SKB.  */
1511	tcp_set_skb_tso_segs(sk, skb, mss_now);
1512	tcp_set_skb_tso_segs(sk, buff, mss_now);
1513
1514	/* Link BUFF into the send queue. */
1515	skb_header_release(buff);
1516	tcp_insert_write_queue_after(skb, buff, sk);
1517
1518	return 0;
1519}
1520
1521/* Try to defer sending, if possible, in order to minimize the amount
1522 * of TSO splitting we do.  View it as a kind of TSO Nagle test.
1523 *
1524 * This algorithm is from John Heffner.
1525 */
1526static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
1527{
1528	struct tcp_sock *tp = tcp_sk(sk);
1529	const struct inet_connection_sock *icsk = inet_csk(sk);
1530	u32 send_win, cong_win, limit, in_flight;
1531	int win_divisor;
1532
1533	if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
1534		goto send_now;
1535
1536	if (icsk->icsk_ca_state != TCP_CA_Open)
1537		goto send_now;
1538
1539	/* Defer for less than two clock ticks. */
1540	if (tp->tso_deferred &&
1541	    (((u32)jiffies << 1) >> 1) - (tp->tso_deferred >> 1) > 1)
1542		goto send_now;
1543
1544	in_flight = tcp_packets_in_flight(tp);
1545
1546	BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight));
1547
1548	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
1549
1550	/* From in_flight test above, we know that cwnd > in_flight.  */
1551	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
1552
1553	limit = min(send_win, cong_win);
1554
1555	/* If a full-sized TSO skb can be sent, do it. */
1556	if (limit >= sk->sk_gso_max_size)
1557		goto send_now;
1558
 1559	/* An skb in the middle of the queue won't get more data; fully sendable already? */
1560	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
1561		goto send_now;
1562
1563	win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
1564	if (win_divisor) {
1565		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
1566
1567		/* If at least some fraction of a window is available,
1568		 * just use it.
1569		 */
1570		chunk /= win_divisor;
1571		if (limit >= chunk)
1572			goto send_now;
1573	} else {
1574		/* Different approach, try not to defer past a single
1575		 * ACK.  Receiver should ACK every other full sized
1576		 * frame, so if we have space for more than 3 frames
1577		 * then send now.
1578		 */
1579		if (limit > tcp_max_burst(tp) * tp->mss_cache)
1580			goto send_now;
1581	}
1582
1583	/* Ok, it looks like it is advisable to defer.  */
1584	tp->tso_deferred = 1 | (jiffies << 1);
1585
1586	return 1;
1587
1588send_now:
1589	tp->tso_deferred = 0;
1590	return 0;
1591}
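
/* Editor's note: a standalone sketch of the deferral arithmetic above with
 * hypothetical numbers.  The decision reduces to: is min(send window, cwnd
 * headroom) at least a 1/win_divisor share of the effective window?
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mss = 1448, snd_wnd = 64 * 1448;
	uint32_t snd_cwnd = 10, in_flight = 6, win_divisor = 3;

	uint32_t cong_win = (snd_cwnd - in_flight) * mss;	/* 4 segments */
	uint32_t limit = cong_win < snd_wnd ? cong_win : snd_wnd;
	uint32_t wnd = snd_cwnd * mss < snd_wnd ? snd_cwnd * mss : snd_wnd;

	/* limit = 5792 >= wnd / 3 = 4826, so transmit now, don't defer. */
	printf("%s\n", limit >= wnd / win_divisor ? "send now" : "defer");
	return 0;
}
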
1592
1593/* Create a new MTU probe if we are ready.
 1594 * MTU probing regularly attempts to increase the path MTU by
1595 * deliberately sending larger packets.  This discovers routing
1596 * changes resulting in larger path MTUs.
1597 *
1598 * Returns 0 if we should wait to probe (no cwnd available),
1599 *         1 if a probe was sent,
1600 *         -1 otherwise
1601 */
1602static int tcp_mtu_probe(struct sock *sk)
1603{
1604	struct tcp_sock *tp = tcp_sk(sk);
1605	struct inet_connection_sock *icsk = inet_csk(sk);
1606	struct sk_buff *skb, *nskb, *next;
1607	int len;
1608	int probe_size;
1609	int size_needed;
1610	int copy;
1611	int mss_now;
1612
1613	/* Not currently probing/verifying,
1614	 * not in recovery,
1615	 * have enough cwnd, and
1616	 * not SACKing (the variable headers throw things off) */
1617	if (!icsk->icsk_mtup.enabled ||
1618	    icsk->icsk_mtup.probe_size ||
1619	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
1620	    tp->snd_cwnd < 11 ||
1621	    tp->rx_opt.num_sacks || tp->rx_opt.dsack)
1622		return -1;
1623
1624	/* Very simple search strategy: just double the MSS. */
1625	mss_now = tcp_current_mss(sk);
1626	probe_size = 2 * tp->mss_cache;
1627	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
1628	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
1629		/* TODO: set timer for probe_converge_event */
1630		return -1;
1631	}
1632
1633	/* Have enough data in the send queue to probe? */
1634	if (tp->write_seq - tp->snd_nxt < size_needed)
1635		return -1;
1636
1637	if (tp->snd_wnd < size_needed)
1638		return -1;
1639	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
1640		return 0;
1641
1642	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
1643	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
1644		if (!tcp_packets_in_flight(tp))
1645			return -1;
1646		else
1647			return 0;
1648	}
1649
1650	/* We're allowed to probe.  Build it now. */
1651	if ((nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC)) == NULL)
1652		return -1;
1653	sk->sk_wmem_queued += nskb->truesize;
1654	sk_mem_charge(sk, nskb->truesize);
1655
1656	skb = tcp_send_head(sk);
1657
1658	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
1659	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
1660	TCP_SKB_CB(nskb)->flags = TCPHDR_ACK;
1661	TCP_SKB_CB(nskb)->sacked = 0;
1662	nskb->csum = 0;
1663	nskb->ip_summed = skb->ip_summed;
1664
1665	tcp_insert_write_queue_before(nskb, skb, sk);
1666
1667	len = 0;
1668	tcp_for_write_queue_from_safe(skb, next, sk) {
1669		copy = min_t(int, skb->len, probe_size - len);
1670		if (nskb->ip_summed)
1671			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
1672		else
1673			nskb->csum = skb_copy_and_csum_bits(skb, 0,
1674							    skb_put(nskb, copy),
1675							    copy, nskb->csum);
1676
1677		if (skb->len <= copy) {
1678			/* We've eaten all the data from this skb.
1679			 * Throw it away. */
1680			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags;
1681			tcp_unlink_write_queue(skb, sk);
1682			sk_wmem_free_skb(sk, skb);
1683		} else {
1684			TCP_SKB_CB(nskb)->flags |= TCP_SKB_CB(skb)->flags &
1685						   ~(TCPHDR_FIN|TCPHDR_PSH);
1686			if (!skb_shinfo(skb)->nr_frags) {
1687				skb_pull(skb, copy);
1688				if (skb->ip_summed != CHECKSUM_PARTIAL)
1689					skb->csum = csum_partial(skb->data,
1690								 skb->len, 0);
1691			} else {
1692				__pskb_trim_head(skb, copy);
1693				tcp_set_skb_tso_segs(sk, skb, mss_now);
1694			}
1695			TCP_SKB_CB(skb)->seq += copy;
1696		}
1697
1698		len += copy;
1699
1700		if (len >= probe_size)
1701			break;
1702	}
1703	tcp_init_tso_segs(sk, nskb, nskb->len);
1704
1705	/* We're ready to send.  If this fails, the probe will
1706	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
1707	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
1708	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
1709		/* Decrement cwnd here because we are sending
1710		 * effectively two packets. */
1711		tp->snd_cwnd--;
1712		tcp_event_new_data_sent(sk, nskb);
1713
1714		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
1715		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
1716		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
1717
1718		return 1;
1719	}
1720
1721	return -1;
1722}
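/*
 * Illustrative sketch (not part of the listing): the probe sizing used
 * above, gathered in one place.  The probe carries a doubled MSS, and
 * size_needed reserves (reordering + 1) extra segments behind it so a
 * lost probe can still be detected.  Names are made up for the example:
 */
static void mtu_probe_sizes(int mss_cache, int reordering,
			    int *probe_size, int *size_needed)
{
	*probe_size  = 2 * mss_cache;			/* double the MSS */
	*size_needed = *probe_size +
		       (reordering + 1) * mss_cache;	/* loss-detection slack */
}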
1723
1724/* This routine writes packets to the network.  It advances the
1725 * send_head.  This happens as incoming acks open up the remote
1726 * window for us.
1727 *
1728 * LARGESEND note: !tcp_urg_mode is overkill, only frames between
1729 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
1730 * account rare use of URG, this is not a big flaw.
1731 *
1732 * Returns 1, if no segments are in flight and we have queued segments, but
1733 * cannot send anything now because of SWS or another problem.
1734 */
1735static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1736			  int push_one, gfp_t gfp)
1737{
1738	struct tcp_sock *tp = tcp_sk(sk);
1739	struct sk_buff *skb;
1740	unsigned int tso_segs, sent_pkts;
1741	int cwnd_quota;
1742	int result;
1743
1744	sent_pkts = 0;
1745
1746	if (!push_one) {
1747		/* Do MTU probing. */
1748		result = tcp_mtu_probe(sk);
1749		if (!result) {
1750			return 0;
1751		} else if (result > 0) {
1752			sent_pkts = 1;
1753		}
1754	}
1755
1756	while ((skb = tcp_send_head(sk))) {
1757		unsigned int limit;
1758
1759		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1760		BUG_ON(!tso_segs);
1761
1762		cwnd_quota = tcp_cwnd_test(tp, skb);
1763		if (!cwnd_quota)
1764			break;
1765
1766		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
1767			break;
1768
1769		if (tso_segs == 1) {
1770			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
1771						     (tcp_skb_is_last(sk, skb) ?
1772						      nonagle : TCP_NAGLE_PUSH))))
1773				break;
1774		} else {
1775			if (!push_one && tcp_tso_should_defer(sk, skb))
1776				break;
1777		}
1778
1779		limit = mss_now;
1780		if (tso_segs > 1 && !tcp_urg_mode(tp))
1781			limit = tcp_mss_split_point(sk, skb, mss_now,
1782						    cwnd_quota);
1783
1784		if (skb->len > limit &&
1785		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
1786			break;
1787
1788		TCP_SKB_CB(skb)->when = tcp_time_stamp;
1789
1790		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
1791			break;
1792
1793		/* Advance the send_head.  This one is sent out.
1794		 * This call will increment packets_out.
1795		 */
1796		tcp_event_new_data_sent(sk, skb);
1797
1798		tcp_minshall_update(tp, mss_now, skb);
1799		sent_pkts++;
1800
1801		if (push_one)
1802			break;
1803	}
1804
1805	if (likely(sent_pkts)) {
1806		tcp_cwnd_validate(sk);
1807		return 0;
1808	}
1809	return !tp->packets_out && tcp_send_head(sk);
1810}
1811
1812/* Push out any pending frames which were held back due to
1813 * TCP_CORK or attempt at coalescing tiny packets.
1814 * The socket must be locked by the caller.
1815 */
1816void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
1817			       int nonagle)
1818{
1819	/* If we are closed, the bytes will have to remain here.
1820	 * In time closedown will finish, we empty the write queue and
1821	 * all will be happy.
1822	 */
1823	if (unlikely(sk->sk_state == TCP_CLOSE))
1824		return;
1825
1826	if (tcp_write_xmit(sk, cur_mss, nonagle, 0, GFP_ATOMIC))
1827		tcp_check_probe_timer(sk);
1828}
1829
1830/* Send the _single_ skb sitting at the send head.  Callers that need
1831 * the probe timer etc. set up must use __tcp_push_pending_frames().
1832 */
1833void tcp_push_one(struct sock *sk, unsigned int mss_now)
1834{
1835	struct sk_buff *skb = tcp_send_head(sk);
1836
1837	BUG_ON(!skb || skb->len < mss_now);
1838
1839	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
1840}
1841
1842/* This function returns the amount that we can raise the
1843 * usable window based on the following constraints
1844 *
1845 * 1. The window can never be shrunk once it is offered (RFC 793)
1846 * 2. We limit memory per socket
1847 *
1848 * RFC 1122:
1849 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
1850 *  RCV.NXT + RCV.WND fixed until:
1851 *  RCV.BUFF - RCV.USER - RCV.WND >= min(1/2 RCV.BUFF, MSS)"
1852 *
1853 * i.e. don't raise the right edge of the window until you can raise
1854 * it at least MSS bytes.
1855 *
1856 * Unfortunately, the recommended algorithm breaks header prediction,
1857 * since header prediction assumes th->window stays fixed.
1858 *
1859 * Strictly speaking, keeping th->window fixed violates the receiver
1860 * side SWS prevention criteria. The problem is that under this rule
1861 * a stream of single byte packets will cause the right side of the
1862 * window to always advance by a single byte.
1863 *
1864 * Of course, if the sender implements sender side SWS prevention
1865 * then this will not be a problem.
1866 *
1867 * BSD seems to make the following compromise:
1868 *
1869 *	If the free space is less than the 1/4 of the maximum
1870 *	space available and the free space is less than 1/2 mss,
1871 *	then set the window to 0.
1872 *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
1873 *	Otherwise, just prevent the window from shrinking
1874 *	and from being larger than the largest representable value.
1875 *
1876 * This prevents incremental opening of the window in the regime
1877 * where TCP is limited by the speed of the reader side taking
1878 * data out of the TCP receive queue. It does nothing about
1879 * those cases where the window is constrained on the sender side
1880 * because the pipeline is full.
1881 *
1882 * BSD also seems to "accidentally" limit itself to windows that are a
1883 * multiple of MSS, at least until the free space gets quite small.
1884 * This would appear to be a side effect of the mbuf implementation.
1885 * Combining these two algorithms results in the observed behavior
1886 * of having a fixed window size at almost all times.
1887 *
1888 * Below we obtain similar behavior by forcing the offered window to
1889 * a multiple of the mss when it is feasible to do so.
1890 *
1891 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
1892 * Regular options like TIMESTAMP are taken into account.
1893 */
1894u32 __tcp_select_window(struct sock *sk)
1895{
1896	struct inet_connection_sock *icsk = inet_csk(sk);
1897	struct tcp_sock *tp = tcp_sk(sk);
1898	/* MSS for the peer's data.  Previous versions used mss_clamp
1899	 * here.  I don't know if the value based on our guesses
1900	 * of peer's MSS is better for the performance.  It's more correct
1901	 * but may be worse for the performance because of rcv_mss
1902	 * fluctuations.  --SAW  1998/11/1
1903	 */
1904	int mss = icsk->icsk_ack.rcv_mss;
1905	int free_space = tcp_space(sk);
1906	int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
1907	int window;
1908
1909	if (mss > full_space)
1910		mss = full_space;
1911
1912	if (free_space < (full_space >> 1)) {
1913		icsk->icsk_ack.quick = 0;
1914
1915		if (tcp_memory_pressure)
1916			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
1917					       4U * tp->advmss);
1918
1919		if (free_space < mss)
1920			return 0;
1921	}
1922
1923	if (free_space > tp->rcv_ssthresh)
1924		free_space = tp->rcv_ssthresh;
1925
1926	/* Don't do rounding if we are using window scaling, since the
1927	 * scaled window will not line up with the MSS boundary anyway.
1928	 */
1929	window = tp->rcv_wnd;
1930	if (tp->rx_opt.rcv_wscale) {
1931		window = free_space;
1932
1933		/* Advertise enough space so that it won't get scaled away.
1934		 * Important case: prevent a zero window announcement if
1935		 * 1<<rcv_wscale > mss.
1936		 */
1937		if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window)
1938			window = (((window >> tp->rx_opt.rcv_wscale) + 1)
1939				  << tp->rx_opt.rcv_wscale);
1940	} else {
1941		/* Get the largest window that is a nice multiple of mss.
1942		 * Window clamp already applied above.
1943		 * If our current window offering is within 1 mss of the
1944		 * free space we just keep it. This prevents the divide
1945		 * and multiply from happening most of the time.
1946		 * We also don't do any window rounding when the free space
1947		 * is too small.
1948		 */
1949		if (window <= free_space - mss || window > free_space)
1950			window = (free_space / mss) * mss;
1951		else if (mss == full_space &&
1952			 free_space > window + (full_space >> 1))
1953			window = free_space;
1954	}
1955
1956	return window;
1957}
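/*
 * Illustrative sketch (not part of the listing): the unscaled branch of
 * __tcp_select_window() above only re-rounds the advertised window when
 * the previous offer has drifted more than one MSS away from the free
 * space, so the divide and multiply are skipped on most calls.  The
 * mss == full_space special case is omitted here; names are made up:
 */
static int round_window_to_mss(int window, int free_space, int mss)
{
	if (window <= free_space - mss || window > free_space)
		window = (free_space / mss) * mss;	/* re-round */
	return window;					/* else keep the old offer */
}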
1958
1959/* Collapses two adjacent SKB's during retransmission. */
1960static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
1961{
1962	struct tcp_sock *tp = tcp_sk(sk);
1963	struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
1964	int skb_size, next_skb_size;
1965
1966	skb_size = skb->len;
1967	next_skb_size = next_skb->len;
1968
1969	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
1970
1971	tcp_highest_sack_combine(sk, next_skb, skb);
1972
1973	tcp_unlink_write_queue(next_skb, sk);
1974
1975	skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size),
1976				  next_skb_size);
1977
1978	if (next_skb->ip_summed == CHECKSUM_PARTIAL)
1979		skb->ip_summed = CHECKSUM_PARTIAL;
1980
1981	if (skb->ip_summed != CHECKSUM_PARTIAL)
1982		skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size);
1983
1984	/* Update sequence range on original skb. */
1985	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
1986
1987	/* Merge over control information. This moves PSH/FIN etc. over */
1988	TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(next_skb)->flags;
1989
1990	/* All done, get rid of second SKB and account for it so
1991	 * packet counting does not break.
1992	 */
1993	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
1994
1995	/* changed transmit queue under us so clear hints */
1996	tcp_clear_retrans_hints_partial(tp);
1997	if (next_skb == tp->retransmit_skb_hint)
1998		tp->retransmit_skb_hint = skb;
1999
2000	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
2001
2002	sk_wmem_free_skb(sk, next_skb);
2003}
2004
2005/* Check if coalescing SKBs is legal. */
2006static int tcp_can_collapse(struct sock *sk, struct sk_buff *skb)
2007{
2008	if (tcp_skb_pcount(skb) > 1)
2009		return 0;
2010	/* TODO: SACK collapsing could be used to remove this condition */
2011	if (skb_shinfo(skb)->nr_frags != 0)
2012		return 0;
2013	if (skb_cloned(skb))
2014		return 0;
2015	if (skb == tcp_send_head(sk))
2016		return 0;
2017	/* Some heuristics for collapsing over SACK'd data could be invented */
2018	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
2019		return 0;
2020
2021	return 1;
2022}
2023
2024/* Collapse packets in the retransmit queue to create
2025 * fewer packets on the wire. This is only done on retransmission.
2026 */
2027static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
2028				     int space)
2029{
2030	struct tcp_sock *tp = tcp_sk(sk);
2031	struct sk_buff *skb = to, *tmp;
2032	int first = 1;
2033
2034	if (!sysctl_tcp_retrans_collapse)
2035		return;
2036	if (TCP_SKB_CB(skb)->flags & TCPHDR_SYN)
2037		return;
2038
2039	tcp_for_write_queue_from_safe(skb, tmp, sk) {
2040		if (!tcp_can_collapse(sk, skb))
2041			break;
2042
2043		space -= skb->len;
2044
2045		if (first) {
2046			first = 0;
2047			continue;
2048		}
2049
2050		if (space < 0)
2051			break;
2052		/* Punt if not enough space exists in the first SKB for
2053		 * the data in the second
2054		 */
2055		if (skb->len > skb_tailroom(to))
2056			break;
2057
2058		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
2059			break;
2060
2061		tcp_collapse_retrans(sk, to);
2062	}
2063}
2064
2065/* This retransmits one SKB.  Policy decisions and retransmit queue
2066 * state updates are done by the caller.  Returns non-zero if an
2067 * error occurred which prevented the send.
2068 */
2069int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2070{
2071	struct tcp_sock *tp = tcp_sk(sk);
2072	struct inet_connection_sock *icsk = inet_csk(sk);
2073	unsigned int cur_mss;
2074	int err;
2075
2076	/* Inconclusive MTU probe */
2077	if (icsk->icsk_mtup.probe_size) {
2078		icsk->icsk_mtup.probe_size = 0;
2079	}
2080
2081	/* Do not send more than we queued. 1/4 is reserved for possible
2082	 * copying overhead: fragmentation, tunneling, mangling etc.
2083	 */
2084	if (atomic_read(&sk->sk_wmem_alloc) >
2085	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
2086		return -EAGAIN;
2087
2088	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
2089		if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
2090			BUG();
2091		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
2092			return -ENOMEM;
2093	}
2094
2095	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
2096		return -EHOSTUNREACH; /* Routing failure or similar. */
2097
2098	cur_mss = tcp_current_mss(sk);
2099
2100	/* If receiver has shrunk his window, and skb is out of
2101	 * new window, do not retransmit it. The exception is the
2102	 * case when the window is shrunk to zero. In this case
2103	 * our retransmit serves as a zero window probe.
2104	 */
2105	if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) &&
2106	    TCP_SKB_CB(skb)->seq != tp->snd_una)
2107		return -EAGAIN;
2108
2109	if (skb->len > cur_mss) {
2110		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
2111			return -ENOMEM; /* We'll try again later. */
2112	} else {
2113		int oldpcount = tcp_skb_pcount(skb);
2114
2115		if (unlikely(oldpcount > 1)) {
2116			tcp_init_tso_segs(sk, skb, cur_mss);
2117			tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
2118		}
2119	}
2120
2121	tcp_retrans_try_collapse(sk, skb, cur_mss);
2122
2123	/* Some Solaris stacks overoptimize and ignore the FIN on a
2124	 * retransmit when old data is attached.  So strip it off
2125	 * since it is cheap to do so and saves bytes on the network.
2126	 */
2127	if (skb->len > 0 &&
2128	    (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) &&
2129	    tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
2130		if (!pskb_trim(skb, 0)) {
2131			/* Reuse, even though it does some unnecessary work */
2132			tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1,
2133					     TCP_SKB_CB(skb)->flags);
2134			skb->ip_summed = CHECKSUM_NONE;
2135		}
2136	}
2137
2138	/* Make a copy, if the first transmission SKB clone we made
2139	 * is still in somebody's hands, else make a clone.
2140	 */
2141	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2142
2143	err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2144
2145	if (err == 0) {
2146		/* Update global TCP statistics. */
2147		TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
2148
2149		tp->total_retrans++;
2150
2151#if FASTRETRANS_DEBUG > 0
2152		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
2153			if (net_ratelimit())
2154				printk(KERN_DEBUG "retrans_out leaked.\n");
2155		}
2156#endif
2157		if (!tp->retrans_out)
2158			tp->lost_retrans_low = tp->snd_nxt;
2159		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
2160		tp->retrans_out += tcp_skb_pcount(skb);
2161
2162		/* Save stamp of the first retransmit. */
2163		if (!tp->retrans_stamp)
2164			tp->retrans_stamp = TCP_SKB_CB(skb)->when;
2165
2166		tp->undo_retrans += tcp_skb_pcount(skb);
2167
2168		/* snd_nxt is stored to detect loss of retransmitted segment,
2169		 * see tcp_input.c tcp_sacktag_write_queue().
2170		 */
2171		TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt;
2172	}
2173	return err;
2174}
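/*
 * Illustrative sketch (not part of the listing): the -EAGAIN guard near
 * the top of tcp_retransmit_skb() in isolation.  A quarter of the queued
 * bytes is reserved for copying overhead (fragmentation, tunneling,
 * mangling), and the whole budget is capped by the send buffer:
 */
static int retrans_over_limit(unsigned int wmem_alloc,
			      unsigned int wmem_queued,
			      unsigned int sndbuf)
{
	unsigned int limit = wmem_queued + (wmem_queued >> 2);	/* +25% slack */

	if (limit > sndbuf)
		limit = sndbuf;
	return wmem_alloc > limit;	/* nonzero: caller returns -EAGAIN */
}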
2175
2176/* Check if forward retransmits are possible in the current
2177 * window/congestion state.
2178 */
2179static int tcp_can_forward_retransmit(struct sock *sk)
2180{
2181	const struct inet_connection_sock *icsk = inet_csk(sk);
2182	struct tcp_sock *tp = tcp_sk(sk);
2183
2184	/* Forward retransmissions are possible only during Recovery. */
2185	if (icsk->icsk_ca_state != TCP_CA_Recovery)
2186		return 0;
2187
2188	/* No forward retransmissions in Reno are possible. */
2189	if (tcp_is_reno(tp))
2190		return 0;
2191
2192	/* Yeah, we have to make a difficult choice between forward transmission
2193	 * and retransmission... Both ways have their merits...
2194	 *
2195	 * For now we do not retransmit anything, while we have some new
2196	 * segments to send. In the other cases, follow rule 3 for
2197	 * NextSeg() specified in RFC3517.
2198	 */
2199
2200	if (tcp_may_send_now(sk))
2201		return 0;
2202
2203	return 1;
2204}
2205
2206/* This gets called after a retransmit timeout, and the initially
2207 * retransmitted data is acknowledged.  It tries to continue
2208 * resending the rest of the retransmit queue, until either
2209 * we've sent it all or the congestion window limit is reached.
2210 * If doing SACK, the first ACK which comes back for a timeout
2211 * based retransmit packet might feed us FACK information again.
2212 * If so, we use it to avoid unnecessary retransmissions.
2213 */
2214void tcp_xmit_retransmit_queue(struct sock *sk)
2215{
2216	const struct inet_connection_sock *icsk = inet_csk(sk);
2217	struct tcp_sock *tp = tcp_sk(sk);
2218	struct sk_buff *skb;
2219	struct sk_buff *hole = NULL;
2220	u32 last_lost;
2221	int mib_idx;
2222	int fwd_rexmitting = 0;
2223
2224	if (!tp->packets_out)
2225		return;
2226
2227	if (!tp->lost_out)
2228		tp->retransmit_high = tp->snd_una;
2229
2230	if (tp->retransmit_skb_hint) {
2231		skb = tp->retransmit_skb_hint;
2232		last_lost = TCP_SKB_CB(skb)->end_seq;
2233		if (after(last_lost, tp->retransmit_high))
2234			last_lost = tp->retransmit_high;
2235	} else {
2236		skb = tcp_write_queue_head(sk);
2237		last_lost = tp->snd_una;
2238	}
2239
2240	tcp_for_write_queue_from(skb, sk) {
2241		__u8 sacked = TCP_SKB_CB(skb)->sacked;
2242
2243		if (skb == tcp_send_head(sk))
2244			break;
2245		/* we could do better than to assign each time */
2246		if (hole == NULL)
2247			tp->retransmit_skb_hint = skb;
2248
2249		/* Assume this retransmit will generate
2250		 * only one packet for congestion window
2251		 * calculation purposes.  This works because
2252		 * tcp_retransmit_skb() will chop up the
2253		 * packet to be MSS sized and all the
2254		 * packet counting works out.
2255		 */
2256		if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
2257			return;
2258
2259		if (fwd_rexmitting) {
2260begin_fwd:
2261			if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2262				break;
2263			mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
2264
2265		} else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
2266			tp->retransmit_high = last_lost;
2267			if (!tcp_can_forward_retransmit(sk))
2268				break;
2269			/* Backtrack if necessary to a non-LOST-marked skb */
2270			if (hole != NULL) {
2271				skb = hole;
2272				hole = NULL;
2273			}
2274			fwd_rexmitting = 1;
2275			goto begin_fwd;
2276
2277		} else if (!(sacked & TCPCB_LOST)) {
2278			if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
2279				hole = skb;
2280			continue;
2281
2282		} else {
2283			last_lost = TCP_SKB_CB(skb)->end_seq;
2284			if (icsk->icsk_ca_state != TCP_CA_Loss)
2285				mib_idx = LINUX_MIB_TCPFASTRETRANS;
2286			else
2287				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
2288		}
2289
2290		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
2291			continue;
2292
2293		if (tcp_retransmit_skb(sk, skb))
2294			return;
2295		NET_INC_STATS_BH(sock_net(sk), mib_idx);
2296
2297		if (skb == tcp_write_queue_head(sk))
2298			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2299						  inet_csk(sk)->icsk_rto,
2300						  TCP_RTO_MAX);
2301	}
2302}
2303
2304/* Send a fin.  The caller locks the socket for us.  This cannot be
2305 * allowed to fail queueing a FIN frame under any circumstances.
2306 */
2307void tcp_send_fin(struct sock *sk)
2308{
2309	struct tcp_sock *tp = tcp_sk(sk);
2310	struct sk_buff *skb = tcp_write_queue_tail(sk);
2311	int mss_now;
2312
2313	/* Optimization, tack on the FIN if we have a queue of
2314	 * unsent frames.  But be careful about outgoing SACKS
2315	 * and IP options.
2316	 */
2317	mss_now = tcp_current_mss(sk);
2318
2319	if (tcp_send_head(sk) != NULL) {
2320		TCP_SKB_CB(skb)->flags |= TCPHDR_FIN;
2321		TCP_SKB_CB(skb)->end_seq++;
2322		tp->write_seq++;
2323	} else {
2324		/* Socket is locked, keep trying until memory is available. */
2325		for (;;) {
2326			skb = alloc_skb_fclone(MAX_TCP_HEADER,
2327					       sk->sk_allocation);
2328			if (skb)
2329				break;
2330			yield();
2331		}
2332
2333		/* Reserve space for headers and prepare control bits. */
2334		skb_reserve(skb, MAX_TCP_HEADER);
2335		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
2336		tcp_init_nondata_skb(skb, tp->write_seq,
2337				     TCPHDR_ACK | TCPHDR_FIN);
2338		tcp_queue_skb(sk, skb);
2339	}
2340	__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
2341}
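/*
 * Illustrative sketch (not part of the listing): a FIN occupies exactly
 * one sequence number, which is why the fast path above only sets the
 * flag and bumps end_seq and write_seq by one.  The struct and flag
 * value below are made up for the example:
 */
struct fin_seg_model { u32 end_seq; u8 flags; };

static void tack_fin(struct fin_seg_model *tail, u32 *write_seq)
{
	tail->flags |= 0x01;	/* stand-in for TCPHDR_FIN */
	tail->end_seq++;	/* FIN eats one sequence number */
	(*write_seq)++;
}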
2342
2343/* We get here when a process closes a file descriptor (either due to
2344 * an explicit close() or as a byproduct of exit()'ing) and there
2345 * was unread data in the receive queue.  This behavior is recommended
2346 * by RFC 2525, section 2.17.  -DaveM
2347 */
2348void tcp_send_active_reset(struct sock *sk, gfp_t priority)
2349{
2350	struct sk_buff *skb;
2351
2352	/* NOTE: No TCP options attached and we never retransmit this. */
2353	skb = alloc_skb(MAX_TCP_HEADER, priority);
2354	if (!skb) {
2355		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
2356		return;
2357	}
2358
2359	/* Reserve space for headers and prepare control bits. */
2360	skb_reserve(skb, MAX_TCP_HEADER);
2361	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
2362			     TCPHDR_ACK | TCPHDR_RST);
2363	/* Send it off. */
2364	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2365	if (tcp_transmit_skb(sk, skb, 0, priority))
2366		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
2367
2368	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
2369}
2370
2371/* Send a crossed SYN-ACK during socket establishment.
2372 * WARNING: This routine must only be called when we have already sent
2373 * a SYN packet that crossed the incoming SYN that caused this routine
2374 * to get called. If this assumption fails then the initial rcv_wnd
2375 * and rcv_wscale values will not be correct.
2376 */
2377int tcp_send_synack(struct sock *sk)
2378{
2379	struct sk_buff *skb;
2380
2381	skb = tcp_write_queue_head(sk);
2382	if (skb == NULL || !(TCP_SKB_CB(skb)->flags & TCPHDR_SYN)) {
2383		printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n");
2384		return -EFAULT;
2385	}
2386	if (!(TCP_SKB_CB(skb)->flags & TCPHDR_ACK)) {
2387		if (skb_cloned(skb)) {
2388			struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
2389			if (nskb == NULL)
2390				return -ENOMEM;
2391			tcp_unlink_write_queue(skb, sk);
2392			skb_header_release(nskb);
2393			__tcp_add_write_queue_head(sk, nskb);
2394			sk_wmem_free_skb(sk, skb);
2395			sk->sk_wmem_queued += nskb->truesize;
2396			sk_mem_charge(sk, nskb->truesize);
2397			skb = nskb;
2398		}
2399
2400		TCP_SKB_CB(skb)->flags |= TCPHDR_ACK;
2401		TCP_ECN_send_synack(tcp_sk(sk), skb);
2402	}
2403	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2404	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2405}
2406
2407/* Prepare a SYN-ACK. */
2408struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2409				struct request_sock *req,
2410				struct request_values *rvp)
2411{
2412	struct tcp_out_options opts;
2413	struct tcp_extend_values *xvp = tcp_xv(rvp);
2414	struct inet_request_sock *ireq = inet_rsk(req);
2415	struct tcp_sock *tp = tcp_sk(sk);
2416	const struct tcp_cookie_values *cvp = tp->cookie_values;
2417	struct tcphdr *th;
2418	struct sk_buff *skb;
2419	struct tcp_md5sig_key *md5;
2420	int tcp_header_size;
2421	int mss;
2422	int s_data_desired = 0;
2423
2424	if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
2425		s_data_desired = cvp->s_data_desired;
2426	skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
2427	if (skb == NULL)
2428		return NULL;
2429
2430	/* Reserve space for headers. */
2431	skb_reserve(skb, MAX_TCP_HEADER);
2432
2433	skb_dst_set(skb, dst_clone(dst));
2434
2435	mss = dst_metric_advmss(dst);
2436	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
2437		mss = tp->rx_opt.user_mss;
2438
2439	if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
2440		__u8 rcv_wscale;
2441		/* Set this up on the first call only */
2442		req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
2443
2444		/* limit the window selection if the user enforce a smaller rx buffer */
2445		if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2446		    (req->window_clamp > tcp_full_space(sk) || req->window_clamp == 0))
2447			req->window_clamp = tcp_full_space(sk);
2448
2449		/* tcp_full_space because it is guaranteed to be the first packet */
2450		tcp_select_initial_window(tcp_full_space(sk),
2451			mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
2452			&req->rcv_wnd,
2453			&req->window_clamp,
2454			ireq->wscale_ok,
2455			&rcv_wscale,
2456			dst_metric(dst, RTAX_INITRWND));
2457		ireq->rcv_wscale = rcv_wscale;
2458	}
2459
2460	memset(&opts, 0, sizeof(opts));
2461#ifdef CONFIG_SYN_COOKIES
2462	if (unlikely(req->cookie_ts))
2463		TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
2464	else
2465#endif
2466	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2467	tcp_header_size = tcp_synack_options(sk, req, mss,
2468					     skb, &opts, &md5, xvp)
2469			+ sizeof(*th);
2470
2471	skb_push(skb, tcp_header_size);
2472	skb_reset_transport_header(skb);
2473
2474	th = tcp_hdr(skb);
2475	memset(th, 0, sizeof(struct tcphdr));
2476	th->syn = 1;
2477	th->ack = 1;
2478	TCP_ECN_make_synack(req, th);
2479	th->source = ireq->loc_port;
2480	th->dest = ireq->rmt_port;
2481	/* Setting of flags is superfluous here for callers (and ECE is
2482	 * not even correctly set)
2483	 */
2484	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
2485			     TCPHDR_SYN | TCPHDR_ACK);
2486
2487	if (OPTION_COOKIE_EXTENSION & opts.options) {
2488		if (s_data_desired) {
2489			u8 *buf = skb_put(skb, s_data_desired);
2490
2491			/* copy data directly from the listening socket. */
2492			memcpy(buf, cvp->s_data_payload, s_data_desired);
2493			TCP_SKB_CB(skb)->end_seq += s_data_desired;
2494		}
2495
2496		if (opts.hash_size > 0) {
2497			__u32 workspace[SHA_WORKSPACE_WORDS];
2498			u32 *mess = &xvp->cookie_bakery[COOKIE_DIGEST_WORDS];
2499			u32 *tail = &mess[COOKIE_MESSAGE_WORDS-1];
2500
2501			/* Secret recipe depends on the Timestamp, (future)
2502			 * Sequence and Acknowledgment Numbers, Initiator
2503			 * Cookie, and others handled by IP variant caller.
2504			 */
2505			*tail-- ^= opts.tsval;
2506			*tail-- ^= tcp_rsk(req)->rcv_isn + 1;
2507			*tail-- ^= TCP_SKB_CB(skb)->seq + 1;
2508
2509			/* recommended */
2510			*tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source);
2511			*tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */
2512
2513			sha_transform((__u32 *)&xvp->cookie_bakery[0],
2514				      (char *)mess,
2515				      &workspace[0]);
2516			opts.hash_location =
2517				(__u8 *)&xvp->cookie_bakery[0];
2518		}
2519	}
2520
2521	th->seq = htonl(TCP_SKB_CB(skb)->seq);
2522	th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1);
2523
2524	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
2525	th->window = htons(min(req->rcv_wnd, 65535U));
2526	tcp_options_write((__be32 *)(th + 1), tp, &opts);
2527	th->doff = (tcp_header_size >> 2);
2528	TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb));
2529
2530#ifdef CONFIG_TCP_MD5SIG
2531	/* Okay, we have all we need - do the md5 hash if needed */
2532	if (md5) {
2533		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
2534					       md5, NULL, req, skb);
2535	}
2536#endif
2537
2538	return skb;
2539}
2540EXPORT_SYMBOL(tcp_make_synack);
2541
2542/* Do all connect socket setups that can be done AF independent. */
2543static void tcp_connect_init(struct sock *sk)
2544{
2545	struct dst_entry *dst = __sk_dst_get(sk);
2546	struct tcp_sock *tp = tcp_sk(sk);
2547	__u8 rcv_wscale;
2548
2549	/* We'll fix this up when we get a response from the other end.
2550	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
2551	 */
2552	tp->tcp_header_len = sizeof(struct tcphdr) +
2553		(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
2554
2555#ifdef CONFIG_TCP_MD5SIG
2556	if (tp->af_specific->md5_lookup(sk, sk) != NULL)
2557		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
2558#endif
2559
2560	/* If user gave his TCP_MAXSEG, record it to clamp */
2561	if (tp->rx_opt.user_mss)
2562		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
2563	tp->max_window = 0;
2564	tcp_mtup_init(sk);
2565	tcp_sync_mss(sk, dst_mtu(dst));
2566
2567	if (!tp->window_clamp)
2568		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
2569	tp->advmss = dst_metric_advmss(dst);
2570	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
2571		tp->advmss = tp->rx_opt.user_mss;
2572
2573	tcp_initialize_rcv_mss(sk);
2574
2575	/* limit the window selection if the user enforce a smaller rx buffer */
2576	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
2577	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
2578		tp->window_clamp = tcp_full_space(sk);
2579
2580	tcp_select_initial_window(tcp_full_space(sk),
2581				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
2582				  &tp->rcv_wnd,
2583				  &tp->window_clamp,
2584				  sysctl_tcp_window_scaling,
2585				  &rcv_wscale,
2586				  dst_metric(dst, RTAX_INITRWND));
2587
2588	tp->rx_opt.rcv_wscale = rcv_wscale;
2589	tp->rcv_ssthresh = tp->rcv_wnd;
2590
2591	sk->sk_err = 0;
2592	sock_reset_flag(sk, SOCK_DONE);
2593	tp->snd_wnd = 0;
2594	tcp_init_wl(tp, 0);
2595	tp->snd_una = tp->write_seq;
2596	tp->snd_sml = tp->write_seq;
2597	tp->snd_up = tp->write_seq;
2598	tp->rcv_nxt = 0;
2599	tp->rcv_wup = 0;
2600	tp->copied_seq = 0;
2601
2602	inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2603	inet_csk(sk)->icsk_retransmits = 0;
2604	tcp_clear_retrans(tp);
2605}
2606
2607/* Build a SYN and send it off. */
2608int tcp_connect(struct sock *sk)
2609{
2610	struct tcp_sock *tp = tcp_sk(sk);
2611	struct sk_buff *buff;
2612	int err;
2613
2614	tcp_connect_init(sk);
2615
2616	buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation);
2617	if (unlikely(buff == NULL))
2618		return -ENOBUFS;
2619
2620	/* Reserve space for headers. */
2621	skb_reserve(buff, MAX_TCP_HEADER);
2622
2623	tp->snd_nxt = tp->write_seq;
2624	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
2625	TCP_ECN_send_syn(sk, buff);
2626
2627	/* Send it off. */
2628	TCP_SKB_CB(buff)->when = tcp_time_stamp;
2629	tp->retrans_stamp = TCP_SKB_CB(buff)->when;
2630	skb_header_release(buff);
2631	__tcp_add_write_queue_tail(sk, buff);
2632	sk->sk_wmem_queued += buff->truesize;
2633	sk_mem_charge(sk, buff->truesize);
2634	tp->packets_out += tcp_skb_pcount(buff);
2635	err = tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
2636	if (err == -ECONNREFUSED)
2637		return err;
2638
2639	/* We change tp->snd_nxt after the tcp_transmit_skb() call
2640	 * in order to make this packet get counted in tcpOutSegs.
2641	 */
2642	tp->snd_nxt = tp->write_seq;
2643	tp->pushed_seq = tp->write_seq;
2644	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
2645
2646	/* Timer for repeating the SYN until an answer. */
2647	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2648				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
2649	return 0;
2650}
2651EXPORT_SYMBOL(tcp_connect);
2652
2653/* Send out a delayed ack, the caller does the policy checking
2654 * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
2655 * for details.
2656 */
2657void tcp_send_delayed_ack(struct sock *sk)
2658{
2659	struct inet_connection_sock *icsk = inet_csk(sk);
2660	int ato = icsk->icsk_ack.ato;
2661	unsigned long timeout;
2662
2663	if (ato > TCP_DELACK_MIN) {
2664		const struct tcp_sock *tp = tcp_sk(sk);
2665		int max_ato = HZ / 2;
2666
2667		if (icsk->icsk_ack.pingpong ||
2668		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
2669			max_ato = TCP_DELACK_MAX;
2670
2671		/* Slow path, intersegment interval is "high". */
2672
2673		/* If some rtt estimate is known, use it to bound delayed ack.
2674		 * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
2675		 * directly.
2676		 */
2677		if (tp->srtt) {
2678			int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN);
2679
2680			if (rtt < max_ato)
2681				max_ato = rtt;
2682		}
2683
2684		ato = min(ato, max_ato);
2685	}
2686
2687	/* Stay within the limit we were given */
2688	timeout = jiffies + ato;
2689
2690	/* Use the new timeout only if there wasn't an older one earlier. */
2691	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
2692		/* If delack timer was blocked or is about to expire,
2693		 * send ACK now.
2694		 */
2695		if (icsk->icsk_ack.blocked ||
2696		    time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
2697			tcp_send_ack(sk);
2698			return;
2699		}
2700
2701		if (!time_before(timeout, icsk->icsk_ack.timeout))
2702			timeout = icsk->icsk_ack.timeout;
2703	}
2704	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
2705	icsk->icsk_ack.timeout = timeout;
2706	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
2707}
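/*
 * Illustrative sketch (not part of the listing): the ato clamping above
 * with the limits passed in as parameters, since TCP_DELACK_MIN/MAX and
 * HZ are configuration-dependent.  srtt is kept left-shifted by 3, as in
 * the kernel; the pingpong flag stands in for the pingpong/ACK-pushed
 * test:
 */
static int delack_clamp_ato(int ato, u32 srtt_shifted3, int pingpong,
			    int delack_min, int delack_max, int hz)
{
	if (ato > delack_min) {
		int max_ato = pingpong ? delack_max : hz / 2;

		if (srtt_shifted3) {
			int rtt = (int)(srtt_shifted3 >> 3);	/* undo <<3 */

			if (rtt < delack_min)
				rtt = delack_min;
			if (rtt < max_ato)
				max_ato = rtt;
		}
		if (ato > max_ato)
			ato = max_ato;
	}
	return ato;
}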
2708
2709/* This routine sends an ack and also updates the window. */
2710void tcp_send_ack(struct sock *sk)
2711{
2712	struct sk_buff *buff;
2713
2714	/* If we have been reset, we may not send again. */
2715	if (sk->sk_state == TCP_CLOSE)
2716		return;
2717
2718	/* We are not putting this on the write queue, so
2719	 * tcp_transmit_skb() will set the ownership to this
2720	 * sock.
2721	 */
2722	buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
2723	if (buff == NULL) {
2724		inet_csk_schedule_ack(sk);
2725		inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
2726		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
2727					  TCP_DELACK_MAX, TCP_RTO_MAX);
2728		return;
2729	}
2730
2731	/* Reserve space for headers and prepare control bits. */
2732	skb_reserve(buff, MAX_TCP_HEADER);
2733	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
2734
2735	/* Send it off, this clears delayed acks for us. */
2736	TCP_SKB_CB(buff)->when = tcp_time_stamp;
2737	tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
2738}
2739
2740/* This routine sends a packet with an out of date sequence
2741 * number. It assumes the other end will try to ack it.
2742 *
2743 * Question: what should we send while in urgent mode?
2744 * 4.4BSD forces sending single byte of data. We cannot send
2745 * out of window data, because we have SND.NXT==SND.MAX...
2746 *
2747 * Current solution: to send TWO zero-length segments in urgent mode:
2748 * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is
2749 * out-of-date with SND.UNA-1 to probe window.
2750 */
2751static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
2752{
2753	struct tcp_sock *tp = tcp_sk(sk);
2754	struct sk_buff *skb;
2755
2756	/* We don't queue it, tcp_transmit_skb() sets ownership. */
2757	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
2758	if (skb == NULL)
2759		return -1;
2760
2761	/* Reserve space for headers and set control bits. */
2762	skb_reserve(skb, MAX_TCP_HEADER);
2763	/* Use a previous sequence.  This should cause the other
2764	 * end to send an ack.  Don't queue or clone SKB, just
2765	 * send it.
2766	 */
2767	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
2768	TCP_SKB_CB(skb)->when = tcp_time_stamp;
2769	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
2770}
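/*
 * Illustrative sketch (not part of the listing): the probe sequence
 * chosen above is snd_una - !urgent.  An ordinary window probe reuses
 * the already-acknowledged byte SND.UNA-1 to force a duplicate ACK,
 * while the urgent-mode twin uses SND.UNA itself so the urgent pointer
 * is delivered:
 */
static u32 probe_seq(u32 snd_una, int urgent)
{
	return snd_una - !urgent;	/* urgent: SND.UNA, else SND.UNA-1 */
}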
2771
2772/* Initiate keepalive or window probe from timer. */
2773int tcp_write_wakeup(struct sock *sk)
2774{
2775	struct tcp_sock *tp = tcp_sk(sk);
2776	struct sk_buff *skb;
2777
2778	if (sk->sk_state == TCP_CLOSE)
2779		return -1;
2780
2781	if ((skb = tcp_send_head(sk)) != NULL &&
2782	    before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
2783		int err;
2784		unsigned int mss = tcp_current_mss(sk);
2785		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
2786
2787		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
2788			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;
2789
2790		/* We are probing the opening of a window
2791		 * but the window size is != 0;
2792		 * this must have been the result of SWS avoidance (sender side).
2793		 */
2794		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
2795		    skb->len > mss) {
2796			seg_size = min(seg_size, mss);
2797			TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
2798			if (tcp_fragment(sk, skb, seg_size, mss))
2799				return -1;
2800		} else if (!tcp_skb_pcount(skb))
2801			tcp_set_skb_tso_segs(sk, skb, mss);
2802
2803		TCP_SKB_CB(skb)->flags |= TCPHDR_PSH;
2804		TCP_SKB_CB(skb)->when = tcp_time_stamp;
2805		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
2806		if (!err)
2807			tcp_event_new_data_sent(sk, skb);
2808		return err;
2809	} else {
2810		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
2811			tcp_xmit_probe_skb(sk, 1);
2812		return tcp_xmit_probe_skb(sk, 0);
2813	}
2814}
2815
2816/* A window probe timeout has occurred.  If window is not closed send
2817 * a partial packet else a zero probe.
2818 */
2819void tcp_send_probe0(struct sock *sk)
2820{
2821	struct inet_connection_sock *icsk = inet_csk(sk);
2822	struct tcp_sock *tp = tcp_sk(sk);
2823	int err;
2824
2825	err = tcp_write_wakeup(sk);
2826
2827	if (tp->packets_out || !tcp_send_head(sk)) {
2828		/* Cancel probe timer, if it is not required. */
2829		icsk->icsk_probes_out = 0;
2830		icsk->icsk_backoff = 0;
2831		return;
2832	}
2833
2834	if (err <= 0) {
2835		if (icsk->icsk_backoff < sysctl_tcp_retries2)
2836			icsk->icsk_backoff++;
2837		icsk->icsk_probes_out++;
2838		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2839					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
2840					  TCP_RTO_MAX);
2841	} else {
2842		/* If packet was not sent due to local congestion,
2843		 * do not back off and do not remember icsk_probes_out.
2844		 * Let local senders fight for local resources.
2845		 *
2846		 * Still use the accumulated backoff, though.
2847		 */
2848		if (!icsk->icsk_probes_out)
2849			icsk->icsk_probes_out = 1;
2850		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
2851					  min(icsk->icsk_rto << icsk->icsk_backoff,
2852					      TCP_RESOURCE_PROBE_INTERVAL),
2853					  TCP_RTO_MAX);
2854	}
2855}
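/*
 * Illustrative sketch (not part of the listing): both rearm branches
 * above compute the same exponentially backed-off interval and differ
 * only in the cap (TCP_RTO_MAX after a sent probe, the shorter
 * TCP_RESOURCE_PROBE_INTERVAL after a local-congestion failure):
 */
static unsigned long probe0_interval(unsigned long rto, int backoff,
				     unsigned long cap)
{
	unsigned long when = rto << backoff;	/* exponential backoff */

	return when < cap ? when : cap;
}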
v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
  37
  38#define pr_fmt(fmt) "TCP: " fmt
  39
  40#include <net/tcp.h>
  41#include <net/mptcp.h>
  42
  43#include <linux/compiler.h>
  44#include <linux/gfp.h>
  45#include <linux/module.h>
  46#include <linux/static_key.h>
  47
  48#include <trace/events/tcp.h>
  49
  50/* Refresh clocks of a TCP socket,
  51 * ensuring monotonically increasing values.
  52 */
  53void tcp_mstamp_refresh(struct tcp_sock *tp)
  54{
  55	u64 val = tcp_clock_ns();
  56
  57	tp->tcp_clock_cache = val;
  58	tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC);
  59}
  60
  61static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
  62			   int push_one, gfp_t gfp);
  63
  64/* Account for new data that has been sent to the network. */
  65static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
  66{
  67	struct inet_connection_sock *icsk = inet_csk(sk);
  68	struct tcp_sock *tp = tcp_sk(sk);
  69	unsigned int prior_packets = tp->packets_out;
  70
  71	WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);
  72
  73	__skb_unlink(skb, &sk->sk_write_queue);
  74	tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);
  75
  76	if (tp->highest_sack == NULL)
  77		tp->highest_sack = skb;
  78
  79	tp->packets_out += tcp_skb_pcount(skb);
  80	if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
  81		tcp_rearm_rto(sk);
  82
  83	NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
  84		      tcp_skb_pcount(skb));
  85	tcp_check_space(sk);
  86}
  87
  88/* SND.NXT, if window was not shrunk or the amount of shrunk was less than one
  89 * window scaling factor due to loss of precision.
  90 * If window has been shrunk, what should we make? It is not clear at all.
  91 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
  92 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
  93 * invalid. OK, let's make this for now:
  94 */
  95static inline __u32 tcp_acceptable_seq(const struct sock *sk)
  96{
  97	const struct tcp_sock *tp = tcp_sk(sk);
  98
  99	if (!before(tcp_wnd_end(tp), tp->snd_nxt) ||
 100	    (tp->rx_opt.wscale_ok &&
 101	     ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))
 102		return tp->snd_nxt;
 103	else
 104		return tcp_wnd_end(tp);
 105}
 106
 107/* Calculate mss to advertise in SYN segment.
 108 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 109 *
 110 * 1. It is independent of path mtu.
 111 * 2. Ideally, it is maximal possible segment size i.e. 65535-40.
 112 * 3. For IPv4 it is reasonable to calculate it from maximal MTU of
 113 *    attached devices, because some buggy hosts are confused by
 114 *    large MSS.
 115 * 4. We do not make 3, we advertise MSS, calculated from first
 116 *    hop device mtu, but allow to raise it to ip_rt_min_advmss.
 117 *    This may be overridden via information stored in routing table.
 118 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 119 *    probably even Jumbo".
 120 */
 121static __u16 tcp_advertise_mss(struct sock *sk)
 122{
 123	struct tcp_sock *tp = tcp_sk(sk);
 124	const struct dst_entry *dst = __sk_dst_get(sk);
 125	int mss = tp->advmss;
 126
 127	if (dst) {
 128		unsigned int metric = dst_metric_advmss(dst);
 129
 130		if (metric < mss) {
 131			mss = metric;
 132			tp->advmss = mss;
 133		}
 134	}
 135
 136	return (__u16)mss;
 137}
 138
 139/* RFC2861. Reset CWND after idle period longer RTO to "restart window".
 140 * This is the first part of cwnd validation mechanism.
 141 */
 142void tcp_cwnd_restart(struct sock *sk, s32 delta)
 143{
 144	struct tcp_sock *tp = tcp_sk(sk);
 145	u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
 146	u32 cwnd = tcp_snd_cwnd(tp);
 147
 148	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
 149
 150	tp->snd_ssthresh = tcp_current_ssthresh(sk);
 151	restart_cwnd = min(restart_cwnd, cwnd);
 152
 153	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
 154		cwnd >>= 1;
 155	tcp_snd_cwnd_set(tp, max(cwnd, restart_cwnd));
 156	tp->snd_cwnd_stamp = tcp_jiffies32;
 157	tp->snd_cwnd_used = 0;
 158}
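/*
 * Illustrative sketch (not part of the listing): the RFC2861 decay above
 * halves cwnd once for every RTO of idle time and never drops below the
 * restart window.  Stand-alone model of the loop:
 */
static u32 cwnd_after_idle(u32 cwnd, u32 restart_cwnd, s32 idle, s32 rto)
{
	while ((idle -= rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;		/* halve once per idle RTO */
	return cwnd > restart_cwnd ? cwnd : restart_cwnd;
}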
 159
 160/* Congestion state accounting after a packet has been sent. */
 161static void tcp_event_data_sent(struct tcp_sock *tp,
 162				struct sock *sk)
 163{
 164	struct inet_connection_sock *icsk = inet_csk(sk);
 165	const u32 now = tcp_jiffies32;
 166
 167	if (tcp_packets_in_flight(tp) == 0)
 168		tcp_ca_event(sk, CA_EVENT_TX_START);
 169
 170	tp->lsndtime = now;
 171
 172	/* If it is a reply for ato after last received
 173	 * packet, enter pingpong mode.
 174	 */
 175	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
 176		inet_csk_enter_pingpong_mode(sk);
 177}
 178
 179/* Account for an ACK we sent. */
 180static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
 181				      u32 rcv_nxt)
 182{
 183	struct tcp_sock *tp = tcp_sk(sk);
 184
 185	if (unlikely(tp->compressed_ack)) {
 186		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED,
 187			      tp->compressed_ack);
 188		tp->compressed_ack = 0;
 189		if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
 190			__sock_put(sk);
 191	}
 192
 193	if (unlikely(rcv_nxt != tp->rcv_nxt))
 194		return;  /* Special ACK sent by DCTCP to reflect ECN */
 195	tcp_dec_quickack_mode(sk, pkts);
 196	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 197}
 198
 199/* Determine a window scaling and initial window to offer.
 200 * Based on the assumption that the given amount of space
 201 * will be offered. Store the results in the tp structure.
 202 * NOTE: for smooth operation initial space offering should
 203 * be a multiple of mss if possible. We assume here that mss >= 1.
 204 * This MUST be enforced by all callers.
 205 */
 206void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
 207			       __u32 *rcv_wnd, __u32 *window_clamp,
 208			       int wscale_ok, __u8 *rcv_wscale,
 209			       __u32 init_rcv_wnd)
 210{
 211	unsigned int space = (__space < 0 ? 0 : __space);
 212
 213	/* If no clamp set the clamp to the max possible scaled window */
 214	if (*window_clamp == 0)
 215		(*window_clamp) = (U16_MAX << TCP_MAX_WSCALE);
 216	space = min(*window_clamp, space);
 217
 218	/* Quantize space offering to a multiple of mss if possible. */
 219	if (space > mss)
 220		space = rounddown(space, mss);
 221
 222	/* NOTE: offering an initial window larger than 32767
 223	 * will break some buggy TCP stacks. If the admin tells us
 224	 * it is likely we could be speaking with such a buggy stack
 225	 * we will truncate our initial window offering to 32K-1
 226	 * unless the remote has sent us a window scaling option,
 227	 * which we interpret as a sign the remote TCP is not
 228	 * misinterpreting the window field as a signed quantity.
 229	 */
 230	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
 231		(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
 232	else
 233		(*rcv_wnd) = min_t(u32, space, U16_MAX);
 234
 235	if (init_rcv_wnd)
 236		*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
 237
 238	*rcv_wscale = 0;
 239	if (wscale_ok) {
 240		/* Set window scaling on max possible window */
 241		space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
 242		space = max_t(u32, space, READ_ONCE(sysctl_rmem_max));
 243		space = min_t(u32, space, *window_clamp);
 244		*rcv_wscale = clamp_t(int, ilog2(space) - 15,
 245				      0, TCP_MAX_WSCALE);
 246	}
 247	/* Set the clamp no higher than max representable value */
 248	(*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
 249}
 250EXPORT_SYMBOL(tcp_select_initial_window);
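/*
 * Illustrative sketch (not part of the listing): the
 * clamp_t(int, ilog2(space) - 15, 0, TCP_MAX_WSCALE) above picks the
 * smallest window scale that lets the buffer space fit into the 16-bit
 * window field.  An equivalent loop form, assuming TCP_MAX_WSCALE == 14:
 */
static int pick_rcv_wscale(u32 space)
{
	int ws = 0;

	while (ws < 14 && (space >> ws) > 0xFFFF)
		ws++;		/* shift until space fits in 16 bits */
	return ws;
}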
 251
 252/* Choose a new window to advertise, update state in tcp_sock for the
 253 * socket, and return result with RFC1323 scaling applied.  The return
 254 * value can be stuffed directly into th->window for an outgoing
 255 * frame.
 256 */
 257static u16 tcp_select_window(struct sock *sk)
 258{
 259	struct tcp_sock *tp = tcp_sk(sk);
 260	u32 old_win = tp->rcv_wnd;
 261	u32 cur_win = tcp_receive_window(tp);
 262	u32 new_win = __tcp_select_window(sk);
 263
 264	/* Never shrink the offered window */
 265	if (new_win < cur_win) {
 266		/* Danger Will Robinson!
 267		 * Don't update rcv_wup/rcv_wnd here or else
 268		 * we will not be able to advertise a zero
 269		 * window in time.  --DaveM
 270		 *
 271		 * Relax Will Robinson.
 272		 */
 273		if (new_win == 0)
 274			NET_INC_STATS(sock_net(sk),
 275				      LINUX_MIB_TCPWANTZEROWINDOWADV);
 276		new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
 277	}
 278	tp->rcv_wnd = new_win;
 279	tp->rcv_wup = tp->rcv_nxt;
 280
 281	/* Make sure we do not exceed the maximum possible
 282	 * scaled window.
 283	 */
 284	if (!tp->rx_opt.rcv_wscale &&
 285	    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_workaround_signed_windows))
 286		new_win = min(new_win, MAX_TCP_WINDOW);
 287	else
 288		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));
 289
 290	/* RFC1323 scaling applied */
 291	new_win >>= tp->rx_opt.rcv_wscale;
 292
 293	/* If we advertise zero window, disable fast path. */
 294	if (new_win == 0) {
 295		tp->pred_flags = 0;
 296		if (old_win)
 297			NET_INC_STATS(sock_net(sk),
 298				      LINUX_MIB_TCPTOZEROWINDOWADV);
 299	} else if (old_win == 0) {
 300		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
 301	}
 302
 303	return new_win;
 304}
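/*
 * Illustrative sketch (not part of the listing): the never-shrink rule
 * above.  When the freshly computed window would move the advertised
 * right edge left, the old window is re-offered, rounded up to the scale
 * granularity so it survives the >> rcv_wscale at the end:
 */
static u32 window_no_shrink(u32 new_win, u32 cur_win, int rcv_wscale)
{
	u32 unit = 1U << rcv_wscale;

	if (new_win < cur_win)
		new_win = (cur_win + unit - 1) & ~(unit - 1);	/* ALIGN() */
	return new_win;
}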
 305
 306/* Packet ECN state for a SYN-ACK */
 307static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
 308{
 309	const struct tcp_sock *tp = tcp_sk(sk);
 310
 311	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
 312	if (!(tp->ecn_flags & TCP_ECN_OK))
 313		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
 314	else if (tcp_ca_needs_ecn(sk) ||
 315		 tcp_bpf_ca_needs_ecn(sk))
 316		INET_ECN_xmit(sk);
 317}
 318
 319/* Packet ECN state for a SYN.  */
 320static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
 321{
 322	struct tcp_sock *tp = tcp_sk(sk);
 323	bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk);
 324	bool use_ecn = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn) == 1 ||
 325		tcp_ca_needs_ecn(sk) || bpf_needs_ecn;
 326
 327	if (!use_ecn) {
 328		const struct dst_entry *dst = __sk_dst_get(sk);
 329
 330		if (dst && dst_feature(dst, RTAX_FEATURE_ECN))
 331			use_ecn = true;
 332	}
 333
 334	tp->ecn_flags = 0;
 335
 336	if (use_ecn) {
 337		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
 338		tp->ecn_flags = TCP_ECN_OK;
 339		if (tcp_ca_needs_ecn(sk) || bpf_needs_ecn)
 340			INET_ECN_xmit(sk);
 341	}
 342}
 343
 344static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)
 345{
 346	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback))
 347		/* tp->ecn_flags are cleared at a later point in time when
 348		 * the SYN ACK is ultimately received.
 349		 */
 350		TCP_SKB_CB(skb)->tcp_flags &= ~(TCPHDR_ECE | TCPHDR_CWR);
 351}
 352
 353static void
 354tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th)
 355{
 356	if (inet_rsk(req)->ecn_ok)
 357		th->ece = 1;
 358}
 359
 360/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
 361 * be sent.
 362 */
 363static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
 364			 struct tcphdr *th, int tcp_header_len)
 365{
 366	struct tcp_sock *tp = tcp_sk(sk);
 367
 368	if (tp->ecn_flags & TCP_ECN_OK) {
 369		/* Not-retransmitted data segment: set ECT and inject CWR. */
 370		if (skb->len != tcp_header_len &&
 371		    !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) {
 372			INET_ECN_xmit(sk);
 373			if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) {
 374				tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
 375				th->cwr = 1;
 376				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 377			}
 378		} else if (!tcp_ca_needs_ecn(sk)) {
 379			/* ACK or retransmitted segment: clear ECT|CE */
 380			INET_ECN_dontxmit(sk);
 381		}
 382		if (tp->ecn_flags & TCP_ECN_DEMAND_CWR)
 383			th->ece = 1;
 384	}
 385}
 386
 387/* Constructs common control bits of non-data skb. If SYN/FIN is present,
 388 * auto increment end seqno.
 389 */
 390static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
 391{
 392	skb->ip_summed = CHECKSUM_PARTIAL;
 393
 394	TCP_SKB_CB(skb)->tcp_flags = flags;
 395
 396	tcp_skb_pcount_set(skb, 1);
 397
 398	TCP_SKB_CB(skb)->seq = seq;
 399	if (flags & (TCPHDR_SYN | TCPHDR_FIN))
 400		seq++;
 401	TCP_SKB_CB(skb)->end_seq = seq;
 402}
 403
 404static inline bool tcp_urg_mode(const struct tcp_sock *tp)
 405{
 406	return tp->snd_una != tp->snd_up;
 407}
 408
 409#define OPTION_SACK_ADVERTISE	BIT(0)
 410#define OPTION_TS		BIT(1)
 411#define OPTION_MD5		BIT(2)
 412#define OPTION_WSCALE		BIT(3)
 413#define OPTION_FAST_OPEN_COOKIE	BIT(8)
 414#define OPTION_SMC		BIT(9)
 415#define OPTION_MPTCP		BIT(10)
 416
 417static void smc_options_write(__be32 *ptr, u16 *options)
 418{
 419#if IS_ENABLED(CONFIG_SMC)
 420	if (static_branch_unlikely(&tcp_have_smc)) {
 421		if (unlikely(OPTION_SMC & *options)) {
 422			*ptr++ = htonl((TCPOPT_NOP  << 24) |
 423				       (TCPOPT_NOP  << 16) |
 424				       (TCPOPT_EXP <<  8) |
 425				       (TCPOLEN_EXP_SMC_BASE));
 426			*ptr++ = htonl(TCPOPT_SMC_MAGIC);
 427		}
 428	}
 429#endif
 430}
 431
 432struct tcp_out_options {
 433	u16 options;		/* bit field of OPTION_* */
 434	u16 mss;		/* 0 to disable */
 435	u8 ws;			/* window scale, 0 to disable */
 436	u8 num_sack_blocks;	/* number of SACK blocks to include */
 437	u8 hash_size;		/* bytes in hash_location */
 438	u8 bpf_opt_len;		/* length of BPF hdr option */
 439	__u8 *hash_location;	/* temporary pointer, overloaded */
 440	__u32 tsval, tsecr;	/* need to include OPTION_TS */
 441	struct tcp_fastopen_cookie *fastopen_cookie;	/* Fast open cookie */
 442	struct mptcp_out_options mptcp;
 443};
 444
 445static void mptcp_options_write(struct tcphdr *th, __be32 *ptr,
 446				struct tcp_sock *tp,
 447				struct tcp_out_options *opts)
 448{
 449#if IS_ENABLED(CONFIG_MPTCP)
 450	if (unlikely(OPTION_MPTCP & opts->options))
 451		mptcp_write_options(th, ptr, tp, &opts->mptcp);
 452#endif
 453}
 454
 455#ifdef CONFIG_CGROUP_BPF
 456static int bpf_skops_write_hdr_opt_arg0(struct sk_buff *skb,
 457					enum tcp_synack_type synack_type)
 458{
 459	if (unlikely(!skb))
 460		return BPF_WRITE_HDR_TCP_CURRENT_MSS;
 461
 462	if (unlikely(synack_type == TCP_SYNACK_COOKIE))
  463		return BPF_WRITE_HDR_TCP_SYNACK_COOKIE;
 464
 465	return 0;
 466}
 467
 468/* req, syn_skb and synack_type are used when writing synack */
 469static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
 470				  struct request_sock *req,
 471				  struct sk_buff *syn_skb,
 472				  enum tcp_synack_type synack_type,
 473				  struct tcp_out_options *opts,
 474				  unsigned int *remaining)
 475{
 476	struct bpf_sock_ops_kern sock_ops;
 477	int err;
 478
 479	if (likely(!BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
 480					   BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG)) ||
 481	    !*remaining)
 482		return;
 483
 484	/* *remaining has already been aligned to 4 bytes, so *remaining >= 4 */
 485
 486	/* init sock_ops */
 487	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
 488
 489	sock_ops.op = BPF_SOCK_OPS_HDR_OPT_LEN_CB;
 490
 491	if (req) {
  492		/* The listen "sk" cannot be passed here because
  493		 * it is not locked.  It would not make much sense
  494		 * to do bpf_setsockopt(listen_sk) based on an
  495		 * individual connection request either.
  496		 *
  497		 * Thus, "req" is passed here and the cgroup-bpf-progs
  498		 * of the listen "sk" will be run.
  499		 *
  500		 * "req" is also used here for fastopen even though the
  501		 * "sk" here is a fullsock "child" sk.  This keeps the
  502		 * behavior consistent between fastopen and non-fastopen
  503		 * on the bpf programming side.
  504		 */
 505		sock_ops.sk = (struct sock *)req;
 506		sock_ops.syn_skb = syn_skb;
 507	} else {
 508		sock_owned_by_me(sk);
 509
 510		sock_ops.is_fullsock = 1;
 511		sock_ops.sk = sk;
 512	}
 513
 514	sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
 515	sock_ops.remaining_opt_len = *remaining;
 516	/* tcp_current_mss() does not pass a skb */
 517	if (skb)
 518		bpf_skops_init_skb(&sock_ops, skb, 0);
 519
 520	err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);
 521
 522	if (err || sock_ops.remaining_opt_len == *remaining)
 523		return;
 524
 525	opts->bpf_opt_len = *remaining - sock_ops.remaining_opt_len;
 526	/* round up to 4 bytes */
 527	opts->bpf_opt_len = (opts->bpf_opt_len + 3) & ~3;
 528
 529	*remaining -= opts->bpf_opt_len;
 530}
 531
 532static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
 533				    struct request_sock *req,
 534				    struct sk_buff *syn_skb,
 535				    enum tcp_synack_type synack_type,
 536				    struct tcp_out_options *opts)
 537{
 538	u8 first_opt_off, nr_written, max_opt_len = opts->bpf_opt_len;
 539	struct bpf_sock_ops_kern sock_ops;
 540	int err;
 541
 542	if (likely(!max_opt_len))
 543		return;
 544
 545	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
 546
 547	sock_ops.op = BPF_SOCK_OPS_WRITE_HDR_OPT_CB;
 548
 549	if (req) {
 550		sock_ops.sk = (struct sock *)req;
 551		sock_ops.syn_skb = syn_skb;
 552	} else {
 553		sock_owned_by_me(sk);
 554
 555		sock_ops.is_fullsock = 1;
 556		sock_ops.sk = sk;
 557	}
 558
 559	sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
 560	sock_ops.remaining_opt_len = max_opt_len;
 561	first_opt_off = tcp_hdrlen(skb) - max_opt_len;
 562	bpf_skops_init_skb(&sock_ops, skb, first_opt_off);
 563
 564	err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);
 565
 566	if (err)
 567		nr_written = 0;
 568	else
 569		nr_written = max_opt_len - sock_ops.remaining_opt_len;
 570
 571	if (nr_written < max_opt_len)
 572		memset(skb->data + first_opt_off + nr_written, TCPOPT_NOP,
 573		       max_opt_len - nr_written);
 574}
 575#else
 576static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
 577				  struct request_sock *req,
 578				  struct sk_buff *syn_skb,
 579				  enum tcp_synack_type synack_type,
 580				  struct tcp_out_options *opts,
 581				  unsigned int *remaining)
 582{
 583}
 584
 585static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
 586				    struct request_sock *req,
 587				    struct sk_buff *syn_skb,
 588				    enum tcp_synack_type synack_type,
 589				    struct tcp_out_options *opts)
 590{
 591}
 592#endif
 593
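/* Worked example of the two-phase BPF option flow above (hypothetical
 * numbers, not part of the original source): if a sockops program asks
 * for 6 bytes in the HDR_OPT_LEN callback, bpf_skops_hdr_opt_len() rounds
 * bpf_opt_len up to (6 + 3) & ~3 = 8 and reserves 8 bytes of option
 * space.  If the WRITE_HDR_OPT callback later emits only those 6 bytes,
 * bpf_skops_write_hdr_opt() fills the 2 unused bytes with TCPOPT_NOP so
 * the TCP header length stays consistent.
 */
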
  594/* Write previously computed TCP options to the packet.
  595 *
  596 * Beware: something in the Internet is very sensitive to the ordering
  597 * of TCP options; we learned this the hard way, so be careful here.
  598 * Luckily we can at least blame others for their non-compliance, but
  599 * from an interoperability perspective we're somewhat stuck with the
  600 * ordering we have been using if we want to keep working with those
  601 * broken things (not that it currently hurts anybody, as there is no
  602 * particular reason why the ordering would need to be changed).
  603 *
  604 * At least SACK_PERM as the first option is known to lead to disaster
  605 * (but it may well be that other scenarios fail similarly).
  606 */
 607static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp,
 608			      struct tcp_out_options *opts)
 609{
 610	__be32 *ptr = (__be32 *)(th + 1);
 611	u16 options = opts->options;	/* mungable copy */
  612
 613	if (unlikely(OPTION_MD5 & options)) {
 614		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
  615			       (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
 616		/* overload cookie hash location */
 617		opts->hash_location = (__u8 *)ptr;
 618		ptr += 4;
 619	}
 620
 621	if (unlikely(opts->mss)) {
 622		*ptr++ = htonl((TCPOPT_MSS << 24) |
 623			       (TCPOLEN_MSS << 16) |
 624			       opts->mss);
 625	}
 626
 627	if (likely(OPTION_TS & options)) {
 628		if (unlikely(OPTION_SACK_ADVERTISE & options)) {
 629			*ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
 630				       (TCPOLEN_SACK_PERM << 16) |
 631				       (TCPOPT_TIMESTAMP << 8) |
 632				       TCPOLEN_TIMESTAMP);
 633			options &= ~OPTION_SACK_ADVERTISE;
 634		} else {
 635			*ptr++ = htonl((TCPOPT_NOP << 24) |
 636				       (TCPOPT_NOP << 16) |
 637				       (TCPOPT_TIMESTAMP << 8) |
 638				       TCPOLEN_TIMESTAMP);
 639		}
 640		*ptr++ = htonl(opts->tsval);
 641		*ptr++ = htonl(opts->tsecr);
 642	}
  643
 644	if (unlikely(OPTION_SACK_ADVERTISE & options)) {
 645		*ptr++ = htonl((TCPOPT_NOP << 24) |
 646			       (TCPOPT_NOP << 16) |
 647			       (TCPOPT_SACK_PERM << 8) |
 648			       TCPOLEN_SACK_PERM);
 649	}
 650
 651	if (unlikely(OPTION_WSCALE & options)) {
 652		*ptr++ = htonl((TCPOPT_NOP << 24) |
 653			       (TCPOPT_WINDOW << 16) |
 654			       (TCPOLEN_WINDOW << 8) |
 655			       opts->ws);
 656	}
 657
 658	if (unlikely(opts->num_sack_blocks)) {
 659		struct tcp_sack_block *sp = tp->rx_opt.dsack ?
 660			tp->duplicate_sack : tp->selective_acks;
 661		int this_sack;
 662
 663		*ptr++ = htonl((TCPOPT_NOP  << 24) |
 664			       (TCPOPT_NOP  << 16) |
 665			       (TCPOPT_SACK <<  8) |
 666			       (TCPOLEN_SACK_BASE + (opts->num_sack_blocks *
 667						     TCPOLEN_SACK_PERBLOCK)));
 668
 669		for (this_sack = 0; this_sack < opts->num_sack_blocks;
 670		     ++this_sack) {
 671			*ptr++ = htonl(sp[this_sack].start_seq);
 672			*ptr++ = htonl(sp[this_sack].end_seq);
 673		}
 674
 675		tp->rx_opt.dsack = 0;
 676	}
 677
 678	if (unlikely(OPTION_FAST_OPEN_COOKIE & options)) {
 679		struct tcp_fastopen_cookie *foc = opts->fastopen_cookie;
 680		u8 *p = (u8 *)ptr;
 681		u32 len; /* Fast Open option length */
 682
 683		if (foc->exp) {
 684			len = TCPOLEN_EXP_FASTOPEN_BASE + foc->len;
 685			*ptr = htonl((TCPOPT_EXP << 24) | (len << 16) |
 686				     TCPOPT_FASTOPEN_MAGIC);
 687			p += TCPOLEN_EXP_FASTOPEN_BASE;
 688		} else {
 689			len = TCPOLEN_FASTOPEN_BASE + foc->len;
 690			*p++ = TCPOPT_FASTOPEN;
 691			*p++ = len;
 692		}
 693
 694		memcpy(p, foc->val, foc->len);
 695		if ((len & 3) == 2) {
 696			p[foc->len] = TCPOPT_NOP;
 697			p[foc->len + 1] = TCPOPT_NOP;
 698		}
 699		ptr += (len + 3) >> 2;
 700	}
 701
 702	smc_options_write(ptr, &options);
 703
 704	mptcp_options_write(th, ptr, tp, opts);
 705}
 706
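/* Example wire layout from tcp_options_write() (illustrative, not part of
 * the original source): with OPTION_TS and OPTION_SACK_ADVERTISE both set,
 * as on a typical SYN, the two options share one 32-bit word:
 *
 *	04 02 08 0a  <tsval:4>  <tsecr:4>
 *
 * i.e. SACK_PERM (kind 4, len 2) packed with TIMESTAMP (kind 8, len 10),
 * 12 bytes total instead of the 16 needed if each were NOP-padded
 * separately.
 */
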
 707static void smc_set_option(const struct tcp_sock *tp,
 708			   struct tcp_out_options *opts,
 709			   unsigned int *remaining)
 710{
 711#if IS_ENABLED(CONFIG_SMC)
 712	if (static_branch_unlikely(&tcp_have_smc)) {
 713		if (tp->syn_smc) {
 714			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
 715				opts->options |= OPTION_SMC;
 716				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
 717			}
 718		}
 719	}
 720#endif
 721}
 722
 723static void smc_set_option_cond(const struct tcp_sock *tp,
 724				const struct inet_request_sock *ireq,
 725				struct tcp_out_options *opts,
 726				unsigned int *remaining)
 727{
 728#if IS_ENABLED(CONFIG_SMC)
 729	if (static_branch_unlikely(&tcp_have_smc)) {
 730		if (tp->syn_smc && ireq->smc_ok) {
 731			if (*remaining >= TCPOLEN_EXP_SMC_BASE_ALIGNED) {
 732				opts->options |= OPTION_SMC;
 733				*remaining -= TCPOLEN_EXP_SMC_BASE_ALIGNED;
 734			}
 735		}
 736	}
 737#endif
 738}
 739
 740static void mptcp_set_option_cond(const struct request_sock *req,
 741				  struct tcp_out_options *opts,
 742				  unsigned int *remaining)
 743{
 744	if (rsk_is_mptcp(req)) {
 745		unsigned int size;
 746
 747		if (mptcp_synack_options(req, &size, &opts->mptcp)) {
 748			if (*remaining >= size) {
 749				opts->options |= OPTION_MPTCP;
 750				*remaining -= size;
 751			}
 752		}
 753	}
 754}
 755
 756/* Compute TCP options for SYN packets. This is not the final
 757 * network wire format yet.
 758 */
 759static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 760				struct tcp_out_options *opts,
 761				struct tcp_md5sig_key **md5)
 762{
 763	struct tcp_sock *tp = tcp_sk(sk);
 764	unsigned int remaining = MAX_TCP_OPTION_SPACE;
  765	struct tcp_fastopen_request *fastopen = tp->fastopen_req;
 766
 767	*md5 = NULL;
 768#ifdef CONFIG_TCP_MD5SIG
 769	if (static_branch_unlikely(&tcp_md5_needed.key) &&
 770	    rcu_access_pointer(tp->md5sig_info)) {
 771		*md5 = tp->af_specific->md5_lookup(sk, sk);
 772		if (*md5) {
 773			opts->options |= OPTION_MD5;
 774			remaining -= TCPOLEN_MD5SIG_ALIGNED;
 775		}
  776	}
 777#endif
 778
  779	/* We always get an MSS option.  The option bytes which will be seen in
  780	 * normal data packets, should timestamps be used, must be included in
  781	 * the advertised MSS.  But we subtract them from tp->mss_cache so that
  782	 * calculations in tcp_sendmsg are simpler etc.  So account for this
  783	 * fact here if necessary.  If we don't do this correctly, as a
  784	 * receiver we won't recognize data packets as being full sized when we
  785	 * should, and thus we won't abide by the delayed ACK rules correctly.
  786	 * SACKs don't matter; we never delay an ACK when we have any of those
  787	 * going out.  */
 788	opts->mss = tcp_advertise_mss(sk);
 789	remaining -= TCPOLEN_MSS_ALIGNED;
 790
 791	if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps) && !*md5)) {
 792		opts->options |= OPTION_TS;
 793		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
 794		opts->tsecr = tp->rx_opt.ts_recent;
 795		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 796	}
 797	if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling))) {
 798		opts->ws = tp->rx_opt.rcv_wscale;
 799		opts->options |= OPTION_WSCALE;
 800		remaining -= TCPOLEN_WSCALE_ALIGNED;
 801	}
 802	if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_sack))) {
 803		opts->options |= OPTION_SACK_ADVERTISE;
 804		if (unlikely(!(OPTION_TS & opts->options)))
 805			remaining -= TCPOLEN_SACKPERM_ALIGNED;
 806	}
 807
 808	if (fastopen && fastopen->cookie.len >= 0) {
  809		u32 need = fastopen->cookie.len;
 810
 811		need += fastopen->cookie.exp ? TCPOLEN_EXP_FASTOPEN_BASE :
 812					       TCPOLEN_FASTOPEN_BASE;
 813		need = (need + 3) & ~3U;  /* Align to 32 bits */
 814		if (remaining >= need) {
 815			opts->options |= OPTION_FAST_OPEN_COOKIE;
 816			opts->fastopen_cookie = &fastopen->cookie;
 817			remaining -= need;
 818			tp->syn_fastopen = 1;
 819			tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0;
 820		}
 821	}
 822
 823	smc_set_option(tp, opts, &remaining);
 824
 825	if (sk_is_mptcp(sk)) {
 826		unsigned int size;
 827
 828		if (mptcp_syn_options(sk, skb, &size, &opts->mptcp)) {
 829			opts->options |= OPTION_MPTCP;
 830			remaining -= size;
 831		}
 832	}
 833
 834	bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);
 835
 836	return MAX_TCP_OPTION_SPACE - remaining;
 837}
 838
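/* Space accounting example for tcp_syn_options() (illustrative, not part
 * of the original source): starting from MAX_TCP_OPTION_SPACE (40 bytes),
 * a typical SYN spends 4 (MSS) + 12 (timestamps, aligned) + 4 (window
 * scale, aligned) = 20 bytes; SACK_PERM rides along with the timestamp
 * option for free, leaving 20 bytes for a Fast Open cookie, MPTCP or BPF
 * options.
 */
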
 839/* Set up TCP options for SYN-ACKs. */
 840static unsigned int tcp_synack_options(const struct sock *sk,
 841				       struct request_sock *req,
 842				       unsigned int mss, struct sk_buff *skb,
 843				       struct tcp_out_options *opts,
 844				       const struct tcp_md5sig_key *md5,
 845				       struct tcp_fastopen_cookie *foc,
 846				       enum tcp_synack_type synack_type,
 847				       struct sk_buff *syn_skb)
 848{
 849	struct inet_request_sock *ireq = inet_rsk(req);
  850	unsigned int remaining = MAX_TCP_OPTION_SPACE;
 851
 852#ifdef CONFIG_TCP_MD5SIG
  853	if (md5) {
 854		opts->options |= OPTION_MD5;
 855		remaining -= TCPOLEN_MD5SIG_ALIGNED;
 856
 857		/* We can't fit any SACK blocks in a packet with MD5 + TS
 858		 * options. There was discussion about disabling SACK
 859		 * rather than TS in order to fit in better with old,
 860		 * buggy kernels, but that was deemed to be unnecessary.
 861		 */
 862		if (synack_type != TCP_SYNACK_COOKIE)
 863			ireq->tstamp_ok &= !ireq->sack_ok;
  864	}
 865#endif
 866
 867	/* We always send an MSS option. */
 868	opts->mss = mss;
 869	remaining -= TCPOLEN_MSS_ALIGNED;
 870
 871	if (likely(ireq->wscale_ok)) {
 872		opts->ws = ireq->rcv_wscale;
 873		opts->options |= OPTION_WSCALE;
 874		remaining -= TCPOLEN_WSCALE_ALIGNED;
 875	}
 876	if (likely(ireq->tstamp_ok)) {
 877		opts->options |= OPTION_TS;
 878		opts->tsval = tcp_skb_timestamp(skb) + tcp_rsk(req)->ts_off;
 879		opts->tsecr = req->ts_recent;
 880		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 881	}
 882	if (likely(ireq->sack_ok)) {
 883		opts->options |= OPTION_SACK_ADVERTISE;
 884		if (unlikely(!ireq->tstamp_ok))
 885			remaining -= TCPOLEN_SACKPERM_ALIGNED;
 886	}
 887	if (foc != NULL && foc->len >= 0) {
 888		u32 need = foc->len;
 889
 890		need += foc->exp ? TCPOLEN_EXP_FASTOPEN_BASE :
 891				   TCPOLEN_FASTOPEN_BASE;
 892		need = (need + 3) & ~3U;  /* Align to 32 bits */
 893		if (remaining >= need) {
 894			opts->options |= OPTION_FAST_OPEN_COOKIE;
  895			opts->fastopen_cookie = foc;
  896			remaining -= need;
 897		}
 898	}
 899
 900	mptcp_set_option_cond(req, opts, &remaining);
 901
 902	smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);
 903
 904	bpf_skops_hdr_opt_len((struct sock *)sk, skb, req, syn_skb,
 905			      synack_type, opts, &remaining);
 906
 907	return MAX_TCP_OPTION_SPACE - remaining;
 908}
 909
 910/* Compute TCP options for ESTABLISHED sockets. This is not the
 911 * final wire format yet.
 912 */
 913static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
 914					struct tcp_out_options *opts,
 915					struct tcp_md5sig_key **md5)
 916{
 917	struct tcp_sock *tp = tcp_sk(sk);
 918	unsigned int size = 0;
 919	unsigned int eff_sacks;
 920
 921	opts->options = 0;
 922
 923	*md5 = NULL;
 924#ifdef CONFIG_TCP_MD5SIG
 925	if (static_branch_unlikely(&tcp_md5_needed.key) &&
 926	    rcu_access_pointer(tp->md5sig_info)) {
 927		*md5 = tp->af_specific->md5_lookup(sk, sk);
 928		if (*md5) {
 929			opts->options |= OPTION_MD5;
 930			size += TCPOLEN_MD5SIG_ALIGNED;
 931		}
  932	}
 933#endif
 934
 935	if (likely(tp->rx_opt.tstamp_ok)) {
 936		opts->options |= OPTION_TS;
 937		opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
 938		opts->tsecr = tp->rx_opt.ts_recent;
 939		size += TCPOLEN_TSTAMP_ALIGNED;
 940	}
 941
 942	/* MPTCP options have precedence over SACK for the limited TCP
 943	 * option space because a MPTCP connection would be forced to
 944	 * fall back to regular TCP if a required multipath option is
 945	 * missing. SACK still gets a chance to use whatever space is
 946	 * left.
 947	 */
 948	if (sk_is_mptcp(sk)) {
 949		unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
 950		unsigned int opt_size = 0;
 951
 952		if (mptcp_established_options(sk, skb, &opt_size, remaining,
 953					      &opts->mptcp)) {
 954			opts->options |= OPTION_MPTCP;
 955			size += opt_size;
 956		}
 957	}
 958
 959	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
 960	if (unlikely(eff_sacks)) {
 961		const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
 962		if (unlikely(remaining < TCPOLEN_SACK_BASE_ALIGNED +
 963					 TCPOLEN_SACK_PERBLOCK))
 964			return size;
 965
 966		opts->num_sack_blocks =
 967			min_t(unsigned int, eff_sacks,
 968			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
 969			      TCPOLEN_SACK_PERBLOCK);
 970
 971		size += TCPOLEN_SACK_BASE_ALIGNED +
 972			opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
 973	}
 974
 975	if (unlikely(BPF_SOCK_OPS_TEST_FLAG(tp,
 976					    BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG))) {
 977		unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
 978
 979		bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);
 980
 981		size = MAX_TCP_OPTION_SPACE - remaining;
 982	}
 983
 984	return size;
 985}
 986
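/* Example of the SACK fitting logic above (illustrative, not part of the
 * original source): with timestamps enabled, size starts at 12, so
 * remaining = 40 - 12 = 28 bytes and at most (28 - 4) / 8 = 3 SACK blocks
 * fit (TCPOLEN_SACK_BASE_ALIGNED = 4, TCPOLEN_SACK_PERBLOCK = 8).  Three
 * blocks bring the options to 12 + 4 + 24 = 40 bytes, exactly
 * MAX_TCP_OPTION_SPACE.
 */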
 987
 988/* TCP SMALL QUEUES (TSQ)
 989 *
  990 * The TSQ goal is to keep a small number of skbs per tcp flow in the tx
  991 * queues (qdisc + device) to reduce RTT and bufferbloat.
  992 * We do this using a special skb destructor (tcp_wfree).
  993 *
  994 * It's important that tcp_wfree() can be replaced by sock_wfree() in the
  995 * event the skb needs to be reallocated in a driver.
  996 * The invariant is that skb->truesize is subtracted from sk->sk_wmem_alloc.
 997 *
 998 * Since transmit from skb destructor is forbidden, we use a tasklet
 999 * to process all sockets that eventually need to send more skbs.
1000 * We use one tasklet per cpu, with its own queue of sockets.
1001 */
1002struct tsq_tasklet {
1003	struct tasklet_struct	tasklet;
1004	struct list_head	head; /* queue of tcp sockets */
1005};
1006static DEFINE_PER_CPU(struct tsq_tasklet, tsq_tasklet);
1007
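/* End-to-end TSQ flow (illustrative summary of the code below, not part
 * of the original source):
 *
 *   tcp_wfree()                 skb freed by qdisc/driver while the flow
 *     -> set TSQF_QUEUED        was throttled (TSQF_THROTTLED set)
 *     -> list_add(&tp->tsq_node, &tsq->head)
 *     -> tasklet_schedule()
 *   tcp_tasklet_func()          later, in tasklet context
 *     -> tcp_tsq_handler()      for each queued socket
 *        -> tcp_tsq_write()     push more packets, or set
 *                               TCP_TSQ_DEFERRED if the user owns the lock
 */
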
1008static void tcp_tsq_write(struct sock *sk)
1009{
1010	if ((1 << sk->sk_state) &
1011	    (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
1012	     TCPF_CLOSE_WAIT  | TCPF_LAST_ACK)) {
1013		struct tcp_sock *tp = tcp_sk(sk);
1014
1015		if (tp->lost_out > tp->retrans_out &&
1016		    tcp_snd_cwnd(tp) > tcp_packets_in_flight(tp)) {
1017			tcp_mstamp_refresh(tp);
1018			tcp_xmit_retransmit_queue(sk);
1019		}
1020
1021		tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
1022			       0, GFP_ATOMIC);
1023	}
1024}
1025
1026static void tcp_tsq_handler(struct sock *sk)
1027{
1028	bh_lock_sock(sk);
1029	if (!sock_owned_by_user(sk))
1030		tcp_tsq_write(sk);
1031	else if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags))
1032		sock_hold(sk);
1033	bh_unlock_sock(sk);
1034}
1035/*
1036 * One tasklet per cpu tries to send more skbs.
1037 * We run in tasklet context but need to disable irqs when
1038 * transferring tsq->head because tcp_wfree() might
 1039 * interrupt us (non-NAPI drivers).
1040 */
1041static void tcp_tasklet_func(struct tasklet_struct *t)
1042{
1043	struct tsq_tasklet *tsq = from_tasklet(tsq,  t, tasklet);
1044	LIST_HEAD(list);
1045	unsigned long flags;
1046	struct list_head *q, *n;
1047	struct tcp_sock *tp;
1048	struct sock *sk;
1049
1050	local_irq_save(flags);
1051	list_splice_init(&tsq->head, &list);
1052	local_irq_restore(flags);
1053
1054	list_for_each_safe(q, n, &list) {
1055		tp = list_entry(q, struct tcp_sock, tsq_node);
1056		list_del(&tp->tsq_node);
1057
1058		sk = (struct sock *)tp;
1059		smp_mb__before_atomic();
1060		clear_bit(TSQ_QUEUED, &sk->sk_tsq_flags);
1061
1062		tcp_tsq_handler(sk);
1063		sk_free(sk);
1064	}
1065}
1066
1067#define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED |		\
1068			  TCPF_WRITE_TIMER_DEFERRED |	\
1069			  TCPF_DELACK_TIMER_DEFERRED |	\
1070			  TCPF_MTU_REDUCED_DEFERRED)
1071/**
1072 * tcp_release_cb - tcp release_sock() callback
1073 * @sk: socket
1074 *
1075 * called from release_sock() to perform protocol dependent
1076 * actions before socket release.
1077 */
1078void tcp_release_cb(struct sock *sk)
1079{
1080	unsigned long flags = smp_load_acquire(&sk->sk_tsq_flags);
1081	unsigned long nflags;
1082
1083	/* perform an atomic operation only if at least one flag is set */
1084	do {
1085		if (!(flags & TCP_DEFERRED_ALL))
1086			return;
1087		nflags = flags & ~TCP_DEFERRED_ALL;
1088	} while (!try_cmpxchg(&sk->sk_tsq_flags, &flags, nflags));
1089
1090	if (flags & TCPF_TSQ_DEFERRED) {
1091		tcp_tsq_write(sk);
1092		__sock_put(sk);
1093	}
1094	/* Here begins the tricky part :
1095	 * We are called from release_sock() with :
1096	 * 1) BH disabled
1097	 * 2) sk_lock.slock spinlock held
1098	 * 3) socket owned by us (sk->sk_lock.owned == 1)
1099	 *
 1100	 * But the following code is meant to be called from BH handlers,
 1101	 * so we keep BH disabled, but release socket ownership early.
1102	 */
1103	sock_release_ownership(sk);
1104
1105	if (flags & TCPF_WRITE_TIMER_DEFERRED) {
1106		tcp_write_timer_handler(sk);
1107		__sock_put(sk);
1108	}
1109	if (flags & TCPF_DELACK_TIMER_DEFERRED) {
1110		tcp_delack_timer_handler(sk);
1111		__sock_put(sk);
1112	}
1113	if (flags & TCPF_MTU_REDUCED_DEFERRED) {
1114		inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
1115		__sock_put(sk);
1116	}
1117}
1118EXPORT_SYMBOL(tcp_release_cb);
1119
1120void __init tcp_tasklet_init(void)
1121{
1122	int i;
1123
1124	for_each_possible_cpu(i) {
1125		struct tsq_tasklet *tsq = &per_cpu(tsq_tasklet, i);
1126
1127		INIT_LIST_HEAD(&tsq->head);
1128		tasklet_setup(&tsq->tasklet, tcp_tasklet_func);
1129	}
1130}
1131
1132/*
1133 * Write buffer destructor automatically called from kfree_skb.
1134 * We can't xmit new skbs from this context, as we might already
1135 * hold qdisc lock.
1136 */
1137void tcp_wfree(struct sk_buff *skb)
1138{
1139	struct sock *sk = skb->sk;
1140	struct tcp_sock *tp = tcp_sk(sk);
1141	unsigned long flags, nval, oval;
1142	struct tsq_tasklet *tsq;
1143	bool empty;
1144
1145	/* Keep one reference on sk_wmem_alloc.
1146	 * Will be released by sk_free() from here or tcp_tasklet_func()
1147	 */
1148	WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));
1149
1150	/* If this softirq is serviced by ksoftirqd, we are likely under stress.
1151	 * Wait until our queues (qdisc + devices) are drained.
1152	 * This gives :
1153	 * - less callbacks to tcp_write_xmit(), reducing stress (batches)
1154	 * - chance for incoming ACK (processed by another cpu maybe)
1155	 *   to migrate this flow (skb->ooo_okay will be eventually set)
1156	 */
1157	if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
1158		goto out;
1159
1160	oval = smp_load_acquire(&sk->sk_tsq_flags);
1161	do {
1162		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
1163			goto out;
1164
1165		nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED;
1166	} while (!try_cmpxchg(&sk->sk_tsq_flags, &oval, nval));
1167
1168	/* queue this socket to tasklet queue */
1169	local_irq_save(flags);
1170	tsq = this_cpu_ptr(&tsq_tasklet);
1171	empty = list_empty(&tsq->head);
1172	list_add(&tp->tsq_node, &tsq->head);
1173	if (empty)
1174		tasklet_schedule(&tsq->tasklet);
1175	local_irq_restore(flags);
1176	return;
1177out:
1178	sk_free(sk);
1179}
1180
1181/* Note: Called under soft irq.
1182 * We can call TCP stack right away, unless socket is owned by user.
1183 */
1184enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
1185{
1186	struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer);
1187	struct sock *sk = (struct sock *)tp;
1188
1189	tcp_tsq_handler(sk);
1190	sock_put(sk);
1191
1192	return HRTIMER_NORESTART;
1193}
1194
1195static void tcp_update_skb_after_send(struct sock *sk, struct sk_buff *skb,
1196				      u64 prior_wstamp)
1197{
1198	struct tcp_sock *tp = tcp_sk(sk);
1199
1200	if (sk->sk_pacing_status != SK_PACING_NONE) {
1201		unsigned long rate = sk->sk_pacing_rate;
1202
1203		/* Original sch_fq does not pace first 10 MSS
1204		 * Note that tp->data_segs_out overflows after 2^32 packets,
1205		 * this is a minor annoyance.
1206		 */
1207		if (rate != ~0UL && rate && tp->data_segs_out >= 10) {
1208			u64 len_ns = div64_ul((u64)skb->len * NSEC_PER_SEC, rate);
1209			u64 credit = tp->tcp_wstamp_ns - prior_wstamp;
1210
1211			/* take into account OS jitter */
1212			len_ns -= min_t(u64, len_ns / 2, credit);
1213			tp->tcp_wstamp_ns += len_ns;
1214		}
1215	}
1216	list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
1217}
1218
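/* Worked pacing example for tcp_update_skb_after_send() (hypothetical
 * numbers, not part of the original source): at sk_pacing_rate =
 * 12,500,000 B/s (100 Mbit/s), a 1448 byte skb costs
 * len_ns = 1448 * NSEC_PER_SEC / 12.5e6 ~= 115,840 ns.  If the clock
 * advanced 30,000 ns since the previous transmit (the "credit"), we
 * charge 115,840 - min(57,920, 30,000) = 85,840 ns, so tcp_wstamp_ns
 * moves ahead by that amount and the EDT-aware qdisc releases the next
 * skb accordingly.
 */
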
1219INDIRECT_CALLABLE_DECLARE(int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
1220INDIRECT_CALLABLE_DECLARE(int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl));
1221INDIRECT_CALLABLE_DECLARE(void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb));
1222
 1223/* This routine actually transmits TCP packets queued by
 1224 * tcp_do_sendmsg().  It is used by both the initial
1225 * transmission and possible later retransmissions.
1226 * All SKB's seen here are completely headerless.  It is our
1227 * job to build the TCP header, and pass the packet down to
1228 * IP so it can do the same plus pass the packet off to the
1229 * device.
1230 *
1231 * We are working here with either a clone of the original
1232 * SKB, or a fresh unique copy made by the retransmit engine.
1233 */
1234static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
1235			      int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
1236{
1237	const struct inet_connection_sock *icsk = inet_csk(sk);
1238	struct inet_sock *inet;
1239	struct tcp_sock *tp;
1240	struct tcp_skb_cb *tcb;
1241	struct tcp_out_options opts;
1242	unsigned int tcp_options_size, tcp_header_size;
1243	struct sk_buff *oskb = NULL;
1244	struct tcp_md5sig_key *md5;
1245	struct tcphdr *th;
1246	u64 prior_wstamp;
1247	int err;
1248
1249	BUG_ON(!skb || !tcp_skb_pcount(skb));
1250	tp = tcp_sk(sk);
1251	prior_wstamp = tp->tcp_wstamp_ns;
1252	tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);
1253	skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true);
1254	if (clone_it) {
1255		oskb = skb;
1256
1257		tcp_skb_tsorted_save(oskb) {
1258			if (unlikely(skb_cloned(oskb)))
1259				skb = pskb_copy(oskb, gfp_mask);
1260			else
1261				skb = skb_clone(oskb, gfp_mask);
1262		} tcp_skb_tsorted_restore(oskb);
 1263
1264		if (unlikely(!skb))
1265			return -ENOBUFS;
1266		/* retransmit skbs might have a non zero value in skb->dev
1267		 * because skb->dev is aliased with skb->rbnode.rb_left
1268		 */
1269		skb->dev = NULL;
1270	}
1271
 1272	inet = inet_sk(sk);
1273	tcb = TCP_SKB_CB(skb);
1274	memset(&opts, 0, sizeof(opts));
1275
1276	if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
1277		tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
1278	} else {
1279		tcp_options_size = tcp_established_options(sk, skb, &opts,
1280							   &md5);
1281		/* Force a PSH flag on all (GSO) packets to expedite GRO flush
 1282		 * at the receiver: this slightly improves GRO performance.
1283		 * Note that we do not force the PSH flag for non GSO packets,
1284		 * because they might be sent under high congestion events,
1285		 * and in this case it is better to delay the delivery of 1-MSS
1286		 * packets and thus the corresponding ACK packet that would
1287		 * release the following packet.
1288		 */
1289		if (tcp_skb_pcount(skb) > 1)
1290			tcb->tcp_flags |= TCPHDR_PSH;
1291	}
1292	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
1293
1294	/* if no packet is in qdisc/device queue, then allow XPS to select
1295	 * another queue. We can be called from tcp_tsq_handler()
1296	 * which holds one reference to sk.
1297	 *
1298	 * TODO: Ideally, in-flight pure ACK packets should not matter here.
1299	 * One way to get this would be to set skb->truesize = 2 on them.
1300	 */
1301	skb->ooo_okay = sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1);
1302
1303	/* If we had to use memory reserve to allocate this skb,
1304	 * this might cause drops if packet is looped back :
1305	 * Other socket might not have SOCK_MEMALLOC.
1306	 * Packets not looped back do not care about pfmemalloc.
1307	 */
1308	skb->pfmemalloc = 0;
1309
1310	skb_push(skb, tcp_header_size);
1311	skb_reset_transport_header(skb);
1312
1313	skb_orphan(skb);
1314	skb->sk = sk;
1315	skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
1316	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
1317
1318	skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
1319
1320	/* Build TCP header and checksum it. */
1321	th = (struct tcphdr *)skb->data;
1322	th->source		= inet->inet_sport;
1323	th->dest		= inet->inet_dport;
1324	th->seq			= htonl(tcb->seq);
1325	th->ack_seq		= htonl(rcv_nxt);
1326	*(((__be16 *)th) + 6)	= htons(((tcp_header_size >> 2) << 12) |
1327					tcb->tcp_flags);
 1328
1329	th->check		= 0;
1330	th->urg_ptr		= 0;
1331
 1332	/* The urg_mode check is necessary during a window probe below snd_una */
1333	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
1334		if (before(tp->snd_up, tcb->seq + 0x10000)) {
1335			th->urg_ptr = htons(tp->snd_up - tcb->seq);
1336			th->urg = 1;
1337		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
1338			th->urg_ptr = htons(0xFFFF);
1339			th->urg = 1;
1340		}
1341	}
1342
1343	skb_shinfo(skb)->gso_type = sk->sk_gso_type;
1344	if (likely(!(tcb->tcp_flags & TCPHDR_SYN))) {
1345		th->window      = htons(tcp_select_window(sk));
1346		tcp_ecn_send(sk, skb, th, tcp_header_size);
1347	} else {
1348		/* RFC1323: The window in SYN & SYN/ACK segments
1349		 * is never scaled.
1350		 */
1351		th->window	= htons(min(tp->rcv_wnd, 65535U));
1352	}
1353
1354	tcp_options_write(th, tp, &opts);
1355
1356#ifdef CONFIG_TCP_MD5SIG
1357	/* Calculate the MD5 hash, as we have all we need now */
1358	if (md5) {
1359		sk_gso_disable(sk);
1360		tp->af_specific->calc_md5_hash(opts.hash_location,
1361					       md5, sk, skb);
1362	}
1363#endif
1364
1365	/* BPF prog is the last one writing header option */
1366	bpf_skops_write_hdr_opt(sk, skb, NULL, NULL, 0, &opts);
1367
1368	INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check,
1369			   tcp_v6_send_check, tcp_v4_send_check,
1370			   sk, skb);
1371
1372	if (likely(tcb->tcp_flags & TCPHDR_ACK))
1373		tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
1374
1375	if (skb->len != tcp_header_size) {
1376		tcp_event_data_sent(tp, sk);
1377		tp->data_segs_out += tcp_skb_pcount(skb);
1378		tp->bytes_sent += skb->len - tcp_header_size;
1379	}
1380
1381	if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
1382		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
1383			      tcp_skb_pcount(skb));
1384
1385	tp->segs_out += tcp_skb_pcount(skb);
1386	skb_set_hash_from_sk(skb, sk);
 1387	/* OK, it's time to fill skb_shinfo(skb)->gso_{segs|size} */
1388	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
1389	skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);
1390
1391	/* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */
1392
1393	/* Cleanup our debris for IP stacks */
1394	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
1395			       sizeof(struct inet6_skb_parm)));
1396
1397	tcp_add_tx_delay(skb, tp);
1398
1399	err = INDIRECT_CALL_INET(icsk->icsk_af_ops->queue_xmit,
1400				 inet6_csk_xmit, ip_queue_xmit,
1401				 sk, skb, &inet->cork.fl);
1402
1403	if (unlikely(err > 0)) {
1404		tcp_enter_cwr(sk);
1405		err = net_xmit_eval(err);
1406	}
1407	if (!err && oskb) {
1408		tcp_update_skb_after_send(sk, oskb, prior_wstamp);
1409		tcp_rate_skb_sent(sk, oskb);
1410	}
1411	return err;
1412}
1413
1414static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
1415			    gfp_t gfp_mask)
1416{
1417	return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
1418				  tcp_sk(sk)->rcv_nxt);
1419}
1420
1421/* This routine just queues the buffer for sending.
1422 *
1423 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
1424 * otherwise socket can stall.
1425 */
1426static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
1427{
1428	struct tcp_sock *tp = tcp_sk(sk);
1429
1430	/* Advance write_seq and place onto the write_queue. */
1431	WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);
1432	__skb_header_release(skb);
1433	tcp_add_write_queue_tail(sk, skb);
1434	sk_wmem_queued_add(sk, skb->truesize);
1435	sk_mem_charge(sk, skb->truesize);
1436}
1437
1438/* Initialize TSO segments for a packet. */
 1439static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
1440{
 1441	if (skb->len <= mss_now) {
1442		/* Avoid the costly divide in the normal
1443		 * non-TSO case.
1444		 */
1445		tcp_skb_pcount_set(skb, 1);
 1446		TCP_SKB_CB(skb)->tcp_gso_size = 0;
1447	} else {
1448		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
 1449		TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
1450	}
1451}
 1452
1453/* Pcount in the middle of the write queue got changed, we need to do various
1454 * tweaks to fix counters
1455 */
1456static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr)
1457{
1458	struct tcp_sock *tp = tcp_sk(sk);
1459
1460	tp->packets_out -= decr;
1461
1462	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
1463		tp->sacked_out -= decr;
1464	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1465		tp->retrans_out -= decr;
1466	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
1467		tp->lost_out -= decr;
1468
1469	/* Reno case is special. Sigh... */
1470	if (tcp_is_reno(tp) && decr > 0)
1471		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
 1472
1473	if (tp->lost_skb_hint &&
1474	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
1475	    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
1476		tp->lost_cnt_hint -= decr;
1477
1478	tcp_verify_left_out(tp);
1479}
1480
1481static bool tcp_has_tx_tstamp(const struct sk_buff *skb)
1482{
1483	return TCP_SKB_CB(skb)->txstamp_ack ||
1484		(skb_shinfo(skb)->tx_flags & SKBTX_ANY_TSTAMP);
1485}
1486
1487static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
1488{
1489	struct skb_shared_info *shinfo = skb_shinfo(skb);
1490
1491	if (unlikely(tcp_has_tx_tstamp(skb)) &&
1492	    !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1493		struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1494		u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1495
1496		shinfo->tx_flags &= ~tsflags;
1497		shinfo2->tx_flags |= tsflags;
1498		swap(shinfo->tskey, shinfo2->tskey);
1499		TCP_SKB_CB(skb2)->txstamp_ack = TCP_SKB_CB(skb)->txstamp_ack;
1500		TCP_SKB_CB(skb)->txstamp_ack = 0;
1501	}
1502}
1503
1504static void tcp_skb_fragment_eor(struct sk_buff *skb, struct sk_buff *skb2)
1505{
1506	TCP_SKB_CB(skb2)->eor = TCP_SKB_CB(skb)->eor;
1507	TCP_SKB_CB(skb)->eor = 0;
1508}
1509
1510/* Insert buff after skb on the write or rtx queue of sk.  */
1511static void tcp_insert_write_queue_after(struct sk_buff *skb,
1512					 struct sk_buff *buff,
1513					 struct sock *sk,
1514					 enum tcp_queue tcp_queue)
1515{
1516	if (tcp_queue == TCP_FRAG_IN_WRITE_QUEUE)
1517		__skb_queue_after(&sk->sk_write_queue, skb, buff);
1518	else
1519		tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);
1520}
1521
1522/* Function to create two new TCP segments.  Shrinks the given segment
1523 * to the specified size and appends a new segment with the rest of the
1524 * packet to the list.  This won't be called frequently, I hope.
1525 * Remember, these are still headerless SKBs at this point.
1526 */
1527int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
1528		 struct sk_buff *skb, u32 len,
1529		 unsigned int mss_now, gfp_t gfp)
1530{
1531	struct tcp_sock *tp = tcp_sk(sk);
1532	struct sk_buff *buff;
1533	int nsize, old_factor;
1534	long limit;
1535	int nlen;
1536	u8 flags;
1537
1538	if (WARN_ON(len > skb->len))
1539		return -EINVAL;
1540
1541	nsize = skb_headlen(skb) - len;
1542	if (nsize < 0)
1543		nsize = 0;
1544
1545	/* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
1546	 * We need some allowance to not penalize applications setting small
1547	 * SO_SNDBUF values.
1548	 * Also allow first and last skb in retransmit queue to be split.
1549	 */
1550	limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_LEGACY_MAX_SIZE);
1551	if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
1552		     tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
1553		     skb != tcp_rtx_queue_head(sk) &&
1554		     skb != tcp_rtx_queue_tail(sk))) {
1555		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
1556		return -ENOMEM;
1557	}
1558
1559	if (skb_unclone_keeptruesize(skb, gfp))
1560		return -ENOMEM;
1561
1562	/* Get a new skb... force flag on. */
1563	buff = tcp_stream_alloc_skb(sk, nsize, gfp, true);
1564	if (!buff)
1565		return -ENOMEM; /* We'll just try again later. */
1566	skb_copy_decrypted(buff, skb);
1567	mptcp_skb_ext_copy(buff, skb);
1568
1569	sk_wmem_queued_add(sk, buff->truesize);
1570	sk_mem_charge(sk, buff->truesize);
1571	nlen = skb->len - len - nsize;
1572	buff->truesize += nlen;
1573	skb->truesize -= nlen;
1574
1575	/* Correct the sequence numbers. */
1576	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
1577	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
1578	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
1579
1580	/* PSH and FIN should only be set in the second packet. */
1581	flags = TCP_SKB_CB(skb)->tcp_flags;
1582	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
1583	TCP_SKB_CB(buff)->tcp_flags = flags;
1584	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
1585	tcp_skb_fragment_eor(skb, buff);
1586
 1587	skb_split(skb, buff, len);
1588
1589	skb_set_delivery_time(buff, skb->tstamp, true);
 1590	tcp_fragment_tstamp(skb, buff);
1591
1592	old_factor = tcp_skb_pcount(skb);
1593
1594	/* Fix up tso_factor for both original and new SKB.  */
1595	tcp_set_skb_tso_segs(skb, mss_now);
1596	tcp_set_skb_tso_segs(buff, mss_now);
1597
1598	/* Update delivered info for the new segment */
1599	TCP_SKB_CB(buff)->tx = TCP_SKB_CB(skb)->tx;
1600
1601	/* If this packet has been sent out already, we must
1602	 * adjust the various packet counters.
1603	 */
1604	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
1605		int diff = old_factor - tcp_skb_pcount(skb) -
1606			tcp_skb_pcount(buff);
1607
1608		if (diff)
1609			tcp_adjust_pcount(sk, skb, diff);
1610	}
1611
1612	/* Link BUFF into the send queue. */
1613	__skb_header_release(buff);
1614	tcp_insert_write_queue_after(skb, buff, sk, tcp_queue);
1615	if (tcp_queue == TCP_FRAG_IN_RTX_QUEUE)
1616		list_add(&buff->tcp_tsorted_anchor, &skb->tcp_tsorted_anchor);
1617
1618	return 0;
1619}
1620
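/* Example split (illustrative, not part of the original source): an skb
 * covering [1000, 4000) fragmented with len = 1448 and mss_now = 1448
 * becomes skb [1000, 2448) and buff [2448, 4000); PSH/FIN move to buff,
 * and pcount goes from 3 to 1 + 2, so for an already-sent skb
 * tcp_adjust_pcount() has nothing to fix unless rounding changed the
 * total.
 */
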
1621/* This is similar to __pskb_pull_tail(). The difference is that pulled
 1622 * data is not copied, but immediately discarded.
1623 */
1624static int __pskb_trim_head(struct sk_buff *skb, int len)
1625{
1626	struct skb_shared_info *shinfo;
1627	int i, k, eat;
1628
1629	eat = min_t(int, len, skb_headlen(skb));
1630	if (eat) {
1631		__skb_pull(skb, eat);
1632		len -= eat;
1633		if (!len)
1634			return 0;
1635	}
1636	eat = len;
1637	k = 0;
1638	shinfo = skb_shinfo(skb);
1639	for (i = 0; i < shinfo->nr_frags; i++) {
1640		int size = skb_frag_size(&shinfo->frags[i]);
1641
1642		if (size <= eat) {
1643			skb_frag_unref(skb, i);
1644			eat -= size;
1645		} else {
1646			shinfo->frags[k] = shinfo->frags[i];
1647			if (eat) {
1648				skb_frag_off_add(&shinfo->frags[k], eat);
1649				skb_frag_size_sub(&shinfo->frags[k], eat);
1650				eat = 0;
1651			}
1652			k++;
1653		}
1654	}
1655	shinfo->nr_frags = k;
 1656	shinfo->nr_frags = k;
1657	skb->data_len -= len;
1658	skb->len = skb->data_len;
1659	return len;
1660}
1661
1662/* Remove acked data from a packet in the transmit queue. */
1663int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
1664{
1665	u32 delta_truesize;
1666
1667	if (skb_unclone_keeptruesize(skb, GFP_ATOMIC))
1668		return -ENOMEM;
1669
 1670	delta_truesize = __pskb_trim_head(skb, len);
1671
 1672	TCP_SKB_CB(skb)->seq += len;
1673
1674	if (delta_truesize) {
1675		skb->truesize	   -= delta_truesize;
1676		sk_wmem_queued_add(sk, -delta_truesize);
1677		if (!skb_zcopy_pure(skb))
1678			sk_mem_uncharge(sk, delta_truesize);
1679	}
1680
 1681	/* Any change of skb->len requires recalculation of tso factor. */
1682	if (tcp_skb_pcount(skb) > 1)
1683		tcp_set_skb_tso_segs(skb, tcp_skb_mss(skb));
1684
1685	return 0;
1686}
1687
 1688/* Calculate MSS, not accounting for any TCP options.  */
1689static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
1690{
1691	const struct tcp_sock *tp = tcp_sk(sk);
1692	const struct inet_connection_sock *icsk = inet_csk(sk);
1693	int mss_now;
1694
1695	/* Calculate base mss without TCP options:
1696	   It is MMS_S - sizeof(tcphdr) of rfc1122
1697	 */
1698	mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
1699
1700	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
1701	if (icsk->icsk_af_ops->net_frag_header_len) {
1702		const struct dst_entry *dst = __sk_dst_get(sk);
1703
1704		if (dst && dst_allfrag(dst))
1705			mss_now -= icsk->icsk_af_ops->net_frag_header_len;
1706	}
1707
1708	/* Clamp it (mss_clamp does not include tcp options) */
1709	if (mss_now > tp->rx_opt.mss_clamp)
1710		mss_now = tp->rx_opt.mss_clamp;
1711
1712	/* Now subtract optional transport overhead */
1713	mss_now -= icsk->icsk_ext_hdr_len;
1714
1715	/* Then reserve room for full set of TCP options and 8 bytes of data */
1716	mss_now = max(mss_now,
 1717		      READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss));
1718	return mss_now;
1719}
1720
1721/* Calculate MSS. Not accounting for SACKs here.  */
1722int tcp_mtu_to_mss(struct sock *sk, int pmtu)
1723{
1724	/* Subtract TCP options size, not including SACKs */
1725	return __tcp_mtu_to_mss(sk, pmtu) -
1726	       (tcp_sk(sk)->tcp_header_len - sizeof(struct tcphdr));
1727}
1728EXPORT_SYMBOL(tcp_mtu_to_mss);
1729
1730/* Inverse of above */
1731int tcp_mss_to_mtu(struct sock *sk, int mss)
1732{
1733	const struct tcp_sock *tp = tcp_sk(sk);
1734	const struct inet_connection_sock *icsk = inet_csk(sk);
1735	int mtu;
1736
1737	mtu = mss +
1738	      tp->tcp_header_len +
1739	      icsk->icsk_ext_hdr_len +
1740	      icsk->icsk_af_ops->net_header_len;
1741
1742	/* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
1743	if (icsk->icsk_af_ops->net_frag_header_len) {
1744		const struct dst_entry *dst = __sk_dst_get(sk);
1745
1746		if (dst && dst_allfrag(dst))
1747			mtu += icsk->icsk_af_ops->net_frag_header_len;
1748	}
1749	return mtu;
1750}
1751EXPORT_SYMBOL(tcp_mss_to_mtu);
1752
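/* Round-trip example for the two helpers above (illustrative, not part of
 * the original source; assumes IPv4 over Ethernet, timestamps in use, no
 * extension headers and no clamping): tcp_mtu_to_mss(sk, 1500) =
 * 1500 - 20 (IP) - 20 (TCP) - 12 (tcp_header_len - 20) = 1448 bytes of
 * payload per segment, and tcp_mss_to_mtu(sk, 1448) =
 * 1448 + 32 + 0 + 20 = 1500 again.
 */
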
1753/* MTU probing init per socket */
1754void tcp_mtup_init(struct sock *sk)
1755{
1756	struct tcp_sock *tp = tcp_sk(sk);
1757	struct inet_connection_sock *icsk = inet_csk(sk);
1758	struct net *net = sock_net(sk);
1759
1760	icsk->icsk_mtup.enabled = READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing) > 1;
1761	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
1762			       icsk->icsk_af_ops->net_header_len;
1763	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, READ_ONCE(net->ipv4.sysctl_tcp_base_mss));
1764	icsk->icsk_mtup.probe_size = 0;
1765	if (icsk->icsk_mtup.enabled)
1766		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
1767}
1768EXPORT_SYMBOL(tcp_mtup_init);
1769
 1770/* This function synchronizes snd mss to the current pmtu/exthdr set.
 1771
 1772   tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does
 1773   NOT account for TCP options, but covers only the bare TCP header.
1774
1775   tp->rx_opt.mss_clamp is mss negotiated at connection setup.
1776   It is minimum of user_mss and mss received with SYN.
1777   It also does not include TCP options.
1778
1779   inet_csk(sk)->icsk_pmtu_cookie is last pmtu, seen by this function.
1780
1781   tp->mss_cache is current effective sending mss, including
1782   all tcp options except for SACKs. It is evaluated,
1783   taking into account current pmtu, but never exceeds
1784   tp->rx_opt.mss_clamp.
1785
1786   NOTE1. rfc1122 clearly states that advertised MSS
1787   DOES NOT include either tcp or ip options.
1788
1789   NOTE2. inet_csk(sk)->icsk_pmtu_cookie and tp->mss_cache
1790   are READ ONLY outside this function.		--ANK (980731)
1791 */
1792unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
1793{
1794	struct tcp_sock *tp = tcp_sk(sk);
1795	struct inet_connection_sock *icsk = inet_csk(sk);
1796	int mss_now;
1797
1798	if (icsk->icsk_mtup.search_high > pmtu)
1799		icsk->icsk_mtup.search_high = pmtu;
1800
1801	mss_now = tcp_mtu_to_mss(sk, pmtu);
1802	mss_now = tcp_bound_to_half_wnd(tp, mss_now);
1803
1804	/* And store cached results */
1805	icsk->icsk_pmtu_cookie = pmtu;
1806	if (icsk->icsk_mtup.enabled)
1807		mss_now = min(mss_now, tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low));
1808	tp->mss_cache = mss_now;
1809
1810	return mss_now;
1811}
1812EXPORT_SYMBOL(tcp_sync_mss);
1813
1814/* Compute the current effective MSS, taking SACKs and IP options,
1815 * and even PMTU discovery events into account.
1816 */
1817unsigned int tcp_current_mss(struct sock *sk)
1818{
1819	const struct tcp_sock *tp = tcp_sk(sk);
1820	const struct dst_entry *dst = __sk_dst_get(sk);
1821	u32 mss_now;
1822	unsigned int header_len;
1823	struct tcp_out_options opts;
1824	struct tcp_md5sig_key *md5;
1825
1826	mss_now = tp->mss_cache;
1827
1828	if (dst) {
1829		u32 mtu = dst_mtu(dst);
1830		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
1831			mss_now = tcp_sync_mss(sk, mtu);
1832	}
1833
1834	header_len = tcp_established_options(sk, NULL, &opts, &md5) +
1835		     sizeof(struct tcphdr);
1836	/* The mss_cache is sized based on tp->tcp_header_len, which assumes
1837	 * some common options. If this is an odd packet (because we have SACK
1838	 * blocks etc) then our calculated header_len will be different, and
1839	 * we have to adjust mss_now correspondingly */
1840	if (header_len != tp->tcp_header_len) {
1841		int delta = (int) header_len - tp->tcp_header_len;
1842		mss_now -= delta;
1843	}
1844
1845	return mss_now;
1846}
1847
1848/* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
 1849 * As additional protection, we do not touch cwnd in retransmission phases,
 1850 * or if the application has hit its sndbuf limit recently.
1851 */
1852static void tcp_cwnd_application_limited(struct sock *sk)
1853{
1854	struct tcp_sock *tp = tcp_sk(sk);
1855
1856	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
1857	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
1858		/* Limited by application or receiver window. */
1859		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
1860		u32 win_used = max(tp->snd_cwnd_used, init_win);
1861		if (win_used < tcp_snd_cwnd(tp)) {
1862			tp->snd_ssthresh = tcp_current_ssthresh(sk);
1863			tcp_snd_cwnd_set(tp, (tcp_snd_cwnd(tp) + win_used) >> 1);
1864		}
1865		tp->snd_cwnd_used = 0;
1866	}
1867	tp->snd_cwnd_stamp = tcp_jiffies32;
1868}
1869
1870static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
1871{
1872	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1873	struct tcp_sock *tp = tcp_sk(sk);
1874
1875	/* Track the strongest available signal of the degree to which the cwnd
1876	 * is fully utilized. If cwnd-limited then remember that fact for the
1877	 * current window. If not cwnd-limited then track the maximum number of
1878	 * outstanding packets in the current window. (If cwnd-limited then we
1879	 * chose to not update tp->max_packets_out to avoid an extra else
1880	 * clause with no functional impact.)
1881	 */
1882	if (!before(tp->snd_una, tp->cwnd_usage_seq) ||
1883	    is_cwnd_limited ||
1884	    (!tp->is_cwnd_limited &&
1885	     tp->packets_out > tp->max_packets_out)) {
1886		tp->is_cwnd_limited = is_cwnd_limited;
1887		tp->max_packets_out = tp->packets_out;
1888		tp->cwnd_usage_seq = tp->snd_nxt;
1889	}
1890
1891	if (tcp_is_cwnd_limited(sk)) {
 1892		/* Network is fully fed. */
1893		tp->snd_cwnd_used = 0;
1894		tp->snd_cwnd_stamp = tcp_jiffies32;
1895	} else {
1896		/* Network starves. */
1897		if (tp->packets_out > tp->snd_cwnd_used)
1898			tp->snd_cwnd_used = tp->packets_out;
1899
1900		if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) &&
1901		    (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
1902		    !ca_ops->cong_control)
1903			tcp_cwnd_application_limited(sk);
1904
1905		/* The following conditions together indicate the starvation
1906		 * is caused by insufficient sender buffer:
1907		 * 1) just sent some data (see tcp_write_xmit)
1908		 * 2) not cwnd limited (this else condition)
1909		 * 3) no more data to send (tcp_write_queue_empty())
1910		 * 4) application is hitting buffer limit (SOCK_NOSPACE)
1911		 */
1912		if (tcp_write_queue_empty(sk) && sk->sk_socket &&
1913		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) &&
1914		    (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
1915			tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED);
1916	}
1917}
1918
1919/* Minshall's variant of the Nagle send check. */
1920static bool tcp_minshall_check(const struct tcp_sock *tp)
1921{
1922	return after(tp->snd_sml, tp->snd_una) &&
1923		!after(tp->snd_sml, tp->snd_nxt);
1924}
1925
1926/* Update snd_sml if this skb is under mss
1927 * Note that a TSO packet might end with a sub-mss segment
1928 * The test is really :
1929 * if ((skb->len % mss) != 0)
1930 *        tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1931 * But we can avoid doing the divide again given we already have
1932 *  skb_pcount = skb->len / mss_now
1933 */
1934static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now,
1935				const struct sk_buff *skb)
1936{
1937	if (skb->len < tcp_skb_pcount(skb) * mss_now)
1938		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1939}
1940
 1941/* Return false if the packet can be sent now without violating Nagle's rules:
1942 * 1. It is full sized. (provided by caller in %partial bool)
1943 * 2. Or it contains FIN. (already checked by caller)
1944 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
1945 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1946 *    With Minshall's modification: all sent small packets are ACKed.
1947 */
1948static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp,
1949			    int nonagle)
1950{
1951	return partial &&
1952		((nonagle & TCP_NAGLE_CORK) ||
1953		 (!nonagle && tp->packets_out && tcp_minshall_check(tp)));
1954}
1955
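/* Decision examples for tcp_nagle_check() (illustrative, not part of the
 * original source), where "partial" means the last segment is sub-MSS and
 * "defer" means the function returns true:
 *
 *   partial  TCP_CORK  TCP_NODELAY  packets_out  minshall  -> defer?
 *    yes      set        -            -            -          yes
 *    yes      unset     unset        > 0          yes         yes
 *    yes      unset      set          -            -          no
 *    no        -          -           -            -          no
 */
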
1956/* Return how many segs we'd like on a TSO packet,
1957 * depending on current pacing rate, and how close the peer is.
1958 *
1959 * Rationale is:
1960 * - For close peers, we rather send bigger packets to reduce
1961 *   cpu costs, because occasional losses will be repaired fast.
1962 * - For long distance/rtt flows, we would like to get ACK clocking
1963 *   with 1 ACK per ms.
1964 *
1965 * Use min_rtt to help adapt TSO burst size, with smaller min_rtt resulting
 1966 * in bigger TSO bursts. We cut the RTT-based allowance in half
1967 * for every 2^9 usec (aka 512 us) of RTT, so that the RTT-based allowance
1968 * is below 1500 bytes after 6 * ~500 usec = 3ms.
1969 */
1970static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
1971			    int min_tso_segs)
1972{
1973	unsigned long bytes;
1974	u32 r;
1975
1976	bytes = sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift);
1977
1978	r = tcp_min_rtt(tcp_sk(sk)) >> READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_rtt_log);
1979	if (r < BITS_PER_TYPE(sk->sk_gso_max_size))
1980		bytes += sk->sk_gso_max_size >> r;
1981
1982	bytes = min_t(unsigned long, bytes, sk->sk_gso_max_size);
1983
1984	return max_t(u32, bytes / mss_now, min_tso_segs);
1985}
1986
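/* Worked example for tcp_tso_autosize() (hypothetical numbers, not part
 * of the original source; assumes the default sk_pacing_shift = 10,
 * sysctl_tcp_tso_rtt_log = 9 and sk_gso_max_size = 65536): at
 * sk_pacing_rate = 12,500,000 B/s (100 Mbit/s), bytes = 12.5e6 >> 10
 * ~= 12,207, about 1 ms worth of data.  A close peer with min_rtt =
 * 300 us gives r = 0, the gso_max_size bonus dominates, and we clamp to
 * 65536 / 1448 ~= 45 segments; a distant peer with min_rtt = 10 ms gives
 * r = 19, no bonus, and only 12,207 / 1448 ~= 8 segments per TSO skb.
 */
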
1987/* Return the number of segments we want in the skb we are transmitting.
1988 * See if congestion control module wants to decide; otherwise, autosize.
1989 */
1990static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
1991{
1992	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
1993	u32 min_tso, tso_segs;
1994
1995	min_tso = ca_ops->min_tso_segs ?
1996			ca_ops->min_tso_segs(sk) :
1997			READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
1998
1999	tso_segs = tcp_tso_autosize(sk, mss_now, min_tso);
2000	return min_t(u32, tso_segs, sk->sk_gso_max_segs);
2001}
2002
2003/* Returns the portion of skb which can be sent right away */
2004static unsigned int tcp_mss_split_point(const struct sock *sk,
2005					const struct sk_buff *skb,
2006					unsigned int mss_now,
2007					unsigned int max_segs,
2008					int nonagle)
2009{
2010	const struct tcp_sock *tp = tcp_sk(sk);
2011	u32 partial, needed, window, max_len;
2012
2013	window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
2014	max_len = mss_now * max_segs;
2015
2016	if (likely(max_len <= window && skb != tcp_write_queue_tail(sk)))
2017		return max_len;
2018
2019	needed = min(skb->len, window);
2020
2021	if (max_len <= needed)
2022		return max_len;
2023
2024	partial = needed % mss_now;
2025	/* If last segment is not a full MSS, check if Nagle rules allow us
2026	 * to include this last segment in this skb.
2027	 * Otherwise, we'll split the skb at last MSS boundary
2028	 */
2029	if (tcp_nagle_check(partial != 0, tp, nonagle))
2030		return needed - partial;
2031
2032	return needed;
2033}
2034
2035/* Can at least one segment of SKB be sent right now, according to the
2036 * congestion window rules?  If so, return how many segments are allowed.
2037 */
2038static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
2039					 const struct sk_buff *skb)
2040{
2041	u32 in_flight, cwnd, halfcwnd;
2042
2043	/* Don't be strict about the congestion window for the final FIN.  */
2044	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) &&
2045	    tcp_skb_pcount(skb) == 1)
2046		return 1;
2047
2048	in_flight = tcp_packets_in_flight(tp);
2049	cwnd = tcp_snd_cwnd(tp);
2050	if (in_flight >= cwnd)
2051		return 0;
2052
2053	/* For better scheduling, ensure we have at least
2054	 * 2 GSO packets in flight.
2055	 */
2056	halfcwnd = max(cwnd >> 1, 1U);
2057	return min(halfcwnd, cwnd - in_flight);
2058}
2059
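/* Example for tcp_cwnd_test() (illustrative, not part of the original
 * source): with cwnd = 10 and in_flight = 7, halfcwnd = 5 and we may send
 * min(5, 10 - 7) = 3 segments now; with in_flight = 2 we are capped at
 * halfcwnd = 5 even though 8 segments of window remain, spreading the
 * burst over at least two passes through the xmit loop.
 */
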
2060/* Initialize TSO state of a skb.
2061 * This must be invoked the first time we consider transmitting
2062 * SKB onto the wire.
2063 */
 2064static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
2065{
2066	int tso_segs = tcp_skb_pcount(skb);
2067
2068	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
2069		tcp_set_skb_tso_segs(skb, mss_now);
2070		tso_segs = tcp_skb_pcount(skb);
2071	}
2072	return tso_segs;
2073}
 2074
2075
2076/* Return true if the Nagle test allows this packet to be
2077 * sent now.
2078 */
2079static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
2080				  unsigned int cur_mss, int nonagle)
2081{
 2082	/* The Nagle rule does not apply to frames that sit in the middle of the
 2083	 * write_queue (they have no chance to get new data).
2084	 *
2085	 * This is implemented in the callers, where they modify the 'nonagle'
2086	 * argument based upon the location of SKB in the send queue.
2087	 */
2088	if (nonagle & TCP_NAGLE_PUSH)
2089		return true;
2090
2091	/* Don't use the nagle rule for urgent data (or for the final FIN). */
2092	if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
 2093		return true;
2094
2095	if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle))
2096		return true;
2097
2098	return false;
2099}
2100
2101/* Does at least the first segment of SKB fit into the send window? */
2102static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
2103			     const struct sk_buff *skb,
2104			     unsigned int cur_mss)
2105{
2106	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
2107
2108	if (skb->len > cur_mss)
2109		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;
2110
2111	return !after(end_seq, tcp_wnd_end(tp));
2112}
 2113
2114/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
2115 * which is put after SKB on the list.  It is very much like
2116 * tcp_fragment() except that it may make several kinds of assumptions
2117 * in order to speed up the splitting operation.  In particular, we
2118 * know that all the data is in scatter-gather pages, and that the
2119 * packet has never been sent out before (and thus is not cloned).
2120 */
2121static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
2122			unsigned int mss_now, gfp_t gfp)
2123{
2124	int nlen = skb->len - len;
2125	struct sk_buff *buff;
2126	u8 flags;
2127
2128	/* All of a TSO frame must be composed of paged data.  */
2129	if (skb->len != skb->data_len)
2130		return tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
2131				    skb, len, mss_now, gfp);
2132
2133	buff = tcp_stream_alloc_skb(sk, 0, gfp, true);
2134	if (unlikely(!buff))
2135		return -ENOMEM;
2136	skb_copy_decrypted(buff, skb);
2137	mptcp_skb_ext_copy(buff, skb);
2138
2139	sk_wmem_queued_add(sk, buff->truesize);
2140	sk_mem_charge(sk, buff->truesize);
2141	buff->truesize += nlen;
2142	skb->truesize -= nlen;
2143
2144	/* Correct the sequence numbers. */
2145	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
2146	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
2147	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
2148
2149	/* PSH and FIN should only be set in the second packet. */
2150	flags = TCP_SKB_CB(skb)->tcp_flags;
2151	TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH);
2152	TCP_SKB_CB(buff)->tcp_flags = flags;
2153
2154	tcp_skb_fragment_eor(skb, buff);
2155
2156	skb_split(skb, buff, len);
2157	tcp_fragment_tstamp(skb, buff);
2158
2159	/* Fix up tso_factor for both original and new SKB.  */
2160	tcp_set_skb_tso_segs(skb, mss_now);
2161	tcp_set_skb_tso_segs(buff, mss_now);
2162
2163	/* Link BUFF into the send queue. */
2164	__skb_header_release(buff);
2165	tcp_insert_write_queue_after(skb, buff, sk, TCP_FRAG_IN_WRITE_QUEUE);
2166
2167	return 0;
2168}
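/* Illustrative example (not from the source): an skb covering sequence
 * range [1000, 5344) (4344 bytes, three 1448-byte segments) split at
 * len = 2896 leaves skb with [1000, 3896) (two segments) and buff with
 * [3896, 5344) (one segment); PSH/FIN, if set, move to buff.
 */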
2169
2170/* Try to defer sending, if possible, in order to minimize the amount
2171 * of TSO splitting we do.  View it as a kind of TSO Nagle test.
2172 *
2173 * This algorithm is from John Heffner.
2174 */
2175static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
2176				 bool *is_cwnd_limited,
2177				 bool *is_rwnd_limited,
2178				 u32 max_segs)
2179{
2180	const struct inet_connection_sock *icsk = inet_csk(sk);
2181	u32 send_win, cong_win, limit, in_flight;
2182	struct tcp_sock *tp = tcp_sk(sk);
2183	struct sk_buff *head;
2184	int win_divisor;
2185	s64 delta;
2186
2187	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
2188		goto send_now;
2189
2190	/* Avoid bursty behavior by allowing deferral
2191	 * only if the last write was recent (within 1 ms).
2192	 * Note that tp->tcp_wstamp_ns can be in the future if we have
2193	 * packets waiting in a qdisc or device for EDT delivery.
2194	 */
2195	delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC;
2196	if (delta > 0)
2197		goto send_now;
2198
2199	in_flight = tcp_packets_in_flight(tp);
2200
2201	BUG_ON(tcp_skb_pcount(skb) <= 1);
2202	BUG_ON(tcp_snd_cwnd(tp) <= in_flight);
2203
2204	send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
2205
2206	/* From in_flight test above, we know that cwnd > in_flight.  */
2207	cong_win = (tcp_snd_cwnd(tp) - in_flight) * tp->mss_cache;
2208
2209	limit = min(send_win, cong_win);
2210
2211	/* If a full-sized TSO skb can be sent, do it. */
2212	if (limit >= max_segs * tp->mss_cache)
2213		goto send_now;
2214
2215	/* Middle of the queue won't get more data; is it fully sendable already? */
2216	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
2217		goto send_now;
2218
2219	win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
2220	if (win_divisor) {
2221		u32 chunk = min(tp->snd_wnd, tcp_snd_cwnd(tp) * tp->mss_cache);
2222
2223		/* If at least some fraction of a window is available,
2224		 * just use it.
2225		 */
2226		chunk /= win_divisor;
2227		if (limit >= chunk)
2228			goto send_now;
2229	} else {
2230		/* Different approach, try not to defer past a single
2231		 * ACK.  Receiver should ACK every other full sized
2232		 * frame, so if we have space for more than 3 frames
2233		 * then send now.
2234		 */
2235		if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
2236			goto send_now;
2237	}
2238
2239	/* TODO : use tsorted_sent_queue ? */
2240	head = tcp_rtx_queue_head(sk);
2241	if (!head)
2242		goto send_now;
2243	delta = tp->tcp_clock_cache - head->tstamp;
2244	/* If next ACK is likely to come too late (half srtt), do not defer */
2245	if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
2246		goto send_now;
2247
2248	/* Ok, it looks like it is advisable to defer.
2249	 * Three cases are tracked :
2250	 * 1) We are cwnd-limited
2251	 * 2) We are rwnd-limited
2252	 * 3) We are application limited.
2253	 */
2254	if (cong_win < send_win) {
2255		if (cong_win <= skb->len) {
2256			*is_cwnd_limited = true;
2257			return true;
2258		}
2259	} else {
2260		if (send_win <= skb->len) {
2261			*is_rwnd_limited = true;
2262			return true;
2263		}
2264	}
2265
2266	/* If this packet won't get more data, do not wait. */
2267	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) ||
2268	    TCP_SKB_CB(skb)->eor)
2269		goto send_now;
2270
2271	return true;
2272
2273send_now:
2274	return false;
2275}
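/* Illustrative example (not from the source): with mss_cache = 1448,
 * cwnd = 20 and in_flight = 14, cong_win = 6 * 1448 = 8688 bytes.
 * Assuming send_win = 50000 and max_segs * mss above 8688, the skb is
 * not yet full-sized.  With tcp_tso_win_divisor = 3 and
 * snd_wnd = 60000, chunk = min(60000, 20 * 1448) / 3 = 9653 > limit,
 * so that test does not fire either; the function then defers
 * (returns true) unless the rtx-queue timing check or the FIN/EOR
 * check forces an immediate send.
 */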
2276
2277static inline void tcp_mtu_check_reprobe(struct sock *sk)
2278{
2279	struct inet_connection_sock *icsk = inet_csk(sk);
2280	struct tcp_sock *tp = tcp_sk(sk);
2281	struct net *net = sock_net(sk);
2282	u32 interval;
2283	s32 delta;
2284
2285	interval = READ_ONCE(net->ipv4.sysctl_tcp_probe_interval);
2286	delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
2287	if (unlikely(delta >= interval * HZ)) {
2288		int mss = tcp_current_mss(sk);
2289
2290		/* Update current search range */
2291		icsk->icsk_mtup.probe_size = 0;
2292		icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp +
2293			sizeof(struct tcphdr) +
2294			icsk->icsk_af_ops->net_header_len;
2295		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
2296
2297		/* Update probe time stamp */
2298		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
2299	}
2300}
2301
2302static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
2303{
2304	struct sk_buff *skb, *next;
2305
2306	skb = tcp_send_head(sk);
2307	tcp_for_write_queue_from_safe(skb, next, sk) {
2308		if (len <= skb->len)
2309			break;
2310
2311		if (unlikely(TCP_SKB_CB(skb)->eor) ||
2312		    tcp_has_tx_tstamp(skb) ||
2313		    !skb_pure_zcopy_same(skb, next))
2314			return false;
2315
2316		len -= skb->len;
2317	}
2318
2319	return true;
2320}
2321
2322/* Create a new MTU probe if we are ready.
2323 * MTU probe is regularly attempting to increase the path MTU by
2324 * deliberately sending larger packets.  This discovers routing
2325 * changes resulting in larger path MTUs.
2326 *
2327 * Returns 0 if we should wait to probe (no cwnd available),
2328 *         1 if a probe was sent,
2329 *         -1 otherwise
2330 */
2331static int tcp_mtu_probe(struct sock *sk)
2332{
2333	struct inet_connection_sock *icsk = inet_csk(sk);
2334	struct tcp_sock *tp = tcp_sk(sk);
2335	struct sk_buff *skb, *nskb, *next;
2336	struct net *net = sock_net(sk);
2337	int probe_size;
2338	int size_needed;
2339	int copy, len;
2340	int mss_now;
2341	int interval;
2342
2343	/* Not currently probing/verifying,
2344	 * not in recovery,
2345	 * have enough cwnd, and
2346	 * not SACKing (the variable headers throw things off)
2347	 */
2348	if (likely(!icsk->icsk_mtup.enabled ||
2349		   icsk->icsk_mtup.probe_size ||
2350		   inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
2351		   tcp_snd_cwnd(tp) < 11 ||
2352		   tp->rx_opt.num_sacks || tp->rx_opt.dsack))
2353		return -1;
2354
2355	/* Use binary search for probe_size between tcp_base_mss
2356	 * and the current mss_clamp. If (search_high - search_low) is
2357	 * smaller than a threshold, back off from probing.
2358	 */
2359	mss_now = tcp_current_mss(sk);
2360	probe_size = tcp_mtu_to_mss(sk, (icsk->icsk_mtup.search_high +
2361				    icsk->icsk_mtup.search_low) >> 1);
2362	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
2363	interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;
2364	/* When misfortune happens, we are actively reprobing and the
2365	 * reprobe timer has expired. Stick with the current probing
2366	 * process by not resetting the search range to its original value.
2367	 */
2368	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
2369	    interval < READ_ONCE(net->ipv4.sysctl_tcp_probe_threshold)) {
2370		/* Check whether enough time has elapsed for
2371		 * another round of probing.
2372		 */
2373		tcp_mtu_check_reprobe(sk);
2374		return -1;
2375	}
2376
2377	/* Have enough data in the send queue to probe? */
2378	if (tp->write_seq - tp->snd_nxt < size_needed)
2379		return -1;
2380
2381	if (tp->snd_wnd < size_needed)
2382		return -1;
2383	if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp)))
2384		return 0;
2385
2386	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
2387	if (tcp_packets_in_flight(tp) + 2 > tcp_snd_cwnd(tp)) {
2388		if (!tcp_packets_in_flight(tp))
2389			return -1;
2390		else
2391			return 0;
2392	}
2393
2394	if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
2395		return -1;
2396
2397	/* We're allowed to probe.  Build it now. */
2398	nskb = tcp_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
2399	if (!nskb)
2400		return -1;
2401	sk_wmem_queued_add(sk, nskb->truesize);
2402	sk_mem_charge(sk, nskb->truesize);
2403
2404	skb = tcp_send_head(sk);
2405	skb_copy_decrypted(nskb, skb);
2406	mptcp_skb_ext_copy(nskb, skb);
2407
2408	TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq;
2409	TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size;
2410	TCP_SKB_CB(nskb)->tcp_flags = TCPHDR_ACK;
2411
2412	tcp_insert_write_queue_before(nskb, skb, sk);
2413	tcp_highest_sack_replace(sk, skb, nskb);
2414
2415	len = 0;
2416	tcp_for_write_queue_from_safe(skb, next, sk) {
2417		copy = min_t(int, skb->len, probe_size - len);
2418		skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
2419
2420		if (skb->len <= copy) {
2421			/* We've eaten all the data from this skb.
2422			 * Throw it away. */
2423			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
2424			/* If this is the last SKB we copy and eor is set
2425			 * we need to propagate it to the new skb.
2426			 */
2427			TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
2428			tcp_skb_collapse_tstamp(nskb, skb);
2429			tcp_unlink_write_queue(skb, sk);
2430			tcp_wmem_free_skb(sk, skb);
2431		} else {
2432			TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags &
2433						   ~(TCPHDR_FIN|TCPHDR_PSH);
2434			if (!skb_shinfo(skb)->nr_frags) {
2435				skb_pull(skb, copy);
2436			} else {
2437				__pskb_trim_head(skb, copy);
2438				tcp_set_skb_tso_segs(skb, mss_now);
2439			}
2440			TCP_SKB_CB(skb)->seq += copy;
2441		}
2442
2443		len += copy;
2444
2445		if (len >= probe_size)
2446			break;
2447	}
2448	tcp_init_tso_segs(nskb, nskb->len);
2449
2450	/* We're ready to send.  If this fails, the probe will
2451	 * be resegmented into mss-sized pieces by tcp_write_xmit().
2452	 */
2453	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
2454		/* Decrement cwnd here because we are sending
2455		 * effectively two packets. */
2456		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1);
2457		tcp_event_new_data_sent(sk, nskb);
2458
2459		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
2460		tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
2461		tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq;
2462
2463		return 1;
2464	}
2465
2466	return -1;
2467}
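/* Illustrative example (not from the source, assuming IPv4 with no TCP
 * options so that MSS = MTU - 40): with search_low = 1100 and
 * search_high = 1500, the midpoint MTU is 1300, giving
 * probe_size = 1260 bytes.  With reordering = 3 and mss_cache = 1060,
 * size_needed = 1260 + 4 * 1060 = 5500 bytes must be queued (and fit
 * in the send window) before the probe is built and sent.
 */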
2468
2469static bool tcp_pacing_check(struct sock *sk)
2470{
2471	struct tcp_sock *tp = tcp_sk(sk);
2472
2473	if (!tcp_needs_internal_pacing(sk))
2474		return false;
2475
2476	if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache)
2477		return false;
2478
2479	if (!hrtimer_is_queued(&tp->pacing_timer)) {
2480		hrtimer_start(&tp->pacing_timer,
2481			      ns_to_ktime(tp->tcp_wstamp_ns),
2482			      HRTIMER_MODE_ABS_PINNED_SOFT);
2483		sock_hold(sk);
2484	}
2485	return true;
2486}
2487
2488/* TCP Small Queues :
2489 * Control the number of packets in qdisc/devices to two packets or ~1 ms of data.
2490 * (These limits are doubled for retransmits)
2491 * This allows for :
2492 *  - better RTT estimation and ACK scheduling
2493 *  - faster recovery
2494 *  - high rates
2495 * Alas, some drivers / subsystems require a fair amount
2496 * of queued bytes to ensure line rate.
2497 * One example is wifi aggregation (802.11 AMPDU)
2498 */
2499static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2500				  unsigned int factor)
2501{
2502	unsigned long limit;
2503
2504	limit = max_t(unsigned long,
2505		      2 * skb->truesize,
2506		      sk->sk_pacing_rate >> READ_ONCE(sk->sk_pacing_shift));
2507	if (sk->sk_pacing_status == SK_PACING_NONE)
2508		limit = min_t(unsigned long, limit,
2509			      READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes));
2510	limit <<= factor;
2511
2512	if (static_branch_unlikely(&tcp_tx_delay_enabled) &&
2513	    tcp_sk(sk)->tcp_tx_delay) {
2514		u64 extra_bytes = (u64)sk->sk_pacing_rate * tcp_sk(sk)->tcp_tx_delay;
2515
2516		/* TSQ is based on skb truesize sum (sk_wmem_alloc), so we
2517		 * approximate our needs assuming an ~100% skb->truesize overhead.
2518		 * USEC_PER_SEC is approximated by 2^20.
2519		 * do_div(extra_bytes, USEC_PER_SEC/2) is replaced by a right shift.
2520		 */
2521		extra_bytes >>= (20 - 1);
2522		limit += extra_bytes;
2523	}
2524	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
2525		/* Always send skb if rtx queue is empty.
2526		 * No need to wait for TX completion to call us back,
2527		 * after softirq/tasklet schedule.
2528		 * This helps when TX completions are delayed too much.
2529		 */
2530		if (tcp_rtx_queue_empty(sk))
2531			return false;
2532
2533		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2534		/* It is possible TX completion already happened
2535		 * before we set TSQ_THROTTLED, so we must
2536		 * test again the condition.
2537		 */
2538		smp_mb__after_atomic();
2539		if (refcount_read(&sk->sk_wmem_alloc) > limit)
2540			return true;
2541	}
2542	return false;
2543}
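/* Illustrative example (not from the source): with a pacing rate of
 * 12,500,000 B/s (100 Mbit/s) and the default sk_pacing_shift of 10,
 * limit = max(2 * skb->truesize, 12500000 >> 10) ~= 12207 bytes,
 * i.e. roughly 1 ms worth of data queued below the socket.  For
 * retransmits the caller passes factor = 1, doubling the limit.
 */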
2544
2545static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
2546{
2547	const u32 now = tcp_jiffies32;
2548	enum tcp_chrono old = tp->chrono_type;
2549
2550	if (old > TCP_CHRONO_UNSPEC)
2551		tp->chrono_stat[old - 1] += now - tp->chrono_start;
2552	tp->chrono_start = now;
2553	tp->chrono_type = new;
2554}
2555
2556void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
2557{
2558	struct tcp_sock *tp = tcp_sk(sk);
2559
2560	/* If there are multiple conditions worthy of tracking in a
2561	 * chronograph, then the highest-priority enum takes precedence
2562	 * over the other conditions, so that if something "more interesting"
2563	 * starts happening, we stop the previous chrono and start a new one.
2564	 */
2565	if (type > tp->chrono_type)
2566		tcp_chrono_set(tp, type);
2567}
2568
2569void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
2570{
2571	struct tcp_sock *tp = tcp_sk(sk);
2572
2573
2574	/* There are multiple conditions worthy of tracking in a
2575	 * chronograph, so that the highest priority enum takes
2576	 * precedence over the other conditions (see tcp_chrono_start).
2577	 * If a condition stops, we only stop chrono tracking if
2578	 * it's the "most interesting" (i.e. current) chrono we are
2579	 * tracking, and we start the busy chrono if we have pending data.
2580	 */
2581	if (tcp_rtx_and_write_queues_empty(sk))
2582		tcp_chrono_set(tp, TCP_CHRONO_UNSPEC);
2583	else if (type == tp->chrono_type)
2584		tcp_chrono_set(tp, TCP_CHRONO_BUSY);
2585}
2586
2587/* This routine writes packets to the network.  It advances the
2588 * send_head.  This happens as incoming acks open up the remote
2589 * window for us.
2590 *
2591 * LARGESEND note: !tcp_urg_mode is overkill, only frames between
2592 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
2593 * account rare use of URG, this is not a big flaw.
2594 *
2595 * Send at most one packet when push_one > 0. Temporarily ignore
2596 * cwnd limit to force at most one packet out when push_one == 2.
2597 *
2598 * Returns true if no segments are in flight and we have queued segments,
2599 * but cannot send anything now because of SWS or another problem.
2600 */
2601static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2602			   int push_one, gfp_t gfp)
2603{
2604	struct tcp_sock *tp = tcp_sk(sk);
2605	struct sk_buff *skb;
2606	unsigned int tso_segs, sent_pkts;
2607	int cwnd_quota;
2608	int result;
2609	bool is_cwnd_limited = false, is_rwnd_limited = false;
2610	u32 max_segs;
2611
2612	sent_pkts = 0;
2613
2614	tcp_mstamp_refresh(tp);
2615	if (!push_one) {
2616		/* Do MTU probing. */
2617		result = tcp_mtu_probe(sk);
2618		if (!result) {
2619			return false;
2620		} else if (result > 0) {
2621			sent_pkts = 1;
2622		}
2623	}
2624
2625	max_segs = tcp_tso_segs(sk, mss_now);
2626	while ((skb = tcp_send_head(sk))) {
2627		unsigned int limit;
2628
2629		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
2630			/* "skb_mstamp_ns" is used as a start point for the retransmit timer */
2631			tp->tcp_wstamp_ns = tp->tcp_clock_cache;
2632			skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true);
2633			list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
2634			tcp_init_tso_segs(skb, mss_now);
2635			goto repair; /* Skip network transmission */
2636		}
2637
2638		if (tcp_pacing_check(sk))
2639			break;
2640
2641		tso_segs = tcp_init_tso_segs(skb, mss_now);
2642		BUG_ON(!tso_segs);
2643
2644		cwnd_quota = tcp_cwnd_test(tp, skb);
2645		if (!cwnd_quota) {
2646			if (push_one == 2)
2647				/* Force out a loss probe pkt. */
2648				cwnd_quota = 1;
2649			else
2650				break;
2651		}
2652
2653		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) {
2654			is_rwnd_limited = true;
2655			break;
2656		}
2657
2658		if (tso_segs == 1) {
2659			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
2660						     (tcp_skb_is_last(sk, skb) ?
2661						      nonagle : TCP_NAGLE_PUSH))))
2662				break;
2663		} else {
2664			if (!push_one &&
2665			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
2666						 &is_rwnd_limited, max_segs))
2667				break;
2668		}
2669
2670		limit = mss_now;
2671		if (tso_segs > 1 && !tcp_urg_mode(tp))
2672			limit = tcp_mss_split_point(sk, skb, mss_now,
2673						    min_t(unsigned int,
2674							  cwnd_quota,
2675							  max_segs),
2676						    nonagle);
2677
2678		if (skb->len > limit &&
2679		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
2680			break;
2681
2682		if (tcp_small_queue_check(sk, skb, 0))
2683			break;
2684
2685		/* Argh, we hit an empty skb; presumably a thread
2686		 * is sleeping in sendmsg()/sk_stream_wait_memory().
2687		 * We do not want to send a pure-ack packet and have
2688		 * a strange-looking rtx queue with empty packet(s).
2689		 */
2690		if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq)
2691			break;
2692
2693		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
2694			break;
2695
2696repair:
2697		/* Advance the send_head.  This one is sent out.
2698		 * This call will increment packets_out.
2699		 */
2700		tcp_event_new_data_sent(sk, skb);
2701
2702		tcp_minshall_update(tp, mss_now, skb);
2703		sent_pkts += tcp_skb_pcount(skb);
2704
2705		if (push_one)
2706			break;
2707	}
2708
2709	if (is_rwnd_limited)
2710		tcp_chrono_start(sk, TCP_CHRONO_RWND_LIMITED);
2711	else
2712		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
2713
2714	is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp));
2715	if (likely(sent_pkts || is_cwnd_limited))
2716		tcp_cwnd_validate(sk, is_cwnd_limited);
2717
2718	if (likely(sent_pkts)) {
2719		if (tcp_in_cwnd_reduction(sk))
2720			tp->prr_out += sent_pkts;
2721
2722		/* Send one loss probe per tail loss episode. */
2723		if (push_one != 2)
2724			tcp_schedule_loss_probe(sk, false);
2725		return false;
2726	}
2727	return !tp->packets_out && !tcp_write_queue_empty(sk);
2728}
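/* Summary of the per-skb gates in tcp_write_xmit() above (descriptive,
 * in loop order): repair bypass -> pacing check -> TSO init ->
 * cwnd quota -> send window test -> Nagle (single segment) or
 * TSO-defer (multiple segments) -> split point / tso_fragment() ->
 * TCP Small Queues check -> empty-skb guard -> transmit ->
 * advance send head.
 */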
2729
2730bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
2731{
2732	struct inet_connection_sock *icsk = inet_csk(sk);
2733	struct tcp_sock *tp = tcp_sk(sk);
2734	u32 timeout, rto_delta_us;
2735	int early_retrans;
2736
2737	/* Don't do any loss probe on a Fast Open connection before 3WHS
2738	 * finishes.
2739	 */
2740	if (rcu_access_pointer(tp->fastopen_rsk))
2741		return false;
2742
2743	early_retrans = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_early_retrans);
2744	/* Schedule a loss probe in 2*RTT for SACK capable connections
2745	 * not in loss recovery, that are either limited by cwnd or application.
2746	 */
2747	if ((early_retrans != 3 && early_retrans != 4) ||
2748	    !tp->packets_out || !tcp_is_sack(tp) ||
2749	    (icsk->icsk_ca_state != TCP_CA_Open &&
2750	     icsk->icsk_ca_state != TCP_CA_CWR))
2751		return false;
2752
2753	/* Probe timeout is 2*rtt. Add minimum RTO to account
2754	 * for delayed ack when there's one outstanding packet. If no RTT
2755	 * sample is available then probe after TCP_TIMEOUT_INIT.
2756	 */
2757	if (tp->srtt_us) {
2758		timeout = usecs_to_jiffies(tp->srtt_us >> 2);
2759		if (tp->packets_out == 1)
2760			timeout += TCP_RTO_MIN;
2761		else
2762			timeout += TCP_TIMEOUT_MIN;
2763	} else {
2764		timeout = TCP_TIMEOUT_INIT;
2765	}
2766
2767	/* If the RTO formula yields an earlier time, then use that time. */
2768	rto_delta_us = advancing_rto ?
2769			jiffies_to_usecs(inet_csk(sk)->icsk_rto) :
2770			tcp_rto_delta_us(sk);  /* How far in future is RTO? */
2771	if (rto_delta_us > 0)
2772		timeout = min_t(u32, timeout, usecs_to_jiffies(rto_delta_us));
2773
2774	tcp_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout, TCP_RTO_MAX);
2775	return true;
2776}
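/* Illustrative example (not from the source): tp->srtt_us stores
 * 8 * SRTT, so srtt_us >> 2 equals 2 * SRTT.  With SRTT = 50 ms,
 * srtt_us = 400000 and timeout = 100 ms; with exactly one packet
 * outstanding, TCP_RTO_MIN is added to cover a delayed ACK, and the
 * result is clamped so the probe never fires later than the RTO.
 */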
2777
2778/* Thanks to skb fast clones, we can detect if a prior transmit of
2779 * a packet is still in a qdisc or driver queue.
2780 * In this case, there is very little point doing a retransmit !
2781 */
2782static bool skb_still_in_host_queue(struct sock *sk,
2783				    const struct sk_buff *skb)
2784{
2785	if (unlikely(skb_fclone_busy(sk, skb))) {
2786		set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
2787		smp_mb__after_atomic();
2788		if (skb_fclone_busy(sk, skb)) {
2789			NET_INC_STATS(sock_net(sk),
2790				      LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
2791			return true;
2792		}
2793	}
2794	return false;
2795}
2796
2797/* When probe timeout (PTO) fires, try to send a new segment if possible, else
2798 * retransmit the last segment.
2799 */
2800void tcp_send_loss_probe(struct sock *sk)
2801{
2802	struct tcp_sock *tp = tcp_sk(sk);
2803	struct sk_buff *skb;
2804	int pcount;
2805	int mss = tcp_current_mss(sk);
2806
2807	/* At most one outstanding TLP */
2808	if (tp->tlp_high_seq)
2809		goto rearm_timer;
2810
2811	tp->tlp_retrans = 0;
2812	skb = tcp_send_head(sk);
2813	if (skb && tcp_snd_wnd_test(tp, skb, mss)) {
2814		pcount = tp->packets_out;
2815		tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
2816		if (tp->packets_out > pcount)
2817			goto probe_sent;
2818		goto rearm_timer;
2819	}
2820	skb = skb_rb_last(&sk->tcp_rtx_queue);
2821	if (unlikely(!skb)) {
2822		WARN_ONCE(tp->packets_out,
2823			  "invalid inflight: %u state %u cwnd %u mss %d\n",
2824			  tp->packets_out, sk->sk_state, tcp_snd_cwnd(tp), mss);
2825		inet_csk(sk)->icsk_pending = 0;
2826		return;
2827	}
2828
2829	if (skb_still_in_host_queue(sk, skb))
2830		goto rearm_timer;
2831
2832	pcount = tcp_skb_pcount(skb);
2833	if (WARN_ON(!pcount))
2834		goto rearm_timer;
2835
2836	if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
2837		if (unlikely(tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
2838					  (pcount - 1) * mss, mss,
2839					  GFP_ATOMIC)))
2840			goto rearm_timer;
2841		skb = skb_rb_next(skb);
2842	}
2843
2844	if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
2845		goto rearm_timer;
2846
2847	if (__tcp_retransmit_skb(sk, skb, 1))
2848		goto rearm_timer;
2849
2850	tp->tlp_retrans = 1;
2851
2852probe_sent:
2853	/* Record snd_nxt for loss detection. */
2854	tp->tlp_high_seq = tp->snd_nxt;
2855
2856	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
2857	/* Reset s.t. tcp_rearm_rto will restart timer from now */
2858	inet_csk(sk)->icsk_pending = 0;
2859rearm_timer:
2860	tcp_rearm_rto(sk);
2861}
2862
2863/* Push out any pending frames which were held back due to
2864 * TCP_CORK or attempt at coalescing tiny packets.
2865 * The socket must be locked by the caller.
2866 */
2867void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
2868			       int nonagle)
2869{
2870	/* If we are closed, the bytes will have to remain here.
2871	 * In time the closedown will finish; we will empty the write queue
2872	 * and all will be happy.
2873	 */
2874	if (unlikely(sk->sk_state == TCP_CLOSE))
2875		return;
2876
2877	if (tcp_write_xmit(sk, cur_mss, nonagle, 0,
2878			   sk_gfp_mask(sk, GFP_ATOMIC)))
2879		tcp_check_probe_timer(sk);
2880}
2881
2882/* Send the _single_ skb sitting at the send head. Callers that need the
2883 * probe timer etc. set up must use a true push of pending frames instead.
2884 */
2885void tcp_push_one(struct sock *sk, unsigned int mss_now)
2886{
2887	struct sk_buff *skb = tcp_send_head(sk);
2888
2889	BUG_ON(!skb || skb->len < mss_now);
2890
2891	tcp_write_xmit(sk, mss_now, TCP_NAGLE_PUSH, 1, sk->sk_allocation);
2892}
2893
2894/* This function returns the amount that we can raise the
2895 * usable window based on the following constraints
2896 *
2897 * 1. The window can never be shrunk once it is offered (RFC 793)
2898 * 2. We limit memory per socket
2899 *
2900 * RFC 1122:
2901 * "the suggested [SWS] avoidance algorithm for the receiver is to keep
2902 *  RCV.NXT + RCV.WND fixed until:
2903 *  RCV.BUFF - RCV.USER - RCV.WND >= min(1/2 RCV.BUFF, MSS)"
2904 *
2905 * i.e. don't raise the right edge of the window until you can raise
2906 * it at least MSS bytes.
2907 *
2908 * Unfortunately, the recommended algorithm breaks header prediction,
2909 * since header prediction assumes th->window stays fixed.
2910 *
2911 * Strictly speaking, keeping th->window fixed violates the receiver
2912 * side SWS prevention criteria. The problem is that under this rule
2913 * a stream of single byte packets will cause the right side of the
2914 * window to always advance by a single byte.
2915 *
2916 * Of course, if the sender implements sender side SWS prevention
2917 * then this will not be a problem.
2918 *
2919 * BSD seems to make the following compromise:
2920 *
2921 *	If the free space is less than the 1/4 of the maximum
2922 *	space available and the free space is less than 1/2 mss,
2923 *	then set the window to 0.
2924 *	[ Actually, bsd uses MSS and 1/4 of maximal _window_ ]
2925 *	Otherwise, just prevent the window from shrinking
2926 *	and from being larger than the largest representable value.
2927 *
2928 * This prevents incremental opening of the window in the regime
2929 * where TCP is limited by the speed of the reader side taking
2930 * data out of the TCP receive queue. It does nothing about
2931 * those cases where the window is constrained on the sender side
2932 * because the pipeline is full.
2933 *
2934 * BSD also seems to "accidentally" limit itself to windows that are a
2935 * multiple of MSS, at least until the free space gets quite small.
2936 * This would appear to be a side effect of the mbuf implementation.
2937 * Combining these two algorithms results in the observed behavior
2938 * of having a fixed window size at almost all times.
2939 *
2940 * Below we obtain similar behavior by forcing the offered window to
2941 * a multiple of the mss when it is feasible to do so.
2942 *
2943 * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
2944 * Regular options like TIMESTAMP are taken into account.
2945 */
2946u32 __tcp_select_window(struct sock *sk)
2947{
2948	struct inet_connection_sock *icsk = inet_csk(sk);
2949	struct tcp_sock *tp = tcp_sk(sk);
2950	/* MSS for the peer's data.  Previous versions used mss_clamp
2951	 * here.  I don't know if the value based on our guesses
2952	 * of peer's MSS is better for the performance.  It's more correct
2953	 * but may be worse for the performance because of rcv_mss
2954	 * fluctuations.  --SAW  1998/11/1
2955	 */
2956	int mss = icsk->icsk_ack.rcv_mss;
2957	int free_space = tcp_space(sk);
2958	int allowed_space = tcp_full_space(sk);
2959	int full_space, window;
2960
2961	if (sk_is_mptcp(sk))
2962		mptcp_space(sk, &free_space, &allowed_space);
2963
2964	full_space = min_t(int, tp->window_clamp, allowed_space);
2965
2966	if (unlikely(mss > full_space)) {
2967		mss = full_space;
2968		if (mss <= 0)
2969			return 0;
2970	}
2971	if (free_space < (full_space >> 1)) {
2972		icsk->icsk_ack.quick = 0;
2973
2974		if (tcp_under_memory_pressure(sk))
2975			tcp_adjust_rcv_ssthresh(sk);
2976
2977		/* free_space might become our new window, make sure we don't
2978		 * increase it due to wscale.
2979		 */
2980		free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
2981
2982		/* If free space is less than the mss estimate, or is below 1/16th
2983		 * of the maximum allowed, try to move to zero window; else
2984		 * tcp_clamp_window() will grow the rcv buf up to tcp_rmem[2], and
2985		 * new incoming data will be dropped due to memory limits.
2986		 * With a large window, the mss test triggers way too late to
2987		 * announce a zero window before the rmem limit kicks in.
2988		 */
2989		if (free_space < (allowed_space >> 4) || free_space < mss)
2990			return 0;
2991	}
2992
2993	if (free_space > tp->rcv_ssthresh)
2994		free_space = tp->rcv_ssthresh;
2995
2996	/* Don't do rounding if we are using window scaling, since the
2997	 * scaled window will not line up with the MSS boundary anyway.
2998	 */
2999	if (tp->rx_opt.rcv_wscale) {
3000		window = free_space;
3001
3002		/* Advertise enough space so that it won't get scaled away.
3003		 * Important case: prevent zero window announcement if
3004		 * 1<<rcv_wscale > mss.
3005		 */
3006		window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale));
3007	} else {
3008		window = tp->rcv_wnd;
3009		/* Get the largest window that is a nice multiple of mss.
3010		 * Window clamp already applied above.
3011		 * If our current window offering is within 1 mss of the
3012		 * free space we just keep it. This prevents the divide
3013		 * and multiply from happening most of the time.
3014		 * We also don't do any window rounding when the free space
3015		 * is too small.
3016		 */
3017		if (window <= free_space - mss || window > free_space)
3018			window = rounddown(free_space, mss);
3019		else if (mss == full_space &&
3020			 free_space > window + (full_space >> 1))
3021			window = free_space;
3022	}
3023
3024	return window;
3025}
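/* Illustrative example (not from the source): with rcv_wscale = 7 the
 * advertised field is window >> 7, so the window is aligned to a
 * multiple of 128 bytes.  For free_space = 1000, ALIGN(1000, 128)
 * yields 1024 and a field value of 8; without the alignment, 1000 >> 7
 * would announce only 7 * 128 = 896 bytes, and any free_space below
 * 128 bytes would be announced as a zero window.
 */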
3026
3027void tcp_skb_collapse_tstamp(struct sk_buff *skb,
3028			     const struct sk_buff *next_skb)
3029{
3030	if (unlikely(tcp_has_tx_tstamp(next_skb))) {
3031		const struct skb_shared_info *next_shinfo =
3032			skb_shinfo(next_skb);
3033		struct skb_shared_info *shinfo = skb_shinfo(skb);
3034
3035		shinfo->tx_flags |= next_shinfo->tx_flags & SKBTX_ANY_TSTAMP;
3036		shinfo->tskey = next_shinfo->tskey;
3037		TCP_SKB_CB(skb)->txstamp_ack |=
3038			TCP_SKB_CB(next_skb)->txstamp_ack;
3039	}
3040}
3041
3042/* Collapses two adjacent SKB's during retransmission. */
3043static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
3044{
3045	struct tcp_sock *tp = tcp_sk(sk);
3046	struct sk_buff *next_skb = skb_rb_next(skb);
3047	int next_skb_size;
3048
3049	next_skb_size = next_skb->len;
3050
3051	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
3052
3053	if (next_skb_size && !tcp_skb_shift(skb, next_skb, 1, next_skb_size))
3054		return false;
3055
3056	tcp_highest_sack_replace(sk, next_skb, skb);
3057
3058	/* Update sequence range on original skb. */
3059	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
3060
3061	/* Merge over control information. This moves PSH/FIN etc. over */
3062	TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags;
3063
3064	/* All done, get rid of second SKB and account for it so
3065	 * packet counting does not break.
3066	 */
3067	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
3068	TCP_SKB_CB(skb)->eor = TCP_SKB_CB(next_skb)->eor;
3069
3070	/* changed transmit queue under us so clear hints */
3071	tcp_clear_retrans_hints_partial(tp);
3072	if (next_skb == tp->retransmit_skb_hint)
3073		tp->retransmit_skb_hint = skb;
3074
3075	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
3076
3077	tcp_skb_collapse_tstamp(skb, next_skb);
3078
3079	tcp_rtx_queue_unlink_and_free(next_skb, sk);
3080	return true;
3081}
3082
3083/* Check if coalescing SKBs is legal. */
3084static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
3085{
3086	if (tcp_skb_pcount(skb) > 1)
3087		return false;
3088	if (skb_cloned(skb))
3089		return false;
3090	/* Some heuristics for collapsing over SACK'd could be invented */
3091	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
3092		return false;
3093
3094	return true;
3095}
3096
3097/* Collapse packets in the retransmit queue to create fewer
3098 * packets on the wire. This is only done on retransmission.
3099 */
3100static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
3101				     int space)
3102{
3103	struct tcp_sock *tp = tcp_sk(sk);
3104	struct sk_buff *skb = to, *tmp;
3105	bool first = true;
3106
3107	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse))
3108		return;
3109	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
3110		return;
3111
3112	skb_rbtree_walk_from_safe(skb, tmp) {
3113		if (!tcp_can_collapse(sk, skb))
3114			break;
3115
3116		if (!tcp_skb_can_collapse(to, skb))
3117			break;
3118
3119		space -= skb->len;
3120
3121		if (first) {
3122			first = false;
3123			continue;
3124		}
3125
3126		if (space < 0)
3127			break;
3128
3129		if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
3130			break;
3131
3132		if (!tcp_collapse_retrans(sk, to))
3133			break;
3134	}
3135}
3136
3137/* This retransmits one SKB.  Policy decisions and retransmit queue
3138 * state updates are done by the caller.  Returns non-zero if an
3139 * error occurred which prevented the send.
3140 */
3141int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
3142{
3143	struct inet_connection_sock *icsk = inet_csk(sk);
3144	struct tcp_sock *tp = tcp_sk(sk);
3145	unsigned int cur_mss;
3146	int diff, len, err;
3147	int avail_wnd;
3148
3149	/* Inconclusive MTU probe */
3150	if (icsk->icsk_mtup.probe_size)
3151		icsk->icsk_mtup.probe_size = 0;
3152
3153	if (skb_still_in_host_queue(sk, skb))
3154		return -EBUSY;
3155
3156	if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
3157		if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
3158			WARN_ON_ONCE(1);
3159			return -EINVAL;
3160		}
3161		if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
3162			return -ENOMEM;
3163	}
3164
3165	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
3166		return -EHOSTUNREACH; /* Routing failure or similar. */
3167
3168	cur_mss = tcp_current_mss(sk);
3169	avail_wnd = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
3170
3171	/* If receiver has shrunk his window, and skb is out of
3172	 * new window, do not retransmit it. The exception is the
3173	 * case, when window is shrunk to zero. In this case
3174	 * our retransmit of one segment serves as a zero window probe.
3175	 */
3176	if (avail_wnd <= 0) {
3177		if (TCP_SKB_CB(skb)->seq != tp->snd_una)
3178			return -EAGAIN;
3179		avail_wnd = cur_mss;
3180	}
3181
3182	len = cur_mss * segs;
3183	if (len > avail_wnd) {
3184		len = rounddown(avail_wnd, cur_mss);
3185		if (!len)
3186			len = avail_wnd;
3187	}
3188	if (skb->len > len) {
3189		if (tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb, len,
3190				 cur_mss, GFP_ATOMIC))
3191			return -ENOMEM; /* We'll try again later. */
3192	} else {
3193		if (skb_unclone_keeptruesize(skb, GFP_ATOMIC))
3194			return -ENOMEM;
3195
3196		diff = tcp_skb_pcount(skb);
3197		tcp_set_skb_tso_segs(skb, cur_mss);
3198		diff -= tcp_skb_pcount(skb);
3199		if (diff)
3200			tcp_adjust_pcount(sk, skb, diff);
3201		avail_wnd = min_t(int, avail_wnd, cur_mss);
3202		if (skb->len < avail_wnd)
3203			tcp_retrans_try_collapse(sk, skb, avail_wnd);
3204	}
3205
3206	/* RFC3168, section 6.1.1.1. ECN fallback */
3207	if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
3208		tcp_ecn_clear_syn(sk, skb);
3209
3210	/* Update global and local TCP statistics. */
3211	segs = tcp_skb_pcount(skb);
3212	TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
3213	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
3214		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
3215	tp->total_retrans += segs;
3216	tp->bytes_retrans += skb->len;
3217
3218	/* make sure skb->data is aligned on arches that require it
3219	 * and check if ack-trimming & collapsing extended the headroom
3220	 * beyond what csum_start can cover.
3221	 */
3222	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
3223		     skb_headroom(skb) >= 0xFFFF)) {
3224		struct sk_buff *nskb;
3225
3226		tcp_skb_tsorted_save(skb) {
3227			nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
3228			if (nskb) {
3229				nskb->dev = NULL;
3230				err = tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC);
3231			} else {
3232				err = -ENOBUFS;
3233			}
3234		} tcp_skb_tsorted_restore(skb);
3235
3236		if (!err) {
3237			tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns);
3238			tcp_rate_skb_sent(sk, skb);
3239		}
3240	} else {
3241		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3242	}
3243
3244	/* To avoid taking spuriously low RTT samples based on a timestamp
3245	 * for a transmit that never happened, always mark EVER_RETRANS
3246	 */
3247	TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
3248
3249	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG))
3250		tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RETRANS_CB,
3251				  TCP_SKB_CB(skb)->seq, segs, err);
3252
3253	if (likely(!err)) {
3254		trace_tcp_retransmit_skb(sk, skb);
3255	} else if (err != -EBUSY) {
3256		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
3257	}
3258	return err;
3259}
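/* Illustrative example (not from the source) of the window fitting in
 * __tcp_retransmit_skb() above: with cur_mss = 1448 and segs = 4,
 * len = 5792; if avail_wnd = 5000, rounddown(5000, 1448) = 4344, so at
 * most three full segments are retransmitted.  If avail_wnd = 1000,
 * the rounddown is 0 and len falls back to 1000, a sub-MSS retransmit
 * that doubles as a window probe.
 */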
3260
3261int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
3262{
3263	struct tcp_sock *tp = tcp_sk(sk);
3264	int err = __tcp_retransmit_skb(sk, skb, segs);
3265
3266	if (err == 0) {
3267#if FASTRETRANS_DEBUG > 0
3268		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
3269			net_dbg_ratelimited("retrans_out leaked\n");
3270		}
3271#endif
3272		TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
3273		tp->retrans_out += tcp_skb_pcount(skb);
3274	}
3275
3276	/* Save stamp of the first (attempted) retransmit. */
3277	if (!tp->retrans_stamp)
3278		tp->retrans_stamp = tcp_skb_timestamp(skb);
3279
3280	if (tp->undo_retrans < 0)
3281		tp->undo_retrans = 0;
3282	tp->undo_retrans += tcp_skb_pcount(skb);
3283	return err;
3284}
3285
3286/* This gets called after a retransmit timeout, and the initially
3287 * retransmitted data is acknowledged.  It tries to continue
3288 * resending the rest of the retransmit queue, until either
3289 * we've sent it all or the congestion window limit is reached.
3290 */
3291void tcp_xmit_retransmit_queue(struct sock *sk)
3292{
3293	const struct inet_connection_sock *icsk = inet_csk(sk);
3294	struct sk_buff *skb, *rtx_head, *hole = NULL;
3295	struct tcp_sock *tp = tcp_sk(sk);
3296	bool rearm_timer = false;
3297	u32 max_segs;
3298	int mib_idx;
3299
3300	if (!tp->packets_out)
3301		return;
3302
3303	rtx_head = tcp_rtx_queue_head(sk);
3304	skb = tp->retransmit_skb_hint ?: rtx_head;
3305	max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
3306	skb_rbtree_walk_from(skb) {
3307		__u8 sacked;
3308		int segs;
3309
3310		if (tcp_pacing_check(sk))
3311			break;
3312
3313		/* we could do better than to assign each time */
3314		if (!hole)
3315			tp->retransmit_skb_hint = skb;
3316
3317		segs = tcp_snd_cwnd(tp) - tcp_packets_in_flight(tp);
3318		if (segs <= 0)
3319			break;
3320		sacked = TCP_SKB_CB(skb)->sacked;
3321		/* In case tcp_shift_skb_data() has aggregated large skbs,
3322		 * we need to make sure we are not sending too-big TSO packets
3323		 */
3324		segs = min_t(int, segs, max_segs);
3325
3326		if (tp->retrans_out >= tp->lost_out) {
3327			break;
3328		} else if (!(sacked & TCPCB_LOST)) {
3329			if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
3330				hole = skb;
3331			continue;
3332
3333		} else {
3334			if (icsk->icsk_ca_state != TCP_CA_Loss)
3335				mib_idx = LINUX_MIB_TCPFASTRETRANS;
3336			else
3337				mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
3338		}
3339
3340		if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
3341			continue;
3342
3343		if (tcp_small_queue_check(sk, skb, 1))
3344			break;
3345
3346		if (tcp_retransmit_skb(sk, skb, segs))
3347			break;
3348
3349		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
3350
3351		if (tcp_in_cwnd_reduction(sk))
3352			tp->prr_out += tcp_skb_pcount(skb);
3353
3354		if (skb == rtx_head &&
3355		    icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
3356			rearm_timer = true;
3357
3358	}
3359	if (rearm_timer)
3360		tcp_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
3361				     inet_csk(sk)->icsk_rto,
3362				     TCP_RTO_MAX);
3363}
3364
3365/* We allow exceeding the memory limits for FIN packets to expedite
3366 * connection tear down and (memory) recovery.
3367 * Otherwise tcp_send_fin() could be tempted to delay the FIN,
3368 * or even be forced to close the flow without any FIN.
3369 * In general, we want to allow one skb per socket to avoid hangs
3370 * with edge-triggered epoll()
3371 */
3372void sk_forced_mem_schedule(struct sock *sk, int size)
3373{
3374	int delta, amt;
3375
3376	delta = size - sk->sk_forward_alloc;
3377	if (delta <= 0)
3378		return;
3379	amt = sk_mem_pages(delta);
3380	sk->sk_forward_alloc += amt << PAGE_SHIFT;
3381	sk_memory_allocated_add(sk, amt);
3382
3383	if (mem_cgroup_sockets_enabled && sk->sk_memcg)
3384		mem_cgroup_charge_skmem(sk->sk_memcg, amt,
3385					gfp_memcg_charge() | __GFP_NOFAIL);
3386}
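/* Illustrative example (not from the source): with sk_forward_alloc = 0
 * and size = 2304, delta = 2304 and sk_mem_pages() rounds up to one
 * page, so (on a 4 KiB page system) forward_alloc grows by 4096 bytes
 * and one page is charged, __GFP_NOFAIL ensuring the FIN skb is never
 * refused by the memcg accounting.
 */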
3387
3388/* Send a FIN. The caller locks the socket for us.
3389 * We should try to send a FIN packet really hard, but eventually give up.
3390 */
3391void tcp_send_fin(struct sock *sk)
3392{
3393	struct sk_buff *skb, *tskb, *tail = tcp_write_queue_tail(sk);
3394	struct tcp_sock *tp = tcp_sk(sk);
3395
3396	/* Optimization, tack on the FIN if we have one skb in write queue and
3397	 * this skb was not yet sent, or we are under memory pressure.
3398	 * Note: in the latter case, FIN packet will be sent after a timeout,
3399	 * as TCP stack thinks it has already been transmitted.
3400	 */
3401	tskb = tail;
3402	if (!tskb && tcp_under_memory_pressure(sk))
3403		tskb = skb_rb_last(&sk->tcp_rtx_queue);
3404
3405	if (tskb) {
3406		TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
3407		TCP_SKB_CB(tskb)->end_seq++;
3408		tp->write_seq++;
3409		if (!tail) {
3410			/* This means tskb was already sent.
3411			 * Pretend we included the FIN on previous transmit.
3412			 * We need to set tp->snd_nxt to the value it would have
3413			 * if FIN had been sent. This is because retransmit path
3414			 * does not change tp->snd_nxt.
3415			 */
3416			WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1);
3417			return;
3418		}
3419	} else {
3420		skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
3421		if (unlikely(!skb))
3422			return;
3423
3424		INIT_LIST_HEAD(&skb->tcp_tsorted_anchor);
3425		skb_reserve(skb, MAX_TCP_HEADER);
3426		sk_forced_mem_schedule(sk, skb->truesize);
3427		/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
3428		tcp_init_nondata_skb(skb, tp->write_seq,
3429				     TCPHDR_ACK | TCPHDR_FIN);
3430		tcp_queue_skb(sk, skb);
3431	}
3432	__tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
3433}
3434
3435/* We get here when a process closes a file descriptor (either due to
3436 * an explicit close() or as a byproduct of exit()'ing) and there
3437 * was unread data in the receive queue.  This behavior is recommended
3438 * by RFC 2525, section 2.17.  -DaveM
3439 */
3440void tcp_send_active_reset(struct sock *sk, gfp_t priority)
3441{
3442	struct sk_buff *skb;
3443
3444	TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
3445
3446	/* NOTE: No TCP options attached and we never retransmit this. */
3447	skb = alloc_skb(MAX_TCP_HEADER, priority);
3448	if (!skb) {
3449		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3450		return;
3451	}
3452
3453	/* Reserve space for headers and prepare control bits. */
3454	skb_reserve(skb, MAX_TCP_HEADER);
3455	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
3456			     TCPHDR_ACK | TCPHDR_RST);
3457	tcp_mstamp_refresh(tcp_sk(sk));
3458	/* Send it off. */
3459	if (tcp_transmit_skb(sk, skb, 0, priority))
3460		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
3461
3462	/* The skb argument of trace_tcp_send_reset() carries the skb that
3463	 * caused the RST; the skb here is different from that one, so pass NULL.
3464	 */
3465	trace_tcp_send_reset(sk, NULL);
3466}
3467
3468/* Send a crossed SYN-ACK during socket establishment.
3469 * WARNING: This routine must only be called when we have already sent
3470 * a SYN packet that crossed the incoming SYN that caused this routine
3471 * to get called. If this assumption fails then the initial rcv_wnd
3472 * and rcv_wscale values will not be correct.
3473 */
3474int tcp_send_synack(struct sock *sk)
3475{
3476	struct sk_buff *skb;
3477
3478	skb = tcp_rtx_queue_head(sk);
3479	if (!skb || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
3480		pr_err("%s: wrong queue state\n", __func__);
3481		return -EFAULT;
3482	}
3483	if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) {
3484		if (skb_cloned(skb)) {
3485			struct sk_buff *nskb;
3486
3487			tcp_skb_tsorted_save(skb) {
3488				nskb = skb_copy(skb, GFP_ATOMIC);
3489			} tcp_skb_tsorted_restore(skb);
3490			if (!nskb)
3491				return -ENOMEM;
3492			INIT_LIST_HEAD(&nskb->tcp_tsorted_anchor);
3493			tcp_highest_sack_replace(sk, skb, nskb);
3494			tcp_rtx_queue_unlink_and_free(skb, sk);
3495			__skb_header_release(nskb);
3496			tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
3497			sk_wmem_queued_add(sk, nskb->truesize);
3498			sk_mem_charge(sk, nskb->truesize);
3499			skb = nskb;
3500		}
3501
3502		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
3503		tcp_ecn_send_synack(sk, skb);
3504	}
3505	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
3506}
3507
3508/**
3509 * tcp_make_synack - Allocate one skb and build a SYNACK packet.
3510 * @sk: listener socket
3511 * @dst: dst entry attached to the SYNACK. It is consumed and caller
3512 *       should not use it again.
3513 * @req: request_sock pointer
3514 * @foc: cookie for tcp fast open
3515 * @synack_type: Type of synack to prepare
3516 * @syn_skb: SYN packet just received.  It could be NULL for rtx case.
3517 */
3518struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
3519				struct request_sock *req,
3520				struct tcp_fastopen_cookie *foc,
3521				enum tcp_synack_type synack_type,
3522				struct sk_buff *syn_skb)
3523{
3524	struct inet_request_sock *ireq = inet_rsk(req);
3525	const struct tcp_sock *tp = tcp_sk(sk);
3526	struct tcp_md5sig_key *md5 = NULL;
3527	struct tcp_out_options opts;
3528	struct sk_buff *skb;
3529	int tcp_header_size;
3530	struct tcphdr *th;
3531	int mss;
3532	u64 now;
3533
3534	skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
3535	if (unlikely(!skb)) {
3536		dst_release(dst);
3537		return NULL;
3538	}
3539	/* Reserve space for headers. */
3540	skb_reserve(skb, MAX_TCP_HEADER);
3541
3542	switch (synack_type) {
3543	case TCP_SYNACK_NORMAL:
3544		skb_set_owner_w(skb, req_to_sk(req));
3545		break;
3546	case TCP_SYNACK_COOKIE:
3547		/* Under synflood, we do not attach skb to a socket,
3548		 * to avoid false sharing.
3549		 */
3550		break;
3551	case TCP_SYNACK_FASTOPEN:
3552		/* sk is a const pointer, because we want to express that
3553		 * multiple CPUs might call us concurrently.
3554		 * sk->sk_wmem_alloc is an atomic, so we can promote to rw.
3555		 */
3556		skb_set_owner_w(skb, (struct sock *)sk);
3557		break;
3558	}
3559	skb_dst_set(skb, dst);
3560
3561	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3562
3563	memset(&opts, 0, sizeof(opts));
3564	now = tcp_clock_ns();
3565#ifdef CONFIG_SYN_COOKIES
3566	if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok))
3567		skb_set_delivery_time(skb, cookie_init_timestamp(req, now),
3568				      true);
3569	else
3570#endif
3571	{
3572		skb_set_delivery_time(skb, now, true);
3573		if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */
3574			tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
3575	}
3576
3577#ifdef CONFIG_TCP_MD5SIG
3578	rcu_read_lock();
3579	md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
3580#endif
3581	skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
3582	/* bpf program will be interested in the tcp_flags */
3583	TCP_SKB_CB(skb)->tcp_flags = TCPHDR_SYN | TCPHDR_ACK;
3584	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
3585					     foc, synack_type,
3586					     syn_skb) + sizeof(*th);
3587
3588	skb_push(skb, tcp_header_size);
3589	skb_reset_transport_header(skb);
3590
3591	th = (struct tcphdr *)skb->data;
3592	memset(th, 0, sizeof(struct tcphdr));
3593	th->syn = 1;
3594	th->ack = 1;
3595	tcp_ecn_make_synack(req, th);
3596	th->source = htons(ireq->ir_num);
3597	th->dest = ireq->ir_rmt_port;
3598	skb->mark = ireq->ir_mark;
3599	skb->ip_summed = CHECKSUM_PARTIAL;
3600	th->seq = htonl(tcp_rsk(req)->snt_isn);
3601	/* XXX data is queued and acked as is. No buffer/window check */
3602	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
3603
3604	/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
3605	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
3606	tcp_options_write(th, NULL, &opts);
3607	th->doff = (tcp_header_size >> 2);
3608	__TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
3609
3610#ifdef CONFIG_TCP_MD5SIG
3611	/* Okay, we have all we need - do the md5 hash if needed */
3612	if (md5)
3613		tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
3614					       md5, req_to_sk(req), skb);
3615	rcu_read_unlock();
3616#endif
3617
3618	bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb,
3619				synack_type, &opts);
3620
3621	skb_set_delivery_time(skb, now, true);
3622	tcp_add_tx_delay(skb, tp);
3623
3624	return skb;
3625}
3626EXPORT_SYMBOL(tcp_make_synack);
3627
3628static void tcp_ca_dst_init(struct sock *sk, const struct dst_entry *dst)
3629{
3630	struct inet_connection_sock *icsk = inet_csk(sk);
3631	const struct tcp_congestion_ops *ca;
3632	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
3633
3634	if (ca_key == TCP_CA_UNSPEC)
3635		return;
3636
3637	rcu_read_lock();
3638	ca = tcp_ca_find_key(ca_key);
3639	if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
3640		bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner);
3641		icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
3642		icsk->icsk_ca_ops = ca;
3643	}
3644	rcu_read_unlock();
3645}
3646
3647/* Do all connect socket setups that can be done AF independent. */
3648static void tcp_connect_init(struct sock *sk)
3649{
3650	const struct dst_entry *dst = __sk_dst_get(sk);
3651	struct tcp_sock *tp = tcp_sk(sk);
3652	__u8 rcv_wscale;
3653	u32 rcv_wnd;
3654
3655	/* We'll fix this up when we get a response from the other end.
3656	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
3657	 */
3658	tp->tcp_header_len = sizeof(struct tcphdr);
3659	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps))
3660		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;
3661
3662#ifdef CONFIG_TCP_MD5SIG
3663	if (tp->af_specific->md5_lookup(sk, sk))
3664		tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
3665#endif
3666
3667	/* If the user set TCP_MAXSEG, record it as the clamp */
3668	if (tp->rx_opt.user_mss)
3669		tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;
3670	tp->max_window = 0;
3671	tcp_mtup_init(sk);
3672	tcp_sync_mss(sk, dst_mtu(dst));
3673
3674	tcp_ca_dst_init(sk, dst);
3675
3676	if (!tp->window_clamp)
3677		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
3678	tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
3679
3680	tcp_initialize_rcv_mss(sk);
3681
3682	/* limit the window selection if the user enforces a smaller rx buffer */
3683	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
3684	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
3685		tp->window_clamp = tcp_full_space(sk);
3686
3687	rcv_wnd = tcp_rwnd_init_bpf(sk);
3688	if (rcv_wnd == 0)
3689		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
3690
3691	tcp_select_initial_window(sk, tcp_full_space(sk),
3692				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
3693				  &tp->rcv_wnd,
3694				  &tp->window_clamp,
3695				  READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling),
3696				  &rcv_wscale,
3697				  rcv_wnd);
3698
3699	tp->rx_opt.rcv_wscale = rcv_wscale;
3700	tp->rcv_ssthresh = tp->rcv_wnd;
3701
3702	sk->sk_err = 0;
3703	sock_reset_flag(sk, SOCK_DONE);
3704	tp->snd_wnd = 0;
3705	tcp_init_wl(tp, 0);
3706	tcp_write_queue_purge(sk);
3707	tp->snd_una = tp->write_seq;
3708	tp->snd_sml = tp->write_seq;
3709	tp->snd_up = tp->write_seq;
3710	WRITE_ONCE(tp->snd_nxt, tp->write_seq);
3711
3712	if (likely(!tp->repair))
3713		tp->rcv_nxt = 0;
3714	else
3715		tp->rcv_tstamp = tcp_jiffies32;
3716	tp->rcv_wup = tp->rcv_nxt;
3717	WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
3718
3719	inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
3720	inet_csk(sk)->icsk_retransmits = 0;
3721	tcp_clear_retrans(tp);
3722}
3723
3724static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
3725{
3726	struct tcp_sock *tp = tcp_sk(sk);
3727	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
3728
3729	tcb->end_seq += skb->len;
3730	__skb_header_release(skb);
3731	sk_wmem_queued_add(sk, skb->truesize);
3732	sk_mem_charge(sk, skb->truesize);
3733	WRITE_ONCE(tp->write_seq, tcb->end_seq);
3734	tp->packets_out += tcp_skb_pcount(skb);
3735}
3736
3737/* Build and send a SYN with data and (cached) Fast Open cookie. However,
3738 * queue a data-only packet after the regular SYN, such that regular SYNs
3739 * are retransmitted on timeouts. Also if the remote SYN-ACK acknowledges
3740 * only the SYN sequence, the data are retransmitted in the first ACK.
3741 * If cookie is not cached or other error occurs, falls back to send a
3742 * regular SYN with Fast Open cookie request option.
3743 */
3744static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
3745{
3746	struct inet_connection_sock *icsk = inet_csk(sk);
3747	struct tcp_sock *tp = tcp_sk(sk);
3748	struct tcp_fastopen_request *fo = tp->fastopen_req;
3749	int space, err = 0;
3750	struct sk_buff *syn_data;
3751
3752	tp->rx_opt.mss_clamp = tp->advmss;  /* If MSS is not cached */
3753	if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
3754		goto fallback;
3755
3756	/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
3757	 * user-MSS. Reserve maximum option space for middleboxes that add
3758	 * private TCP options. The cost is reduced data space in SYN :(
3759	 */
3760	tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp);
3761	/* Sync mss_cache after updating the mss_clamp */
3762	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
3763
3764	space = __tcp_mtu_to_mss(sk, icsk->icsk_pmtu_cookie) -
3765		MAX_TCP_OPTION_SPACE;
3766
3767	space = min_t(size_t, space, fo->size);
3768
3769	/* limit to order-0 allocations */
3770	space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));

	syn_data = tcp_stream_alloc_skb(sk, space, sk->sk_allocation, false);
	if (!syn_data)
		goto fallback;
	memcpy(syn_data->cb, syn->cb, sizeof(syn->cb));
	if (space) {
		int copied = copy_from_iter(skb_put(syn_data, space), space,
					    &fo->data->msg_iter);
		if (unlikely(!copied)) {
			tcp_skb_tsorted_anchor_cleanup(syn_data);
			kfree_skb(syn_data);
			goto fallback;
		}
		if (copied != space) {
			skb_trim(syn_data, copied);
			space = copied;
		}
		skb_zcopy_set(syn_data, fo->uarg, NULL);
	}
	/* No more data pending in inet_wait_for_connect() */
	if (space == fo->size)
		fo->data = NULL;
	fo->copied = space;

	tcp_connect_queue_skb(sk, syn_data);
	if (syn_data->len)
		tcp_chrono_start(sk, TCP_CHRONO_BUSY);

	err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);

	skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, true);

	/* The full SYN+DATA has now been cloned and sent (or not). Remove
	 * the SYN from the original skb (syn_data) that we keep queued for
	 * a possible retransmit, since the data-less SYN packet is also
	 * kept queued.
	 */
	TCP_SKB_CB(syn_data)->seq++;
	TCP_SKB_CB(syn_data)->tcp_flags = TCPHDR_ACK | TCPHDR_PSH;
	if (!err) {
		tp->syn_data = (fo->copied > 0);
		tcp_rbtree_insert(&sk->tcp_rtx_queue, syn_data);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
		goto done;
	}

	/* Data was not sent; put it on the write queue instead. */
	__skb_queue_tail(&sk->sk_write_queue, syn_data);
	tp->packets_out -= tcp_skb_pcount(syn_data);

fallback:
	/* Send a regular SYN with Fast Open cookie request option */
	if (fo->cookie.len > 0)
		fo->cookie.len = 0;
	err = tcp_transmit_skb(sk, syn, 1, sk->sk_allocation);
	if (err)
		tp->syn_fastopen = 0;
done:
	fo->cookie.len = -1;  /* Exclude Fast Open option for SYN retries */
	return err;
}

/* Build a SYN and send it off. */
int tcp_connect(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int err;

	tcp_call_bpf(sk, BPF_SOCK_OPS_TCP_CONNECT_CB, 0, NULL);

	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
		return -EHOSTUNREACH; /* Routing failure or similar. */

	tcp_connect_init(sk);

	if (unlikely(tp->repair)) {
		tcp_finish_connect(sk, NULL);
		return 0;
	}

	buff = tcp_stream_alloc_skb(sk, 0, sk->sk_allocation, true);
	if (unlikely(!buff))
		return -ENOBUFS;

	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
	tcp_mstamp_refresh(tp);
	tp->retrans_stamp = tcp_time_stamp(tp);
	tcp_connect_queue_skb(sk, buff);
	tcp_ecn_send_syn(sk, buff);
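	/* Keep the pure SYN on the retransmit queue so the retransmit timer
	 * can find it; a Fast Open data skb, if any, is handled separately
	 * by tcp_send_syn_data().
	 */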
	tcp_rbtree_insert(&sk->tcp_rtx_queue, buff);

	/* Send off SYN; include data in Fast Open. */
	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
	      tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
	if (err == -ECONNREFUSED)
		return err;

	/* We change tp->snd_nxt after the tcp_transmit_skb() call
	 * in order to make this packet get counted in tcpOutSegs.
	 */
	WRITE_ONCE(tp->snd_nxt, tp->write_seq);
	tp->pushed_seq = tp->write_seq;
	buff = tcp_send_head(sk);
	if (unlikely(buff)) {
		WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);
		tp->pushed_seq = TCP_SKB_CB(buff)->seq;
	}
	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the SYN until an answer arrives. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
	return 0;
}
EXPORT_SYMBOL(tcp_connect);

/* Send out a delayed ack; the caller does the policy checking
 * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
 * for details.
 */
void tcp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int ato = icsk->icsk_ack.ato;
	unsigned long timeout;

	if (ato > TCP_DELACK_MIN) {
		const struct tcp_sock *tp = tcp_sk(sk);
		int max_ato = HZ / 2;

		if (inet_csk_in_pingpong_mode(sk) ||
		    (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
			max_ato = TCP_DELACK_MAX;

		/* Slow path, intersegment interval is "high". */

		/* If some rtt estimate is known, use it to bound the delayed
		 * ack. Do not use inet_csk(sk)->icsk_rto here; use the results
		 * of rtt measurements directly.
		 */
		if (tp->srtt_us) {
			int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
					TCP_DELACK_MIN);

			if (rtt < max_ato)
				max_ato = rtt;
		}

		ato = min(ato, max_ato);
	}

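	/* Apply the per-socket delayed-ack ceiling; icsk_delack_max defaults
	 * to TCP_DELACK_MAX but may have been lowered (e.g. via a BPF
	 * socket option).
	 */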
	ato = min_t(u32, ato, inet_csk(sk)->icsk_delack_max);

	/* Stay within the limit we were given */
	timeout = jiffies + ato;

	/* Use the new timeout only if there wasn't an older one already pending. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If the delack timer is about to expire (within a quarter
		 * of the new ato), send the ACK now.
		 */
		if (time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
			tcp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}

/* This routine sends an ack and also updates the window. */
void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
{
	struct sk_buff *buff;

	/* If we have been reset, we may not send again. */
	if (sk->sk_state == TCP_CLOSE)
		return;

	/* We are not putting this on the write queue, so
	 * tcp_transmit_skb() will set the ownership to this
	 * sock.
	 */
	buff = alloc_skb(MAX_TCP_HEADER,
			 sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
	if (unlikely(!buff)) {
		struct inet_connection_sock *icsk = inet_csk(sk);
		unsigned long delay;

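		/* Allocation failed: fall back to a delayed ACK, backing off
		 * exponentially (TCP_DELACK_MAX << retry) up to TCP_RTO_MAX.
		 */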
		delay = TCP_DELACK_MAX << icsk->icsk_ack.retry;
		if (delay < TCP_RTO_MAX)
			icsk->icsk_ack.retry++;
		inet_csk_schedule_ack(sk);
		icsk->icsk_ack.ato = TCP_ATO_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, delay, TCP_RTO_MAX);
		return;
	}

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(buff, MAX_TCP_HEADER);
	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);

	/* We do not want pure acks influencing TCP Small Queues or fq/pacing
	 * too much.
	 * SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784
	 */
	skb_set_tcp_pure_ack(buff);

	/* Send it off; this clears delayed acks for us. */
	__tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
}
EXPORT_SYMBOL_GPL(__tcp_send_ack);

void tcp_send_ack(struct sock *sk)
{
	__tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
}

/* This routine sends a packet with an out-of-date sequence
 * number. It assumes the other end will try to ack it.
 *
 * Question: what should we send while in urgent mode?
 * 4.4BSD forces sending a single byte of data. We cannot send
 * out-of-window data, because we have SND.NXT == SND.MAX...
 *
 * Current solution: send TWO zero-length segments in urgent mode:
 * one with SEG.SEQ=SND.UNA to deliver the urgent pointer, and another,
 * out-of-date one with SEG.SEQ=SND.UNA-1 to probe the window.
 */
static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	/* We don't queue it, tcp_transmit_skb() sets ownership. */
	skb = alloc_skb(MAX_TCP_HEADER,
			sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
	if (!skb)
		return -1;

	/* Reserve space for headers and set control bits. */
	skb_reserve(skb, MAX_TCP_HEADER);
	/* Use a previous sequence.  This should cause the other
	 * end to send an ack.  Don't queue or clone SKB, just
	 * send it.
	 */
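	/* A window probe (urgent == 0) uses SND.UNA-1: already-ACKed
	 * sequence space that elicits a duplicate ACK carrying the current
	 * window. An urgent-mode probe uses SND.UNA itself so the urgent
	 * pointer can be delivered.
	 */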
	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
	NET_INC_STATS(sock_net(sk), mib);
	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
}

/* Called from setsockopt( ... TCP_REPAIR ) */
void tcp_send_window_probe(struct sock *sk)
{
	if (sk->sk_state == TCP_ESTABLISHED) {
		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
		tcp_mstamp_refresh(tcp_sk(sk));
		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
	}
}

/* Initiate keepalive or window probe from timer. */
int tcp_write_wakeup(struct sock *sk, int mib)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;

	if (sk->sk_state == TCP_CLOSE)
		return -1;

	skb = tcp_send_head(sk);
	if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
		int err;
		unsigned int mss = tcp_current_mss(sk);
		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;

		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))
			tp->pushed_seq = TCP_SKB_CB(skb)->end_seq;

		/* We are probing the opening of a window, but the window
		 * size is != 0; this must have been the result of
		 * (sender-side) SWS avoidance.
		 */
		if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq ||
		    skb->len > mss) {
			seg_size = min(seg_size, mss);
			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
			if (tcp_fragment(sk, TCP_FRAG_IN_WRITE_QUEUE,
					 skb, seg_size, mss, GFP_ATOMIC))
				return -1;
		} else if (!tcp_skb_pcount(skb))
			tcp_set_skb_tso_segs(skb, mss);

		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
		if (!err)
			tcp_event_new_data_sent(sk, skb);
		return err;
	} else {
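		/* Nothing sendable within the window. If urgent data is
		 * outstanding (within the 16-bit urgent offset), also send a
		 * probe at SND.UNA so the urgent pointer is (re)delivered.
		 */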
		if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
			tcp_xmit_probe_skb(sk, 1, mib);
		return tcp_xmit_probe_skb(sk, 0, mib);
	}
}

/* A window probe timeout has occurred.  If the window is not closed,
 * send a partial packet; otherwise send a zero-window probe.
 */
void tcp_send_probe0(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	unsigned long timeout;
	int err;

	err = tcp_write_wakeup(sk, LINUX_MIB_TCPWINPROBE);

	if (tp->packets_out || tcp_write_queue_empty(sk)) {
		/* Cancel probe timer, if it is not required. */
		icsk->icsk_probes_out = 0;
		icsk->icsk_backoff = 0;
		icsk->icsk_probes_tstamp = 0;
		return;
	}

	icsk->icsk_probes_out++;
	if (err <= 0) {
		if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2))
			icsk->icsk_backoff++;
		timeout = tcp_probe0_when(sk, TCP_RTO_MAX);
	} else {
		/* If the packet was not sent due to local congestion,
		 * let senders fight for local resources conservatively:
		 * retry after a short, fixed interval.
		 */
		timeout = TCP_RESOURCE_PROBE_INTERVAL;
	}

	timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout);
	tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX);
}

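/* Retransmit a SYN-ACK for a pending request_sock and update the
 * retransmission statistics.
 */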
int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
{
	const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
	struct flowi fl;
	int res;

	/* Paired with WRITE_ONCE() in sock_setsockopt() */
	if (READ_ONCE(sk->sk_txrehash) == SOCK_TXREHASH_ENABLED)
		tcp_rsk(req)->txhash = net_tx_rndhash();
	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL,
				  NULL);
	if (!res) {
		TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
		if (unlikely(tcp_passive_fastopen(sk)))
			tcp_sk(sk)->total_retrans++;
		trace_tcp_retransmit_synack(sk, req);
	}
	return res;
}
EXPORT_SYMBOL(tcp_rtx_synack);